author	Jani Nikula <jani.nikula@intel.com>	2020-06-25 18:05:03 +0300
committer	Jani Nikula <jani.nikula@intel.com>	2020-06-25 18:05:03 +0300
commit	0f69403d2535ffc7200a8414cf3ca66a49b0d741 (patch)
tree	3ce85dd08359ea872aa8fb9bd12072efdb80a787 /drivers/gpu
parent	580fbdc5136822208f107500682e50a1cb232e94 (diff)
parent	0a19b068acc47d05212f03e494381926dc0381e2 (diff)
Merge drm/drm-next into drm-intel-next-queued
Catch up with upstream, in particular to get c1e8d7c6a7a6 ("mmap locking API: convert mmap_sem comments").

Signed-off-by: Jani Nikula <jani.nikula@intel.com>
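[Context, not part of this commit: a minimal sketch of what the mmap locking API conversion means for callers. Earlier code took the rwsem in mm->mmap_sem directly; the new API goes through wrapper functions, and c1e8d7c6a7a6 itself only updates comments to match. The walk_vmas() helper below is hypothetical, written against the v5.8-era VMA list.]

	#include <linux/mm.h>
	#include <linux/mmap_lock.h>

	/* Hypothetical helper: walk a task's VMAs under the mmap lock. */
	static void walk_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			;		/* inspect each vma under the lock */
		mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */
	}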
Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig4
-rw-r--r--drivers/gpu/drm/Makefile7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c71
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c151
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c444
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c185
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h (renamed from drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h)18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c75
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c60
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c732
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c447
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h56
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c169
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c408
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c103
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c120
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c142
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atom.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cikd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_virtual.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c2907
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c69
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c211
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c4
-rwxr-xr-x[-rw-r--r--]drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c97
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c302
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nvd.h49
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c145
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c419
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c262
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15_common.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15d.h26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_1.c112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c471
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c26
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vid.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c55
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_crat.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c40
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c47
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_iommu.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h28
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c62
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c11
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h6
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig8
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c636
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c27
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c73
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c42
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c103
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c62
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c644
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_sink.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h100
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.c225
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c299
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c319
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h (renamed from drivers/gpu/drm/amd/display/dc/basics/log_helpers.c)25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c205
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c97
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c323
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c197
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c89
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h21
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h67
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/transform.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h (renamed from drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h)23
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h14
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h15
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h6
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h11
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c26
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c33
-rw-r--r--drivers/gpu/drm/amd/display/include/hdcp_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_interface.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c37
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c101
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c5
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h30
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c482
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_shared.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c103
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h3
-rw-r--r--drivers/gpu/drm/amd/display/modules/stats/stats.c448
-rw-r--r--drivers/gpu/drm/amd/display/modules/vmid/vmid.c7
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h14
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h4
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h27
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h30
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h33
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h114
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c336
-rw-r--r--drivers/gpu/drm/amd/powerplay/arcturus_ppt.c49
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c184
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c61
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c140
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c40
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c157
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c71
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c141
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c149
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c181
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h11
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h40
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_types.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/navi10_ppt.c107
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_internal.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c151
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v12_0.c16
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c35
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c31
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c42
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c48
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c69
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c15
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c56
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c68
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c106
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c4
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c19
-rw-r--r--drivers/gpu/drm/arm/Kconfig4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c6
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c27
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c22
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c48
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c8
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fb.c4
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c6
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c10
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx.h3
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c2
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c33
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c2
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c13
-rw-r--r--drivers/gpu/drm/ast/ast_main.c8
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c26
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c11
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c12
-rw-r--r--drivers/gpu/drm/bochs/bochs.h1
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c6
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c16
-rw-r--r--drivers/gpu/drm/bridge/Kconfig42
-rw-r--r--drivers/gpu/drm/bridge/Makefile2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig2
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_audio.c26
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix-anx6345.c3
-rw-r--r--drivers/gpu/drm/bridge/chrontel-ch7033.c620
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c1213
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.h144
-rw-r--r--drivers/gpu/drm/bridge/panel.c7
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c2
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c2
-rw-r--r--drivers/gpu/drm/bridge/sii9234.c3
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c2
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi.c86
-rw-r--r--drivers/gpu/drm/bridge/tc358768.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c323
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig19
-rw-r--r--drivers/gpu/drm/cirrus/Makefile2
-rw-r--r--drivers/gpu/drm/drm_atomic.c8
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_auth.c107
-rw-r--r--drivers/gpu/drm/drm_blend.c16
-rw-r--r--drivers/gpu/drm/drm_bufs.c2
-rw-r--r--drivers/gpu/drm/drm_client.c43
-rw-r--r--drivers/gpu/drm/drm_client_modeset.c42
-rw-r--r--drivers/gpu/drm/drm_connector.c40
-rw-r--r--drivers/gpu/drm/drm_crtc.c27
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h4
-rw-r--r--drivers/gpu/drm/drm_debugfs.c53
-rw-r--r--drivers/gpu/drm/drm_dma.c2
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c501
-rw-r--r--drivers/gpu/drm/drm_drv.c230
-rw-r--r--drivers/gpu/drm/drm_edid.c444
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c15
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c35
-rw-r--r--drivers/gpu/drm/drm_file.c19
-rw-r--r--drivers/gpu/drm/drm_format_helper.c61
-rw-r--r--drivers/gpu/drm/drm_framebuffer.c8
-rw-r--r--drivers/gpu/drm/drm_gem.c107
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c20
-rw-r--r--drivers/gpu/drm/drm_gem_framebuffer_helper.c230
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c172
-rw-r--r--drivers/gpu/drm/drm_gem_ttm_helper.c2
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c134
-rw-r--r--drivers/gpu/drm/drm_hdcp.c8
-rw-r--r--drivers/gpu/drm/drm_internal.h7
-rw-r--r--drivers/gpu/drm/drm_ioc32.c4
-rw-r--r--drivers/gpu/drm/drm_ioctl.c17
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/drm_managed.c277
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c35
-rw-r--r--drivers/gpu/drm/drm_mm.c131
-rw-r--r--drivers/gpu/drm/drm_mode_config.c110
-rw-r--r--drivers/gpu/drm/drm_mode_object.c10
-rw-r--r--drivers/gpu/drm/drm_modes.c66
-rw-r--r--drivers/gpu/drm/drm_pci.c4
-rw-r--r--drivers/gpu/drm/drm_plane.c9
-rw-r--r--drivers/gpu/drm/drm_prime.c6
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c5
-rw-r--r--drivers/gpu/drm/drm_scatter.c11
-rw-r--r--drivers/gpu/drm/drm_sysfs.c3
-rw-r--r--drivers/gpu/drm/drm_vblank.c309
-rw-r--r--drivers/gpu/drm/drm_vm.c7
-rw-r--r--drivers/gpu/drm/drm_vram_helper_common.c94
-rw-r--r--drivers/gpu/drm/drm_writeback.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c28
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c8
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c28
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c186
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c34
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c8
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c11
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c14
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c14
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c47
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c100
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c18
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_dpi.c11
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c31
-rw-r--r--drivers/gpu/drm/gma500/mdfld_output.h1
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tmd_vid.c6
-rw-r--r--drivers/gpu/drm/gma500/mdfld_tpo_vid.c6
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c19
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c5
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c2
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h20
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c18
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c110
-rw-r--r--drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c13
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c8
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c2
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c13
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c43
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h1
-rw-r--r--drivers/gpu/drm/i2c/ch7006_mode.c1
-rw-r--r--drivers/gpu/drm/i2c/sil164_drv.c7
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c14
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c44
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c49
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c48
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.h4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c33
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c14
-rw-r--r--drivers/gpu/drm/i915/i915_mm.c1
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c2
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c18
-rw-r--r--drivers/gpu/drm/i915/i915_query.c62
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c32
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c8
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c22
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h1
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c8
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c8
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c8
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c63
-rw-r--r--drivers/gpu/drm/lima/Kconfig2
-rw-r--r--drivers/gpu/drm/lima/Makefile4
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c25
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h2
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c3
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.h5
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.c257
-rw-r--r--drivers/gpu/drm/lima/lima_devfreq.h44
-rw-r--r--drivers/gpu/drm/lima/lima_device.c228
-rw-r--r--drivers/gpu/drm/lima/lima_device.h17
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.c17
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.h2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c141
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_dump.h77
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c10
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c21
-rw-r--r--drivers/gpu/drm/lima/lima_gp.h2
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.c38
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.h2
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c49
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h2
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.c77
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.h2
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c31
-rw-r--r--drivers/gpu/drm/lima/lima_pp.h4
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c195
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h11
-rw-r--r--drivers/gpu/drm/lima/lima_trace.c7
-rw-r--r--drivers/gpu/drm/lima/lima_trace.h50
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h3
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c10
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h2
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c64
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c16
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_color.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_ovl.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_disp_rdma.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c57
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.c259
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp.h7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c58
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.h2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_gem.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c22
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c24
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c54
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.h4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c28
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c53
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h6
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c6
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c2
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c16
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h3
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c2
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig4
-rw-r--r--drivers/gpu/drm/mgag200/Makefile3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_cursor.c319
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c206
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h69
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_i2c.c10
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c211
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c127
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c968
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_reg.h11
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c70
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c16
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c83
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c22
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c13
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx.xml.h14
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c418
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h37
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h48
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c72
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c123
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.h50
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c35
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c23
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c95
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c41
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c48
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c26
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c129
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h100
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c58
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h10
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c20
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c80
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c6
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c35
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c2
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c23
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.h2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c14
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h15
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c55
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h1
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c234
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c42
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c51
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/msm_gpummu.c10
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c22
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h5
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c11
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/overlay.c21
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/base507c.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core.h7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core507d.c15
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core827d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core907d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/core917d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec37d.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/corec57d.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c158
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.h5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc37d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/headc57d.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/pior507d.c8
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor507d.c7
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sor907d.c11
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/sorc37d.c9
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c86
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c17
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl5070.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c98
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c53
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.h8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c218
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c450
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.h5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c31
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_encoder.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c20
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c175
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.h5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/memory.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c60
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c (renamed from drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c)27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c73
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c37
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c93
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c36
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c13
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c16
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c65
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Kconfig2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c33
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c43
-rw-r--r--drivers/gpu/drm/omapdrm/omap_debugfs.c29
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c4
-rw-r--r--drivers/gpu/drm/panel/Kconfig29
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c4
-rw-r--r--drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c366
-rw-r--r--drivers/gpu/drm/panel/panel-boe-himax8279d.c3
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c36
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c3
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c3
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c3
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c11
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c3
-rw-r--r--drivers/gpu/drm/panel/panel-innolux-p079zca.c4
-rw-r--r--drivers/gpu/drm/panel/panel-jdi-lt070me05000.c3
-rw-r--r--drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c3
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c691
-rw-r--r--drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c5
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lb035q02.c1
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c3
-rw-r--r--drivers/gpu/drm/panel/panel-nec-nl8048hl11.c1
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c1
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt39016.c46
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c3
-rw-r--r--drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c3
-rw-r--r--drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c3
-rw-r--r--drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c4
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm67191.c3
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c3
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c5
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c1
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c4
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c1
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c3
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c3
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c1
-rw-r--r--drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c3
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c369
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7701.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sitronix-st7789v.c3
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c2
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx565akm.c1
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c1
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td043mtea1.c1
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c5
-rw-r--r--drivers/gpu/drm/panel/panel-truly-nt35597.c3
-rw-r--r--drivers/gpu/drm/panel/panel-visionox-rm69299.c302
-rw-r--r--drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c8
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c4
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c4
-rw-r--r--drivers/gpu/drm/pl111/Makefile1
-rw-r--r--drivers/gpu/drm/pl111/pl111_debugfs.c8
-rw-r--r--drivers/gpu/drm/pl111/pl111_drm.h2
-rw-r--r--drivers/gpu/drm/pl111/pl111_drv.c13
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c148
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.c138
-rw-r--r--drivers/gpu/drm/pl111/pl111_vexpress.h29
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_debugfs.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c44
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c25
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h20
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c23
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c15
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c6
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c8
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c3
-rw-r--r--drivers/gpu/drm/radeon/Makefile35
-rw-r--r--drivers/gpu/drm/radeon/atom.c3
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c2
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c18
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c14
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c16
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c14
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c9
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c13
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-reg.c6
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c8
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c43
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c143
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h20
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c10
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c8
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c92
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c2
-rw-r--r--drivers/gpu/drm/selftests/drm_mm_selftests.h1
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c126
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c14
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c13
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_kms.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c6
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h4
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c2
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c14
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c28
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c13
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c7
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c14
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c13
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c12
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c10
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c13
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c8
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/stm/drv.c21
-rw-r--r--drivers/gpu/drm/stm/ltdc.c102
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c17
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c124
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h10
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_ui_layer.c14
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c40
-rw-r--r--drivers/gpu/drm/tegra/dc.c11
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c2
-rw-r--r--drivers/gpu/drm/tegra/drm.c23
-rw-r--r--drivers/gpu/drm/tegra/drm.h4
-rw-r--r--drivers/gpu/drm/tegra/dsi.c21
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c21
-rw-r--r--drivers/gpu/drm/tegra/output.c6
-rw-r--r--drivers/gpu/drm/tegra/rgb.c8
-rw-r--r--drivers/gpu/drm/tegra/sor.c20
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c32
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c11
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h6
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c27
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h4
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c14
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c12
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c21
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h1
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c30
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c33
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c10
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c33
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c3
-rw-r--r--drivers/gpu/drm/tiny/Kconfig19
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/cirrus.c (renamed from drivers/gpu/drm/cirrus/cirrus.c)82
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c242
-rw-r--r--drivers/gpu/drm/tiny/hx8357d.c18
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c18
-rw-r--r--drivers/gpu/drm/tiny/ili9341.c18
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c18
-rw-r--r--drivers/gpu/drm/tiny/mi0283qt.c18
-rw-r--r--drivers/gpu/drm/tiny/repaper.c30
-rw-r--r--drivers/gpu/drm/tiny/st7586.c18
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c18
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c56
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c10
-rw-r--r--drivers/gpu/drm/tve200/tve200_drv.c12
-rw-r--r--drivers/gpu/drm/udl/Makefile2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c4
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c49
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h5
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c106
-rw-r--r--drivers/gpu/drm/udl/udl_main.c10
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c31
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c6
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c20
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c53
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h9
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c21
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c18
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c10
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c32
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c2
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c29
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c10
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c12
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c82
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c8
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h28
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c41
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c8
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c21
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h37
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c22
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c18
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c21
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h5
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c11
-rw-r--r--drivers/gpu/drm/vkms/vkms_output.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_blit.c17
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c8
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c11
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c8
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c8
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c8
-rw-r--r--drivers/gpu/host1x/dev.c59
1018 files changed, 28137 insertions, 14933 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 43594978958e..c4fd57d8b717 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -161,7 +161,7 @@ config DRM_LOAD_EDID_FIRMWARE
monitor are unable to provide appropriate EDID data. Since this
feature is provided as a workaround for broken hardware, the
default case is N. Details and instructions how to build your own
- EDID data are given in Documentation/driver-api/edid.rst.
+ EDID data are given in Documentation/admin-guide/edid.rst.
config DRM_DP_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
@@ -310,8 +310,6 @@ source "drivers/gpu/drm/ast/Kconfig"
source "drivers/gpu/drm/mgag200/Kconfig"
-source "drivers/gpu/drm/cirrus/Kconfig"
-
source "drivers/gpu/drm/armada/Kconfig"
source "drivers/gpu/drm/atmel-hlcdc/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 7f72ef5e7811..2c0e5a7e5953 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -17,7 +17,8 @@ drm-y := drm_auth.o drm_cache.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
- drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o
+ drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
+ drm_managed.o
drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
@@ -32,8 +33,7 @@ drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
-drm_vram_helper-y := drm_gem_vram_helper.o \
- drm_vram_helper_common.o
+drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
@@ -74,7 +74,6 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_MGAG200) += mgag200/
obj-$(CONFIG_DRM_V3D) += v3d/
obj-$(CONFIG_DRM_VC4) += vc4/
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index c2bbcdd9c875..210d57a4afc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -55,7 +55,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
- amdgpu_umc.o smu_v11_0_i2c.o
+ amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2992a49ad4a5..cd913986863e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -28,6 +28,18 @@
#ifndef __AMDGPU_H__
#define __AMDGPU_H__
+#ifdef pr_fmt
+#undef pr_fmt
+#endif
+
+#define pr_fmt(fmt) "amdgpu: " fmt
+
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+
+#define dev_fmt(fmt) "amdgpu: " fmt
+
#include "amdgpu_ctx.h"
#include <linux/atomic.h>
@@ -161,6 +173,7 @@ extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_dc_debug_mask;
extern uint amdgpu_dm_abm_level;
extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
@@ -177,6 +190,8 @@ extern int sched_policy;
static const int sched_policy = KFD_SCHED_POLICY_HWS;
#endif
+extern int amdgpu_tmz;
+
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
@@ -190,8 +205,6 @@ extern int amdgpu_cik_support;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
-/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
-#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16
@@ -439,7 +452,9 @@ struct amdgpu_fpriv {
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv);
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib);
+ unsigned size,
+ enum amdgpu_ib_pool_type pool,
+ struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
@@ -512,7 +527,7 @@ static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
/*
* Writeback
*/
-#define AMDGPU_MAX_WB 128 /* Reserve at most 128 WB slots for amdgpu-owned rings. */
+#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */
struct amdgpu_wb {
struct amdgpu_bo *wb_obj;
@@ -724,6 +739,7 @@ struct amdgpu_device {
uint32_t rev_id;
uint32_t external_rev_id;
unsigned long flags;
+ unsigned long apu_flags;
int usec_timeout;
const struct amdgpu_asic_funcs *asic_funcs;
bool shutdown;
@@ -751,7 +767,6 @@ struct amdgpu_device {
uint8_t *bios;
uint32_t bios_size;
struct amdgpu_bo *stolen_vga_memory;
- struct amdgpu_bo *discovery_memory;
uint32_t bios_scratch_reg_offset;
uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
@@ -843,7 +858,8 @@ struct amdgpu_device {
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
- struct amdgpu_sa_manager ring_tmp_bo;
+ struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX];
+ struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX];
/* interrupts */
struct amdgpu_irq irq;
@@ -903,7 +919,9 @@ struct amdgpu_device {
struct amdgpu_display_manager dm;
/* discovery */
- uint8_t *discovery;
+ uint8_t *discovery_bin;
+ uint32_t discovery_tmr_size;
+ struct amdgpu_bo *discovery_memory;
/* mes */
bool enable_mes;
@@ -923,7 +941,7 @@ struct amdgpu_device {
atomic64_t gart_pin_size;
/* soc15 register offset based on ip, instance and segment */
- uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
+ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
/* delayed work_func for deferring clockgating during resume */
struct delayed_work delayed_init_work;
@@ -935,9 +953,6 @@ struct amdgpu_device {
/* link all shadow bo */
struct list_head shadow_list;
struct mutex shadow_list_lock;
- /* keep an lru list of rings by HW IP */
- struct list_head ring_lru_list;
- spinlock_t ring_lru_list_lock;
/* record hw reset is performed */
bool has_hw_reset;
@@ -945,9 +960,8 @@ struct amdgpu_device {
/* s3/s4 mask */
bool in_suspend;
+ bool in_hibernate;
- /* record last mm index being written through WREG32*/
- unsigned long last_mm_index;
bool in_gpu_reset;
enum pp_mp1_state mp1_state;
struct mutex lock_reset;
@@ -966,14 +980,19 @@ struct amdgpu_device {
uint64_t unique_id;
uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS];
- /* device pstate */
- int pstate;
/* enable runtime pm on the device */
bool runpm;
bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
+
+ /* Chip product information */
+ char product_number[16];
+ char product_name[32];
+ char serial[16];
+
+ struct amdgpu_autodump autodump;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -990,10 +1009,10 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags);
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
@@ -1010,25 +1029,20 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
/*
* Registers read & write functions.
*/
-
-#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)
-#define AMDGPU_REGS_KIQ (1<<2)
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
-#define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ)
-#define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ)
+#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
+#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
-#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1065,7 +1079,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
tmp_ |= ((val) & ~(mask)); \
WREG32_PLL(reg, tmp_); \
} while (0)
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
@@ -1248,5 +1262,9 @@ _name##_show(struct device *dev, \
\
static struct device_attribute pmu_attr_##_name = __ATTR_RO(_name)
-#endif
+static inline bool amdgpu_is_tmz(struct amdgpu_device *adev)
+{
+ return adev->gmc.tmz_enabled;
+}
+#endif
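
The pr_fmt/dev_fmt defines added at the top of amdgpu.h prefix every pr_*() and dev_*() message from the driver with "amdgpu: ", which is why pr_err() strings later in this series drop their hand-written "amdgpu: " prefixes. A minimal standalone sketch of the mechanism (not amdgpu code; the function name is illustrative):

#define pr_fmt(fmt) "amdgpu: " fmt	/* must be defined before printk.h */
#include <linux/printk.h>

static void example(void)
{
	/* expands to printk(KERN_ERR "amdgpu: failed to validate PT BOs\n") */
	pr_err("failed to validate PT BOs\n");
}
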
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 1e41367ef74e..956cbbda4793 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -444,7 +444,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- /* todo: add DC handling */
if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
!amdgpu_device_has_dc_support(adev)) {
struct amdgpu_encoder *enc = atif->encoder_for_bl;
@@ -463,6 +462,27 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
#endif
}
}
+#if defined(CONFIG_DRM_AMD_DC)
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct backlight_device *bd = dm->backlight_dev;
+
+ if (bd) {
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
+
+ /*
+ * XXX backlight_device_set_brightness() is
+ * hardwired to post BACKLIGHT_UPDATE_SYSFS.
+ * It probably should accept 'reason' parameter.
+ */
+ backlight_device_set_brightness(bd, req.backlight_level);
+ }
+ }
+#endif
+#endif
if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
if (adev->flags & AMD_IS_PX) {
pm_runtime_get_sync(adev->ddev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index abfbe89e805e..ad59ac4423b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -564,6 +564,13 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd)
return adev->gds.gws_size;
}
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->rev_id;
+}
+
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 13feb313e9b3..ffe149aafc39 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
#include <linux/mm.h>
+#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
@@ -65,6 +66,7 @@ struct kgd_mem {
struct amdgpu_sync sync;
bool aql_queue;
+ bool is_imported;
};
/* KFD Memory Eviction */
@@ -148,6 +150,9 @@ int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit);
+
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -175,13 +180,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
+uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
* this is called in hqd_load functions, so it should never fault in
* the first place. This resolves a circular lock dependency involving
- * four locks, including the DQM lock and mmap_sem.
+ * four locks, including the DQM lock and mmap_lock.
*/
#define read_user_wptr(mmptr, wptr, dst) \
({ \
@@ -190,10 +196,10 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
pagefault_disable(); \
if ((mmptr) == current->mm) { \
valid = !get_user((dst), (wptr)); \
- } else if (current->mm == NULL) { \
- use_mm(mmptr); \
+ } else if (current->flags & PF_KTHREAD) { \
+ kthread_use_mm(mmptr); \
valid = !get_user((dst), (wptr)); \
- unuse_mm(mmptr); \
+ kthread_unuse_mm(mmptr); \
} \
pagefault_enable(); \
} \
@@ -218,7 +224,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
void *vm, struct kgd_mem **mem,
uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem);
+ struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
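
The read_user_wptr() change above swaps the removed use_mm()/unuse_mm() pair for kthread_use_mm()/kthread_unuse_mm() and gates the borrow on PF_KTHREAD rather than current->mm == NULL. A function-form sketch of what the macro body does (read_wptr is an illustrative name, not amdgpu API):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

static bool read_wptr(struct mm_struct *mm, u32 __user *wptr, u32 *dst)
{
	bool valid = false;

	pagefault_disable();		/* the page must already be pinned */
	if (mm == current->mm) {
		/* same address space: a direct get_user() works */
		valid = !get_user(*dst, wptr);
	} else if (current->flags & PF_KTHREAD) {
		kthread_use_mm(mm);	/* kernel thread adopts the user mm */
		valid = !get_user(*dst, wptr);
		kthread_unuse_mm(mm);
	}
	pagefault_enable();

	return valid;
}
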
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 6529caca88fe..35d4a5ab0228 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -22,7 +22,6 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/mmu_context.h>
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index 4ec6d0c03201..bf927f432506 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -19,7 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_1_0_offset.h"
@@ -543,6 +542,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v10_compute_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
#if 0
unsigned long flags;
int retry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 0b7e78748540..744366c7ee85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
@@ -237,7 +235,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index ccd635b812b5..feab4cc6e836 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -20,8 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gfx_v8_0.h"
@@ -224,7 +222,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);
- /* read_user_ptr may take the mm->mmap_sem.
+ /* read_user_ptr may take the mm->mmap_lock.
* release srbm_mutex to avoid circular dependency between
* srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index df841c2ac5e7..c7fd0c47b254 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -19,8 +19,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <linux/mmu_context.h>
-
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 9dff792c9290..9015c7b76d60 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -362,13 +362,13 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
ret = amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_amdkfd_validate,
&param);
if (ret) {
- pr_err("amdgpu: failed to validate PT BOs\n");
+ pr_err("failed to validate PT BOs\n");
return ret;
}
ret = amdgpu_amdkfd_validate(&param, pd);
if (ret) {
- pr_err("amdgpu: failed to validate PD\n");
+ pr_err("failed to validate PD\n");
return ret;
}
@@ -377,7 +377,7 @@ static int vm_validate_pt_pd_bos(struct amdgpu_vm *vm)
if (vm->use_cpu_for_update) {
ret = amdgpu_bo_kmap(pd, NULL);
if (ret) {
- pr_err("amdgpu: failed to kmap PD, ret=%d\n", ret);
+ pr_err("failed to kmap PD, ret=%d\n", ret);
return ret;
}
}
@@ -660,15 +660,15 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
- if (!ret)
- ctx->reserved = true;
- else {
- pr_err("Failed to reserve buffers in ttm\n");
+ if (ret) {
+ pr_err("Failed to reserve buffers in ttm.\n");
kfree(ctx->vm_pd);
ctx->vm_pd = NULL;
+ return ret;
}
- return ret;
+ ctx->reserved = true;
+ return 0;
}
/**
@@ -733,17 +733,15 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
false, &ctx->duplicates);
- if (!ret)
- ctx->reserved = true;
- else
- pr_err("Failed to reserve buffers in ttm.\n");
-
if (ret) {
+ pr_err("Failed to reserve buffers in ttm.\n");
kfree(ctx->vm_pd);
ctx->vm_pd = NULL;
+ return ret;
}
- return ret;
+ ctx->reserved = true;
+ return 0;
}
/**
@@ -1279,31 +1277,30 @@ err:
}
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
- struct kgd_dev *kgd, struct kgd_mem *mem)
+ struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size)
{
struct amdkfd_process_info *process_info = mem->process_info;
unsigned long bo_size = mem->bo->tbo.mem.size;
struct kfd_bo_va_list *entry, *tmp;
struct bo_vm_reservation_context ctx;
struct ttm_validate_buffer *bo_list_entry;
+ unsigned int mapped_to_gpu_memory;
int ret;
+ bool is_imported = false;
mutex_lock(&mem->lock);
-
- if (mem->mapped_to_gpu_memory > 0) {
- pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
- mem->va, bo_size);
- mutex_unlock(&mem->lock);
- return -EBUSY;
- }
-
+ mapped_to_gpu_memory = mem->mapped_to_gpu_memory;
+ is_imported = mem->is_imported;
mutex_unlock(&mem->lock);
/* lock is not needed after this, since mem is unused and will
* be freed anyway
*/
- /* No more MMU notifiers */
- amdgpu_mn_unregister(mem->bo);
+ if (mapped_to_gpu_memory > 0) {
+ pr_debug("BO VA 0x%llx size 0x%lx is still mapped.\n",
+ mem->va, bo_size);
+ return -EBUSY;
+ }
/* Make sure restore workers don't access the BO any more */
bo_list_entry = &mem->validate_list;
@@ -1311,6 +1308,9 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
list_del(&bo_list_entry->head);
mutex_unlock(&process_info->lock);
+ /* No more MMU notifiers */
+ amdgpu_mn_unregister(mem->bo);
+
ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx);
if (unlikely(ret))
return ret;
@@ -1342,8 +1342,19 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
kfree(mem->bo->tbo.sg);
}
+ /* Update the size of the BO being freed if it was allocated from
+ * VRAM and is not imported.
+ */
+ if (size) {
+ if ((mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM) &&
+ (!is_imported))
+ *size = bo_size;
+ else
+ *size = 0;
+ }
+
/* Free the BO*/
- amdgpu_bo_unref(&mem->bo);
+ drm_gem_object_put(&mem->bo->tbo.base);
mutex_destroy(&mem->lock);
kfree(mem);
@@ -1382,9 +1393,9 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
* concurrently and the queues are actually stopped
*/
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
is_invalid_userptr = atomic_read(&mem->invalid);
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
}
mutex_lock(&mem->lock);
@@ -1688,7 +1699,8 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
| KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
- (*mem)->bo = amdgpu_bo_ref(bo);
+ drm_gem_object_get(&bo->tbo.base);
+ (*mem)->bo = bo;
(*mem)->va = va;
(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
@@ -1696,6 +1708,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
(*mem)->process_info = avm->process_info;
add_kgd_mem_to_kfd_bo_list(*mem, avm->process_info, false);
amdgpu_sync_create(&(*mem)->sync);
+ (*mem)->is_imported = true;
return 0;
}
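
Two related hunks above change how the KFD wrapper holds its buffer object: the import path now takes a GEM-level reference with drm_gem_object_get(&bo->tbo.base), and the free path drops it with drm_gem_object_put(), replacing the amdgpu_bo_ref()/amdgpu_bo_unref() pair. The pattern in isolation (a sketch; the helper names are hypothetical, kgd_mem is as declared in amdgpu_amdkfd.h):

#include <drm/drm_gem.h>

static void kfd_mem_adopt_bo(struct kgd_mem *mem, struct amdgpu_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);	/* +1 on the GEM object */
	mem->bo = bo;
	mem->is_imported = true;	/* excluded from VRAM size accounting */
}

static void kfd_mem_release_bo(struct kgd_mem *mem)
{
	drm_gem_object_put(&mem->bo->tbo.base);	/* -1; last ref frees the BO */
	mem->bo = NULL;
}
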
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index d1495e1c9289..d9b35df33806 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -40,7 +40,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
for (i = 0; i < n; i++) {
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence,
- false, false);
+ false, false, false);
if (r)
goto exit_do_move;
r = dma_fence_wait(fence, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 85b0515c0fdc..4053597b3af2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -102,7 +102,7 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
}
bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
if (usermm) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 031b094607bd..78ac6dbe70d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -60,8 +60,6 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return RREG32_IDX(index);
case CGS_IND_REG__PCIE:
return RREG32_PCIE(index);
case CGS_IND_REG__SMC:
@@ -77,6 +75,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return 0;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
return 0;
@@ -88,8 +88,6 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
{
CGS_FUNC_ADEV;
switch (space) {
- case CGS_IND_REG__MMIO:
- return WREG32_IDX(index, value);
case CGS_IND_REG__PCIE:
return WREG32_PCIE(index, value);
case CGS_IND_REG__SMC:
@@ -105,6 +103,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
case CGS_IND_REG__AUDIO_ENDPT:
DRM_ERROR("audio endpt register access not implemented.\n");
return;
+ default:
+ BUG();
}
WARN(1, "Invalid indirect register space");
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index af91627b19b0..a25fb59c127c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -57,7 +57,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
/* One for TTM and one for the CS job */
p->uf_entry.tv.num_shared = 2;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
size = amdgpu_bo_size(bo);
if (size != PAGE_SIZE || (data->offset + 8) > size) {
@@ -924,7 +924,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ring = to_amdgpu_ring(entity->rq->sched);
r = amdgpu_ib_get(adev, vm, ring->funcs->parse_cs ?
- chunk_ib->ib_bytes : 0, ib);
+ chunk_ib->ib_bytes : 0,
+ AMDGPU_IB_POOL_DELAYED, ib);
if (r) {
DRM_ERROR("Failed to get ib !\n");
return r;
@@ -1207,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
- enum drm_sched_priority priority;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1257,7 +1257,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 6ed36a2c5f73..8842c55d4490 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_sched.h"
#include "amdgpu_ras.h"
+#include <linux/nospec.h>
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
@@ -72,13 +73,30 @@ static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sch
}
}
-static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
+static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
+ enum drm_sched_priority prio,
+ u32 hw_ip)
+{
+ unsigned int hw_prio;
+
+ hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
+ amdgpu_ctx_sched_prio_to_compute_prio(prio) :
+ AMDGPU_RING_PRIO_DEFAULT;
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+
+ return hw_prio;
+}
+
+static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
+ const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
- enum gfx_pipe_priority hw_prio;
+ unsigned int hw_prio;
enum drm_sched_priority priority;
int r;
@@ -90,52 +108,16 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
entity->sequence = 1;
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
- switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
- scheds = adev->gfx.compute_prio_sched[hw_prio];
- num_scheds = adev->gfx.num_compute_sched[hw_prio];
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
- adev->vcn.num_vcn_dec_sched);
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
- adev->vcn.num_vcn_enc_sched);
+ hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+
+ hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+
+ if (hw_ip == AMDGPU_HW_IP_VCN_ENC || hw_ip == AMDGPU_HW_IP_VCN_DEC) {
+ sched = drm_sched_pick_best(scheds, num_scheds);
scheds = &sched;
num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -178,7 +160,6 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
return 0;
-
}
static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
@@ -525,7 +506,7 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
struct amdgpu_device *adev = ctx->adev;
- enum gfx_pipe_priority hw_prio;
+ unsigned int hw_prio;
struct drm_gpu_scheduler **scheds = NULL;
unsigned num_scheds;
@@ -534,9 +515,11 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
/* set hw priority */
if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
- hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
- scheds = adev->gfx.compute_prio_sched[hw_prio];
- num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
+ AMDGPU_HW_IP_COMPUTE);
+ hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
+ scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
+ num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
drm_sched_entity_modify_sched(&aentity->entity, scheds,
num_scheds);
}
@@ -665,78 +648,3 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
-
-
-static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
-{
- int num_compute_sched_normal = 0;
- int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
- int i;
-
- /* use one drm sched array, gfx.compute_sched to store both high and
- * normal priority drm compute schedulers */
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- if (!adev->gfx.compute_ring[i].has_high_prio)
- adev->gfx.compute_sched[num_compute_sched_normal++] =
- &adev->gfx.compute_ring[i].sched;
- else
- adev->gfx.compute_sched[num_compute_sched_high--] =
- &adev->gfx.compute_ring[i].sched;
- }
-
- /* compute ring only has two priority for now */
- i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
- adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
- adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
-
- i = AMDGPU_GFX_PIPE_PRIO_HIGH;
- if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
- /* When compute has no high priority rings then use */
- /* normal priority sched array */
- adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
- adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
- } else {
- adev->gfx.compute_prio_sched[i] =
- &adev->gfx.compute_sched[num_compute_sched_high - 1];
- adev->gfx.num_compute_sched[i] =
- adev->gfx.num_compute_rings - num_compute_sched_normal;
- }
-}
-
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
-{
- int i, j;
-
- amdgpu_ctx_init_compute_sched(adev);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
- adev->gfx.num_gfx_sched++;
- }
-
- for (i = 0; i < adev->sdma.num_instances; i++) {
- adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
- adev->sdma.num_sdma_sched++;
- }
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] =
- &adev->vcn.inst[i].ring_dec.sched;
- }
-
- for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- for (j = 0; j < adev->vcn.num_enc_rings; ++j)
- adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] =
- &adev->vcn.inst[i].ring_enc[j].sched;
- }
-
- for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
- if (adev->jpeg.harvest_config & (1 << i))
- continue;
- adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] =
- &adev->jpeg.inst[i].ring_dec.sched;
- }
-}
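
The rewritten amdgpu_ctx_init_entity() replaces the per-IP switch with a single lookup into the new adev->gpu_sched[hw_ip][hw_prio] table, sanitizing the userspace-supplied hw_ip with array_index_nospec() so it cannot serve as a Spectre-v1 gadget. The lookup pattern in isolation (a sketch using identifiers from the hunks above; ctx_pick_scheds is an illustrative name):

#include <linux/nospec.h>

static struct drm_gpu_scheduler **ctx_pick_scheds(struct amdgpu_device *adev,
						  u32 hw_ip, unsigned int hw_prio,
						  unsigned int *num_scheds)
{
	/* clamp the user-controlled index even under speculative execution */
	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);

	*num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
	return adev->gpu_sched[hw_ip][hw_prio].sched;
}
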
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index de490f183af2..f54e10314661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -88,7 +88,4 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_init_sched(struct amdgpu_device *adev);
-
-
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index c0f9a651dc06..d33cb344be69 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -27,7 +27,7 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-
+#include <linux/poll.h>
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
@@ -74,8 +74,82 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
return 0;
}
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned long timeout = 600 * HZ;
+ int ret;
+
+ wake_up_interruptible(&adev->autodump.gpu_hang);
+
+ ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
+ if (ret == 0) {
+ pr_err("autodump: timeout, move on to gpu recovery\n");
+ return -ETIMEDOUT;
+ }
+#endif
+ return 0;
+}
+
#if defined(CONFIG_DEBUG_FS)
+static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_device *adev = inode->i_private;
+ int ret;
+
+ file->private_data = adev;
+
+ mutex_lock(&adev->lock_reset);
+ if (adev->autodump.dumping.done) {
+ reinit_completion(&adev->autodump.dumping);
+ ret = 0;
+ } else {
+ ret = -EBUSY;
+ }
+ mutex_unlock(&adev->lock_reset);
+
+ return ret;
+}
+
+static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_device *adev = file->private_data;
+
+ complete_all(&adev->autodump.dumping);
+ return 0;
+}
+
+static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
+{
+ struct amdgpu_device *adev = file->private_data;
+
+ poll_wait(file, &adev->autodump.gpu_hang, poll_table);
+
+ if (adev->in_gpu_reset)
+ return POLLIN | POLLRDNORM | POLLWRNORM;
+
+ return 0;
+}
+
+static const struct file_operations autodump_debug_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_debugfs_autodump_open,
+ .poll = amdgpu_debugfs_autodump_poll,
+ .release = amdgpu_debugfs_autodump_release,
+};
+
+static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
+{
+ init_completion(&adev->autodump.dumping);
+ complete_all(&adev->autodump.dumping);
+ init_waitqueue_head(&adev->autodump.gpu_hang);
+
+ debugfs_create_file("amdgpu_autodump", 0600,
+ adev->ddev->primary->debugfs_root,
+ adev, &autodump_debug_fops);
+}
+
/**
* amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
*
@@ -152,11 +226,16 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
(se_bank != 0xFFFFFFFF && se_bank >= adev->gfx.config.max_shader_engines)) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
}
mutex_lock(&adev->grbm_idx_mutex);
@@ -207,6 +286,7 @@ end:
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -255,6 +335,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -263,6 +347,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -275,6 +360,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -304,6 +390,10 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -311,6 +401,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -325,6 +416,7 @@ static ssize_t amdgpu_debugfs_regs_pcie_write(struct file *f, const char __user
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -354,6 +446,10 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -362,6 +458,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -374,6 +471,7 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -403,6 +501,10 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -410,6 +512,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -424,6 +527,7 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -453,6 +557,10 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -461,6 +569,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -473,6 +582,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -502,6 +612,10 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
while (size) {
uint32_t value;
@@ -509,6 +623,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
if (r) {
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
}
@@ -523,6 +638,7 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -651,16 +767,24 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
- if (r)
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
+ }
- if (size > valuesize)
+ if (size > valuesize) {
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
+ }
outsize = 0;
x = 0;
@@ -673,6 +797,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
}
}
+ amdgpu_virt_disable_access_debugfs(adev);
return !r ? outsize : r;
}
@@ -720,6 +845,10 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -734,16 +863,20 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
pm_runtime_mark_last_busy(adev->ddev->dev);
pm_runtime_put_autosuspend(adev->ddev->dev);
- if (!x)
+ if (!x) {
+ amdgpu_virt_disable_access_debugfs(adev);
return -EINVAL;
+ }
while (size && (offset < x * 4)) {
uint32_t value;
value = data[offset >> 2];
r = put_user(value, (uint32_t *)buf);
- if (r)
+ if (r) {
+ amdgpu_virt_disable_access_debugfs(adev);
return r;
+ }
result += 4;
buf += 4;
@@ -751,6 +884,7 @@ static ssize_t amdgpu_debugfs_wave_read(struct file *f, char __user *buf,
size -= 4;
}
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -805,6 +939,10 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
if (r < 0)
return r;
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0)
+ return r;
+
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
amdgpu_gfx_select_se_sh(adev, se, sh, cu);
@@ -840,6 +978,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
err:
kfree(data);
+ amdgpu_virt_disable_access_debugfs(adev);
return result;
}
@@ -1369,6 +1508,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
amdgpu_ras_debugfs_create_all(adev);
+ amdgpu_debugfs_autodump_init(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
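
The amdgpu_autodump file added above gives userspace a window to dump GPU state before recovery: open() re-arms the completion (or returns -EBUSY if a dumper is already attached), poll() wakes when amdgpu_debugfs_wait_dump() signals a hang, and releasing the fd completes adev->autodump.dumping so the reset can proceed; otherwise the 600-second timeout fires. A hypothetical userspace consumer (the debugfs path is an assumption based on the file name):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = open("/sys/kernel/debug/dri/0/amdgpu_autodump", O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		fprintf(stderr, "GPU hang detected, dumping state...\n");

	/* ... collect whatever state is needed here ... */

	close(pfd.fd);	/* signals the driver that dumping is done */
	return 0;
}
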
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index de12d1101526..2803884d338d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -31,6 +31,11 @@ struct amdgpu_debugfs {
unsigned num_files;
};
+struct amdgpu_autodump {
+ struct completion dumping;
+ struct wait_queue_head gpu_hang;
+};
+
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
void amdgpu_debugfs_fini(struct amdgpu_device *adev);
@@ -40,3 +45,4 @@ int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
int amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
int amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
+int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 559dc24ef436..a027a8f7b281 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -64,9 +64,11 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
+#include "amdgpu_fru_eeprom.h"
#include <linux/suspend.h>
#include <drm/task_barrier.h>
+#include <linux/pm_runtime.h>
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -138,6 +140,72 @@ static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
/**
+ * DOC: product_name
+ *
+ * The amdgpu driver provides a sysfs API for reporting the product name
+ * for the device.
+ * The file product_name is used for this and returns the product name
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_name(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
+}
+
+static DEVICE_ATTR(product_name, S_IRUGO,
+ amdgpu_device_get_product_name, NULL);
+
+/**
+ * DOC: product_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the part number
+ * for the device.
+ * The file product_number is used for this and returns the part number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_product_number(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
+}
+
+static DEVICE_ATTR(product_number, S_IRUGO,
+ amdgpu_device_get_product_number, NULL);
+
+/**
+ * DOC: serial_number
+ *
+ * The amdgpu driver provides a sysfs API for reporting the serial number
+ * for the device.
+ * The file serial_number is used for this and returns the serial number
+ * as returned from the FRU.
+ * NOTE: This is only available for certain server cards
+ */
+
+static ssize_t amdgpu_device_get_serial_number(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
+}
+
+static DEVICE_ATTR(serial_number, S_IRUGO,
+ amdgpu_device_get_serial_number, NULL);
+
+/**
* amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
@@ -231,10 +299,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
}
/*
- * MMIO register access helper functions.
+ * device register access helper functions.
*/
/**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -242,25 +310,19 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
*
* Returns the 32 bit value from the offset specified.
*/
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
- uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t acc_flags)
{
uint32_t ret;
- if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_kiq_rreg(adev, reg);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ if ((reg * 4) < adev->rmmio_size)
ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
- trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+ else
+ ret = adev->pcie_rreg(adev, (reg * 4));
+ trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
return ret;
}
@@ -306,28 +368,19 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
-void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+static inline void amdgpu_device_wreg_no_kiq(struct amdgpu_device *adev, uint32_t reg,
+ uint32_t v, uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+ trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ if ((reg * 4) < adev->rmmio_size)
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
-
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
- }
+ else
+ adev->pcie_wreg(adev, (reg * 4), v);
}
/**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a register
*
* @adev: amdgpu_device pointer
* @reg: dword aligned register offset
@@ -336,17 +389,13 @@ void static inline amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg,
*
* Writes the value specified to the offset specified.
*/
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
- uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
{
- if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
- adev->last_mm_index = v;
- }
-
- if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
+ if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))
return amdgpu_kiq_wreg(adev, reg, v);
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
}
/*
@@ -365,7 +414,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
- amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+ amdgpu_device_wreg_no_kiq(adev, reg, v, acc_flags);
}
/**
@@ -397,20 +446,12 @@ u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg)
*/
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
- if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
- adev->last_mm_index = v;
- }
-
if ((reg * 4) < adev->rio_mem_size)
iowrite32(v, adev->rio_mem + (reg * 4));
else {
iowrite32((reg * 4), adev->rio_mem + (mmMM_INDEX * 4));
iowrite32(v, adev->rio_mem + (mmMM_DATA * 4));
}
-
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
- }
}
/**
@@ -1126,6 +1167,8 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);
+ amdgpu_gmc_tmz_set(adev);
+
return 0;
}
@@ -1147,7 +1190,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
return;
if (state == VGA_SWITCHEROO_ON) {
- pr_info("amdgpu: switched on\n");
+ pr_info("switched on\n");
/* don't suspend or resume card normally */
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -1161,7 +1204,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero
dev->switch_power_state = DRM_SWITCH_POWER_ON;
drm_kms_helper_poll_enable(dev);
} else {
- pr_info("amdgpu: switched off\n");
+ pr_info("switched off\n");
drm_kms_helper_poll_disable(dev);
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
amdgpu_device_suspend(dev, true);
@@ -1524,9 +1567,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
chip_name = "vega12";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -1574,8 +1617,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
+ if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
+ amdgpu_discovery_get_gfx_info(adev);
goto parse_soc_bounding_box;
+ }
adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
@@ -1721,19 +1766,31 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EINVAL;
}
- r = amdgpu_device_parse_gpu_info_fw(adev);
- if (r)
- return r;
-
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
- amdgpu_discovery_get_gfx_info(adev);
-
amdgpu_amdkfd_device_probe(adev);
if (amdgpu_sriov_vf(adev)) {
+ /* handle vbios stuff prior full access mode for new handshake */
+ if (adev->virt.req_init_data_ver == 1) {
+ if (!amdgpu_get_bios(adev)) {
+ DRM_ERROR("failed to get vbios\n");
+ return -EINVAL;
+ }
+
+ r = amdgpu_atombios_init(adev);
+ if (r) {
+ dev_err(adev->dev, "amdgpu_atombios_init failed\n");
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
+ return r;
+ }
+ }
+ }
+
+ /* we need to send REQ_GPU here for the legacy handshake, otherwise the vbios
+ * will not be prepared by host for this VF */
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver < 1) {
r = amdgpu_virt_request_full_gpu(adev, true);
if (r)
- return -EAGAIN;
+ return r;
}
adev->pm.pp_feature = amdgpu_pp_feature_mask;
@@ -1763,6 +1820,14 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
}
/* get the vbios after the asic_funcs are set up */
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
+ r = amdgpu_device_parse_gpu_info_fw(adev);
+ if (r)
+ return r;
+
+ /* skip vbios handling for new handshake */
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver == 1)
+ continue;
+
/* Read BIOS */
if (!amdgpu_get_bios(adev))
return -EINVAL;
@@ -1889,6 +1954,12 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
if (r)
return r;
+ if (amdgpu_sriov_vf(adev) && adev->virt.req_init_data_ver > 0) {
+ r = amdgpu_virt_request_full_gpu(adev, true);
+ if (r)
+ return -EAGAIN;
+ }
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1975,6 +2046,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_xgmi_add_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_fru_get_product_info(adev);
+
init_failed:
if (amdgpu_sriov_vf(adev))
amdgpu_virt_release_full_gpu(adev, true);
@@ -2008,8 +2081,24 @@ static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
*/
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
- return !!memcmp(adev->gart.ptr, adev->reset_magic,
- AMDGPU_RESET_MAGIC_NUM);
+ if (memcmp(adev->gart.ptr, adev->reset_magic,
+ AMDGPU_RESET_MAGIC_NUM))
+ return true;
+
+ if (!adev->in_gpu_reset)
+ return false;
+
+ /*
+ * For all ASICs with baco/mode1 reset, the VRAM is
+ * always assumed to be lost.
+ */
+ switch (amdgpu_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_BACO:
+ case AMD_RESET_METHOD_MODE1:
+ return true;
+ default:
+ return false;
+ }
}
/**
@@ -2155,6 +2244,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = true;
}
+ amdgpu_ras_set_error_query_ready(adev, true);
+
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
@@ -2187,7 +2278,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
if (gpu_instance->adev->flags & AMD_IS_APU)
continue;
- r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0);
+ r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
+ AMDGPU_XGMI_PSTATE_MIN);
if (r) {
DRM_ERROR("pstate setting failed (%d).\n", r);
break;
@@ -2340,6 +2432,8 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@@ -2767,12 +2861,12 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
* By default timeout for non compute jobs is 10000.
* And there is no timeout enforced on compute jobs.
* In SR-IOV or passthrough mode, timeout for compute
- * jobs are 10000 by default.
+ * jobs are 60000 by default.
*/
adev->gfx_timeout = msecs_to_jiffies(10000);
adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
- adev->compute_timeout = adev->gfx_timeout;
+ adev->compute_timeout = msecs_to_jiffies(60000);
else
adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;
@@ -2823,6 +2917,14 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
return ret;
}
+static const struct attribute *amdgpu_dev_attributes[] = {
+ &dev_attr_product_name.attr,
+ &dev_attr_product_number.attr,
+ &dev_attr_serial_number.attr,
+ &dev_attr_pcie_replay_count.attr,
+ NULL
+};
+
/**
* amdgpu_device_init - initialize the driver
*
@@ -2924,9 +3026,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
- INIT_LIST_HEAD(&adev->ring_lru_list);
- spin_lock_init(&adev->ring_lru_list_lock);
-
INIT_DELAYED_WORK(&adev->delayed_init_work,
amdgpu_device_delayed_init_work_handler);
INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
@@ -2935,7 +3034,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
adev->gfx.gfx_off_req_count = 1;
- adev->pm.ac_power = power_supply_is_system_supplied() > 0 ? true : false;
+ adev->pm.ac_power = power_supply_is_system_supplied() > 0;
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -2984,18 +3083,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
adev->enable_mes = true;
- if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) {
- r = amdgpu_discovery_init(adev);
- if (r) {
- dev_err(adev->dev, "amdgpu_discovery_init failed\n");
- return r;
- }
- }
-
- /* early init functions */
- r = amdgpu_device_ip_early_init(adev);
- if (r)
- return r;
+ /* detect hw virtualization here */
+ amdgpu_detect_virtualization(adev);
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
@@ -3003,6 +3092,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
return r;
}
+ /* early init functions */
+ r = amdgpu_device_ip_early_init(adev);
+ if (r)
+ return r;
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3109,14 +3203,13 @@ fence_driver_init:
goto failed;
}
- DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
+ dev_info(adev->dev,
+ "SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
adev->gfx.config.max_shader_engines,
adev->gfx.config.max_sh_per_se,
adev->gfx.config.max_cu_per_sh,
adev->gfx.cu_info.number);
- amdgpu_ctx_init_sched(adev);
-
adev->accel_working = true;
amdgpu_vm_check_compute_bug(adev);
@@ -3181,9 +3274,9 @@ fence_driver_init:
queue_delayed_work(system_wq, &adev->delayed_init_work,
msecs_to_jiffies(AMDGPU_RESUME_MS));
- r = device_create_file(adev->dev, &dev_attr_pcie_replay_count);
+ r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (r) {
- dev_err(adev->dev, "Could not create pcie_replay_count");
+ dev_err(adev->dev, "Could not create amdgpu device attr\n");
return r;
}
@@ -3266,9 +3359,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
+
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
@@ -3354,15 +3448,12 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
-
- amdgpu_amdkfd_suspend(adev, !fbcon);
-
amdgpu_ras_suspend(adev);
r = amdgpu_device_ip_suspend_phase1(adev);
+ amdgpu_amdkfd_suspend(adev, !fbcon);
+
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
@@ -3739,6 +3830,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3833,6 +3926,8 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
int i, r = 0;
bool need_full_reset = *need_full_reset_arg;
+ amdgpu_debugfs_wait_dump(adev);
+
/* block all schedulers and reset given job's ring */
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
struct amdgpu_ring *ring = adev->rings[i];
@@ -4037,6 +4132,64 @@ static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
mutex_unlock(&adev->lock_reset);
}
+static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
+{
+ struct pci_dev *p = NULL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (p) {
+ pm_runtime_enable(&(p->dev));
+ pm_runtime_resume(&(p->dev));
+ }
+}
+
+static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
+{
+ enum amd_reset_method reset_method;
+ struct pci_dev *p = NULL;
+ u64 expires;
+
+ /*
+ * For now, only BACO and mode1 reset are confirmed
+ * to suffer the audio issue if not properly suspended.
+ */
+ reset_method = amdgpu_asic_reset_method(adev);
+ if ((reset_method != AMD_RESET_METHOD_BACO) &&
+ (reset_method != AMD_RESET_METHOD_MODE1))
+ return -EINVAL;
+
+ p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
+ adev->pdev->bus->number, 1);
+ if (!p)
+ return -ENODEV;
+
+ expires = pm_runtime_autosuspend_expiration(&(p->dev));
+ if (!expires)
+ /*
+ * If we cannot get the audio device autosuspend delay,
+ * fall back to a fixed 4s interval. Since 3s is the
+ * audio controller's default autosuspend delay, 4s is
+ * guaranteed to cover it.
+ */
+ expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;
+
+ while (!pm_runtime_status_suspended(&(p->dev))) {
+ if (!pm_runtime_suspend(&(p->dev)))
+ break;
+
+ if (expires < ktime_get_mono_fast_ns()) {
+ dev_warn(adev->dev, "failed to suspend display audio\n");
+ /* TODO: abort the subsequent gpu reset? */
+ return -ETIMEDOUT;
+ }
+ }
+
+ pm_runtime_disable(&(p->dev));
+
+ return 0;
+}
+
/**
* amdgpu_device_gpu_recover - reset the asic and recover scheduler
*
@@ -4052,7 +4205,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
struct amdgpu_job *job)
{
struct list_head device_list, *device_list_handle = NULL;
- bool need_full_reset, job_signaled;
+ bool need_full_reset = false;
+ bool job_signaled = false;
struct amdgpu_hive_info *hive = NULL;
struct amdgpu_device *tmp_adev = NULL;
int i, r = 0;
@@ -4060,6 +4214,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
bool use_baco =
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ?
true : false;
+ bool audio_suspended = false;
/*
* Flush RAM to disk so that after reboot
@@ -4073,16 +4228,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
emergency_restart();
}
- need_full_reset = job_signaled = false;
- INIT_LIST_HEAD(&device_list);
-
dev_info(adev->dev, "GPU %s begin!\n",
(in_ras_intr && !use_baco) ? "jobs stop":"reset");
- cancel_delayed_work_sync(&adev->delayed_init_work);
-
- hive = amdgpu_get_xgmi_hive(adev, false);
-
/*
* Here we trylock to avoid chain of resets executing from
* either trigger by jobs on different adevs in XGMI hive or jobs on
@@ -4090,39 +4238,25 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* We always reset all schedulers for device and all devices for XGMI
* hive so that should take care of them too.
*/
-
+ hive = amdgpu_get_xgmi_hive(adev, true);
if (hive && !mutex_trylock(&hive->reset_lock)) {
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
job ? job->base.id : -1, hive->hive_id);
+ mutex_unlock(&hive->hive_lock);
return 0;
}
- /* Start with adev pre asic reset first for soft reset check.*/
- if (!amdgpu_device_lock_adev(adev, !hive)) {
- DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
- job ? job->base.id : -1);
- return 0;
- }
-
- /* Block kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_pre_reset(adev);
-
- /* Build list of devices to reset */
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- if (!hive) {
- /*unlock kfd: SRIOV would do it separately */
- if (!amdgpu_sriov_vf(adev))
- amdgpu_amdkfd_post_reset(adev);
- amdgpu_device_unlock_adev(adev);
+ /*
+ * Build list of devices to reset.
+ * In case we are in XGMI hive mode, re-sort the device list
+ * to put adev in the first position.
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (adev->gmc.xgmi.num_physical_nodes > 1) {
+ if (!hive)
return -ENODEV;
- }
-
- /*
- * In case we are in XGMI hive mode device reset is done for all the
- * nodes in the hive to retrain all XGMI links and hence the reset
- * sequence is executed in loop on all nodes.
- */
+ if (!list_is_first(&adev->gmc.xgmi.head, &hive->device_list))
+ list_rotate_to_front(&adev->gmc.xgmi.head, &hive->device_list);
device_list_handle = &hive->device_list;
} else {
list_add_tail(&adev->gmc.xgmi.head, &device_list);
@@ -4131,19 +4265,40 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
/* block all schedulers and reset given job's ring */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
- if (tmp_adev != adev) {
- amdgpu_device_lock_adev(tmp_adev, false);
- if (!amdgpu_sriov_vf(tmp_adev))
- amdgpu_amdkfd_pre_reset(tmp_adev);
+ if (!amdgpu_device_lock_adev(tmp_adev, !hive)) {
+ DRM_INFO("Bailing on TDR for s_job:%llx, as another already in progress",
+ job ? job->base.id : -1);
+ mutex_unlock(&hive->hive_lock);
+ return 0;
}
/*
+ * Try to put the audio codec into suspend state
+ * before the gpu reset starts.
+ *
+ * Because the power domain of the graphics device
+ * is shared with the AZ power domain, we may
+ * otherwise change the audio hardware from behind
+ * the audio driver's back and trigger audio codec
+ * errors.
+ */
+ if (!amdgpu_device_suspend_display_audio(tmp_adev))
+ audio_suspended = true;
+
+ amdgpu_ras_set_error_query_ready(tmp_adev, false);
+
+ cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
+
+ if (!amdgpu_sriov_vf(tmp_adev))
+ amdgpu_amdkfd_pre_reset(tmp_adev);
+
+ /*
* Mark these ASICs to be reset as untracked first
* And add them back after reset completed
*/
amdgpu_unregister_gpu_instance(tmp_adev);
- amdgpu_fbdev_set_suspend(adev, 1);
+ amdgpu_fbdev_set_suspend(tmp_adev, 1);
/* disable ras on ALL IPs */
if (!(in_ras_intr && !use_baco) &&
@@ -4163,7 +4318,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
}
}
-
if (in_ras_intr && !use_baco)
goto skip_sched_resume;
@@ -4174,30 +4328,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
* job->base holds a reference to parent fence
*/
if (job && job->base.s_fence->parent &&
- dma_fence_is_signaled(job->base.s_fence->parent))
+ dma_fence_is_signaled(job->base.s_fence->parent)) {
job_signaled = true;
-
- if (job_signaled) {
dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
goto skip_hw_reset;
}
-
- /* Guilty job will be freed after this*/
- r = amdgpu_device_pre_asic_reset(adev, job, &need_full_reset);
- if (r) {
- /*TODO Should we stop ?*/
- DRM_ERROR("GPU pre asic reset failed with err, %d for drm dev, %s ",
- r, adev->ddev->unique);
- adev->asic_reset_res = r;
- }
-
retry: /* Rest of adevs pre asic reset from XGMI hive. */
list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) {
-
- if (tmp_adev == adev)
- continue;
-
r = amdgpu_device_pre_asic_reset(tmp_adev,
NULL,
&need_full_reset);
@@ -4259,11 +4397,15 @@ skip_sched_resume:
/*unlock kfd: SRIOV would do it separately */
if (!(in_ras_intr && !use_baco) && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
+ if (audio_suspended)
+ amdgpu_device_resume_display_audio(tmp_adev);
amdgpu_device_unlock_adev(tmp_adev);
}
- if (hive)
+ if (hive) {
mutex_unlock(&hive->reset_lock);
+ mutex_unlock(&hive->hive_lock);
+ }
if (r)
dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 057f6ea645d7..61a26c15c8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,9 +52,6 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
- uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev,
- uint32_t df_inst);
- uint32_t (*get_df_inst_id)(struct amdgpu_device *adev);
};
struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 27d8ae19a7a4..b5d6274952a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -23,9 +23,7 @@
#include "amdgpu.h"
#include "amdgpu_discovery.h"
-#include "soc15_common.h"
#include "soc15_hw_ip.h"
-#include "nbio/nbio_2_3_offset.h"
#include "discovery.h"
#define mmRCC_CONFIG_MEMSIZE 0xde3
@@ -135,9 +133,10 @@ static int hw_id_map[MAX_HWIP] = {
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
{
uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
- uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ uint64_t pos = vram_size - adev->discovery_tmr_size;
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false);
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
+ adev->discovery_tmr_size, false);
return 0;
}
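Editor's note: a worked example of the placement math, using the default 64 KiB TMR (DISCOVERY_TMR_SIZE = 64 << 10, see amdgpu_discovery.h below) and mmRCC_CONFIG_MEMSIZE reporting 8 GiB (0x2000 MiB):

    vram_size = 0x2000ULL << 20;         /* 0x200000000, 8 GiB */
    pos       = vram_size - (64 << 10);  /* 0x1FFFF0000        */

So the discovery binary is always read from the last 64 KiB of VRAM (or the last adev->discovery_tmr_size bytes, now that the size is runtime-configurable).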
@@ -158,7 +157,7 @@ static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size
return !!(amdgpu_discovery_calculate_checksum(data, size) == expected);
}
-int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
struct binary_header *bhdr;
@@ -169,17 +168,18 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
uint16_t checksum;
int r;
- adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
- if (!adev->discovery)
+ adev->discovery_tmr_size = DISCOVERY_TMR_SIZE;
+ adev->discovery_bin = kzalloc(adev->discovery_tmr_size, GFP_KERNEL);
+ if (!adev->discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->discovery);
+ r = amdgpu_discovery_read_binary(adev, adev->discovery_bin);
if (r) {
DRM_ERROR("failed to read ip discovery binary\n");
goto out;
}
- bhdr = (struct binary_header *)adev->discovery;
+ bhdr = (struct binary_header *)adev->discovery_bin;
if (le32_to_cpu(bhdr->binary_signature) != BINARY_SIGNATURE) {
DRM_ERROR("invalid ip discovery binary signature\n");
@@ -192,7 +192,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
size = bhdr->binary_size - offset;
checksum = bhdr->binary_checksum;
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
size, checksum)) {
DRM_ERROR("invalid ip discovery binary checksum\n");
r = -EINVAL;
@@ -202,7 +202,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[IP_DISCOVERY];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ihdr = (struct ip_discovery_header *)(adev->discovery + offset);
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin + offset);
if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
DRM_ERROR("invalid ip discovery data table signature\n");
@@ -210,7 +210,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ihdr->size, checksum)) {
DRM_ERROR("invalid ip discovery data table checksum\n");
r = -EINVAL;
@@ -220,9 +220,9 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
info = &bhdr->table_list[GC];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
- ghdr = (struct gpu_info_header *)(adev->discovery + offset);
+ ghdr = (struct gpu_info_header *)(adev->discovery_bin + offset);
- if (!amdgpu_discovery_verify_checksum(adev->discovery + offset,
+ if (!amdgpu_discovery_verify_checksum(adev->discovery_bin + offset,
ghdr->size, checksum)) {
DRM_ERROR("invalid gc data table checksum\n");
r = -EINVAL;
@@ -232,16 +232,16 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
return 0;
out:
- kfree(adev->discovery);
- adev->discovery = NULL;
+ kfree(adev->discovery_bin);
+ adev->discovery_bin = NULL;
return r;
}
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
- kfree(adev->discovery);
- adev->discovery = NULL;
+ kfree(adev->discovery_bin);
+ adev->discovery_bin = NULL;
}
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
@@ -257,14 +257,16 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
uint8_t num_base_address;
int hw_ip;
int i, j, k;
+ int r;
- if (!adev->discovery) {
- DRM_ERROR("ip discovery uninitialized\n");
- return -EINVAL;
+ r = amdgpu_discovery_init(adev);
+ if (r) {
+ DRM_ERROR("amdgpu_discovery_init failed\n");
+ return r;
}
- bhdr = (struct binary_header *)adev->discovery;
- ihdr = (struct ip_discovery_header *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
@@ -272,7 +274,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery + die_offset);
+ dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
@@ -286,7 +288,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
le16_to_cpu(dhdr->die_id), num_ips);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery + ip_offset);
+ ip = (struct ip *)(adev->discovery_bin + ip_offset);
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -335,24 +337,24 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
uint16_t num_ips;
int i, j;
- if (!adev->discovery) {
+ if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery;
- ihdr = (struct ip_discovery_header *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ ihdr = (struct ip_discovery_header *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
num_dies = le16_to_cpu(ihdr->num_dies);
for (i = 0; i < num_dies; i++) {
die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
- dhdr = (struct die_header *)(adev->discovery + die_offset);
+ dhdr = (struct die_header *)(adev->discovery_bin + die_offset);
num_ips = le16_to_cpu(dhdr->num_ips);
ip_offset = die_offset + sizeof(*dhdr);
for (j = 0; j < num_ips; j++) {
- ip = (struct ip *)(adev->discovery + ip_offset);
+ ip = (struct ip *)(adev->discovery_bin + ip_offset);
if (le16_to_cpu(ip->hw_id) == hw_id) {
if (major)
@@ -375,13 +377,13 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct gc_info_v1_0 *gc_info;
- if (!adev->discovery) {
+ if (!adev->discovery_bin) {
DRM_ERROR("ip discovery uninitialized\n");
return -EINVAL;
}
- bhdr = (struct binary_header *)adev->discovery;
- gc_info = (struct gc_info_v1_0 *)(adev->discovery +
+ bhdr = (struct binary_header *)adev->discovery_bin;
+ gc_info = (struct gc_info_v1_0 *)(adev->discovery_bin +
le16_to_cpu(bhdr->table_list[GC].offset));
adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->gc_num_se);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index ba78e15d9b05..d50d597c45ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -26,7 +26,6 @@
#define DISCOVERY_TMR_SIZE (64 << 10)
-int amdgpu_discovery_init(struct amdgpu_device *adev);
void amdgpu_discovery_fini(struct amdgpu_device *adev);
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 84cee27cd7ef..c56438a6c9f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -523,7 +523,8 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
break;
case CHIP_RAVEN:
/* enable S/G on PCO and RV2 */
- if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO))
domain |= AMDGPU_GEM_DOMAIN_GTT;
break;
default:
@@ -575,14 +576,14 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
}
ret = amdgpu_display_framebuffer_init(dev, amdgpu_fb, mode_cmd, obj);
if (ret) {
kfree(amdgpu_fb);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index ffeb20f11c07..43d8ed7dbd00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -38,6 +38,7 @@
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
+#include <linux/pci-p2pdma.h>
/**
* amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation
@@ -179,6 +180,9 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
+ if (pci_p2pdma_distance_many(adev->pdev, &attach->dev, 1, true) < 0)
+ attach->peer2peer = false;
+
if (attach->dev->driver == adev->dev->driver)
return 0;
@@ -272,14 +276,21 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct dma_buf *dma_buf = attach->dmabuf;
struct drm_gem_object *obj = dma_buf->priv;
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct sg_table *sgt;
long r;
if (!bo->pin_count) {
- /* move buffer into GTT */
+ /* move buffer into GTT or VRAM */
struct ttm_operation_ctx ctx = { false, false };
+ unsigned domains = AMDGPU_GEM_DOMAIN_GTT;
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
+ attach->peer2peer) {
+ bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ domains |= AMDGPU_GEM_DOMAIN_VRAM;
+ }
+ amdgpu_bo_placement_from_domain(bo, domains);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return ERR_PTR(r);
@@ -289,20 +300,34 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
return ERR_PTR(-EBUSY);
}
- sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
- if (IS_ERR(sgt))
- return sgt;
-
- if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
- DMA_ATTR_SKIP_CPU_SYNC))
- goto error_free;
+ switch (bo->tbo.mem.mem_type) {
+ case TTM_PL_TT:
+ sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ bo->tbo.num_pages);
+ if (IS_ERR(sgt))
+ return sgt;
+
+ if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto error_free;
+ break;
+
+ case TTM_PL_VRAM:
+ r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, attach->dev,
+ dir, &sgt);
+ if (r)
+ return ERR_PTR(r);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
return sgt;
error_free:
sg_free_table(sgt);
kfree(sgt);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EBUSY);
}
/**
@@ -318,9 +343,18 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
- sg_free_table(sgt);
- kfree(sgt);
+ struct dma_buf *dma_buf = attach->dmabuf;
+ struct drm_gem_object *obj = dma_buf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ if (sgt->sgl->page_link) {
+ dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ sg_free_table(sgt);
+ kfree(sgt);
+ } else {
+ amdgpu_vram_mgr_free_sgt(adev, attach->dev, dir, sgt);
+ }
}
/**
@@ -514,6 +548,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
}
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .allow_peer2peer = true,
.move_notify = amdgpu_dma_buf_move_notify
};
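Editor's note: importers reach these ops through the dynamic-attach path; a minimal caller sketch, assuming the v5.7-era dma_buf_dynamic_attach() signature (error handling trimmed):

    struct dma_buf_attachment *attach;

    attach = dma_buf_dynamic_attach(dma_buf, dev,
                                    &amdgpu_dma_buf_attach_ops, NULL);
    if (IS_ERR(attach))
            return PTR_ERR(attach);

    /* If the P2P distance check in amdgpu_dma_buf_attach() passed,
     * attach->peer2peer stays true and amdgpu_dma_buf_map() may hand
     * back VRAM bus addresses instead of forcing a move to GTT. */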
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index ba1bb95a3cf9..d2a105e3bf7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -856,7 +856,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
@@ -1188,3 +1188,13 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
+{
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ return smu_allow_xgmi_power_down(smu, en);
+
+ return 0;
+}
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 936d85aa0fbc..6a8aae70a0e6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -450,6 +450,7 @@ struct amdgpu_pm {
/* Used for I2C access to various EEPROMs on relevant ASICs */
struct i2c_adapter smu_i2c;
+ struct list_head pm_attr_list;
};
#define R600_SSTU_DFLT 0
@@ -538,4 +539,6 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
uint32_t cstate);
+int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8ea86ffdea0d..126e74758a34 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -85,9 +85,11 @@
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
* - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
* - 3.36.0 - Allow reading more status registers on si/cik
+ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 38
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -138,12 +140,14 @@ int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
/* FBC (bit 0) disabled by default*/
uint amdgpu_dc_feature_mask = 0;
+uint amdgpu_dc_debug_mask = 0;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp = 0;
int amdgpu_discovery = -1;
int amdgpu_mes = 0;
int amdgpu_noretry;
int amdgpu_force_asic_type = -1;
+int amdgpu_tmz = 0;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
@@ -687,13 +691,12 @@ MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (defau
/**
* DOC: hws_gws_support(bool)
- * Whether HWS support gws barriers. Default value: false (not supported)
- * This will be replaced with a MEC firmware version check once firmware
- * is ready
+ * Assume that HWS supports GWS barriers regardless of what the firmware
+ * version check says. Default value: false (rely on MEC2 firmware version check).
*/
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
-MODULE_PARM_DESC(hws_gws_support, "MEC FW support gws barriers (false = not supported (Default), true = supported)");
+MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
/**
* DOC: queue_preemption_timeout_ms (int)
@@ -713,6 +716,13 @@ MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
/**
+ * DOC: dcdebugmask (uint)
+ * Override the enabled display debug features. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ */
+MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default)");
+module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
+
+/**
* DOC: abmlevel (uint)
* Override the default ABM (Adaptive Backlight Management) level used for DC
* enabled hardware. Requires DMCU to be supported and loaded.
@@ -728,6 +738,16 @@ uint amdgpu_dm_abm_level = 0;
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);
+/**
+ * DOC: tmz (int)
+ * Trusted Memory Zone (TMZ) is a method to protect data being written
+ * to or read from memory.
+ *
+ * The default value is 0 (off). TODO: change the default to auto once the feature is complete.
+ */
+MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto, 0 = off (default), 1 = on)");
+module_param_named(tmz, amdgpu_tmz, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1163,14 +1183,6 @@ static int amdgpu_pmops_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- /* GPU comes up enabled by the bios on resume */
- if (amdgpu_device_supports_boco(drm_dev) ||
- amdgpu_device_supports_baco(drm_dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
return amdgpu_device_resume(drm_dev, true);
}
@@ -1180,7 +1192,9 @@ static int amdgpu_pmops_freeze(struct device *dev)
struct amdgpu_device *adev = drm_dev->dev_private;
int r;
+ adev->in_hibernate = true;
r = amdgpu_device_suspend(drm_dev, true);
+ adev->in_hibernate = false;
if (r)
return r;
return amdgpu_asic_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 9ae7b61f696a..db731f573f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -114,7 +114,7 @@ static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
amdgpu_bo_unpin(abo);
amdgpu_bo_unreserve(abo);
}
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
}
static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
u32 cpp;
u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED |
- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ AMDGPU_GEM_CREATE_VRAM_CLEARED;
info = drm_get_format_info(adev->ddev, mode_cmd);
cpp = info->cpp[0];
@@ -279,7 +278,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 7531527067df..d878fe7fee51 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -192,14 +192,22 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
* Used For polling fence.
* Returns 0 on success, -ENOMEM on failure.
*/
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s)
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout)
{
uint32_t seq;
+ signed long r;
if (!s)
return -EINVAL;
seq = ++ring->fence_drv.sync_seq;
+ r = amdgpu_fence_wait_polling(ring,
+ seq - ring->fence_drv.num_fences_mask,
+ timeout);
+ if (r < 1)
+ return -ETIMEDOUT;
+
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, 0);
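Editor's note: callers of amdgpu_fence_emit_polling() now pass a timeout and must handle failure; a minimal usage sketch, mirroring the KIQ register-read path later in this diff:

    uint32_t seq;
    int r;

    r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
    if (r) {
            /* older fences did not retire in time: undo and bail */
            amdgpu_ring_undo(ring);
            return r;       /* -ETIMEDOUT */
    }
    amdgpu_ring_commit(ring);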
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
new file mode 100644
index 000000000000..815c072ac4da
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/pci.h>
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "smu_v11_0_i2c.h"
+#include "atom.h"
+
+#define I2C_PRODUCT_INFO_ADDR 0xAC
+#define I2C_PRODUCT_INFO_ADDR_SIZE 0x2
+#define I2C_PRODUCT_INFO_OFFSET 0xC0
+
+bool is_fru_eeprom_supported(struct amdgpu_device *adev)
+{
+ /* TODO: Gaming SKUs don't have the FRU EEPROM.
+ * Use this hack to address hangs on modprobe on gaming SKUs
+ * until a proper solution can be implemented by only supporting
+ * the explicit chip IDs for VG20 Server cards
+ *
+ * TODO: Add list of supported Arcturus DIDs once confirmed
+ */
+ if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
+ (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
+ (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
+ return true;
+ return false;
+}
+
+int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
+ unsigned char *buff)
+{
+ int ret, size;
+ struct i2c_msg msg = {
+ .addr = I2C_PRODUCT_INFO_ADDR,
+ .flags = I2C_M_RD,
+ .buf = buff,
+ };
+ buff[0] = 0;
+ buff[1] = addrptr;
+ msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+ if (ret < 1) {
+ DRM_WARN("FRU: Failed to get size field");
+ return ret;
+ }
+
+ /* The size returned by the i2c transfer requires subtracting 0xC0,
+ * since the EEPROM apparently always reports 0xC0 + actual size.
+ */
+ size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
+ /* Add 1 since address field was 1 byte */
+ buff[1] = addrptr + 1;
+
+ msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+
+ if (ret < 1) {
+ DRM_WARN("FRU: Failed to get data field");
+ return ret;
+ }
+
+ return size;
+}
+
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
+{
+ unsigned char buff[34];
+ int addrptr = 0, size = 0;
+
+ if (!is_fru_eeprom_supported(adev))
+ return 0;
+
+ /* If algo exists, the i2c_adapter is initialized */
+ if (!adev->pm.smu_i2c.algo) {
+ DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
+ return 0;
+ }
+
+ /* There's a lot of repetition here because the FRU uses
+ * variable-length fields. To get the information, we find the
+ * size of each field and then keep reading, field by field,
+ * until we have all of the data we want, using addrptr to
+ * track the address as we go.
+ */
+
+ /* Bytes 0-7 are 1-byte offsets that contain information that
+ * isn't useful to us. Bytes 8-0xa are also 1-byte and hold the
+ * size of the entire struct and the language field, so just
+ * start from 0xb, the manufacturer size field.
+ */
+ addrptr = 0xb;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
+ return size;
+ }
+
+ /* Increment the addrptr by the size of the field, and 1 due to the
+ * size field being 1 byte. This pattern continues below.
+ */
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product name, ret:%d", size);
+ return size;
+ }
+
+ /* Product name should only be 32 characters. Any more,
+ * and something could be wrong. Cap it at 32 to be safe
+ */
+ if (size > 32) {
+ DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
+ size = 32;
+ }
+ /* Start at 2 due to buff using fields 0 and 1 for the address */
+ memcpy(adev->product_name, &buff[2], size);
+ adev->product_name[size] = '\0';
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product number, ret:%d", size);
+ return size;
+ }
+
+ /* Product number should only be 16 characters. Any more,
+ * and something could be wrong. Cap it at 16 to be safe
+ */
+ if (size > 16) {
+ DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
+ size = 16;
+ }
+ memcpy(adev->product_number, &buff[2], size);
+ adev->product_number[size] = '\0';
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU product version, ret:%d", size);
+ return size;
+ }
+
+ addrptr += size + 1;
+ size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
+
+ if (size < 1) {
+ DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
+ return size;
+ }
+
+ /* Serial number should only be 16 characters. Any more,
+ * and something could be wrong. Cap it at 16 to be safe
+ */
+ if (size > 16) {
+ DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
+ size = 16;
+ }
+ memcpy(adev->serial, &buff[2], size);
+ adev->serial[size] = '\0';
+
+ return 0;
+}
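Editor's note: the field walk above reduces to a simple length-prefixed iteration; a standalone sketch, where read_field() is a hypothetical stand-in for amdgpu_fru_read_eeprom():

    int addr = 0xb;     /* first variable-length field: manufacturer */
    int size;

    size = read_field(adev, addr, buff);    /* manufacturer         */
    addr += size + 1;                       /* payload + size byte  */
    size = read_field(adev, addr, buff);    /* product name         */
    addr += size + 1;
    size = read_field(adev, addr, buff);    /* product number       */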
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
index f78cbae9db88..968115c97e33 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2020 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -19,19 +19,11 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: AMD
- *
*/
-#ifndef __DC_COMMON_DEFS_H__
-#define __DC_COMMON_DEFS_H__
-
-#include "dm_services.h"
-#include "dc_features.h"
-#include "display_mode_structs.h"
-#include "display_mode_enums.h"
-
+#ifndef __AMDGPU_PRODINFO_H__
+#define __AMDGPU_PRODINFO_H__
-double dml_round(double a);
+int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
-#endif /* __DC_COMMON_DEFS_H__ */
+#endif // __AMDGPU_PRODINFO_H__
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4277125a79ee..de9784b0c19b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_debugfs.h>
@@ -105,7 +106,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
@@ -161,16 +162,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
struct amdgpu_bo_list_entry vm_pd;
struct list_head list, duplicates;
+ struct dma_fence *fence = NULL;
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct amdgpu_bo_va *bo_va;
- int r;
+ long r;
INIT_LIST_HEAD(&list);
INIT_LIST_HEAD(&duplicates);
tv.bo = &bo->tbo;
- tv.num_shared = 1;
+ tv.num_shared = 2;
list_add(&tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +180,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
if (r) {
dev_err(adev->dev, "leaking bo va because "
- "we fail to reserve bo (%d)\n", r);
+ "we fail to reserve bo (%ld)\n", r);
return;
}
bo_va = amdgpu_vm_bo_find(vm, bo);
- if (bo_va && --bo_va->ref_count == 0) {
- amdgpu_vm_bo_rmv(adev, bo_va);
-
- if (amdgpu_vm_ready(vm)) {
- struct dma_fence *fence = NULL;
+ if (!bo_va || --bo_va->ref_count)
+ goto out_unlock;
- r = amdgpu_vm_clear_freed(adev, vm, &fence);
- if (unlikely(r)) {
- dev_err(adev->dev, "failed to clear page "
- "tables on GEM object close (%d)\n", r);
- }
+ amdgpu_vm_bo_rmv(adev, bo_va);
+ if (!amdgpu_vm_ready(vm))
+ goto out_unlock;
- if (fence) {
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
- }
- }
+ fence = dma_resv_get_excl(bo->tbo.base.resv);
+ if (fence) {
+ amdgpu_bo_fence(bo, fence, true);
+ fence = NULL;
}
+
+ r = amdgpu_vm_clear_freed(adev, vm, &fence);
+ if (r || !fence)
+ goto out_unlock;
+
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+out_unlock:
+ if (unlikely(r < 0))
+ dev_err(adev->dev, "failed to clear page "
+ "tables on GEM object close (%ld)\n", r);
ttm_eu_backoff_reservation(&ticket, &list);
}
@@ -226,7 +234,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
AMDGPU_GEM_CREATE_CPU_GTT_USWC |
AMDGPU_GEM_CREATE_VRAM_CLEARED |
AMDGPU_GEM_CREATE_VM_ALWAYS_VALID |
- AMDGPU_GEM_CREATE_EXPLICIT_SYNC))
+ AMDGPU_GEM_CREATE_EXPLICIT_SYNC |
+ AMDGPU_GEM_CREATE_ENCRYPTED))
return -EINVAL;
@@ -234,6 +243,11 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
if (args->in.domains & ~AMDGPU_GEM_DOMAIN_MASK)
return -EINVAL;
+ if (!amdgpu_is_tmz(adev) && (flags & AMDGPU_GEM_CREATE_ENCRYPTED)) {
+ DRM_NOTE_ONCE("Cannot allocate secure buffer since TMZ is disabled\n");
+ return -EINVAL;
+ }
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -271,7 +285,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (r)
return r;
@@ -355,7 +369,7 @@ user_pages_done:
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
release_object:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -374,11 +388,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
robj = gem_to_amdgpu_bo(gobj);
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
(robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return -EPERM;
}
*offset_p = amdgpu_bo_mmap_offset(robj);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
@@ -448,7 +462,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
} else
r = ret;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -491,7 +505,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
unreserve:
amdgpu_bo_unreserve(robj);
out:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -690,7 +704,7 @@ error_backoff:
ttm_eu_backoff_reservation(&ticket, &list);
error_unref:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -766,7 +780,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
}
out:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -803,7 +817,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (r) {
return r;
}
@@ -854,7 +868,8 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
attachment = READ_ONCE(bo->tbo.base.import_attach);
if (attachment)
- seq_printf(m, " imported from %p", dma_buf);
+ seq_printf(m, " imported from %p%s", dma_buf,
+ attachment->peer2peer ? " P2P" : "");
else if (dma_buf)
seq_printf(m, " exported as %p", dma_buf);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 6b9c9193cdfa..d612033a23ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -48,7 +48,7 @@ int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
return bit;
}
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue)
{
*queue = bit % adev->gfx.mec.num_queue_per_pipe;
@@ -274,7 +274,7 @@ static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
continue;
- amdgpu_gfx_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
/*
* 1. Using pipes 2/3 from MEC 2 seems cause problems.
@@ -304,10 +304,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
spin_lock_init(&kiq->ring_lock);
- r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs);
- if (r)
- return r;
-
ring->adev = NULL;
ring->ring_obj = NULL;
ring->use_doorbell = true;
@@ -318,9 +314,11 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
return r;
ring->eop_gpu_addr = kiq->eop_gpu_addr;
+ ring->no_scheduler = true;
sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
r = amdgpu_ring_init(adev, ring, 1024,
- irq, AMDGPU_CP_KIQ_IRQ_DRIVER0);
+ irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);
@@ -329,7 +327,6 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
- amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs);
amdgpu_ring_fini(ring);
}
@@ -488,6 +485,19 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
return amdgpu_ring_test_helper(kiq_ring);
}
+int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
+ int queue_bit)
+{
+ int mec, pipe, queue;
+ int set_resource_bit = 0;
+
+ amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);
+
+ set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;
+
+ return set_resource_bit;
+}
+
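Editor's note: a worked example of the remap, assuming the usual decode in amdgpu_queue_mask_bit_to_mec_queue() (queue = bit % queues_per_pipe, pipe = (bit / queues_per_pipe) % pipes_per_mec, mec = the rest): with 2 queues per pipe and 4 pipes per MEC, queue_bit 5 decodes to mec=0, pipe=2, queue=1 and lands on SET_RESOURCES bit 0*32 + 2*8 + 1 = 17, whereas with the full 8 queues per pipe the mapping is the identity. This is why the KCQ enable loop below shifts by the remapped bit rather than by i.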
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
@@ -510,7 +520,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
break;
}
- queue_mask |= (1ull << i);
+ queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
}
DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
@@ -670,16 +680,23 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
signed long r, cnt = 0;
unsigned long flags;
- uint32_t seq;
+ uint32_t seq, reg_val_offs = 0, value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -705,9 +722,18 @@ uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
- return adev->wb.wb[kiq->reg_val_offs];
+ mb();
+ value = adev->wb.wb[reg_val_offs];
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read reg:%x\n", reg);
return ~0;
}
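Editor's note: the shared kiq->reg_val_offs (removed from struct amdgpu_kiq further down) gives way to a per-call writeback slot; a sketch of the new per-read lifetime, assuming the CP writes the register value into the slot before the polling fence retires:

    /* per-read lifetime (sketch):
     *   amdgpu_device_wb_get(adev, &reg_val_offs);   // private slot
     *   emit the rreg packet targeting reg_val_offs
     *   wait for the polling fence, then mb()
     *   value = adev->wb.wb[reg_val_offs];
     *   amdgpu_device_wb_free(adev, reg_val_offs);   // slot returned
     * Concurrent readers no longer race on one shared offset.
     */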
@@ -725,7 +751,10 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
spin_lock_irqsave(&kiq->ring_lock, flags);
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -754,6 +783,9 @@ void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
return;
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
pr_err("failed to write reg:%x\n", reg);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index 5825692d07e4..d43c11671a38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -103,7 +103,6 @@ struct amdgpu_kiq {
struct amdgpu_ring ring;
struct amdgpu_irq_src irq;
const struct kiq_pm4_funcs *pmf;
- uint32_t reg_val_offs;
};
/*
@@ -286,13 +285,8 @@ struct amdgpu_gfx {
bool me_fw_write_wait;
bool cp_fw_write_wait;
struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
- struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS];
- uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
- struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
- struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -370,7 +364,7 @@ void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
int pipe, int queue);
-void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
+void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 5884ab590486..acabb57aa8af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -136,8 +136,8 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
/**
* amdgpu_gmc_vram_location - try to find VRAM location
*
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
* @base: base address at which to put VRAM
*
* Function will try to place VRAM at base address provided
@@ -165,8 +165,8 @@ void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
/**
* amdgpu_gmc_gart_location - try to find GART location
*
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
*
* Function will try to place GART before or after VRAM.
*
@@ -207,8 +207,8 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
/**
* amdgpu_gmc_agp_location - try to find AGP location
- * @adev: amdgpu device structure holding all necessary informations
- * @mc: memory controller structure holding memory informations
+ * @adev: amdgpu device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
*
* Function will try to find a place for the AGP BAR in the MC address
* space.
@@ -373,3 +373,38 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
+
+/**
+ * amdgpu_gmc_tmz_set -- check and set whether a device supports TMZ
+ * @adev: amdgpu_device pointer
+ *
+ * Check and set whether the device @adev supports Trusted Memory
+ * Zones (TMZ).
+ */
+void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ /* Don't enable it by default yet.
+ */
+ if (amdgpu_tmz < 1) {
+ adev->gmc.tmz_enabled = false;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature disabled as experimental (default)\n");
+ } else {
+ adev->gmc.tmz_enabled = true;
+ dev_info(adev->dev,
+ "Trusted Memory Zone (TMZ) feature enabled as experimental (cmd line)\n");
+ }
+ break;
+ default:
+ adev->gmc.tmz_enabled = false;
+ dev_warn(adev->dev,
+ "Trusted Memory Zone (TMZ) feature not supported\n");
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 7546da0cc70c..2bd9423c1dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -213,6 +213,8 @@ struct amdgpu_gmc {
} fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+ bool tmz_enabled;
+
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
@@ -276,4 +278,6 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev);
void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
+extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index ccbd7acfc4cb..b91853fd66d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -61,12 +61,13 @@
* Returns 0 on success, error on failure.
*/
int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- unsigned size, struct amdgpu_ib *ib)
+ unsigned size, enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_ib *ib)
{
int r;
if (size) {
- r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
+ r = amdgpu_sa_bo_new(&adev->ib_pools[pool_type],
&ib->sa_bo, size, 256);
if (r) {
dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -131,6 +132,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
uint64_t fence_ctx;
uint32_t status = 0, alloc_size;
unsigned fence_flags = 0;
+ bool secure;
unsigned i;
int r = 0;
@@ -159,6 +161,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}
+ if ((ib->flags & AMDGPU_IB_FLAGS_SECURE) &&
+ (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)) {
+ dev_err(adev->dev, "secure submissions not supported on compute rings\n");
+ return -EINVAL;
+ }
+
alloc_size = ring->funcs->emit_frame_size + num_ibs *
ring->funcs->emit_ib_size;
@@ -181,6 +189,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
dma_fence_put(tmp);
}
+ if ((ib->flags & AMDGPU_IB_FLAG_EMIT_MEM_SYNC) && ring->funcs->emit_mem_sync)
+ ring->funcs->emit_mem_sync(ring);
+
if (ring->funcs->insert_start)
ring->funcs->insert_start(ring);
@@ -215,6 +226,14 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
amdgpu_ring_emit_cntxcntl(ring, status);
}
+ /* Set up the initial TMZ state (secure or not) and send it off.
+ */
+ secure = false;
+ if (job && ring->funcs->emit_frame_cntl) {
+ secure = ib->flags & AMDGPU_IB_FLAGS_SECURE;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
@@ -226,12 +245,20 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
!amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
continue;
+ if (job && ring->funcs->emit_frame_cntl) {
+ if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
+ secure = !secure;
+ amdgpu_ring_emit_frame_cntl(ring, true, secure);
+ }
+ }
+
amdgpu_ring_emit_ib(ring, job, ib, status);
status &= ~AMDGPU_HAVE_CTX_SWITCH;
}
- if (ring->funcs->emit_tmz)
- amdgpu_ring_emit_tmz(ring, false);
+ if (job && ring->funcs->emit_frame_cntl)
+ amdgpu_ring_emit_frame_cntl(ring, false, secure);
#ifdef CONFIG_X86_64
if (!(adev->flags & AMD_IS_APU))
@@ -280,22 +307,32 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- int r;
+ unsigned size;
+ int r, i;
- if (adev->ib_pool_ready) {
+ if (adev->ib_pool_ready)
return 0;
- }
- r = amdgpu_sa_bo_manager_init(adev, &adev->ring_tmp_bo,
- AMDGPU_IB_POOL_SIZE*64*1024,
- AMDGPU_GPU_PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT);
- if (r) {
- return r;
- }
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
+ if (i == AMDGPU_IB_POOL_DIRECT)
+ size = PAGE_SIZE * 2;
+ else
+ size = AMDGPU_IB_POOL_SIZE;
+
+ r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
+ size, AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT);
+ if (r)
+ goto error;
+ }
adev->ib_pool_ready = true;
return 0;
+
+error:
+ while (i--)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ return r;
}
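/*
 * Illustrative sketch (not part of the patch): the error path above is the
 * usual partial-initialization unwind idiom, shown in isolation with
 * hypothetical foo_init()/foo_fini() helpers.
 */
static int init_array(struct foo *objs, int n)
{
        int i, r;

        for (i = 0; i < n; i++) {
                r = foo_init(&objs[i]);
                if (r)
                        goto error;
        }
        return 0;

error:
        while (i--)     /* i is the failed index; unwind entries 0..i-1 */
                foo_fini(&objs[i]);
        return r;
}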
/**
@@ -308,10 +345,14 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
*/
void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
{
- if (adev->ib_pool_ready) {
- amdgpu_sa_bo_manager_fini(adev, &adev->ring_tmp_bo);
- adev->ib_pool_ready = false;
- }
+ int i;
+
+ if (!adev->ib_pool_ready)
+ return;
+
+ for (i = 0; i < AMDGPU_IB_POOL_MAX; i++)
+ amdgpu_sa_bo_manager_fini(adev, &adev->ib_pools[i]);
+ adev->ib_pool_ready = false;
}
/**
@@ -326,9 +367,9 @@ void amdgpu_ib_pool_fini(struct amdgpu_device *adev)
*/
int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
{
- unsigned i;
- int r, ret = 0;
long tmo_gfx, tmo_mm;
+ int r, ret = 0;
+ unsigned i;
tmo_mm = tmo_gfx = AMDGPU_IB_TEST_TIMEOUT;
if (amdgpu_sriov_vf(adev)) {
@@ -406,10 +447,16 @@ static int amdgpu_debugfs_sa_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- amdgpu_sa_bo_dump_debug_info(&adev->ring_tmp_bo, m);
+ seq_printf(m, "--------------------- DELAYED --------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DELAYED],
+ m);
+ seq_printf(m, "-------------------- IMMEDIATE -------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_IMMEDIATE],
+ m);
+ seq_printf(m, "--------------------- DIRECT ---------------------- \n");
+ amdgpu_sa_bo_dump_debug_info(&adev->ib_pools[AMDGPU_IB_POOL_DIRECT], m);
return 0;
-
}
static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 3a67f6c046d4..fe92dcd94d4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
!dma_fence_is_later(updates, (*id)->flushed_updates))
updates = NULL;
- if ((*id)->owner != vm->direct.fence_context ||
+ if ((*id)->owner != vm->immediate.fence_context ||
job->vm_pd_addr != (*id)->pd_gpu_addr ||
updates || !(*id)->last_flush ||
((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
struct dma_fence *flushed;
/* Check all the prerequisites to using this VMID */
- if ((*id)->owner != vm->direct.fence_context)
+ if ((*id)->owner != vm->immediate.fence_context)
continue;
if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -448,7 +448,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
}
id->pd_gpu_addr = job->vm_pd_addr;
- id->owner = vm->direct.fence_context;
+ id->owner = vm->immediate.fence_context;
if (job->vm_needs_flush) {
dma_fence_put(id->last_flush);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 5ed4227f304b..0cc4c67f95f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -260,7 +260,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags);
if (nvec > 0) {
adev->irq.msi_enabled = true;
- dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n");
+ dev_dbg(adev->dev, "using MSI/MSI-X.\n");
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 4981e443a884..47207188c569 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -33,6 +33,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
struct amdgpu_task_info ti;
+ struct amdgpu_device *adev = ring->adev;
memset(&ti, 0, sizeof(struct amdgpu_task_info));
@@ -49,10 +50,13 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
DRM_ERROR("Process information: process %s pid %d thread %s pid %d\n",
ti.process_name, ti.tgid, ti.task_name, ti.pid);
- if (amdgpu_device_should_recover_gpu(ring->adev))
+ if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
- else
+ } else {
drm_sched_suspend_timeout(&ring->sched);
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.tdr_debug = true;
+ }
}
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -87,7 +91,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
}
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job)
+ enum amdgpu_ib_pool_type pool_type,
+ struct amdgpu_job **job)
{
int r;
@@ -95,7 +100,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
if (r)
return r;
- r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
+ r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
if (r)
kfree(*job);
@@ -140,7 +145,6 @@ void amdgpu_job_free(struct amdgpu_job *job)
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
- enum drm_sched_priority priority;
int r;
if (!f)
@@ -152,7 +156,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 3f7b8433d179..81caac9b958a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -38,6 +38,7 @@
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
struct amdgpu_fence;
+enum amdgpu_ib_pool_type;
struct amdgpu_job {
struct drm_sched_job base;
@@ -61,14 +62,12 @@ struct amdgpu_job {
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
-
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
- struct amdgpu_job **job);
-
+ enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
index 5727f00afc8e..d31d65e6b039 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
@@ -144,7 +144,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
const unsigned ib_size_dw = 16;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
index bd9ef9cc86de..5131a0a1bc8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
@@ -43,8 +43,6 @@ struct amdgpu_jpeg {
uint8_t num_jpeg_inst;
struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES];
struct amdgpu_jpeg_reg internal;
- struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES];
- uint32_t num_jpeg_sched;
unsigned harvest_config;
struct delayed_work idle_work;
enum amd_powergating_state cur_state;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fd1dc3236eca..d7e17e34fee1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -183,18 +183,18 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
/* Call ACPI methods: require modeset init
* but failure is not fatal
*/
- if (!r) {
- acpi_status = amdgpu_acpi_init(adev);
- if (acpi_status)
- dev_dbg(&dev->pdev->dev,
- "Error during ACPI methods call\n");
- }
+
+ acpi_status = amdgpu_acpi_init(adev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
if (adev->runpm) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+ /* only need to skip on ATPX */
+ if (amdgpu_device_supports_boco(dev) &&
+ !amdgpu_is_atpx_hybrid())
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
- pm_runtime_set_active(dev->dev);
pm_runtime_allow(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
index 919bd566ba3c..edaac242ff85 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h
@@ -77,7 +77,6 @@ struct amdgpu_nbio_funcs {
u32 *flags);
void (*ih_control)(struct amdgpu_device *adev);
void (*init_registers)(struct amdgpu_device *adev);
- void (*detect_hw_virt)(struct amdgpu_device *adev);
void (*remap_hdp_registers)(struct amdgpu_device *adev);
void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index c687f5415b3f..3d822eba9a5d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -753,7 +753,7 @@ int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow, struct dma_fence **fence)
return amdgpu_copy_buffer(ring, shadow_addr, parent_addr,
amdgpu_bo_size(shadow), NULL, fence,
- true, false);
+ true, false, false);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 5e39ecd8cc28..7d41f7b9a340 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -229,6 +229,17 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+/**
+ * amdgpu_bo_encrypted - test if the BO is encrypted
+ * @bo: pointer to a buffer object
+ *
+ * Return true if the buffer object is encrypted, false otherwise.
+ */
+static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
+{
+ return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
+}
+
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index abe94a55ecad..16596a9ccabe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -154,17 +154,17 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
*
*/
-static ssize_t amdgpu_get_dpm_state(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -189,18 +189,18 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
-static ssize_t amdgpu_set_dpm_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (strncmp("battery", buf, strlen("battery")) == 0)
state = POWER_STATE_TYPE_BATTERY;
@@ -294,17 +294,17 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
*
*/
-static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
enum amd_dpm_forced_level level = 0xff;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -332,10 +332,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
"unknown");
}
-static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
@@ -343,8 +343,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amd_dpm_forced_level current_level = 0xff;
int ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -383,6 +383,15 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
return count;
}
+ if (adev->asic_type == CHIP_RAVEN &&
+ !(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+ if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, false);
+ else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
+ level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, true);
+ }
+
/* profile_exit setting is valid only when current mode is in profile mode */
if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
@@ -436,6 +445,9 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct pp_states_info data;
int i, buf_len, ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
return ret;
@@ -444,8 +456,11 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
ret = smu_get_power_num_states(&adev->smu, &data);
if (ret)
return ret;
- } else if (adev->powerplay.pp_funcs->get_pp_num_states)
+ } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
amdgpu_dpm_get_pp_num_states(adev, &data);
+ } else {
+ memset(&data, 0, sizeof(data));
+ }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -472,8 +487,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -511,8 +526,8 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (adev->pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
@@ -531,8 +546,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
unsigned long idx;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
@@ -589,8 +604,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size, ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -631,8 +646,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int ret = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -681,7 +696,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* default power levels, write "r" (reset) to the file to reset them.
*
*
- * < For Vega20 >
+ * < For Vega20 and newer ASICs >
*
* Reading the file will display:
*
@@ -736,8 +751,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
const char delimiter[3] = {' ', '\n', '\0'};
uint32_t type;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (count > 127)
return -EINVAL;
@@ -828,8 +843,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -870,18 +885,18 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
* the corresponding bit from original ppfeature masks and input the
* new ppfeature masks.
*/
-static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
+static ssize_t amdgpu_set_pp_features(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
uint64_t featuremask;
int ret;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = kstrtou64(buf, 0, &featuremask);
if (ret)
@@ -914,17 +929,17 @@ static ssize_t amdgpu_set_pp_feature_status(struct device *dev,
return count;
}
-static ssize_t amdgpu_get_pp_feature_status(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_pp_features(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -982,8 +997,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1048,8 +1063,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1082,8 +1097,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1112,8 +1127,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
uint32_t mask = 0;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1146,8 +1161,8 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1176,8 +1191,8 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1212,8 +1227,8 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1242,8 +1257,8 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1278,8 +1293,8 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1308,8 +1323,8 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1344,8 +1359,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1374,8 +1389,8 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
int ret;
uint32_t mask = 0;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = amdgpu_read_mask(buf, count, &mask);
if (ret)
@@ -1410,8 +1425,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1438,8 +1453,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_sriov_vf(adev))
- return -EINVAL;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1479,8 +1494,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
uint32_t value = 0;
int ret;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1507,8 +1522,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
int ret;
long int value;
- if (amdgpu_sriov_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = kstrtol(buf, 0, &value);
@@ -1568,8 +1583,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
ssize_t size;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1606,15 +1621,15 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
long int profile_mode = 0;
const char delimiter[3] = {' ', '\n', '\0'};
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
tmp[0] = *(buf);
tmp[1] = '\0';
ret = kstrtol(tmp, 0, &profile_mode);
if (ret)
return -EINVAL;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return -EINVAL;
-
if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
if (count < 2 || count > 127)
return -EINVAL;
@@ -1653,23 +1668,23 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
/**
- * DOC: busy_percent
+ * DOC: gpu_busy_percent
*
* The amdgpu driver provides a sysfs API for reading how busy the GPU
* is as a percentage. The file gpu_busy_percent is used for this.
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
-static ssize_t amdgpu_get_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
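/*
 * Illustrative sketch (not part of the patch): a minimal userspace reader
 * for the sysfs file the DOC block above describes. The card0 path is an
 * assumption; the card index varies per system.
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/drm/card0/device/gpu_busy_percent", "r");
        unsigned int busy;

        if (!f)
                return 1;
        if (fscanf(f, "%u", &busy) == 1)
                printf("GPU busy: %u%%\n", busy);
        fclose(f);
        return 0;
}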
@@ -1696,16 +1711,16 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
* The SMU firmware computes a percentage of load based on the
* aggregate activity level in the IP cores.
*/
-static ssize_t amdgpu_get_memory_busy_percent(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
r = pm_runtime_get_sync(ddev->dev);
if (r < 0)
@@ -1742,11 +1757,17 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- uint64_t count0, count1;
+ uint64_t count0 = 0, count1 = 0;
int ret;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
+ if (adev->flags & AMD_IS_APU)
+ return -ENODATA;
+
+ if (!adev->asic_funcs->get_pcie_usage)
+ return -ENODATA;
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0)
@@ -1778,8 +1799,8 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
if (adev->unique_id)
return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
@@ -1787,57 +1808,185 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
return 0;
}
-static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
-static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
- amdgpu_get_dpm_forced_performance_level,
- amdgpu_set_dpm_forced_performance_level);
-static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
-static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
-static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_force_state,
- amdgpu_set_pp_force_state);
-static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_table,
- amdgpu_set_pp_table);
-static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_sclk,
- amdgpu_set_pp_dpm_sclk);
-static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_mclk,
- amdgpu_set_pp_dpm_mclk);
-static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_socclk,
- amdgpu_set_pp_dpm_socclk);
-static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_fclk,
- amdgpu_set_pp_dpm_fclk);
-static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_dcefclk,
- amdgpu_set_pp_dpm_dcefclk);
-static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_dpm_pcie,
- amdgpu_set_pp_dpm_pcie);
-static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_sclk_od,
- amdgpu_set_pp_sclk_od);
-static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_mclk_od,
- amdgpu_set_pp_mclk_od);
-static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_power_profile_mode,
- amdgpu_set_pp_power_profile_mode);
-static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_od_clk_voltage,
- amdgpu_set_pp_od_clk_voltage);
-static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
- amdgpu_get_busy_percent, NULL);
-static DEVICE_ATTR(mem_busy_percent, S_IRUGO,
- amdgpu_get_memory_busy_percent, NULL);
-static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
-static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR,
- amdgpu_get_pp_feature_status,
- amdgpu_set_pp_feature_status);
-static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL);
+static struct amdgpu_device_attr amdgpu_device_attrs[] = {
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
+};
+
+static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ const char *attr_name = dev_attr->attr.name;
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ enum amd_asic_type asic_type = adev->asic_type;
+
+ if (!(attr->flags & mask)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ return 0;
+ }
+
+#define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
+
+ if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+ if (asic_type < CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+ if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ if (asic_type < CHIP_VEGA20)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+ if (asic_type == CHIP_ARCTURUS)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
+ *states = ATTR_STATE_UNSUPPORTED;
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ *states = ATTR_STATE_SUPPORTED;
+ } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+ if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pcie_bw)) {
+ /* PCIe Perf counters won't work on APU nodes */
+ if (adev->flags & AMD_IS_APU)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(unique_id)) {
+ if (!adev->unique_id)
+ *states = ATTR_STATE_UNSUPPORTED;
+ } else if (DEVICE_ATTR_IS(pp_features)) {
+ if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
+ *states = ATTR_STATE_UNSUPPORTED;
+ }
+
+ if (asic_type == CHIP_ARCTURUS) {
+ /* Arcturus does not support standalone mclk/socclk/fclk level setting */
+ if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+ DEVICE_ATTR_IS(pp_dpm_socclk) ||
+ DEVICE_ATTR_IS(pp_dpm_fclk)) {
+ dev_attr->attr.mode &= ~S_IWUGO;
+ dev_attr->store = NULL;
+ }
+ }
+
+#undef DEVICE_ATTR_IS
+
+ return 0;
+}
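/*
 * Illustrative sketch (not part of the patch): default_attr_update() is only
 * the fallback; a table entry can install its own gate through .attr_update.
 * A hypothetical hook that exposes a file on APUs only:
 */
static int apu_only_attr_update(struct amdgpu_device *adev,
                                struct amdgpu_device_attr *attr,
                                uint32_t mask,
                                enum amdgpu_device_attr_states *states)
{
        if (!(attr->flags & mask) || !(adev->flags & AMD_IS_APU))
                *states = ATTR_STATE_UNSUPPORTED;

        return 0;
}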
+
+
+static int amdgpu_device_attr_create(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attr,
+ uint32_t mask, struct list_head *attr_list)
+{
+ int ret = 0;
+ struct device_attribute *dev_attr = &attr->dev_attr;
+ const char *name = dev_attr->attr.name;
+ enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
+ struct amdgpu_device_attr_entry *attr_entry;
+
+ int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
+
+ BUG_ON(!attr);
+
+ attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
+
+ ret = attr_update(adev, attr, mask, &attr_states);
+ if (ret) {
+ dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
+ name, ret);
+ return ret;
+ }
+
+ if (attr_states == ATTR_STATE_UNSUPPORTED)
+ return 0;
+
+ ret = device_create_file(adev->dev, dev_attr);
+ if (ret) {
+ dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
+ name, ret);
+ return ret;
+ }
+
+ attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
+ if (!attr_entry) {
+ device_remove_file(adev->dev, dev_attr);
+ return -ENOMEM;
+ }
+
+ attr_entry->attr = attr;
+ INIT_LIST_HEAD(&attr_entry->entry);
+
+ list_add_tail(&attr_entry->entry, attr_list);
+
+ return 0;
+}
+
+static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
+{
+ struct device_attribute *dev_attr = &attr->dev_attr;
+
+ device_remove_file(adev->dev, dev_attr);
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+ struct list_head *attr_list);
+
+static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
+ struct amdgpu_device_attr *attrs,
+ uint32_t counts,
+ uint32_t mask,
+ struct list_head *attr_list)
+{
+ int ret = 0;
+ uint32_t i = 0;
+
+ for (i = 0; i < counts; i++) {
+ ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
+ if (ret)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ amdgpu_device_attr_remove_groups(adev, attr_list);
+
+ return ret;
+}
+
+static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
+ struct list_head *attr_list)
+{
+ struct amdgpu_device_attr_entry *entry, *entry_tmp;
+
+ if (list_empty(attr_list))
+ return;
+
+ list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
+ amdgpu_device_attr_remove(adev, entry->attr);
+ list_del(&entry->entry);
+ kfree(entry);
+ }
+}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
@@ -1847,6 +1996,9 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
int channel = to_sensor_dev_attr(attr)->index;
int r, temp = 0, size = sizeof(temp);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (channel >= PP_TEMP_MAX)
return -EINVAL;
@@ -1978,6 +2130,9 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2009,6 +2164,9 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err, ret;
int value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2058,6 +2216,9 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2107,6 +2268,9 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2137,6 +2301,9 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
int err;
u32 speed = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2166,6 +2333,9 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2191,6 +2361,9 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2215,6 +2388,9 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
int err;
u32 rpm = 0;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2244,6 +2420,9 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = pm_runtime_get_sync(adev->ddev->dev);
if (err < 0)
return err;
@@ -2290,6 +2469,9 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
u32 pwm_mode = 0;
int ret;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
ret = pm_runtime_get_sync(adev->ddev->dev);
if (ret < 0)
return ret;
@@ -2322,6 +2504,9 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
int value;
u32 pwm_mode;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
err = kstrtoint(buf, 10, &value);
if (err)
return err;
@@ -2362,6 +2547,9 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
u32 vddgfx;
int r, size = sizeof(vddgfx);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2394,6 +2582,9 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
u32 vddnb;
int r, size = sizeof(vddnb);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
@@ -2431,6 +2622,9 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
int r, size = sizeof(u32);
unsigned uw;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2467,6 +2661,9 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2496,6 +2693,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
ssize_t size;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2526,6 +2726,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
int err;
u32 value;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -2564,6 +2767,9 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
uint32_t sclk;
int r, size = sizeof(sclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -2596,6 +2802,9 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
uint32_t mclk;
int r, size = sizeof(mclk);
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(adev->ddev->dev);
if (r < 0)
return r;
@@ -3238,8 +3447,8 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
int ret;
+ uint32_t mask = 0;
if (adev->pm.sysfs_initialized)
return 0;
@@ -3247,6 +3456,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.dpm_enabled == 0)
return 0;
+ INIT_LIST_HEAD(&adev->pm.pm_attr_list);
+
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev,
hwmon_groups);
@@ -3257,160 +3468,26 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
return ret;
}
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
- if (ret) {
- DRM_ERROR("failed to create device file for dpm state\n");
- return ret;
- }
-
-
- ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
- if (ret) {
- DRM_ERROR("failed to create device file pp_num_states\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_cur_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
- if (ret) {
- DRM_ERROR("failed to create device file pp_force_state\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_table);
- if (ret) {
- DRM_ERROR("failed to create device file pp_table\n");
- return ret;
- }
-
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_sclk\n");
- return ret;
- }
-
- /* Arcturus does not support standalone mclk/socclk/fclk level setting */
- if (adev->asic_type == CHIP_ARCTURUS) {
- dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_mclk.store = NULL;
-
- dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_socclk.store = NULL;
-
- dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO;
- dev_attr_pp_dpm_fclk.store = NULL;
+ switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
+ case SRIOV_VF_MODE_ONE_VF:
+ mask = ATTR_FLAG_ONEVF;
+ break;
+ case SRIOV_VF_MODE_MULTI_VF:
+ mask = 0;
+ break;
+ case SRIOV_VF_MODE_BARE_METAL:
+ default:
+ mask = ATTR_FLAG_MASK_ALL;
+ break;
}
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_mclk\n");
- return ret;
- }
- if (adev->asic_type >= CHIP_VEGA10) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_socclk\n");
- return ret;
- }
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
- return ret;
- }
- }
- }
- if (adev->asic_type >= CHIP_VEGA20) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_fclk\n");
- return ret;
- }
- }
- if (adev->asic_type != CHIP_ARCTURUS) {
- ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (ret) {
- DRM_ERROR("failed to create device file pp_dpm_pcie\n");
- return ret;
- }
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_sclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
- if (ret) {
- DRM_ERROR("failed to create device file pp_mclk_od\n");
- return ret;
- }
- ret = device_create_file(adev->dev,
- &dev_attr_pp_power_profile_mode);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_power_profile_mode\n");
- return ret;
- }
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
- ret = device_create_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_od_clk_voltage\n");
- return ret;
- }
- }
- ret = device_create_file(adev->dev,
- &dev_attr_gpu_busy_percent);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "gpu_busy_level\n");
- return ret;
- }
- /* APU does not have its own dedicated memory */
- if (!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VEGA10)) {
- ret = device_create_file(adev->dev,
- &dev_attr_mem_busy_percent);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "mem_busy_percent\n");
- return ret;
- }
- }
- /* PCIe Perf counters won't work on APU nodes */
- if (!(adev->flags & AMD_IS_APU)) {
- ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
- if (ret) {
- DRM_ERROR("failed to create device file pcie_bw\n");
- return ret;
- }
- }
- if (adev->unique_id)
- ret = device_create_file(adev->dev, &dev_attr_unique_id);
- if (ret) {
- DRM_ERROR("failed to create device file unique_id\n");
+ ret = amdgpu_device_attr_create_groups(adev,
+ amdgpu_device_attrs,
+ ARRAY_SIZE(amdgpu_device_attrs),
+ mask,
+ &adev->pm.pm_attr_list);
+ if (ret)
return ret;
- }
-
- if ((adev->asic_type >= CHIP_VEGA10) &&
- !(adev->flags & AMD_IS_APU)) {
- ret = device_create_file(adev->dev,
- &dev_attr_pp_features);
- if (ret) {
- DRM_ERROR("failed to create device file "
- "pp_features\n");
- return ret;
- }
- }
adev->pm.sysfs_initialized = true;
@@ -3419,51 +3496,13 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
if (adev->pm.dpm_enabled == 0)
return;
if (adev->pm.int_hwmon_dev)
hwmon_device_unregister(adev->pm.int_hwmon_dev);
- device_remove_file(adev->dev, &dev_attr_power_dpm_state);
- device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
-
- device_remove_file(adev->dev, &dev_attr_pp_num_states);
- device_remove_file(adev->dev, &dev_attr_pp_cur_state);
- device_remove_file(adev->dev, &dev_attr_pp_force_state);
- device_remove_file(adev->dev, &dev_attr_pp_table);
-
- device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
- device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
- if (adev->asic_type >= CHIP_VEGA10) {
- device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
- if (adev->asic_type != CHIP_ARCTURUS)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
- }
- if (adev->asic_type != CHIP_ARCTURUS)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
- if (adev->asic_type >= CHIP_VEGA20)
- device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
- device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
- device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
- device_remove_file(adev->dev,
- &dev_attr_pp_power_profile_mode);
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
- device_remove_file(adev->dev,
- &dev_attr_pp_od_clk_voltage);
- device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
- if (!(adev->flags & AMD_IS_APU) &&
- (adev->asic_type != CHIP_VEGA10))
- device_remove_file(adev->dev, &dev_attr_mem_busy_percent);
- if (!(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_pcie_bw);
- if (adev->unique_id)
- device_remove_file(adev->dev, &dev_attr_unique_id);
- if ((adev->asic_type >= CHIP_VEGA10) &&
- !(adev->flags & AMD_IS_APU))
- device_remove_file(adev->dev, &dev_attr_pp_features);
+
+ amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
@@ -3626,6 +3665,9 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
u32 flags = 0;
int r;
+ if (adev->in_gpu_reset)
+ return -EPERM;
+
r = pm_runtime_get_sync(dev->dev);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 5db0ef86e84c..d9ae2b49a402 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -30,6 +30,55 @@ struct cg_flag_name
const char *name;
};
+enum amdgpu_device_attr_flags {
+ ATTR_FLAG_BASIC = (1 << 0),
+ ATTR_FLAG_ONEVF = (1 << 16),
+};
+
+#define ATTR_FLAG_TYPE_MASK (0x0000ffff)
+#define ATTR_FLAG_MODE_MASK (0xffff0000)
+#define ATTR_FLAG_MASK_ALL (0xffffffff)
+
+enum amdgpu_device_attr_states {
+ ATTR_STATE_UNSUPPORTED = 0,
+ ATTR_STATE_SUPPORTED,
+};
+
+struct amdgpu_device_attr {
+ struct device_attribute dev_attr;
+ enum amdgpu_device_attr_flags flags;
+ int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+ uint32_t mask, enum amdgpu_device_attr_states *states);
+
+};
+
+struct amdgpu_device_attr_entry {
+ struct list_head entry;
+ struct amdgpu_device_attr *attr;
+};
+
+#define to_amdgpu_device_attr(_dev_attr) \
+ container_of(_dev_attr, struct amdgpu_device_attr, dev_attr)
+
+#define __AMDGPU_DEVICE_ATTR(_name, _mode, _show, _store, _flags, ...) \
+ { .dev_attr = __ATTR(_name, _mode, _show, _store), \
+ .flags = _flags, \
+ ##__VA_ARGS__, }
+
+#define AMDGPU_DEVICE_ATTR(_name, _mode, _flags, ...) \
+ __AMDGPU_DEVICE_ATTR(_name, _mode, \
+ amdgpu_get_##_name, amdgpu_set_##_name, \
+ _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RW(_name, _flags, ...) \
+ AMDGPU_DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
+ _flags, ##__VA_ARGS__)
+
+#define AMDGPU_DEVICE_ATTR_RO(_name, _flags, ...) \
+ __AMDGPU_DEVICE_ATTR(_name, S_IRUGO, \
+ amdgpu_get_##_name, NULL, \
+ _flags, ##__VA_ARGS__)
+
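/*
 * Illustrative note (not part of the patch): a table entry such as
 *
 *      AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF)
 *
 * expands to a struct amdgpu_device_attr whose .dev_attr is
 * __ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_sclk,
 * amdgpu_set_pp_dpm_sclk), i.e. the show/store callbacks are derived from
 * the attribute name. The trailing varargs attach a per-attribute hook,
 * e.g. .attr_update = my_attr_update, where my_attr_update is hypothetical.
 */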
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev);
int amdgpu_pm_virt_sysfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index deaa26808841..7301fdcfb8bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -37,11 +37,11 @@
#include "amdgpu_ras.h"
-static void psp_set_funcs(struct amdgpu_device *adev);
-
static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
+static int psp_load_smu_fw(struct psp_context *psp);
+
/*
* Due to DF Cstate management centralized to PMFW, the firmware
* loading sequence will be updated as below:
@@ -80,8 +80,6 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- psp_set_funcs(adev);
-
switch (adev->asic_type) {
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -201,6 +199,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int index;
int timeout = 2000;
bool ras_intr = false;
+ bool skip_unsupport = false;
mutex_lock(&psp->mutex);
@@ -232,6 +231,9 @@ psp_cmd_submit_buf(struct psp_context *psp,
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
}
+ /* We allow TEE_ERROR_NOT_SUPPORTED for VMR command in SRIOV */
+ skip_unsupport = (psp->cmd_buf_mem->resp.status == 0xffff000a) && amdgpu_sriov_vf(psp->adev);
+
/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
* doesn't write 0 to that field.
@@ -239,7 +241,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
+ if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
@@ -268,7 +270,7 @@ static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
struct psp_gfx_cmd_resp *cmd,
uint64_t tmr_mc, uint32_t size)
{
- if (psp_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(psp->adev))
cmd->cmd_id = GFX_CMD_ID_SETUP_VMR;
else
cmd->cmd_id = GFX_CMD_ID_SETUP_TMR;
@@ -662,6 +664,121 @@ int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
+
+ /* Invoke xgmi ta to get hive id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ int ret;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
+
+ /* Invoke xgmi ta to get the node id */
+ ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
+ if (ret)
+ return ret;
+
+ *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
+
+ return 0;
+}
+
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
+ int i;
+ int ret;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ /* Fill in the shared memory with topology information as input */
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to get the topology information */
+ ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
+ if (ret)
+ return ret;
+
+ /* Read the output topology information from the shared memory */
+ topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
+ topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
+ for (i = 0; i < topology->num_nodes; i++) {
+ topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+ topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+ topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
+ topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+ }
+
+ return 0;
+}
+
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology)
+{
+ struct ta_xgmi_shared_memory *xgmi_cmd;
+ struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
+ int i;
+
+ if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
+ return -EINVAL;
+
+ xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+ memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+
+ topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
+ xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
+ topology_info_input->num_nodes = number_devices;
+
+ for (i = 0; i < topology_info_input->num_nodes; i++) {
+ topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
+ topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
+ topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
+ }
+
+ /* Invoke xgmi ta to set topology information */
+ return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
+}
+
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
@@ -744,13 +861,40 @@ static int psp_ras_unload(struct psp_context *psp)
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+
/*
* TODO: bypass the loading in sriov for now
*/
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (ras_cmd->if_version > RAS_TA_HOST_IF_VER) {
+ DRM_WARN("RAS: Unsupported Interface\n");
+ return -EINVAL;
+ }
+
+ if (!ret) {
+ if (ras_cmd->ras_out_message.flags.err_inject_switch_disable_flag) {
+ dev_warn(psp->adev->dev, "ECC switch disabled\n");
+
+ ras_cmd->ras_status = TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE;
+ } else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag) {
+ dev_warn(psp->adev->dev,
+ "RAS internal register access blocked\n");
+ }
+ }
+
+ return ret;
}
int psp_ras_enable_features(struct psp_context *psp,
@@ -834,6 +978,33 @@ static int psp_ras_initialize(struct psp_context *psp)
return 0;
}
+
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+ ras_cmd->ras_in_message.trigger_error = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ /* If err_event_athub occurs, the error injection was successful;
+ * however, the return status from the TA is no longer reliable.
+ */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
+ return ras_cmd->ras_status;
+}
// ras end
// HDCP start
@@ -884,6 +1055,7 @@ static int psp_hdcp_load(struct psp_context *psp)
if (!ret) {
psp->hdcp_context.hdcp_initialized = true;
psp->hdcp_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->hdcp_context.mutex);
}
kfree(cmd);
@@ -1029,6 +1201,7 @@ static int psp_dtm_load(struct psp_context *psp)
if (!ret) {
psp->dtm_context.dtm_initialized = true;
psp->dtm_context.session_id = cmd->resp.session_id;
+ mutex_init(&psp->dtm_context.mutex);
}
kfree(cmd);
@@ -1169,16 +1342,20 @@ static int psp_hw_start(struct psp_context *psp)
}
/*
- * For those ASICs with DF Cstate management centralized
+ * For ASICs with DF Cstate management centralized
* to PMFW, TMR setup should be performed after PMFW
* loaded and before other non-psp firmware loaded.
*/
- if (!psp->pmfw_centralized_cstate_management) {
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
return ret;
- }
+ }
+
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
}
return 0;
@@ -1355,7 +1532,7 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
}
static int psp_execute_np_fw_load(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode)
+ struct amdgpu_firmware_info *ucode)
{
int ret = 0;
@@ -1369,64 +1546,96 @@ static int psp_execute_np_fw_load(struct psp_context *psp,
return ret;
}
+static int psp_load_smu_fw(struct psp_context *psp)
+{
+ int ret;
+ struct amdgpu_device *adev = psp->adev;
+ struct amdgpu_firmware_info *ucode =
+ &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ struct amdgpu_ras *ras = psp->ras.ras;
+
+ if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
+ return 0;
+
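+ /* On a RAS-capable GPU reset, ask MP1 to unload before reloading SMU firmware. */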
+ if (adev->in_gpu_reset && ras && ras->supported) {
+ ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
+ if (ret)
+ DRM_WARN("Failed to set MP1 state to prepare for reload\n");
+ }
+
+ ret = psp_execute_np_fw_load(psp, ucode);
+
+ if (ret)
+ DRM_ERROR("PSP load smu failed!\n");
+
+ return ret;
+}
+
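+/* Return true when PSP must not load this ucode directly: missing image, SRIOV restrictions, or firmware handled by autoload/quirks. */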
+static bool fw_load_skip_check(struct psp_context *psp,
+ struct amdgpu_firmware_info *ucode)
+{
+ if (!ucode->fw)
+ return true;
+
+ if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
+ return true;
+
+ if (amdgpu_sriov_vf(psp->adev) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
+ || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
+ /* skip ucode loading in SRIOV VF */
+ return true;
+
+ if (psp->autoload_supported &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
+ /* skip mec JT when autoload is enabled */
+ return true;
+
+ return false;
+}
+
static int psp_np_fw_load(struct psp_context *psp)
{
int i, ret;
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported ||
- psp->pmfw_centralized_cstate_management) {
- ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw || amdgpu_sriov_vf(adev))
- goto out;
-
- ret = psp_execute_np_fw_load(psp, ucode);
+ if (psp->autoload_supported &&
+ !psp->pmfw_centralized_cstate_management) {
+ ret = psp_load_smu_fw(psp);
if (ret)
return ret;
}
- if (psp->pmfw_centralized_cstate_management) {
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
- }
- }
-
-out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
- if (!ucode->fw)
- continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) ||
- psp->autoload_supported ||
- psp->pmfw_centralized_cstate_management))
- continue;
-
- if (amdgpu_sriov_vf(adev) &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_SDMA0
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA4
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6
- || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
- || ucode->ucode_id == AMDGPU_UCODE_ID_SMC))
- /*skip ucode loading in SRIOV VF */
+ !fw_load_skip_check(psp, ucode)) {
+ ret = psp_load_smu_fw(psp);
+ if (ret)
+ return ret;
continue;
+ }
- if (psp->autoload_supported &&
- (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT))
- /* skip mec JT when autoload is enabled */
+ if (fw_load_skip_check(psp, ucode))
continue;
psp_print_fw_hdr(psp, ucode);
@@ -1438,17 +1647,12 @@ out:
/* Start rlc autoload after psp received all the gfx firmware */
if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ?
AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) {
- ret = psp_rlc_autoload(psp);
+ ret = psp_rlc_autoload_start(psp);
if (ret) {
DRM_ERROR("Failed to start rlc autoload\n");
return ret;
}
}
-#if 0
- /* check if firmware loaded sucessfully */
- if (!amdgpu_psp_check_fw_loading_status(adev, i))
- return -EINVAL;
-#endif
}
return 0;
@@ -1806,19 +2010,110 @@ int psp_ring_cmd_submit(struct psp_context *psp,
return 0;
}
-static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
- enum AMDGPU_UCODE_ID ucode_type)
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name)
{
- struct amdgpu_firmware_info *ucode = NULL;
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[30];
+ const struct psp_firmware_header_v1_0 *asd_hdr;
+ int err = 0;
- if (!adev->firmware.fw_size)
- return false;
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for asd microcode\n");
+ return -EINVAL;
+ }
- ucode = &adev->firmware.ucode[ucode_type];
- if (!ucode->fw || !ucode->ucode_size)
- return false;
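+ /* ASD ships as amdgpu/<chip>_asd.bin; fetch it through the firmware loader and validate the header. */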
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
+ err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ if (err)
+ goto out;
+
+ asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
+ adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
+ adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+ le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+ return 0;
+out:
+ dev_err(adev->dev, "fail to initialize asd microcode\n");
+ release_firmware(adev->psp.asd_fw);
+ adev->psp.asd_fw = NULL;
+ return err;
+}
+
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name)
+{
+ struct amdgpu_device *adev = psp->adev;
+ char fw_name[30];
+ const struct psp_firmware_header_v1_0 *sos_hdr;
+ const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
+ const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
+ int err = 0;
+
+ if (!chip_name) {
+ dev_err(adev->dev, "invalid chip name for sos microcode\n");
+ return -EINVAL;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
+ err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ if (err)
+ goto out;
+
+ sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
+ amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
+
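+ /* v1 headers: minor version 1 adds TOC and KDB images, minor version 2 carries a KDB image only. */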
+ switch (sos_hdr->header.header_version_major) {
+ case 1:
+ adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
+ adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
+ adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
+ adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
+ adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
+ le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
+ adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr->sos_offset_bytes);
+ if (sos_hdr->header.header_version_minor == 1) {
+ sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
+ adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
+ adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
+ }
+ if (sos_hdr->header.header_version_minor == 2) {
+ sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
+ adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
+ adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
+ le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
+ }
+ break;
+ default:
+ dev_err(adev->dev,
+ "unsupported psp sos firmware\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ return 0;
+out:
+ dev_err(adev->dev,
+ "failed to init sos firmware\n");
+ release_firmware(adev->psp.sos_fw);
+ adev->psp.sos_fw = NULL;
- return psp_compare_sram_data(&adev->psp, ucode, ucode_type);
+ return err;
}
static int psp_set_clockgating_state(void *handle,
@@ -1957,16 +2252,6 @@ static void psp_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
}
-static const struct amdgpu_psp_funcs psp_funcs = {
- .check_fw_loading_status = psp_check_fw_loading_status,
-};
-
-static void psp_set_funcs(struct amdgpu_device *adev)
-{
- if (NULL == adev->firmware.funcs)
- adev->firmware.funcs = &psp_funcs;
-}
-
const struct amdgpu_ip_block_version psp_v3_1_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_PSP,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 297435c0c7c1..2a56ad996d83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -93,22 +93,8 @@ struct psp_funcs
enum psp_ring_type ring_type);
int (*ring_destroy)(struct psp_context *psp,
enum psp_ring_type ring_type);
- bool (*compare_sram_data)(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type);
bool (*smu_reload_quirk)(struct psp_context *psp);
int (*mode1_reset)(struct psp_context *psp);
- int (*xgmi_get_node_id)(struct psp_context *psp, uint64_t *node_id);
- int (*xgmi_get_hive_id)(struct psp_context *psp, uint64_t *hive_id);
- int (*xgmi_get_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
- int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
- struct psp_xgmi_topology_info *topology);
- bool (*support_vmr_ring)(struct psp_context *psp);
- int (*ras_trigger_error)(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info);
- int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
- int (*rlc_autoload_start)(struct psp_context *psp);
int (*mem_training_init)(struct psp_context *psp);
void (*mem_training_fini)(struct psp_context *psp);
int (*mem_training)(struct psp_context *psp, uint32_t ops);
@@ -161,6 +147,7 @@ struct psp_hdcp_context {
struct amdgpu_bo *hdcp_shared_bo;
uint64_t hdcp_shared_mc_addr;
void *hdcp_shared_buf;
+ struct mutex mutex;
};
struct psp_dtm_context {
@@ -169,6 +156,7 @@ struct psp_dtm_context {
struct amdgpu_bo *dtm_shared_bo;
uint64_t dtm_shared_mc_addr;
void *dtm_shared_buf;
+ struct mutex mutex;
};
#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942
@@ -306,8 +294,6 @@ struct amdgpu_psp_funcs {
#define psp_ring_create(psp, type) (psp)->funcs->ring_create((psp), (type))
#define psp_ring_stop(psp, type) (psp)->funcs->ring_stop((psp), (type))
#define psp_ring_destroy(psp, type) ((psp)->funcs->ring_destroy((psp), (type)))
-#define psp_compare_sram_data(psp, ucode, type) \
- (psp)->funcs->compare_sram_data((psp), (ucode), (type))
#define psp_init_microcode(psp) \
((psp)->funcs->init_microcode ? (psp)->funcs->init_microcode((psp)) : 0)
#define psp_bootloader_load_kdb(psp) \
@@ -318,22 +304,8 @@ struct amdgpu_psp_funcs {
((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
((psp)->funcs->smu_reload_quirk ? (psp)->funcs->smu_reload_quirk((psp)) : false)
-#define psp_support_vmr_ring(psp) \
- ((psp)->funcs->support_vmr_ring ? (psp)->funcs->support_vmr_ring((psp)) : false)
#define psp_mode1_reset(psp) \
((psp)->funcs->mode1_reset ? (psp)->funcs->mode1_reset((psp)) : false)
-#define psp_xgmi_get_node_id(psp, node_id) \
- ((psp)->funcs->xgmi_get_node_id ? (psp)->funcs->xgmi_get_node_id((psp), (node_id)) : -EINVAL)
-#define psp_xgmi_get_hive_id(psp, hive_id) \
- ((psp)->funcs->xgmi_get_hive_id ? (psp)->funcs->xgmi_get_hive_id((psp), (hive_id)) : -EINVAL)
-#define psp_xgmi_get_topology_info(psp, num_device, topology) \
- ((psp)->funcs->xgmi_get_topology_info ? \
- (psp)->funcs->xgmi_get_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_xgmi_set_topology_info(psp, num_device, topology) \
- ((psp)->funcs->xgmi_set_topology_info ? \
- (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL)
-#define psp_rlc_autoload(psp) \
- ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0)
#define psp_mem_training_init(psp) \
((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0)
#define psp_mem_training_fini(psp) \
@@ -341,15 +313,6 @@ struct amdgpu_psp_funcs {
#define psp_mem_training(psp, ops) \
((psp)->funcs->mem_training ? (psp)->funcs->mem_training((psp), (ops)) : 0)
-#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
-
-#define psp_ras_trigger_error(psp, info) \
- ((psp)->funcs->ras_trigger_error ? \
- (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
-#define psp_ras_cure_posion(psp, addr) \
- ((psp)->funcs->ras_cure_posion ? \
- (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
-
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
@@ -377,10 +340,21 @@ int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
int psp_xgmi_initialize(struct psp_context *psp);
int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
+int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
+int psp_xgmi_get_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
+int psp_xgmi_set_topology_info(struct psp_context *psp,
+ int number_devices,
+ struct psp_xgmi_topology_info *topology);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_enable_features(struct psp_context *psp,
union ta_ras_cmd_input *info, bool enable);
+int psp_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info);
+
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
@@ -393,4 +367,8 @@ int psp_ring_cmd_submit(struct psp_context *psp,
uint64_t cmd_buf_mc_addr,
uint64_t fence_mc_addr,
int index);
+int psp_init_asd_microcode(struct psp_context *psp,
+ const char *chip_name);
+int psp_init_sos_microcode(struct psp_context *psp,
+ const char *chip_name);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index ab379b44679c..50fe08bf2f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -80,6 +80,20 @@ atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ amdgpu_ras_get_context(adev)->error_query_ready = ready;
+}
+
+bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev)
+{
+ if (adev && amdgpu_ras_get_context(adev))
+ return amdgpu_ras_get_context(adev)->error_query_ready;
+
+ return false;
+}
+
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
@@ -281,8 +295,9 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
struct ras_debug_if data;
int ret = 0;
- if (amdgpu_ras_intr_triggered()) {
- DRM_WARN("RAS WARN: error injection currently inaccessible\n");
+ if (!amdgpu_ras_get_error_query_ready(adev)) {
+ dev_warn(adev->dev, "RAS WARN: error injection "
+ "currently inaccessible\n");
return size;
}
@@ -310,7 +325,8 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *
/* umc ce/ue error injection for a bad page is not allowed */
if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
amdgpu_ras_check_bad_page(adev, data.inject.address)) {
- DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n",
+ dev_warn(adev->dev, "RAS WARN: 0x%llx has been marked "
+ "as bad before error injection!\n",
data.inject.address);
break;
}
@@ -399,7 +415,7 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
.head = obj->head,
};
- if (amdgpu_ras_intr_triggered())
+ if (!amdgpu_ras_get_error_query_ready(obj->adev))
return snprintf(buf, PAGE_SIZE,
"Query currently inaccessible\n");
@@ -486,6 +502,29 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
}
/* obj end */
+void amdgpu_ras_parse_status_code(struct amdgpu_device *adev,
+ const char *invoke_type,
+ const char *block_name,
+ enum ta_ras_status ret)
+{
+ switch (ret) {
+ case TA_RAS_STATUS__SUCCESS:
+ return;
+ case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
+ dev_warn(adev->dev,
+ "RAS WARN: %s %s currently unavailable\n",
+ invoke_type,
+ block_name);
+ break;
+ default:
+ dev_err(adev->dev,
+ "RAS ERROR: %s %s error failed ret 0x%X\n",
+ invoke_type,
+ block_name,
+ ret);
+ }
+}
+
/* feature ctl begin */
static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
struct ras_common_if *head)
@@ -549,19 +588,23 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
struct ras_common_if *head, bool enable)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- union ta_ras_cmd_input info;
+ union ta_ras_cmd_input *info;
int ret;
if (!con)
return -EINVAL;
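+ /* Allocate the TA command union on the heap to keep it off the kernel stack. */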
+ info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
if (!enable) {
- info.disable_features = (struct ta_ras_disable_features_input) {
+ info->disable_features = (struct ta_ras_disable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
} else {
- info.enable_features = (struct ta_ras_enable_features_input) {
+ info->enable_features = (struct ta_ras_enable_features_input) {
.block_id = amdgpu_ras_block_to_ta(head->block),
.error_type = amdgpu_ras_error_to_ta(head->type),
};
@@ -570,26 +613,33 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
/* Are we already in the state we are going to set? */
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
- return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
+ ret = 0;
+ goto out;
+ }
if (!amdgpu_ras_intr_triggered()) {
- ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
- DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
- enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ (enum ta_ras_status)ret);
if (ret == TA_RAS_STATUS__RESET_NEEDED)
- return -EAGAIN;
- return -EINVAL;
+ ret = -EAGAIN;
+ else
+ ret = -EINVAL;
+
+ goto out;
}
}
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
-
- return 0;
+ ret = 0;
+out:
+ kfree(info);
+ return ret;
}
/* Only used in device probe stage and called only once. */
@@ -618,7 +668,8 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (ret == -EINVAL) {
ret = __amdgpu_ras_feature_enable(adev, head, 1);
if (!ret)
- DRM_INFO("RAS INFO: %s setup object\n",
+ dev_info(adev->dev,
+ "RAS INFO: %s setup object\n",
ras_block_str(head->block));
}
} else {
@@ -744,17 +795,48 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
- dev_info(adev->dev, "%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld correctable hardware errors "
+ "detected in %s block, no user "
+ "action is needed.\n",
+ obj->err_data.ce_count,
+ ras_block_str(info->head.block));
}
if (err_data.ue_count) {
- dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, ras_block_str(info->head.block));
+ dev_info(adev->dev, "%ld uncorrectable hardware errors "
+ "detected in %s block\n",
+ obj->err_data.ue_count,
+ ras_block_str(info->head.block));
}
return 0;
}
+/* Trigger XGMI/WAFL error */
+int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev,
+ struct ta_ras_trigger_error_input *block_info)
+{
+ int ret;
+
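+ /* Keep DF out of C-states and block XGMI power-down while the TA performs the injection. */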
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ dev_warn(adev->dev, "Failed to disallow df cstate");
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, false))
+ dev_warn(adev->dev, "Failed to disallow XGMI power down");
+
+ ret = psp_ras_trigger_error(&adev->psp, block_info);
+
+ if (amdgpu_ras_intr_triggered())
+ return ret;
+
+ if (amdgpu_dpm_allow_xgmi_power_down(adev, true))
+ dev_warn(adev->dev, "Failed to allow XGMI power down");
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "Failed to allow df cstate");
+
+ return ret;
+}
+
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -788,20 +870,22 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
case AMDGPU_RAS_BLOCK__UMC:
case AMDGPU_RAS_BLOCK__MMHUB:
- case AMDGPU_RAS_BLOCK__XGMI_WAFL:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ ret = amdgpu_ras_error_inject_xgmi(adev, &block_info);
+ break;
default:
- DRM_INFO("%s error injection is not supported yet\n",
+ dev_info(adev->dev, "%s error injection is not supported yet\n",
ras_block_str(info->head.block));
ret = -EINVAL;
}
- if (ret)
- DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
- ras_block_str(info->head.block),
- ret);
+ amdgpu_ras_parse_status_code(adev,
+ "inject",
+ ras_block_str(info->head.block),
+ (enum ta_ras_status)ret);
return ret;
}
@@ -1430,9 +1514,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
/* Build list of devices to query RAS related errors */
 if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
 device_list_handle = &hive->device_list;
 } else {
+ INIT_LIST_HEAD(&device_list);
list_add_tail(&adev->gmc.xgmi.head, &device_list);
device_list_handle = &device_list;
}
@@ -1535,7 +1620,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
&data->bps[control->num_recs],
true,
save_count)) {
- DRM_ERROR("Failed to save EEPROM table data!");
+ dev_err(adev->dev, "Failed to save EEPROM table data!");
return -EIO;
}
@@ -1563,7 +1648,7 @@ static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
if (amdgpu_ras_eeprom_process_recods(control, bps, false,
control->num_recs)) {
- DRM_ERROR("Failed to load EEPROM table records!");
+ dev_err(adev->dev, "Failed to load EEPROM table records!");
ret = -EIO;
goto out;
}
@@ -1637,7 +1722,8 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL))
- DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp);
+ dev_warn(adev->dev, "RAS WARN: reserve vram for "
+ "retired page %llx fail\n", bp);
data->bps_bo[i] = bo;
data->last_reserved = i + 1;
@@ -1725,7 +1811,7 @@ free:
kfree(*data);
con->eh_data = NULL;
out:
- DRM_WARN("Failed to initialize ras recovery!\n");
+ dev_warn(adev->dev, "Failed to initialize ras recovery!\n");
return ret;
}
@@ -1787,18 +1873,18 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
return;
if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
- DRM_INFO("HBM ECC is active.\n");
+ dev_info(adev->dev, "HBM ECC is active.\n");
*hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- DRM_INFO("HBM ECC is not presented.\n");
+ dev_info(adev->dev, "HBM ECC is not presented.\n");
if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
- DRM_INFO("SRAM ECC is active.\n");
+ dev_info(adev->dev, "SRAM ECC is active.\n");
*hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
1 << AMDGPU_RAS_BLOCK__DF);
} else
- DRM_INFO("SRAM ECC is not presented.\n");
+ dev_info(adev->dev, "SRAM ECC is not presented.\n");
/* hw_supported needs to be aligned with RAS block mask. */
*hw_supported &= AMDGPU_RAS_BLOCK_MASK;
@@ -1855,7 +1941,7 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
if (amdgpu_ras_fs_init(adev))
goto fs_out;
- DRM_INFO("RAS INFO: ras initialized successfully, "
+ dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
"hardware ability[%x] ras_mask[%x]\n",
con->hw_supported, con->supported);
return 0;
@@ -2037,7 +2123,8 @@ void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev)
return;
if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
- DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n");
+ dev_info(adev->dev, "uncorrectable hardware error"
+ "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index 55c3eceb390d..e7df5d8429f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -334,6 +334,8 @@ struct amdgpu_ras {
uint32_t flags;
bool reboot;
struct amdgpu_ras_eeprom_control eeprom_control;
+
+ bool error_query_ready;
};
struct ras_fs_data {
@@ -629,4 +631,6 @@ static inline void amdgpu_ras_intr_cleared(void)
void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev);
+void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index a7e1d0425ed0..13ea8ebc421c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -162,11 +162,13 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
* Returns 0 on success, error on failure.
*/
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned max_dw, struct amdgpu_irq_src *irq_src,
- unsigned irq_type)
+ unsigned int max_dw, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int hw_prio)
{
int r, i;
int sched_hw_submission = amdgpu_sched_hw_submission;
+ u32 *num_sched;
+ u32 hw_ip;
/* Set the hw submission limit higher for KIQ because
* it's used for a number of gfx/compute tasks by both
@@ -258,6 +260,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->priority = DRM_SCHED_PRIORITY_NORMAL;
mutex_init(&ring->priority_mutex);
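+ /* Register this ring's scheduler in the per-HWIP, per-priority table unless the ring opts out. */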
+ if (!ring->no_scheduler) {
+ hw_ip = ring->funcs->type;
+ num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
+ adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
+ &ring->sched;
+ }
+
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 9a443013d70d..be218754629a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -30,11 +30,15 @@
/* max number of rings */
#define AMDGPU_MAX_RINGS 28
+#define AMDGPU_MAX_HWIP_RINGS 8
#define AMDGPU_MAX_GFX_RINGS 2
#define AMDGPU_MAX_COMPUTE_RINGS 8
#define AMDGPU_MAX_VCE_RINGS 3
#define AMDGPU_MAX_UVD_ENC_RINGS 2
+#define AMDGPU_RING_PRIO_DEFAULT 1
+#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX
+
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
#define AMDGPU_FENCE_OWNER_VM ((void *)1ul)
@@ -46,17 +50,30 @@
#define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
+#define AMDGPU_IB_POOL_SIZE (1024 * 1024)
+
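+/* Ring types alias the UAPI HW IP ids so rings can index IP-keyed tables directly. */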
enum amdgpu_ring_type {
- AMDGPU_RING_TYPE_GFX,
- AMDGPU_RING_TYPE_COMPUTE,
- AMDGPU_RING_TYPE_SDMA,
- AMDGPU_RING_TYPE_UVD,
- AMDGPU_RING_TYPE_VCE,
- AMDGPU_RING_TYPE_KIQ,
- AMDGPU_RING_TYPE_UVD_ENC,
- AMDGPU_RING_TYPE_VCN_DEC,
- AMDGPU_RING_TYPE_VCN_ENC,
- AMDGPU_RING_TYPE_VCN_JPEG
+ AMDGPU_RING_TYPE_GFX = AMDGPU_HW_IP_GFX,
+ AMDGPU_RING_TYPE_COMPUTE = AMDGPU_HW_IP_COMPUTE,
+ AMDGPU_RING_TYPE_SDMA = AMDGPU_HW_IP_DMA,
+ AMDGPU_RING_TYPE_UVD = AMDGPU_HW_IP_UVD,
+ AMDGPU_RING_TYPE_VCE = AMDGPU_HW_IP_VCE,
+ AMDGPU_RING_TYPE_UVD_ENC = AMDGPU_HW_IP_UVD_ENC,
+ AMDGPU_RING_TYPE_VCN_DEC = AMDGPU_HW_IP_VCN_DEC,
+ AMDGPU_RING_TYPE_VCN_ENC = AMDGPU_HW_IP_VCN_ENC,
+ AMDGPU_RING_TYPE_VCN_JPEG = AMDGPU_HW_IP_VCN_JPEG,
+ AMDGPU_RING_TYPE_KIQ
+};
+
+enum amdgpu_ib_pool_type {
+ /* Normal submissions to the top of the pipeline. */
+ AMDGPU_IB_POOL_DELAYED,
+ /* Immediate submissions to the bottom of the pipeline. */
+ AMDGPU_IB_POOL_IMMEDIATE,
+ /* Direct submission to the ring buffer during init and reset. */
+ AMDGPU_IB_POOL_DIRECT,
+
+ AMDGPU_IB_POOL_MAX
};
struct amdgpu_device;
@@ -65,6 +82,11 @@ struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
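+/* Set of scheduler instances for one HW IP type at one priority level. */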
+struct amdgpu_sched {
+ u32 num_scheds;
+ struct drm_gpu_scheduler *sched[AMDGPU_MAX_HWIP_RINGS];
+};
+
/*
* Fences.
*/
@@ -96,7 +118,8 @@ void amdgpu_fence_driver_suspend(struct amdgpu_device *adev);
void amdgpu_fence_driver_resume(struct amdgpu_device *adev);
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
unsigned flags);
-int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s);
+int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
+ uint32_t timeout);
bool amdgpu_fence_process(struct amdgpu_ring *ring);
int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
@@ -159,17 +182,20 @@ struct amdgpu_ring_funcs {
void (*end_use)(struct amdgpu_ring *ring);
void (*emit_switch_buffer) (struct amdgpu_ring *ring);
void (*emit_cntxcntl) (struct amdgpu_ring *ring, uint32_t flags);
- void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg);
+ void (*emit_rreg)(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs);
void (*emit_wreg)(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
void (*emit_reg_wait)(struct amdgpu_ring *ring, uint32_t reg,
uint32_t val, uint32_t mask);
void (*emit_reg_write_reg_wait)(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
- void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
+ void (*emit_frame_cntl)(struct amdgpu_ring *ring, bool start,
+ bool secure);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
+ void (*emit_mem_sync)(struct amdgpu_ring *ring);
};
struct amdgpu_ring {
@@ -214,12 +240,12 @@ struct amdgpu_ring {
unsigned vm_inv_eng;
struct dma_fence *vmid_wait;
bool has_compute_vm_bug;
+ bool no_scheduler;
atomic_t num_jobs[DRM_SCHED_PRIORITY_MAX];
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
- bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -241,11 +267,11 @@ struct amdgpu_ring {
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
-#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
+#define amdgpu_ring_emit_rreg(r, d, o) (r)->funcs->emit_rreg((r), (d), (o))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
-#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
+#define amdgpu_ring_emit_frame_cntl(r, b, s) (r)->funcs->emit_frame_cntl((r), (b), (s))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
@@ -257,8 +283,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
- unsigned ring_size, struct amdgpu_irq_src *irq_src,
- unsigned irq_type);
+ unsigned int ring_size, struct amdgpu_irq_src *irq_src,
+ unsigned int irq_type, unsigned int prio);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
uint32_t reg0, uint32_t val0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 4b352206354b..e5b8fb8e75c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -61,8 +61,6 @@ struct amdgpu_sdma_ras_funcs {
struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
- struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES];
- uint32_t num_sdma_sched;
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
struct amdgpu_irq_src ecc_irq;
@@ -91,7 +89,8 @@ struct amdgpu_buffer_funcs {
/* dst addr in bytes */
uint64_t dst_offset,
/* number of byte to transfer */
- uint32_t byte_count);
+ uint32_t byte_count,
+ bool tmz);
/* maximum bytes in a single operation */
uint32_t fill_max_bytes;
@@ -109,7 +108,7 @@ struct amdgpu_buffer_funcs {
uint32_t byte_count);
};
-#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
struct amdgpu_sdma_instance *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index b86392253696..b87ca171986a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -249,6 +249,11 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
+ /* Never sync to VM updates either. */
+ if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ continue;
+
/* Ignore fences depending on the sync mode */
switch (mode) {
case AMDGPU_SYNC_ALWAYS:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index b158230af8db..2f4d5ca9894f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -44,7 +44,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE*64*1024;
+ n = adev->gmc.gart_size - AMDGPU_IB_POOL_SIZE;
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
if (adev->rings[i])
n -= adev->rings[i]->ring_size;
@@ -124,7 +124,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(gtt_obj[i]);
r = amdgpu_copy_buffer(ring, gart_addr, vram_addr,
- size, NULL, &fence, false, false);
+ size, NULL, &fence, false, false, false);
if (r) {
DRM_ERROR("Failed GTT->VRAM copy %d\n", i);
@@ -170,7 +170,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
amdgpu_bo_kunmap(vram_obj);
r = amdgpu_copy_buffer(ring, vram_addr, gart_addr,
- size, NULL, &fence, false, false);
+ size, NULL, &fence, false, false, false);
if (r) {
DRM_ERROR("Failed VRAM->GTT copy %d\n", i);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 63e734a125fb..5da20fc166d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -35,7 +35,7 @@
#define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
(unsigned long)__entry->value)
);
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
TP_ARGS(did, reg, value),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 6309ff72bd78..e59c01a83dac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -62,11 +62,6 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, unsigned num_pages,
- uint64_t offset, unsigned window,
- struct amdgpu_ring *ring,
- uint64_t *addr);
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
@@ -277,7 +272,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
*
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
- unsigned long *offset)
+ uint64_t *offset)
{
struct drm_mm_node *mm_node = mem->mm_node;
@@ -289,91 +284,191 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
}
/**
+ * amdgpu_ttm_map_buffer - Map memory into the GART windows
+ * @bo: buffer object to map
+ * @mem: memory object to map
+ * @mm_node: drm_mm node object to map
+ * @num_pages: number of pages to map
+ * @offset: offset into @mm_node where to start
+ * @window: which GART window to use
+ * @ring: DMA ring to use for the copy
+ * @tmz: if we should set up a TMZ-enabled mapping
+ * @addr: resulting address inside the MC address space
+ *
+ * Set up one of the GART windows to access a specific piece of memory, or return
+ * the physical address for local memory.
+ */
+static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ struct drm_mm_node *mm_node,
+ unsigned num_pages, uint64_t offset,
+ unsigned window, struct amdgpu_ring *ring,
+ bool tmz, uint64_t *addr)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_job *job;
+ unsigned num_dw, num_bytes;
+ struct dma_fence *fence;
+ uint64_t src_addr, dst_addr;
+ void *cpu_addr;
+ uint64_t flags;
+ unsigned int i;
+ int r;
+
+ BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
+ AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+
+ /* Map only what can't be accessed directly */
+ if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
+ *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+ return 0;
+ }
+
+ *addr = adev->gmc.gart_start;
+ *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE;
+ *addr += offset & ~PAGE_MASK;
+
+ num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
+ num_bytes = num_pages * 8;
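+ /* One 8-byte GART PTE per page; the PTEs ride in the same IB right after the copy packet. */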
+
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+ AMDGPU_IB_POOL_DELAYED, &job);
+ if (r)
+ return r;
+
+ src_addr = num_dw * 4;
+ src_addr += job->ibs[0].gpu_addr;
+
+ dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
+ dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
+ amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
+ dst_addr, num_bytes, false);
+
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+ WARN_ON(job->ibs[0].length_dw > num_dw);
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, mem);
+ if (tmz)
+ flags |= AMDGPU_PTE_TMZ;
+
+ cpu_addr = &job->ibs[0].ptr[num_dw];
+
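+ /* TT pages already carry DMA addresses; VRAM pages are mapped one at a time from their MC address. */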
+ if (mem->mem_type == TTM_PL_TT) {
+ struct ttm_dma_tt *dma;
+ dma_addr_t *dma_address;
+
+ dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+ dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ cpu_addr);
+ if (r)
+ goto error_free;
+ } else {
+ dma_addr_t dma_address;
+
+ dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+ dma_address += adev->vm_manager.vram_base_offset;
+
+ for (i = 0; i < num_pages; ++i) {
+ r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1,
+ &dma_address, flags, cpu_addr);
+ if (r)
+ goto error_free;
+
+ dma_address += PAGE_SIZE;
+ }
+ }
+
+ r = amdgpu_job_submit(job, &adev->mman.entity,
+ AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
+ if (r)
+ goto error_free;
+
+ dma_fence_put(fence);
+
+ return r;
+
+error_free:
+ amdgpu_job_free(job);
+ return r;
+}
+
+/**
* amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * @adev: amdgpu device
+ * @src: buffer/address where to read from
+ * @dst: buffer/address where to write to
+ * @size: number of bytes to copy
+ * @tmz: if a secure copy should be used
+ * @resv: resv object to sync to
+ * @f: Returns the last fence if multiple jobs are submitted.
*
* The function copies @size bytes from {src->mem + src->offset} to
* {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a
* move and different for a BO to BO copy.
*
- * @f: Returns the last fence if multiple jobs are submitted.
*/
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- struct amdgpu_copy_mem *src,
- struct amdgpu_copy_mem *dst,
- uint64_t size,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f)
{
+ const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
+ AMDGPU_GPU_PAGE_SIZE);
+
+ uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct drm_mm_node *src_mm, *dst_mm;
- uint64_t src_node_start, dst_node_start, src_node_size,
- dst_node_size, src_page_offset, dst_page_offset;
struct dma_fence *fence = NULL;
int r = 0;
- const uint64_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE);
if (!adev->mman.buffer_funcs_enabled) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
- src_mm = amdgpu_find_mm_node(src->mem, &src->offset);
- src_node_start = amdgpu_mm_node_addr(src->bo, src_mm, src->mem) +
- src->offset;
- src_node_size = (src_mm->size << PAGE_SHIFT) - src->offset;
- src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ src_offset = src->offset;
+ src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
+ src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
- dst_mm = amdgpu_find_mm_node(dst->mem, &dst->offset);
- dst_node_start = amdgpu_mm_node_addr(dst->bo, dst_mm, dst->mem) +
- dst->offset;
- dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst->offset;
- dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+ dst_offset = dst->offset;
+ dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
+ dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
mutex_lock(&adev->mman.gtt_window_lock);
while (size) {
- unsigned long cur_size;
- uint64_t from = src_node_start, to = dst_node_start;
+ uint32_t src_page_offset = src_offset & ~PAGE_MASK;
+ uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
struct dma_fence *next;
+ uint32_t cur_size;
+ uint64_t from, to;
/* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst
* begins at an offset, then adjust the size accordingly
*/
- cur_size = min3(min(src_node_size, dst_node_size), size,
- GTT_MAX_BYTES);
- if (cur_size + src_page_offset > GTT_MAX_BYTES ||
- cur_size + dst_page_offset > GTT_MAX_BYTES)
- cur_size -= max(src_page_offset, dst_page_offset);
-
- /* Map only what needs to be accessed. Map src to window 0 and
- * dst to window 1
- */
- if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
- r = amdgpu_map_buffer(src->bo, src->mem,
- PFN_UP(cur_size + src_page_offset),
- src_node_start, 0, ring,
- &from);
- if (r)
- goto error;
- /* Adjust the offset because amdgpu_map_buffer returns
- * start of mapped page
- */
- from += src_page_offset;
- }
+ cur_size = max(src_page_offset, dst_page_offset);
+ cur_size = min(min3(src_node_size, dst_node_size, size),
+ (uint64_t)(GTT_MAX_BYTES - cur_size));
+
+ /* Map src to window 0 and dst to window 1. */
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+ PFN_UP(cur_size + src_page_offset),
+ src_offset, 0, ring, tmz, &from);
+ if (r)
+ goto error;
- if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
- r = amdgpu_map_buffer(dst->bo, dst->mem,
- PFN_UP(cur_size + dst_page_offset),
- dst_node_start, 1, ring,
- &to);
- if (r)
- goto error;
- to += dst_page_offset;
- }
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+ PFN_UP(cur_size + dst_page_offset),
+ dst_offset, 1, ring, tmz, &to);
+ if (r)
+ goto error;
r = amdgpu_copy_buffer(ring, from, to, cur_size,
- resv, &next, false, true);
+ resv, &next, false, true, tmz);
if (r)
goto error;
@@ -386,23 +481,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
src_node_size -= cur_size;
if (!src_node_size) {
- src_node_start = amdgpu_mm_node_addr(src->bo, ++src_mm,
- src->mem);
- src_node_size = (src_mm->size << PAGE_SHIFT);
- src_page_offset = 0;
+ ++src_mm;
+ src_node_size = src_mm->size << PAGE_SHIFT;
+ src_offset = 0;
} else {
- src_node_start += cur_size;
- src_page_offset = src_node_start & (PAGE_SIZE - 1);
+ src_offset += cur_size;
}
+
dst_node_size -= cur_size;
if (!dst_node_size) {
- dst_node_start = amdgpu_mm_node_addr(dst->bo, ++dst_mm,
- dst->mem);
- dst_node_size = (dst_mm->size << PAGE_SHIFT);
- dst_page_offset = 0;
+ ++dst_mm;
+ dst_node_size = dst_mm->size << PAGE_SHIFT;
+ dst_offset = 0;
} else {
- dst_node_start += cur_size;
- dst_page_offset = dst_node_start & (PAGE_SIZE - 1);
+ dst_offset += cur_size;
}
}
error:
@@ -425,6 +517,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_copy_mem src, dst;
struct dma_fence *fence = NULL;
int r;
@@ -438,14 +531,14 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst,
new_mem->num_pages << PAGE_SHIFT,
+ amdgpu_bo_encrypted(abo),
bo->base.resv, &fence);
if (r)
goto error;
/* clear the space being freed */
if (old_mem->mem_type == TTM_PL_VRAM &&
- (ttm_to_amdgpu_bo(bo)->flags &
- AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
+ (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) {
struct dma_fence *wipe_fence = NULL;
r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON,
@@ -742,8 +835,8 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
+ uint64_t offset = (page_offset << PAGE_SHIFT);
struct drm_mm_node *mm;
- unsigned long offset = (page_offset << PAGE_SHIFT);
mm = amdgpu_find_mm_node(&bo->mem, &offset);
return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start +
@@ -766,18 +859,6 @@ struct amdgpu_ttm_tt {
};
#ifdef CONFIG_DRM_AMDGPU_USERPTR
-/* flags used by HMM internal, not related to CPU/GPU PTE flags */
-static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
- (1 << 0), /* HMM_PFN_VALID */
- (1 << 1), /* HMM_PFN_WRITE */
-};
-
-static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
- 0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
- 0, /* HMM_PFN_NONE */
- 0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
-};
-
/**
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
* memory and start HMM tracking CPU page table update
@@ -816,23 +897,20 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
goto out;
}
range->notifier = &bo->notifier;
- range->flags = hmm_range_flags;
- range->values = hmm_range_values;
- range->pfn_shift = PAGE_SHIFT;
range->start = bo->notifier.interval_tree.start;
range->end = bo->notifier.interval_tree.last + 1;
- range->default_flags = hmm_range_flags[HMM_PFN_VALID];
+ range->default_flags = HMM_PFN_REQ_FAULT;
if (!amdgpu_ttm_tt_is_readonly(ttm))
- range->default_flags |= range->flags[HMM_PFN_WRITE];
+ range->default_flags |= HMM_PFN_REQ_WRITE;
- range->pfns = kvmalloc_array(ttm->num_pages, sizeof(*range->pfns),
- GFP_KERNEL);
- if (unlikely(!range->pfns)) {
+ range->hmm_pfns = kvmalloc_array(ttm->num_pages,
+ sizeof(*range->hmm_pfns), GFP_KERNEL);
+ if (unlikely(!range->hmm_pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
@@ -843,36 +921,32 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
r = -EPERM;
goto out_unlock;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
r = hmm_range_fault(range);
- up_read(&mm->mmap_sem);
- if (unlikely(r <= 0)) {
+ mmap_read_unlock(mm);
+ if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
*/
- if ((r == 0 || r == -EBUSY) && !time_after(jiffies, timeout))
+ if (r == -EBUSY && !time_after(jiffies, timeout))
goto retry;
goto out_free_pfns;
}
- for (i = 0; i < ttm->num_pages; i++) {
- /* FIXME: The pages cannot be touched outside the notifier_lock */
- pages[i] = hmm_device_entry_to_page(range, range->pfns[i]);
- if (unlikely(!pages[i])) {
- pr_err("Page fault failed for pfn[%lu] = 0x%llx\n",
- i, range->pfns[i]);
- r = -ENOMEM;
-
- goto out_free_pfns;
- }
- }
+ /*
+ * Due to default_flags, all pages are HMM_PFN_VALID or
+ * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+ * the notifier_lock, and mmu_interval_read_retry() must be done first.
+ */
+ for (i = 0; i < ttm->num_pages; i++)
+ pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
gtt->range = range;
mmput(mm);
@@ -880,9 +954,9 @@ retry:
return 0;
out_unlock:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
out_free_pfns:
- kvfree(range->pfns);
+ kvfree(range->hmm_pfns);
out_free_ranges:
kfree(range);
out:
@@ -907,7 +981,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n",
gtt->userptr, ttm->num_pages);
- WARN_ONCE(!gtt->range || !gtt->range->pfns,
+ WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
"No user pages to check\n");
if (gtt->range) {
@@ -917,7 +991,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
*/
r = mmu_interval_read_retry(gtt->range->notifier,
gtt->range->notifier_seq);
- kvfree(gtt->range->pfns);
+ kvfree(gtt->range->hmm_pfns);
kfree(gtt->range);
gtt->range = NULL;
}
@@ -1008,8 +1082,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm->pages[i] !=
- hmm_device_entry_to_page(gtt->range,
- gtt->range->pfns[i]))
+ hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
break;
}
@@ -1027,6 +1100,9 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
+ if (amdgpu_bo_encrypted(abo))
+ flags |= AMDGPU_PTE_TMZ;
+
if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
@@ -1539,6 +1615,9 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
switch (bo->mem.mem_type) {
case TTM_PL_TT:
+ if (amdgpu_bo_is_amdgpu_bo(bo) &&
+ amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
+ return false;
return true;
case TTM_PL_VRAM:
@@ -1587,8 +1666,9 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
if (bo->mem.mem_type != TTM_PL_VRAM)
return -EIO;
- nodes = amdgpu_find_mm_node(&abo->tbo.mem, &offset);
- pos = (nodes->start << PAGE_SHIFT) + offset;
+ pos = offset;
+ nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
+ pos += (nodes->start << PAGE_SHIFT);
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
@@ -1857,17 +1937,19 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
/*
- * reserve one TMR (64K) memory at the top of VRAM which holds
+ * reserve TMR memory at the top of VRAM which holds
* IP Discovery data and is protected by PSP.
*/
- r = amdgpu_bo_create_kernel_at(adev,
- adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
- DISCOVERY_TMR_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &adev->discovery_memory,
- NULL);
- if (r)
- return r;
+ if (adev->discovery_tmr_size > 0) {
+ r = amdgpu_bo_create_kernel_at(adev,
+ adev->gmc.real_vram_size - adev->discovery_tmr_size,
+ adev->discovery_tmr_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->discovery_memory,
+ NULL);
+ if (r)
+ return r;
+ }
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -2015,75 +2097,14 @@ int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}
-static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *mem, unsigned num_pages,
- uint64_t offset, unsigned window,
- struct amdgpu_ring *ring,
- uint64_t *addr)
-{
- struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct amdgpu_device *adev = ring->adev;
- struct ttm_tt *ttm = bo->ttm;
- struct amdgpu_job *job;
- unsigned num_dw, num_bytes;
- dma_addr_t *dma_address;
- struct dma_fence *fence;
- uint64_t src_addr, dst_addr;
- uint64_t flags;
- int r;
-
- BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
- AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
-
- *addr = adev->gmc.gart_start;
- *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
- AMDGPU_GPU_PAGE_SIZE;
-
- num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
-
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job);
- if (r)
- return r;
-
- src_addr = num_dw * 4;
- src_addr += job->ibs[0].gpu_addr;
-
- dst_addr = amdgpu_bo_gpu_offset(adev->gart.bo);
- dst_addr += window * AMDGPU_GTT_MAX_TRANSFER_SIZE * 8;
- amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr,
- dst_addr, num_bytes);
-
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- WARN_ON(job->ibs[0].length_dw > num_dw);
-
- dma_address = &gtt->ttm.dma_address[offset >> PAGE_SHIFT];
- flags = amdgpu_ttm_tt_pte_flags(adev, ttm, mem);
- r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
- &job->ibs[0].ptr[num_dw]);
- if (r)
- goto error_free;
-
- r = amdgpu_job_submit(job, &adev->mman.entity,
- AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
- if (r)
- goto error_free;
-
- dma_fence_put(fence);
-
- return r;
-
-error_free:
- amdgpu_job_free(job);
- return r;
-}
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush)
+ bool vm_needs_flush, bool tmz)
{
+ enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED;
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -2101,7 +2122,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
num_loops = DIV_ROUND_UP(byte_count, max_bytes);
num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8);
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job);
if (r)
return r;
@@ -2123,7 +2144,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
- dst_offset, cur_size_in_bytes);
+ dst_offset, cur_size_in_bytes, tmz);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
@@ -2190,7 +2211,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
/* for IB padding */
num_dw += 64;
- r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+ &job);
if (r)
return r;
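The copy path now threads two new knobs down to the SDMA packet: the IB pool (AMDGPU_IB_POOL_DIRECT for direct submission, AMDGPU_IB_POOL_DELAYED otherwise) and a trailing tmz flag requesting an encrypted copy on ASICs with TMZ support. A minimal call-site sketch, assuming placeholder offsets and reservation object (not buildable outside the driver tree):

    struct dma_fence *fence = NULL;
    int r;

    /* scheduled (non-direct) submission of a plain, non-TMZ copy */
    r = amdgpu_copy_buffer(ring, src_offset, dst_offset, byte_count,
                           resv, &fence, false /* direct_submit */,
                           true  /* vm_needs_flush */, false /* tmz */);
    if (!r)
        dma_fence_put(fence);   /* a real caller may wait on it first */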
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index bd05bbb4878d..4351d02644a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -24,8 +24,9 @@
#ifndef __AMDGPU_TTM_H__
#define __AMDGPU_TTM_H__
-#include "amdgpu.h"
+#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
+#include "amdgpu.h"
#define AMDGPU_PL_GDS (TTM_PL_PRIV + 0)
#define AMDGPU_PL_GWS (TTM_PL_PRIV + 1)
@@ -74,6 +75,15 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt);
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
@@ -87,11 +97,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
struct dma_fence **fence, bool direct_submit,
- bool vm_needs_flush);
+ bool vm_needs_flush, bool tmz);
int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
- struct amdgpu_copy_mem *src,
- struct amdgpu_copy_mem *dst,
- uint64_t size,
+ const struct amdgpu_copy_mem *src,
+ const struct amdgpu_copy_mem *dst,
+ uint64_t size, bool tmz,
struct dma_resv *resv,
struct dma_fence **f);
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 9ef312428231..65bb25e31d45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -403,8 +403,8 @@ FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index 9dd51f0d2c11..af1b1ccf613c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -110,7 +110,8 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
* even NOMEM error is encountered
*/
if(!err_data->err_addr)
- DRM_WARN("Failed to alloc memory for umc error address record!\n");
+ dev_warn(adev->dev, "Failed to alloc memory for umc error address record!\n");
/* umc query_ras_error_address is also responsible for clearing
* error status
@@ -120,10 +121,14 @@ int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev,
/* only uncorrectable error needs gpu reset */
if (err_data->ue_count) {
+ dev_info(adev->dev, "%ld uncorrectable hardware errors detected in UMC block\n",
+ err_data->ue_count);
+
if (err_data->err_addr_cnt &&
amdgpu_ras_add_bad_pages(adev, err_data->err_addr,
err_data->err_addr_cnt))
- DRM_WARN("Failed to add ras bad page!\n");
+ dev_warn(adev->dev, "Failed to add ras bad page!\n");
amdgpu_ras_reset_gpu(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 5fd32ad1c575..5100ebe8858d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1056,7 +1056,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err;
}
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 59ddba137946..ecaa2d7483b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -446,7 +446,8 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -524,7 +525,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence *f = NULL;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ direct ? AMDGPU_IB_POOL_DIRECT :
+ AMDGPU_IB_POOL_DELAYED, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index a41272fbcba2..2badbc0355f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -56,19 +56,23 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
{
- unsigned long bo_size;
+ unsigned long bo_size, fw_shared_bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned char fw_check;
int i, r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
+ mutex_init(&adev->vcn.vcn_pg_lock);
+ atomic_set(&adev->vcn.total_submission_cnt, 0);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+ atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
switch (adev->asic_type) {
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
fw_name = FIRMWARE_RAVEN2;
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
fw_name = FIRMWARE_PICASSO;
else
fw_name = FIRMWARE_RAVEN;
@@ -178,6 +182,17 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
return r;
}
}
+
+ r = amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.inst[i].fw_shared_bo,
+ &adev->vcn.inst[i].fw_shared_gpu_addr, &adev->vcn.inst[i].fw_shared_cpu_addr);
+ if (r) {
+ dev_err(adev->dev, "VCN %d (%d) failed to allocate firmware shared bo\n", i, r);
+ return r;
+ }
+
+ fw_shared_bo_size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ adev->vcn.inst[i].saved_shm_bo = kvmalloc(fw_shared_bo_size, GFP_KERNEL);
}
return 0;
@@ -192,6 +207,12 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
if (adev->vcn.harvest_config & (1 << j))
continue;
+
+ kvfree(adev->vcn.inst[j].saved_shm_bo);
+ amdgpu_bo_free_kernel(&adev->vcn.inst[j].fw_shared_bo,
+ &adev->vcn.inst[j].fw_shared_gpu_addr,
+ (void **)&adev->vcn.inst[j].fw_shared_cpu_addr);
+
if (adev->vcn.indirect_sram) {
amdgpu_bo_free_kernel(&adev->vcn.inst[j].dpg_sram_bo,
&adev->vcn.inst[j].dpg_sram_gpu_addr,
@@ -210,6 +231,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
}
release_firmware(adev->vcn.fw);
+ mutex_destroy(&adev->vcn.vcn_pg_lock);
return 0;
}
@@ -236,6 +258,17 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
return -ENOMEM;
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
+
+ if (adev->vcn.inst[i].fw_shared_bo == NULL)
+ return 0;
+
+ if (!adev->vcn.inst[i].saved_shm_bo)
+ return -ENOMEM;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+ memcpy_fromio(adev->vcn.inst[i].saved_shm_bo, ptr, size);
}
return 0;
}
@@ -273,6 +306,17 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
}
memset_io(ptr, 0, size);
}
+
+ if (adev->vcn.inst[i].fw_shared_bo == NULL)
+ return -EINVAL;
+
+ size = amdgpu_bo_size(adev->vcn.inst[i].fw_shared_bo);
+ ptr = adev->vcn.inst[i].fw_shared_cpu_addr;
+
+ if (adev->vcn.inst[i].saved_shm_bo != NULL)
+ memcpy_toio(ptr, adev->vcn.inst[i].saved_shm_bo, size);
+ else
+ memset_io(ptr, 0, size);
}
return 0;
}
@@ -295,7 +339,8 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
- if (fence[j])
+ if (fence[j] ||
+ unlikely(atomic_read(&adev->vcn.inst[j].dpg_enc_submission_cnt)))
new_state.fw_based = VCN_DPG_STATE__PAUSE;
else
new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
@@ -307,8 +352,7 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
fences += fence[j];
}
- if (fences == 0) {
- amdgpu_gfx_off_ctrl(adev, true);
+ if (!fences && !atomic_read(&adev->vcn.total_submission_cnt)) {
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
} else {
@@ -319,36 +363,46 @@ static void amdgpu_vcn_idle_work_handler(struct work_struct *work)
void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
- if (set_clocks) {
- amdgpu_gfx_off_ctrl(adev, false);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
- AMD_PG_STATE_UNGATE);
- }
+ atomic_inc(&adev->vcn.total_submission_cnt);
+ cancel_delayed_work_sync(&adev->vcn.idle_work);
+
+ mutex_lock(&adev->vcn.vcn_pg_lock);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN,
+ AMD_PG_STATE_UNGATE);
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
struct dpg_pause_state new_state;
- unsigned int fences = 0;
- unsigned int i;
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
- }
- if (fences)
+ if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) {
+ atomic_inc(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
new_state.fw_based = VCN_DPG_STATE__PAUSE;
- else
- new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ } else {
+ unsigned int fences = 0;
+ unsigned int i;
- if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
- new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i)
+ fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
+
+ if (fences || atomic_read(&adev->vcn.inst[ring->me].dpg_enc_submission_cnt))
+ new_state.fw_based = VCN_DPG_STATE__PAUSE;
+ else
+ new_state.fw_based = VCN_DPG_STATE__UNPAUSE;
+ }
adev->vcn.pause_dpg_mode(adev, ring->me, &new_state);
}
+ mutex_unlock(&adev->vcn.vcn_pg_lock);
}
void amdgpu_vcn_ring_end_use(struct amdgpu_ring *ring)
{
+ if (ring->adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG &&
+ ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC)
+ atomic_dec(&ring->adev->vcn.inst[ring->me].dpg_enc_submission_cnt);
+
+ atomic_dec(&ring->adev->vcn.total_submission_cnt);
+
schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
}
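The begin_use/end_use rework replaces the old "did cancel_delayed_work_sync() find pending work?" heuristic with an explicit submission counter: begin_use bumps the counter and cancels the idle work, end_use drops it and re-arms the work, and the idle handler only gates power once both the fence counts and the counter reach zero. A standalone sketch of the pattern, with generic names in place of the driver's atomic_t/workqueue plumbing:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int total_submission_cnt;

    static void begin_use(void)
    {
        atomic_fetch_add(&total_submission_cnt, 1);
        /* driver: cancel_delayed_work_sync(&idle_work); ungate power */
    }

    static void end_use(void)
    {
        atomic_fetch_sub(&total_submission_cnt, 1);
        /* driver: schedule_delayed_work(&idle_work, VCN_IDLE_TIMEOUT) */
    }

    static bool idle_work_may_gate(unsigned int outstanding_fences)
    {
        /* gate only when no fences remain and nothing is between
         * begin_use() and end_use() */
        return outstanding_fences == 0 &&
               atomic_load(&total_submission_cnt) == 0;
    }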
@@ -390,7 +444,8 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 64,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
goto err;
@@ -557,7 +612,8 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -610,7 +666,8 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 6fe057329de2..90aa12b22725 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -132,6 +132,13 @@
} \
} while (0)
+#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
+
+enum fw_queue_mode {
+ FW_QUEUE_RING_RESET = 1,
+ FW_QUEUE_DPG_HOLD_OFF = 2,
+};
+
enum engine_status_constants {
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON = 0x2AAAA0,
UVD_PGFSM_STATUS__UVDM_UVDU_PWR_ON_2_0 = 0xAAAA0,
@@ -179,10 +186,15 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct amdgpu_bo *fw_shared_bo;
struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
+ atomic_t dpg_enc_submission_cnt;
+ void *fw_shared_cpu_addr;
+ uint64_t fw_shared_gpu_addr;
+ void *saved_shm_bo;
};
struct amdgpu_vcn {
@@ -196,16 +208,28 @@ struct amdgpu_vcn {
uint8_t num_vcn_inst;
struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES];
struct amdgpu_vcn_reg internal;
- struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS];
- struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES];
- uint32_t num_vcn_enc_sched;
- uint32_t num_vcn_dec_sched;
+ struct mutex vcn_pg_lock;
+ atomic_t total_submission_cnt;
unsigned harvest_config;
int (*pause_dpg_mode)(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
};
+struct amdgpu_fw_shared_multi_queue {
+ uint8_t decode_queue_mode;
+ uint8_t encode_generalpurpose_queue_mode;
+ uint8_t encode_lowlatency_queue_mode;
+ uint8_t encode_realtime_queue_mode;
+ uint8_t padding[4];
+};
+
+struct amdgpu_fw_shared {
+ uint32_t present_flag_0;
+ uint8_t pad[53];
+ struct amdgpu_fw_shared_multi_queue multi_queue;
+} __attribute__((__packed__));
+
int amdgpu_vcn_sw_init(struct amdgpu_device *adev);
int amdgpu_vcn_sw_fini(struct amdgpu_device *adev);
int amdgpu_vcn_suspend(struct amdgpu_device *adev);
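struct amdgpu_fw_shared is shared with VCN firmware, which is why it is declared packed with an explicit 53-byte pad: multi_queue must sit at a fixed byte offset regardless of compiler padding. A standalone check of that layout — the offset of 57 (4-byte present_flag_0 plus 53 pad bytes) is inferred from the declaration, not stated in the patch:

    #include <stdint.h>
    #include <stddef.h>

    struct multi_queue {
        uint8_t decode_queue_mode;
        uint8_t encode_generalpurpose_queue_mode;
        uint8_t encode_lowlatency_queue_mode;
        uint8_t encode_realtime_queue_mode;
        uint8_t padding[4];
    };

    struct fw_shared {
        uint32_t present_flag_0;
        uint8_t pad[53];
        struct multi_queue multi_queue;
    } __attribute__((__packed__));

    /* without __packed__ the compiler could insert padding after
     * present_flag_0 and silently break the firmware ABI */
    _Static_assert(offsetof(struct fw_shared, multi_queue) == 57,
                   "fw_shared layout must match firmware expectations");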
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index adc813cde8e2..f3b38c9e04ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -38,7 +38,8 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
/* enable virtual display */
- adev->mode_info.num_crtc = 1;
+ if (adev->mode_info.num_crtc == 0)
+ adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
@@ -59,7 +60,10 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
ref, mask);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -81,6 +85,9 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
return;
+failed_undo:
+ amdgpu_ring_undo(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
}
@@ -152,6 +159,19 @@ int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
return 0;
}
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
+{
+ struct amdgpu_virt *virt = &adev->virt;
+
+ if (virt->ops && virt->ops->req_init_data)
+ virt->ops->req_init_data(adev);
+
+ if (adev->virt.req_init_data_ver > 0)
+ DRM_INFO("host supports REQ_INIT_DATA handshake\n");
+ else
+ DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
+}
+
/**
* amdgpu_virt_wait_reset() - wait for GPU reset to complete
* @adev: amdgpu device pointer.
@@ -287,3 +307,82 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
}
+
+void amdgpu_detect_virtualization(struct amdgpu_device *adev)
+{
+ uint32_t reg;
+
+ switch (adev->asic_type) {
+ case CHIP_TONGA:
+ case CHIP_FIJI:
+ reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+ break;
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_ARCTURUS:
+ reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
+ break;
+ default: /* other chips don't support SR-IOV */
+ reg = 0;
+ break;
+ }
+
+ if (reg & 1)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
+
+ if (reg & 0x80000000)
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
+
+ if (!reg) {
+ if (is_virtual_machine()) /* passthrough mode excludes SR-IOV mode */
+ adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
+ }
+}
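amdgpu_detect_virtualization() folds the previous per-ASIC passthrough checks into one place: read the family-specific IOV function identifier register, then derive the caps from bit 0 (this function is a VF), bit 31 (SR-IOV enabled), or a zero register inside a VM (plain passthrough). A standalone decode sketch; the cap bit values below are illustrative, not the driver's AMDGPU_SRIOV_CAPS_* definitions:

    #include <stdbool.h>
    #include <stdint.h>

    #define CAPS_IS_VF        (1u << 0)   /* illustrative values */
    #define CAPS_ENABLE_IOV   (1u << 1)
    #define CAPS_PASSTHROUGH  (1u << 2)

    static uint32_t decode_iov_caps(uint32_t reg, bool in_vm)
    {
        uint32_t caps = 0;

        if (reg & 1)                /* bit 0: function is a VF */
            caps |= CAPS_IS_VF;
        if (reg & 0x80000000u)      /* bit 31: IOV enabled */
            caps |= CAPS_ENABLE_IOV;
        if (!reg && in_vm)          /* passthrough excludes SR-IOV */
            caps |= CAPS_PASSTHROUGH;
        return caps;
    }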
+
+bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_debug(adev);
+}
+
+bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
+{
+ return amdgpu_sriov_is_normal(adev);
+}
+
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (!amdgpu_sriov_vf(adev) ||
+ amdgpu_virt_access_debugfs_is_kiq(adev))
+ return 0;
+
+ if (amdgpu_virt_access_debugfs_is_mmio(adev))
+ adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
+{
+ if (amdgpu_sriov_vf(adev))
+ adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
+}
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
+{
+ enum amdgpu_sriov_vf_mode mode;
+
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ mode = SRIOV_VF_MODE_ONE_VF;
+ else
+ mode = SRIOV_VF_MODE_MULTI_VF;
+ } else {
+ mode = SRIOV_VF_MODE_BARE_METAL;
+ }
+
+ return mode;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index f0128f745bd2..b90e822cebd7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -30,6 +30,17 @@
#define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* the whole GPU is passed through to the VM */
#define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */
+/* all asic after AI use this offset */
+#define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5
+/* tonga/fiji use this offset */
+#define mmBIF_IOV_FUNC_IDENTIFIER 0x1503
+
+enum amdgpu_sriov_vf_mode {
+ SRIOV_VF_MODE_BARE_METAL = 0,
+ SRIOV_VF_MODE_ONE_VF,
+ SRIOV_VF_MODE_MULTI_VF,
+};
+
struct amdgpu_mm_table {
struct amdgpu_bo *bo;
uint32_t *cpu_addr;
@@ -54,6 +65,7 @@ struct amdgpu_vf_error_buffer {
struct amdgpu_virt_ops {
int (*req_full_gpu)(struct amdgpu_device *adev, bool init);
int (*rel_full_gpu)(struct amdgpu_device *adev, bool init);
+ int (*req_init_data)(struct amdgpu_device *adev);
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
@@ -83,6 +95,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* MM bandwidth */
+ AMDGIM_FEATURE_GIM_MM_BW_MGR = 0x8,
/* PP ONE VF MODE in GIM */
AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
};
@@ -256,6 +270,8 @@ struct amdgpu_virt {
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
uint32_t reg_access_mode;
+ int req_init_data_ver;
+ bool tdr_debug;
};
#define amdgpu_sriov_enabled(adev) \
@@ -287,6 +303,10 @@ static inline bool is_virtual_machine(void)
#define amdgpu_sriov_is_pp_one_vf(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_PP_ONE_VF)
+#define amdgpu_sriov_is_debug(adev) \
+ ((!adev->in_gpu_reset) && adev->virt.tdr_debug)
+#define amdgpu_sriov_is_normal(adev) \
+ ((!adev->in_gpu_reset) && (!adev->virt.tdr_debug))
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
@@ -296,6 +316,7 @@ void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
+void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
@@ -303,4 +324,11 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_detect_virtualization(struct amdgpu_device *adev);
+
+bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
+int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev);
+void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev);
+
+enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev);
#endif
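The enable/disable pair is meant to bracket register access from debugfs under SR-IOV: enable returns -EPERM while the VF may not touch registers directly, and otherwise clears the runtime cap that disable restores afterwards. A hedged call-site sketch; read_registers() is a hypothetical stand-in for the body of a debugfs read handler:

    int r = amdgpu_virt_enable_access_debugfs(adev);
    if (r < 0)
        return r;               /* -EPERM: VF is in runtime mode */

    read_registers(adev);       /* hypothetical MMIO access */

    amdgpu_virt_disable_access_debugfs(adev);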
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6d9252a27916..7417754e9141 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -82,7 +82,7 @@ struct amdgpu_prt_cb {
struct dma_fence_cb cb;
};
-/**
+/*
* vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
* happens while holding this lock anywhere to prevent deadlocks when
* an MMU notifier runs in reclaim-FS context.
@@ -726,7 +726,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
- * @direct: use a direct update
+ * @immediate: use an immediate update
*
* Root PD needs to be reserved when calling this.
*
@@ -736,7 +736,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo *bo,
- bool direct)
+ bool immediate)
{
struct ttm_operation_ctx ctx = { true, false };
unsigned level = adev->vm_manager.root_level;
@@ -795,7 +795,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
@@ -850,11 +850,11 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
* @adev: amdgpu_device pointer
* @vm: requesting vm
* @level: the page table level
- * @direct: use a direct update
+ * @immediate: use an immediate update
* @bp: resulting BO allocation parameters
*/
static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
- int level, bool direct,
+ int level, bool immediate,
struct amdgpu_bo_param *bp)
{
memset(bp, 0, sizeof(*bp));
@@ -870,7 +870,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else if (!vm->root.base.bo || vm->root.base.bo->shadow)
bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
bp->type = ttm_bo_type_kernel;
- bp->no_wait_gpu = direct;
+ bp->no_wait_gpu = immediate;
if (vm->root.base.bo)
bp->resv = vm->root.base.bo->tbo.base.resv;
}
@@ -881,7 +881,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
* @cursor: Which page table to allocate
- * @direct: use a direct update
+ * @immediate: use an immediate update
*
* Make sure a specific page table or directory is allocated.
*
@@ -892,7 +892,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_vm_pt_cursor *cursor,
- bool direct)
+ bool immediate)
{
struct amdgpu_vm_pt *entry = cursor->entry;
struct amdgpu_bo_param bp;
@@ -913,7 +913,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
if (entry->base.bo)
return 0;
- amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp);
+ amdgpu_vm_bo_param(adev, vm, cursor->level, immediate, &bp);
r = amdgpu_bo_create(adev, &bp, &pt);
if (r)
@@ -925,7 +925,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt, direct);
+ r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
if (r)
goto error_free_pt;
@@ -1276,7 +1276,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @direct: submit directly to the paging queue
+ * @immediate: submit immediately to the paging queue
*
* Makes sure all directories are up to date.
*
@@ -1284,7 +1284,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
* 0 for success, error for failure.
*/
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct)
+ struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
int r;
@@ -1295,7 +1295,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
@@ -1446,20 +1446,24 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ if (!params->unlocked) {
/* make sure that the page tables covering the
* address range are actually allocated
*/
r = amdgpu_vm_alloc_pts(params->adev, params->vm,
- &cursor, params->direct);
+ &cursor, params->immediate);
if (r)
return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (adev->asic_type < CHIP_VEGA10 &&
- (flags & AMDGPU_PTE_VALID)) {
+ if (params->unlocked) {
+ /* Unlocked updates are only allowed on the leaves */
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
/* No huge page support before GMC v9 */
if (cursor.level != AMDGPU_VM_PTB) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1557,7 +1561,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @direct: direct submission in a page fault
+ * @immediate: immediate submission in a page fault
+ * @unlocked: unlocked invalidation during MM callback
* @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
@@ -1572,8 +1577,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct,
- struct dma_resv *resv,
+ struct amdgpu_vm *vm, bool immediate,
+ bool unlocked, struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
@@ -1586,8 +1591,9 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
- params.direct = direct;
+ params.immediate = immediate;
params.pages_addr = pages_addr;
+ params.unlocked = unlocked;
/* Implicitly sync to command submissions in the same VM before
* unmapping. Sync to moving fences before mapping.
@@ -1603,11 +1609,12 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
- struct amdgpu_bo *root = vm->root.base.bo;
+ if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
+ struct dma_fence *tmp = dma_fence_get_stub();
- if (!dma_fence_is_signaled(vm->last_direct))
- amdgpu_bo_fence(root, vm->last_direct, true);
+ amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);
+ swap(vm->last_unlocked, tmp);
+ dma_fence_put(tmp);
}
r = vm->update_funcs->prepare(&params, resv, sync_mode);
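The hunk above is a publish-then-replace idiom: the still-pending unlocked update is attached to the root PD's reservation object so this locked update synchronizes with it, and the cached pointer is swapped for an already-signaled stub so the same fence is not published twice. The same steps with explanatory comments (kernel-only APIs, sketch only):

    if (!unlocked && !dma_fence_is_signaled(vm->last_unlocked)) {
        struct dma_fence *tmp = dma_fence_get_stub();  /* signaled */

        /* make the pending unlocked update visible to everyone
         * synchronizing on the root PD's reservation object */
        amdgpu_bo_fence(vm->root.base.bo, vm->last_unlocked, true);

        /* the cache now holds the stub; the old fence reference
         * moves into tmp and is dropped */
        swap(vm->last_unlocked, tmp);
        dma_fence_put(tmp);
    }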
@@ -1721,7 +1728,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1784,6 +1791,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
+
+ if (amdgpu_bo_encrypted(bo))
+ flags |= AMDGPU_PTE_TMZ;
+
bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
} else {
flags = 0x0;
@@ -2014,7 +2025,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2124,11 +2135,8 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
(bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
bo_va->is_xgmi = true;
- mutex_lock(&adev->vm_manager.lock_pstate);
/* Power up XGMI if it can be potentially used */
- if (++adev->vm_manager.xgmi_map_counter == 1)
- amdgpu_xgmi_set_pstate(adev, 1);
- mutex_unlock(&adev->vm_manager.lock_pstate);
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MAX_VEGA20);
}
return bo_va;
@@ -2551,12 +2559,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
dma_fence_put(bo_va->last_pt_update);
- if (bo && bo_va->is_xgmi) {
- mutex_lock(&adev->vm_manager.lock_pstate);
- if (--adev->vm_manager.xgmi_map_counter == 0)
- amdgpu_xgmi_set_pstate(adev, 0);
- mutex_unlock(&adev->vm_manager.lock_pstate);
- }
+ if (bo && bo_va->is_xgmi)
+ amdgpu_xgmi_set_pstate(adev, AMDGPU_XGMI_PSTATE_MIN);
kfree(bo_va);
}
@@ -2585,7 +2589,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_unlocked)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2762,7 +2766,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- return dma_fence_wait_timeout(vm->last_direct, true, timeout);
+ return dma_fence_wait_timeout(vm->last_unlocked, true, timeout);
}
/**
@@ -2798,7 +2802,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
/* create scheduler entities for page table updates */
- r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL,
+ r = drm_sched_entity_init(&vm->immediate, DRM_SCHED_PRIORITY_NORMAL,
adev->vm_manager.vm_pte_scheds,
adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
@@ -2808,7 +2812,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
adev->vm_manager.vm_pte_scheds,
adev->vm_manager.vm_pte_num_scheds, NULL);
if (r)
- goto error_free_direct;
+ goto error_free_immediate;
vm->pte_support_ats = false;
vm->is_compute_context = false;
@@ -2834,7 +2838,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
else
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
- vm->last_direct = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2888,11 +2892,11 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_delayed:
- dma_fence_put(vm->last_direct);
+ dma_fence_put(vm->last_unlocked);
drm_sched_entity_destroy(&vm->delayed);
-error_free_direct:
- drm_sched_entity_destroy(&vm->direct);
+error_free_immediate:
+ drm_sched_entity_destroy(&vm->immediate);
return r;
}
@@ -2996,10 +3000,17 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
!amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
- if (vm->use_cpu_for_update)
+ if (vm->use_cpu_for_update) {
+ /* Sync with last SDMA update/clear before switching to CPU */
+ r = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_UNDEFINED, true);
+ if (r)
+ goto free_idr;
+
vm->update_funcs = &amdgpu_vm_cpu_funcs;
- else
+ } else {
vm->update_funcs = &amdgpu_vm_sdma_funcs;
+ }
dma_fence_put(vm->last_update);
vm->last_update = NULL;
vm->is_compute_context = true;
@@ -3089,8 +3100,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
- dma_fence_wait(vm->last_direct, false);
- dma_fence_put(vm->last_direct);
+ dma_fence_wait(vm->last_unlocked, false);
+ dma_fence_put(vm->last_unlocked);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3107,7 +3118,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&root);
WARN_ON(vm->root.base.bo);
- drm_sched_entity_destroy(&vm->direct);
+ drm_sched_entity_destroy(&vm->immediate);
drm_sched_entity_destroy(&vm->delayed);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3166,9 +3177,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
-
- adev->vm_manager.xgmi_map_counter = 0;
- mutex_init(&adev->vm_manager.lock_pstate);
}
/**
@@ -3343,8 +3351,8 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid,
value = 0;
}
- r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1,
- flags, value, NULL, NULL);
+ r = amdgpu_vm_bo_update_mapping(adev, vm, true, false, NULL, addr,
+ addr + 1, flags, value, NULL, NULL);
if (r)
goto error_unlock;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 06fe30e1492d..c8e68d7890bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -54,6 +54,9 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_PTE_SYSTEM (1ULL << 1)
#define AMDGPU_PTE_SNOOPED (1ULL << 2)
+/* RV+ */
+#define AMDGPU_PTE_TMZ (1ULL << 3)
+
/* VI only */
#define AMDGPU_PTE_EXECUTABLE (1ULL << 4)
@@ -203,9 +206,14 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm *vm;
/**
- * @direct: if changes should be made directly
+ * @immediate: if changes should be made immediately
*/
- bool direct;
+ bool immediate;
+
+ /**
+ * @unlocked: true if the root BO is not locked
+ */
+ bool unlocked;
/**
* @pages_addr:
@@ -271,11 +279,11 @@ struct amdgpu_vm {
struct dma_fence *last_update;
/* Scheduler entities for page table updates */
- struct drm_sched_entity direct;
+ struct drm_sched_entity immediate;
struct drm_sched_entity delayed;
- /* Last submission to the scheduler entities */
- struct dma_fence *last_direct;
+ /* Last unlocked submission to the scheduler entities */
+ struct dma_fence *last_unlocked;
unsigned int pasid;
/* dedicated to vm */
@@ -349,10 +357,6 @@ struct amdgpu_vm_manager {
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
-
- /* counter of mapped memory through xgmi */
- uint32_t xgmi_map_counter;
- struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
@@ -380,7 +384,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, bool direct);
+ struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index e38516304070..39c704a1fb0e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -84,7 +84,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
pe += (unsigned long)amdgpu_bo_kptr(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
for (i = 0; i < count; i++) {
value = p->pages_addr ?
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index cf96c335b258..8d9c6feba660 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -61,10 +61,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
struct dma_resv *resv,
enum amdgpu_sync_mode sync_mode)
{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
if (r)
return r;
@@ -90,11 +92,11 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
struct drm_sched_entity *entity;
- struct dma_fence *f, *tmp;
struct amdgpu_ring *ring;
+ struct dma_fence *f;
int r;
- entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+ entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
WARN_ON(ib->length_dw == 0);
@@ -104,15 +106,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- if (p->direct) {
- tmp = dma_fence_get(f);
- swap(p->vm->last_direct, tmp);
+ if (p->unlocked) {
+ struct dma_fence *tmp = dma_fence_get(f);
+
+ swap(p->vm->last_unlocked, f);
dma_fence_put(tmp);
} else {
- dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ amdgpu_bo_fence(p->vm->root.base.bo, f, true);
}
- if (fence && !p->direct)
+ if (fence && !p->immediate)
swap(*fence, f);
dma_fence_put(f);
return 0;
@@ -142,7 +145,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
- trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
+ trace_amdgpu_vm_copy_ptes(pe, src, count, p->immediate);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
}
@@ -169,7 +172,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
struct amdgpu_ib *ib = p->job->ibs;
pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
count, incr);
@@ -198,6 +201,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags)
{
+ enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
+ : AMDGPU_IB_POOL_DELAYED;
unsigned int i, ndw, nptes;
uint64_t *pte;
int r;
@@ -223,7 +228,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
- r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
+ &p->job);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 82a3299e53c0..d399e5893170 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -22,6 +22,7 @@
* Authors: Christian König
*/
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_vm.h"
#include "amdgpu_atomfirmware.h"
@@ -148,6 +149,15 @@ static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO,
amdgpu_mem_info_vram_vendor, NULL);
+static const struct attribute *amdgpu_vram_mgr_attributes[] = {
+ &dev_attr_mem_info_vram_total.attr,
+ &dev_attr_mem_info_vis_vram_total.attr,
+ &dev_attr_mem_info_vram_used.attr,
+ &dev_attr_mem_info_vis_vram_used.attr,
+ &dev_attr_mem_info_vram_vendor.attr,
+ NULL
+};
+
/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
@@ -172,31 +182,9 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
man->priv = mgr;
/* Add the two VRAM-related sysfs files */
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_used\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
- return ret;
- }
- ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor);
- if (ret) {
- DRM_ERROR("Failed to create device file mem_info_vram_vendor\n");
- return ret;
- }
+ ret = sysfs_create_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
+ if (ret)
+ DRM_ERROR("Failed to register sysfs\n");
return 0;
}
@@ -219,11 +207,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
- device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
- device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor);
+ sysfs_remove_files(&adev->dev->kobj, amdgpu_vram_mgr_attributes);
return 0;
}
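Switching to a NULL-terminated attribute array keeps creation and teardown symmetric and the file list in one place. The same pattern in miniature; the attribute name and show callback are illustrative:

    static ssize_t example_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
        return snprintf(buf, PAGE_SIZE, "%u\n", 0);
    }
    static DEVICE_ATTR(example_stat, 0444, example_show, NULL);

    static const struct attribute *example_attrs[] = {
        &dev_attr_example_stat.attr,
        NULL                    /* terminator required */
    };

    /* on init: */
    ret = sysfs_create_files(&dev->kobj, example_attrs);
    /* on fini: */
    sysfs_remove_files(&dev->kobj, example_attrs);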
@@ -459,6 +443,104 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
}
/**
+ * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @mem: TTM memory object
+ * @dev: device the sg table is mapped for (the DMA-BUF importer)
+ * @dir: dma direction
+ * @sgt: resulting sg table
+ *
+ * Allocate and fill a sg table from a VRAM allocation.
+ */
+int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
+ struct ttm_mem_reg *mem,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table **sgt)
+{
+ struct drm_mm_node *node;
+ struct scatterlist *sg;
+ int num_entries = 0;
+ unsigned int pages;
+ int i, r;
+
+ *sgt = kmalloc(sizeof(**sgt), GFP_KERNEL);
+ if (!*sgt)
+ return -ENOMEM;
+
+ for (pages = mem->num_pages, node = mem->mm_node;
+ pages; pages -= node->size, ++node)
+ ++num_entries;
+
+ r = sg_alloc_table(*sgt, num_entries, GFP_KERNEL);
+ if (r)
+ goto error_free;
+
+ for_each_sg((*sgt)->sgl, sg, num_entries, i)
+ sg->length = 0;
+
+ node = mem->mm_node;
+ for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+ phys_addr_t phys = (node->start << PAGE_SHIFT) +
+ adev->gmc.aper_base;
+ size_t size = node->size << PAGE_SHIFT;
+ dma_addr_t addr;
+
+ ++node;
+ addr = dma_map_resource(dev, phys, size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ r = dma_mapping_error(dev, addr);
+ if (r)
+ goto error_unmap;
+
+ sg_set_page(sg, NULL, size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = size;
+ }
+ return 0;
+
+error_unmap:
+ for_each_sg((*sgt)->sgl, sg, num_entries, i) {
+ if (!sg->length)
+ continue;
+
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(*sgt);
+
+error_free:
+ kfree(*sgt);
+ return r;
+}
+
+/**
+ * amdgpu_vram_mgr_free_sgt - unmap and free a sg table
+ *
+ * @adev: amdgpu device pointer
+ * @dev: device the sg table was mapped for
+ * @dir: dma direction used for the mapping
+ * @sgt: sg table to free
+ *
+ * Free a sg table previously allocated with amdgpu_vram_mgr_alloc_sgt().
+ */
+void amdgpu_vram_mgr_free_sgt(struct amdgpu_device *adev,
+ struct device *dev,
+ enum dma_data_direction dir,
+ struct sg_table *sgt)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ dma_unmap_resource(dev, sg->dma_address,
+ sg->length, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
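The two helpers pair up for DMA-BUF export of VRAM: alloc_sgt walks the drm_mm nodes backing the allocation and maps each contiguous chunk for the importing device with dma_map_resource(), and free_sgt unmaps and frees the table. A hedged call-site sketch; importer_dev and the mem object are placeholders and this is not buildable standalone:

    struct sg_table *sgt;
    int r;

    r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, importer_dev,
                                  DMA_BIDIRECTIONAL, &sgt);
    if (r)
        return r;

    /* ... importer performs peer-to-peer DMA against sgt ... */

    amdgpu_vram_mgr_free_sgt(adev, importer_dev, DMA_BIDIRECTIONAL, sgt);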
+/**
* amdgpu_vram_mgr_usage - how many bytes are used in this domain
*
* @man: TTM memory type manager
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 95b3327168ac..91837a991319 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -325,9 +325,18 @@ success:
static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
struct amdgpu_hive_info *hive)
{
+ char node[10];
+ memset(node, 0, sizeof(node));
+
device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
- sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
- sysfs_remove_link(hive->kobj, adev->ddev->unique);
+ device_remove_file(adev->dev, &dev_attr_xgmi_error);
+
+ if (adev != hive->adev)
+ sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");
+
+ snprintf(node, sizeof(node), "node%d", hive->number_devices);
+ sysfs_remove_link(hive->kobj, node);
}
@@ -373,7 +382,13 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
if (lock)
mutex_lock(&tmp->hive_lock);
- tmp->pstate = -1;
+ tmp->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
+ tmp->hi_req_gpu = NULL;
+ /*
+ * The hive pstate on boot is high in vega20, so we have to go to low
+ * pstate after boot.
+ */
+ tmp->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
mutex_unlock(&xgmi_mutex);
return tmp;
@@ -383,56 +398,59 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
int ret = 0;
struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
- struct amdgpu_device *tmp_adev;
- bool update_hive_pstate = true;
- bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20;
+ struct amdgpu_device *request_adev = hive->hi_req_gpu ?
+ hive->hi_req_gpu : adev;
+ bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
+ bool init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
- if (!hive)
+ /* fw bug: temporarily disable pstate switching */
+ return 0;
+
+ if (!hive || adev->asic_type != CHIP_VEGA20)
return 0;
mutex_lock(&hive->hive_lock);
- if (hive->pstate == pstate) {
- adev->pstate = is_high_pstate ? pstate : adev->pstate;
+ if (is_hi_req)
+ hive->hi_req_count++;
+ else
+ hive->hi_req_count--;
+
+ /*
+ * Vega20 only needs a single peer to request pstate high for the hive to
+ * go high, but all peers must request pstate low for the hive to go low.
+ */
+ if (hive->pstate == pstate ||
+ (!is_hi_req && hive->hi_req_count && !init_low))
goto out;
- }
- dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+ dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);
- ret = amdgpu_dpm_set_xgmi_pstate(adev, pstate);
+ ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
if (ret) {
- dev_err(adev->dev,
+ dev_err(request_adev->dev,
"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
- adev->gmc.xgmi.node_id,
- adev->gmc.xgmi.hive_id, ret);
+ request_adev->gmc.xgmi.node_id,
+ request_adev->gmc.xgmi.hive_id, ret);
goto out;
}
- /* Update device pstate */
- adev->pstate = pstate;
-
- /*
- * Update the hive pstate only all devices of the hive
- * are in the same pstate
- */
- list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
- if (tmp_adev->pstate != adev->pstate) {
- update_hive_pstate = false;
- break;
- }
- }
- if (update_hive_pstate || is_high_pstate)
+ if (init_low)
+ hive->pstate = hive->hi_req_count ?
+ hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
+ else {
hive->pstate = pstate;
-
+ hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
+ adev : NULL;
+ }
out:
mutex_unlock(&hive->hive_lock);
-
return ret;
}
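The rewritten pstate handling turns per-device bookkeeping into a hive-wide vote: hi_req_count rises with every high request and falls with every low request; a single high vote is enough to raise the hive, but it may only drop once the count reaches zero. A standalone model of that rule (simplified; the driver additionally tracks which GPU issued the high request):

    #include <stdbool.h>

    enum pstate { PSTATE_MIN, PSTATE_MAX, PSTATE_UNKNOWN };

    struct hive {
        int hi_req_count;
        enum pstate pstate;
    };

    /* returns true when the hardware pstate should actually switch */
    static bool vote_pstate(struct hive *h, bool want_high)
    {
        bool init_low = h->pstate == PSTATE_UNKNOWN;
        enum pstate want = want_high ? PSTATE_MAX : PSTATE_MIN;

        h->hi_req_count += want_high ? 1 : -1;

        if (h->pstate == want)
            return false;       /* already at the requested pstate */
        if (!want_high && h->hi_req_count && !init_low)
            return false;       /* some peer still needs high */
        return true;
    }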
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
- int ret = -EINVAL;
+ int ret;
/* Each psp need to set the latest topology */
ret = psp_xgmi_set_topology_info(&adev->psp,
@@ -507,9 +525,6 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
goto exit;
}
- /* Set default device pstate */
- adev->pstate = -1;
-
top_info = &adev->psp.xgmi_context.top_info;
list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
@@ -577,14 +592,14 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
if (!hive)
return -EINVAL;
- if (!(hive->number_devices--)) {
+ task_barrier_rem_task(&hive->tb);
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
+ mutex_unlock(&hive->hive_lock);
+
+ if (!(--hive->number_devices)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
- } else {
- task_barrier_rem_task(&hive->tb);
- amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
- mutex_unlock(&hive->hive_lock);
}
return psp_xgmi_terminate(&adev->psp);
@@ -604,6 +619,8 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
adev->gmc.xgmi.num_physical_nodes == 0)
return 0;
+ amdgpu_xgmi_reset_ras_error_count(adev);
+
if (!adev->gmc.xgmi.ras_if) {
adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
if (!adev->gmc.xgmi.ras_if)
@@ -641,31 +658,34 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr)
{
- uint32_t df_inst_id;
- uint64_t dram_base_addr = 0;
- const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
-
- if ((!df_funcs) ||
- (!df_funcs->get_df_inst_id) ||
- (!df_funcs->get_dram_base_addr)) {
- dev_warn(adev->dev,
- "XGMI: relative phy_addr algorithm is not supported\n");
- return addr;
- }
-
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
- dev_warn(adev->dev,
- "failed to disable DF-Cstate, DF register may not be accessible\n");
- return addr;
- }
+ struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;
+ return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
+}
- df_inst_id = df_funcs->get_df_inst_id(adev);
- dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
+{
+ WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
+ WREG32_PCIE(pcs_status_reg, 0);
+}
- if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
- dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
- return addr + dram_base_addr;
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_arct[i]);
+ break;
+ case CHIP_VEGA20:
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
+ pcs_clear_status(adev,
+ xgmi_pcs_err_status_reg_vg20[i]);
+ break;
+ default:
+ break;
+ }
}
static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
@@ -758,6 +778,8 @@ int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
break;
}
+ amdgpu_xgmi_reset_ras_error_count(adev);
+
err_data->ue_count += ue_cnt;
err_data->ce_count += ce_cnt;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 4a92067fe595..6999eab16a72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -25,6 +25,7 @@
#include <drm/task_barrier.h>
#include "amdgpu_psp.h"
+
struct amdgpu_hive_info {
uint64_t hive_id;
struct list_head device_list;
@@ -33,8 +34,14 @@ struct amdgpu_hive_info {
struct kobject *kobj;
struct device_attribute dev_attr;
struct amdgpu_device *adev;
- int pstate; /*0 -- low , 1 -- high , -1 unknown*/
+ int hi_req_count;
+ struct amdgpu_device *hi_req_gpu;
struct task_barrier tb;
+ enum {
+ AMDGPU_XGMI_PSTATE_MIN,
+ AMDGPU_XGMI_PSTATE_MAX_VEGA20,
+ AMDGPU_XGMI_PSTATE_UNKNOWN
+ } pstate;
};
struct amdgpu_pcs_ras_field {
@@ -56,6 +63,7 @@ uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
uint64_t addr);
int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
+void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index cae426c7c086..4cfc786699c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -54,6 +54,8 @@
#define PLL_INDEX 2
#define PLL_DATA 3
+#define ATOM_CMD_TIMEOUT_SEC 20
+
typedef struct {
struct atom_context *ctx;
uint32_t *ps, *ws;
@@ -744,8 +746,9 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 10000)) {
- DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ if (jiffies_to_msecs(cjiffies) > ATOM_CMD_TIMEOUT_SEC * 1000) {
+ DRM_ERROR("atombios stuck in loop for more than %d secs, aborting\n",
+ ATOM_CMD_TIMEOUT_SEC);
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 006f21ef7ddf..fe306d0f73f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1358,8 +1358,6 @@ static int cik_asic_reset(struct amdgpu_device *adev)
int r;
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = cik_asic_pci_config_reset(adev);
@@ -1811,12 +1809,6 @@ static uint32_t cik_get_rev_id(struct amdgpu_device *adev)
>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
}
-static void cik_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void cik_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
@@ -2179,8 +2171,6 @@ static const struct amdgpu_ip_block_version cik_common_ip_block =
int cik_set_ip_blocks(struct amdgpu_device *adev)
{
- cik_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_BONAIRE:
amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 580d3f93d670..20f108818b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -320,8 +320,6 @@ static void cik_sdma_gfx_stop(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], 0);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -679,7 +677,8 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -980,7 +979,8 @@ static int cik_sdma_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1313,7 +1313,8 @@ static void cik_sdma_set_irq_funcs(struct amdgpu_device *adev)
static void cik_sdma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0);
ib->ptr[ib->length_dw++] = byte_count;
diff --git a/drivers/gpu/drm/amd/amdgpu/cikd.h b/drivers/gpu/drm/amd/amdgpu/cikd.h
index cee6e8a3ad9c..5f3f6ebfb387 100644
--- a/drivers/gpu/drm/amd/amdgpu/cikd.h
+++ b/drivers/gpu/drm/amd/amdgpu/cikd.h
@@ -450,7 +450,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 2512e7ebfedf..84b45a019a36 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2303,9 +2303,9 @@ static void dce_v10_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
@@ -2319,10 +2319,10 @@ static void dce_v10_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2404,7 +2404,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -2412,7 +2412,7 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
@@ -2447,7 +2447,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 0dde22db9848..ec61532e2f83 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2382,9 +2382,9 @@ static void dce_v11_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_device *adev = crtc->dev->dev_private;
u32 tmp;
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
@@ -2398,10 +2398,10 @@ static void dce_v11_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- tmp = RREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
+ tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);
tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp);
}
static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2483,7 +2483,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -2491,7 +2491,7 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
@@ -2526,7 +2526,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 84219534bd38..cbddead3dafb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2194,9 +2194,9 @@ static void dce_v6_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -2211,10 +2211,10 @@ static void dce_v6_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
@@ -2299,7 +2299,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -2307,7 +2307,7 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
@@ -2342,7 +2342,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3a640702d7d1..fa0ad50b628c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2205,9 +2205,9 @@ static void dce_v8_0_hide_cursor(struct drm_crtc *crtc)
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
@@ -2220,10 +2220,10 @@ static void dce_v8_0_show_cursor(struct drm_crtc *crtc)
WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset,
lower_32_bits(amdgpu_crtc->cursor_addr));
- WREG32_IDX(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
- CUR_CONTROL__CURSOR_EN_MASK |
- (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
- (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
+ WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset,
+ CUR_CONTROL__CURSOR_EN_MASK |
+ (CURSOR_24_8_PRE_MULT << CUR_CONTROL__CURSOR_MODE__SHIFT) |
+ (CURSOR_URGENT_1_2 << CUR_CONTROL__CURSOR_URGENT_CONTROL__SHIFT));
}
static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
@@ -2305,7 +2305,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
aobj = gem_to_amdgpu_bo(obj);
ret = amdgpu_bo_reserve(aobj, false);
if (ret != 0) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -2313,7 +2313,7 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
amdgpu_bo_unreserve(aobj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
@@ -2348,7 +2348,7 @@ unpin:
amdgpu_bo_unpin(aobj);
amdgpu_bo_unreserve(aobj);
}
- drm_gem_object_put_unlocked(amdgpu_crtc->cursor_bo);
+ drm_gem_object_put(amdgpu_crtc->cursor_bo);
}
amdgpu_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 13e12be667fc..d5ff7b6331ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -172,8 +172,9 @@ static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ drm_crtc_vblank_off(crtc);
+ amdgpu_crtc->enabled = false;
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
amdgpu_crtc->encoder = NULL;
amdgpu_crtc->connector = NULL;
@@ -286,7 +287,7 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
static const struct mode_size {
int w;
int h;
- } common_modes[17] = {
+ } common_modes[21] = {
{ 640, 480},
{ 720, 480},
{ 800, 600},
@@ -303,10 +304,14 @@ static int dce_virtual_get_modes(struct drm_connector *connector)
{1680, 1050},
{1600, 1200},
{1920, 1080},
- {1920, 1200}
+ {1920, 1200},
+ {4096, 3112},
+ {3656, 2664},
+ {3840, 2160},
+ {4096, 2160},
};
- for (i = 0; i < 17; i++) {
+ for (i = 0; i < 21; i++) {
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
drm_mode_probed_add(connector, mode);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 5a1bd8ed1a6c..a7b8292cefee 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -686,58 +686,6 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
-static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev,
- uint32_t df_inst)
-{
- uint32_t base_addr_reg_val = 0;
- uint64_t base_addr = 0;
-
- base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 +
- df_inst * DF_3_6_SMN_REG_INST_DIST);
-
- if (REG_GET_FIELD(base_addr_reg_val,
- DF_CS_UMC_AON0_DramBaseAddress0,
- AddrRngVal) == 0) {
- DRM_WARN("address range not valid");
- return 0;
- }
-
- base_addr = REG_GET_FIELD(base_addr_reg_val,
- DF_CS_UMC_AON0_DramBaseAddress0,
- DramBaseAddr);
-
- return base_addr << 28;
-}
-
-static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev)
-{
- uint32_t xgmi_node_id = 0;
- uint32_t df_inst_id = 0;
-
- /* Walk through DF dst nodes to find current XGMI node */
- for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) {
-
- xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 +
- df_inst_id * DF_3_6_SMN_REG_INST_DIST);
- xgmi_node_id = REG_GET_FIELD(xgmi_node_id,
- DF_CS_UMC_AON0_DramLimitAddress0,
- DstFabricID);
-
- /* TODO: establish reason dest fabric id is offset by 7 */
- xgmi_node_id = xgmi_node_id >> 7;
-
- if (adev->gmc.xgmi.physical_node_id == xgmi_node_id)
- break;
- }
-
- if (df_inst_id == DF_3_6_INST_CNT) {
- DRM_WARN("cant match df dst id with gpu node");
- return 0;
- }
-
- return df_inst_id;
-}
-
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -752,6 +700,4 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
.set_fica = df_v3_6_set_fica,
- .get_dram_base_addr = df_v3_6_get_dram_base_addr,
- .get_df_inst_id = df_v3_6_get_df_inst_id
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index d78059fd2c72..bd5dd4f64311 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -138,6 +138,1062 @@ static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] =
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_0_nv10[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x33),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
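/*
 * Editor's note on the RLC SPM tables above and below: the entries come in
 * pairs.  Each mmGRBM_GFX_INDEX write selects which shader engine the next
 * write lands on (0x0 for SE0, 0x10000 for SE1 via the SE_INDEX field; the
 * trailing 0xFFFFFFFF/0xe0000000 entry restores broadcast mode), and each
 * RLC_SPM_*_SAMPLEDELAY_IND_ADDR / _IND_DATA pair programs one SPM
 * sample-delay value through the RLC's indirect register interface.
 *
 * The sketch below shows how such a golden-register table is typically
 * applied.  It assumes the usual soc15_reg_golden layout and mirrors the
 * soc15_program_register_sequence() read-modify-write pattern; it is an
 * illustration of the mechanism, not the exact driver code.
 */
struct soc15_reg_golden {
	u32 hwip;	/* IP block, e.g. GC_HWIP */
	u32 instance;	/* IP instance */
	u32 segment;	/* register segment, i.e. reg##_BASE_IDX */
	u32 reg;	/* register offset within that segment */
	u32 and_mask;	/* which bits of the register the entry owns */
	u32 or_mask;	/* value to program into those bits */
};

/* Sketch only: the in-tree equivalent is soc15_program_register_sequence(). */
static void apply_golden_regs(struct amdgpu_device *adev,
			      const struct soc15_reg_golden *regs,
			      u32 array_size)
{
	u32 i, reg, tmp;

	for (i = 0; i < array_size; i++) {
		const struct soc15_reg_golden *e = &regs[i];

		/* Resolve the per-ASIC MMIO offset for this entry. */
		reg = adev->reg_offset[e->hwip][e->instance][e->segment] +
		      e->reg;

		if (e->and_mask == 0xffffffff) {
			/* Entry owns the whole register: direct write. */
			tmp = e->or_mask;
		} else {
			/* Read-modify-write only the masked field. */
			tmp = RREG32(reg);
			tmp &= ~e->and_mask;
			tmp |= e->or_mask & e->and_mask;
		}
		/* RREG32()/WREG32() are the driver's MMIO accessors. */
		WREG32(reg, tmp);
	}
}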
static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
@@ -272,14 +1328,1694 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
/* Pending on emulation bring up */
};
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_nv14[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xE0000000L, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1e),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1a8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1ac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1b8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1bc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1cc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x26),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x25),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x3b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
{
/* Pending on emulation bring up */
};
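+
+/*
+ * Note the fixed pattern in these RLC SPM golden tables: each
+ * mmGRBM_GFX_INDEX write selects the target shader engine/instance
+ * (SE_INDEX lives in bits 23:16, so 0x0 selects SE0 and 0x10000
+ * selects SE1), and the *_SAMPLEDELAY_IND_ADDR/_IND_DATA pair that
+ * follows programs one indirect SPM sample-delay entry for that
+ * selection.  When a table is applied with
+ * soc15_program_register_sequence(), an and_mask of 0xFFFFFFFF means
+ * a direct write of or_mask; any other and_mask is a read-modify-write
+ * of just the masked bits.
+ */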
+static const struct soc15_reg_golden golden_settings_gc_rlc_spm_10_1_2_nv12[] =
+{
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000L, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x2),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x3),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x20),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x24),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x28),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x38),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x3c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x18),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x50),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x54),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x58),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x5c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x48),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x40),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x44),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1a),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x60),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x64),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x70),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x74),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x68),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x6c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x78),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x7c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x88),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x8c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x80),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x84),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x90),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x94),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x98),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x9c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xa8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xac),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xbc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xb4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xc8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xcc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xec),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x16),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xf8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xfc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x17),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x100),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x13),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x104),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xe0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x118),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x11c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x120),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x124),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xdc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x110),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x114),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x14),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x108),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x10c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x19),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0xd8),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1b),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x128),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x12c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x138),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x13c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x130),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x12),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x134),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xf),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x140),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x144),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x150),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x154),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x148),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x14c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x7),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x158),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x15c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x168),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xa),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x16c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x9),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x160),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x164),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x170),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x174),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x180),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x184),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x178),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x17c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x188),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x18c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x5),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x198),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xc),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x19c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x190),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xe),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x194),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x30),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xd),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x34),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x11),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1d),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x4),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_ADDR, 0xFFFFFFFF, 0x2c),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLB_SAMPLEDELAY_IND_DATA, 0xFFFFFFFF, 0xb),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_SAMPLE_SKEW, 0x000000FF, 0x1f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_GLOBALS_MUXSEL_SKEW, 0x000000FF, 0x22),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x1),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_SAMPLE_SKEW, 0x000000FF, 0x6),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x10),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x10000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_SE_MUXSEL_SKEW, 0x000000FF, 0x15),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xffffff, 0x0),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmRLC_SPM_DESER_START_SKEW, 0x000000FF, 0x35),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_GFX_INDEX, 0xFFFFFFFF, 0xe0000000)
+};
+
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
- (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+ (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
(SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
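
The table above follows a fixed cadence: each value write is preceded by a
mmGRBM_GFX_INDEX write that banks the access to one shader engine (0x0
selects SE0, 0x10000 sets SE_INDEX to 1, and the trailing 0xe0000000
appears to restore the SE/SH/instance broadcast bits), and each
RLC_SPM_*_SAMPLEDELAY_IND_ADDR / _IND_DATA pair writes one entry of an
indirect sample-delay table. A minimal standalone sketch of that access
pattern, assuming hypothetical register offsets and a stand-in write_reg()
MMIO helper:

#include <stdint.h>

#define GRBM_GFX_INDEX	0x2200	/* hypothetical offsets, illustration only */
#define SPM_SE_IND_ADDR	0x2201
#define SPM_SE_IND_DATA	0x2202

void write_reg(uint32_t reg, uint32_t val);	/* assumed MMIO write helper */

/* Write one entry of the per-SE SPM sample-delay table. */
static void spm_se_set_sample_delay(uint32_t se_select, uint32_t entry,
				    uint32_t delay)
{
	/* Bank the access to one shader engine; the golden list repeats
	 * this write before every register because its entries are
	 * applied independently, one write at a time. */
	write_reg(GRBM_GFX_INDEX, se_select);
	write_reg(SPM_SE_IND_ADDR, entry);	/* select the table slot */
	write_reg(GRBM_GFX_INDEX, se_select);
	write_reg(SPM_SE_IND_DATA, delay);	/* write the delay value */
}
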
@@ -301,7 +3037,7 @@ static int gfx_v10_0_rlc_backdoor_autoload_enable(struct amdgpu_device *adev);
static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev);
static void gfx_v10_0_ring_emit_ce_meta(struct amdgpu_ring *ring, bool resume);
static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start);
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
{
@@ -431,6 +3167,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_0_nv10,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
case CHIP_NAVI14:
soc15_program_register_sequence(adev,
@@ -439,6 +3178,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_nv14,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
case CHIP_NAVI12:
soc15_program_register_sequence(adev,
@@ -447,6 +3189,9 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
+ soc15_program_register_sequence(adev,
+ golden_settings_gc_rlc_spm_10_1_2_nv12,
+ (const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
break;
default:
break;
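
Each of the three new calls hands soc15_program_register_sequence() a flat
array of (register, and_mask, or_mask) triples built by
SOC15_REG_GOLDEN_VALUE. As a mental model (the driver's exact helper may
differ in details), applying one entry is a masked read-modify-write, with
a full mask degenerating into a plain write; a self-contained sketch with
assumed reg_read()/reg_write() MMIO helpers:

#include <stdint.h>

uint32_t reg_read(uint32_t reg);		/* assumed MMIO helpers */
void reg_write(uint32_t reg, uint32_t val);

struct golden_entry {
	uint32_t reg;		/* register offset */
	uint32_t and_mask;	/* bits the entry is allowed to touch */
	uint32_t or_mask;	/* value to set within those bits */
};

static void apply_golden(const struct golden_entry *e, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		uint32_t val;

		if (e[i].and_mask == 0xffffffff) {
			/* Full mask: the entry owns the whole register,
			 * so skip the read and write the value directly. */
			val = e[i].or_mask;
		} else {
			val = reg_read(e[i].reg);
			val &= ~e[i].and_mask;		/* clear owned bits */
			val |= e[i].or_mask & e[i].and_mask;
		}
		reg_write(e[i].reg, val);
	}
}
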
@@ -557,7 +3302,8 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1298,7 +4044,8 @@ static int gfx_v10_0_gfx_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
return 0;
@@ -1309,7 +4056,8 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
{
int r;
unsigned irq_type;
- struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ struct amdgpu_ring *ring;
+ unsigned int hw_prio;
+ ring = &adev->gfx.compute_ring[ring_id];
@@ -1328,10 +4076,11 @@ static int gfx_v10_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
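
The compute path now derives a hardware pipe priority from the queue
before creating the ring, while the gfx path above passes
AMDGPU_RING_PRIO_DEFAULT unconditionally. The mapping itself is a simple
predicate-to-enum translation; a sketch with hypothetical enum values and
an assumed policy hook:

enum pipe_prio {		/* illustrative values only */
	PIPE_PRIO_NORMAL = 1,
	PIPE_PRIO_HIGH = 2,
};

int queue_is_high_priority(unsigned int queue);	/* assumed policy hook */

/* Translate the per-queue policy into the priority passed to ring init. */
static enum pipe_prio queue_to_pipe_prio(unsigned int queue)
{
	return queue_is_high_priority(queue) ? PIPE_PRIO_HIGH
					     : PIPE_PRIO_NORMAL;
}
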
@@ -1829,9 +4578,9 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
/* csib */
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
- adev->gfx.rlc.clear_state_gpu_addr >> 32);
+ adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
- adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
+ adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
@@ -2441,10 +5190,6 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
@@ -2923,16 +5668,12 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3268,11 +6009,8 @@ static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -3802,14 +6540,16 @@ static int gfx_v10_0_hw_init(void *handle)
* loaded firstly, so in direct type, it has to load smc ucode
* here before rlc.
*/
- r = smu_load_microcode(&adev->smu);
- if (r)
- return r;
+ if (adev->smu.ppt_funcs != NULL) {
+ r = smu_load_microcode(&adev->smu);
+ if (r)
+ return r;
- r = smu_check_fw_status(&adev->smu);
- if (r) {
- pr_err("SMC firmware status is not correct\n");
- return r;
+ r = smu_check_fw_status(&adev->smu);
+ if (r) {
+ pr_err("SMC firmware status is not correct\n");
+ return r;
+ }
}
}
@@ -4273,7 +7013,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
- /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
+ gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
}
if (adev->cg_flags &
@@ -4292,14 +7032,21 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
- u32 data;
+ u32 reg, data;
- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
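The same read-modify-write pattern recurs in the gfx v8 and v9 hunks below. The idea, sketched with a hypothetical accessor, is to bypass the KIQ when SR-IOV runs in one-VF mode, where KIQ-mediated register access is not usable:

/* hypothetical helper, illustration only */
static u32 rlc_spm_rreg(struct amdgpu_device *adev, u32 reg)
{
        if (amdgpu_sriov_is_pp_one_vf(adev))
                return RREG32_NO_KIQ(reg);      /* direct MMIO, skip the KIQ */
        return RREG32(reg);
}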
@@ -4341,6 +7088,20 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.reset = gfx_v10_0_rlc_reset,
.start = gfx_v10_0_rlc_start,
.update_spm_vmid = gfx_v10_0_update_spm_vmid,
+};
+
+static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = {
+ .is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
+ .set_safe_mode = gfx_v10_0_set_safe_mode,
+ .unset_safe_mode = gfx_v10_0_unset_safe_mode,
+ .init = gfx_v10_0_rlc_init,
+ .get_csb_size = gfx_v10_0_get_csb_size,
+ .get_csb_buffer = gfx_v10_0_get_csb_buffer,
+ .resume = gfx_v10_0_rlc_resume,
+ .stop = gfx_v10_0_rlc_stop,
+ .reset = gfx_v10_0_rlc_reset,
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
.rlcg_wreg = gfx_v10_rlcg_wreg,
.is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
@@ -4350,14 +7111,14 @@ static int gfx_v10_0_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
+
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else
- amdgpu_gfx_off_ctrl(adev, true);
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -4370,6 +7131,9 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
@@ -4682,7 +7446,8 @@ static void gfx_v10_0_ring_emit_sb(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0);
}
-static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
+static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring,
+ uint32_t flags)
{
uint32_t dw2 = 0;
@@ -4690,8 +7455,6 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
gfx_v10_0_ring_emit_ce_meta(ring,
(!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
- gfx_v10_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -4848,16 +7611,19 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume)
sizeof(de_payload) >> 2);
}
-static void gfx_v10_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
-static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -4866,9 +7632,9 @@ static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -4918,6 +7684,19 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
ref, mask);
}
+static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
+ unsigned vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t value = 0;
+
+ value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
+ value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
+ value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
+ value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+}
+
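The new hook is installed as .soft_recovery in the ring funcs further down; SQ_CMD asks the SQ to kill waves owned by the given VMID. A sketch of the expected caller side, with names assumed from the common ring code of this era rather than taken from this patch:

        /* sketch: the generic hang handler retries the per-ASIC hook until
         * the guilty fence signals or a timeout expires */
        if (ring->funcs->soft_recovery)
                ring->funcs->soft_recovery(ring, vmid);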
static void
gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
uint32_t me, uint32_t pipe,
@@ -5241,6 +8020,29 @@ static int gfx_v10_0_kiq_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int gcr_cntl =
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(1) |
+ PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 6));
+ amdgpu_ring_write(ring, 0); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+ amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
+}
+
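The 8 dwords added to .emit_frame_size below must match what this function writes. As a rule of thumb (a sketch, not a driver API): a type-3 packet with count field N occupies N + 2 dwords, one header plus N + 1 payload dwords, so ACQUIRE_MEM with count 6 costs 8 here, and the count-3 SURFACE_SYNC and count-5 ACQUIRE_MEM packets in the gfx v6/v7/v8/v9 hunks below cost 5 and 7, matching their budget updates:

/* illustration only */
static unsigned int packet3_dwords(unsigned int count)
{
        return count + 2; /* header + (count + 1) payload dwords */
}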
static const struct amd_ip_funcs gfx_v10_0_ip_funcs = {
.name = "gfx_v10_0",
.early_init = gfx_v10_0_early_init,
@@ -5288,7 +8090,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 8, /* gfx_v10_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v10_0_ring_emit_ib_gfx */
.emit_ib = gfx_v10_0_ring_emit_ib_gfx,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5305,10 +8108,12 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.init_cond_exec = gfx_v10_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v10_0_ring_emit_patch_cond_exec,
.preempt_ib = gfx_v10_0_ring_preempt_ib,
- .emit_tmz = gfx_v10_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v10_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .soft_recovery = gfx_v10_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v10_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -5328,7 +8133,8 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v10_0_ring_emit_fence x3 for user fence, vm fence */
+ 8, /* gfx_v10_0_emit_mem_sync */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,
.emit_fence = gfx_v10_0_ring_emit_fence,
@@ -5343,6 +8149,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_wreg = gfx_v10_0_ring_emit_wreg,
.emit_reg_wait = gfx_v10_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v10_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
@@ -5429,9 +8236,11 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
+ case CHIP_NAVI12:
+ adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 31f44d05e606..79c52c7a02e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -1914,7 +1914,8 @@ static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1950,7 +1951,6 @@ err1:
static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
if (enable) {
WREG32(mmCP_ME_CNTL, 0);
} else {
@@ -1958,10 +1958,6 @@ static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
CP_ME_CNTL__PFP_HALT_MASK |
CP_ME_CNTL__CE_HALT_MASK));
WREG32(mmSCRATCH_UMSK, 0);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
}
udelay(50);
}
@@ -3114,7 +3110,9 @@ static int gfx_v6_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -3136,7 +3134,8 @@ static int gfx_v6_0_sw_init(void *handle)
sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -3466,6 +3465,18 @@ static int gfx_v6_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v6_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
.name = "gfx_v6_0",
.early_init = gfx_v6_0_early_init,
@@ -3496,7 +3507,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
- 3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ 3 + 2 + /* gfx_v6_ring_emit_cntxcntl including vgt flush */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3507,6 +3519,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
.insert_nop = amdgpu_ring_insert_nop,
.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
.emit_wreg = gfx_v6_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v6_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
@@ -3520,7 +3533,8 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
5 + 5 + /* hdp flush / invalidate */
7 + /* gfx_v6_0_ring_emit_pipeline_sync */
SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
- 14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
.emit_ib = gfx_v6_0_ring_emit_ib,
.emit_fence = gfx_v6_0_ring_emit_fence,
@@ -3530,6 +3544,7 @@ static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
.test_ib = gfx_v6_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.emit_wreg = gfx_v6_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v6_0_emit_mem_sync,
};
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 733d398c61cc..0cc011f9190d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2364,7 +2364,8 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
WREG32(scratch, 0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -2431,15 +2432,12 @@ err1:
*/
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
- if (enable) {
+ if (enable)
WREG32(mmCP_ME_CNTL, 0);
- } else {
- WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
+ else
+ WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
+ CP_ME_CNTL__PFP_HALT_MASK |
+ CP_ME_CNTL__CE_HALT_MASK));
udelay(50);
}
@@ -2700,15 +2698,11 @@ static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
*/
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
- if (enable) {
+ if (enable)
WREG32(mmCP_MEC_CNTL, 0);
- } else {
- WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
- }
+ else
+ WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK |
+ CP_MEC_CNTL__MEC_ME2_HALT_MASK));
udelay(50);
}
@@ -4439,7 +4433,8 @@ static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -4511,7 +4506,9 @@ static int gfx_v7_0_sw_init(void *handle)
ring->ring_obj = NULL;
sprintf(ring->name, "gfx");
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -5001,6 +4998,32 @@ static int gfx_v7_0_set_powergating_state(void *handle,
return 0;
}
+static void gfx_v7_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
.name = "gfx_v7_0",
.early_init = gfx_v7_0_early_init,
@@ -5033,7 +5056,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
- 3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ 3 + 4 + /* gfx_v7_ring_emit_cntxcntl including vgt flush*/
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
@@ -5048,6 +5072,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
.soft_recovery = gfx_v7_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v7_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
@@ -5064,7 +5089,8 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5 + /* hdp invalidate */
7 + /* gfx_v7_0_ring_emit_pipeline_sync */
CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
- 7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7 + 7 + 7 + /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7, /* gfx_v7_0_emit_mem_sync_compute */
.emit_ib_size = 7, /* gfx_v7_0_ring_emit_ib_compute */
.emit_ib = gfx_v7_0_ring_emit_ib_compute,
.emit_fence = gfx_v7_0_ring_emit_fence_compute,
@@ -5077,6 +5103,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v7_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v7_0_emit_mem_sync_compute,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fc32586ef80b..1d4128227ffd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -888,7 +888,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1550,7 +1551,8 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -1892,6 +1894,7 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -1911,9 +1914,11 @@ static int gfx_v8_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_RING_PRIO_DEFAULT;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -2017,7 +2022,8 @@ static int gfx_v8_0_sw_init(void *handle)
}
r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
- AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -4120,7 +4126,6 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32(mmCP_ME_CNTL);
if (enable) {
@@ -4131,8 +4136,6 @@ static void gfx_v8_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
}
WREG32(mmCP_ME_CNTL, tmp);
udelay(50);
@@ -4320,14 +4323,10 @@ static int gfx_v8_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32(mmCP_MEC_CNTL, 0);
} else {
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -4437,11 +4436,8 @@ static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *m
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -5619,12 +5615,18 @@ static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
u32 data;
- data = RREG32(mmRLC_SPM_VMID);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(mmRLC_SPM_VMID);
+ else
+ data = RREG32(mmRLC_SPM_VMID);
data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
- WREG32(mmRLC_SPM_VMID, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_NO_KIQ(mmRLC_SPM_VMID, data);
+ else
+ WREG32(mmRLC_SPM_VMID, data);
}
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
@@ -6387,10 +6389,10 @@ static void gfx_v8_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size >> 2) - offset + cur;
}
-static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -6399,9 +6401,9 @@ static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6815,6 +6817,34 @@ static int gfx_v8_0_sq_irq(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v8_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA |
+ PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
+static void gfx_v8_0_emit_mem_sync_compute(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
+ PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_KCACHE_ACTION_ENA |
+ PACKET3_SH_ICACHE_ACTION_ENA |
+ PACKET3_TC_WB_ACTION_ENA); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* poll interval */
+}
+
static const struct amd_ip_funcs gfx_v8_0_ip_funcs = {
.name = "gfx_v8_0",
.early_init = gfx_v8_0_early_init,
@@ -6861,7 +6891,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
12 + 12 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 5, /* SURFACE_SYNC */
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */
.emit_ib = gfx_v8_0_ring_emit_ib_gfx,
.emit_fence = gfx_v8_0_ring_emit_fence_gfx,
@@ -6879,6 +6910,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
.patch_cond_exec = gfx_v8_0_ring_emit_patch_cond_exec,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
.soft_recovery = gfx_v8_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v8_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
@@ -6895,7 +6927,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
5 + /* hdp_invalidate */
7 + /* gfx_v8_0_ring_emit_pipeline_sync */
VI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v8_0_ring_emit_vm_flush */
- 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7 + 7 + 7 + /* gfx_v8_0_ring_emit_fence_compute x3 for user fence, vm fence */
+ 7, /* gfx_v8_0_emit_mem_sync_compute */
.emit_ib_size = 7, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_compute,
@@ -6908,6 +6941,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
+ .emit_mem_sync = gfx_v8_0_emit_mem_sync_compute,
};
static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index e6b113ed2f40..711e9dd19705 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -50,18 +50,14 @@
#include "gfx_v9_4.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
#define RLC_SAVE_RESTORE_ADDR_STARTING_OFFSET 0x00000000L
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
#define mmGCEA_PROBE_MAP 0x070c
#define mmGCEA_PROBE_MAP_BASE_IDX 0
@@ -511,8 +507,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSH_MEM_CONFIG, 0x00001000, 0x00001000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_0, 0x0007ffff, 0x00000800),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_CU_1, 0x0007ffff, 0x00000800),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x0000ff87),
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x0000ff8f),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_0, 0x01ffffff, 0x00ffff87),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSPI_RESOURCE_RESERVE_EN_CU_1, 0x01ffffff, 0x00ffff8f),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQC_CONFIG, 0x03000000, 0x020a2000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfffffeef, 0x010b0000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x4a2c0e68),
@@ -963,7 +959,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
case CHIP_RAVEN:
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rv2,
ARRAY_SIZE(golden_settings_gc_9_1_rv2));
@@ -1082,7 +1078,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
gpu_addr = adev->wb.gpu_addr + (index * 4);
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ r = amdgpu_ib_get(adev, NULL, 16,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err1;
@@ -1234,6 +1231,10 @@ struct amdgpu_gfxoff_quirk {
static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
/* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */
{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=207171 */
+ { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
+ /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
{ 0, 0, 0, 0, 0 },
};
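Each row is (chip vendor, chip device, subsystem vendor, subsystem device, revision), and the table is sentinel-terminated. A sketch of how such a table is typically walked; the actual consumer is gfx_v9_0_should_disable_gfxoff in this file, and the field names here are assumptions:

/* sketch: match the PCI identity against the sentinel-terminated list */
static bool matches_quirk(const struct amdgpu_gfxoff_quirk *p,
                          struct pci_dev *pdev)
{
        for (; p->chip_device != 0; p++) {
                if (pdev->vendor == p->chip_vendor &&
                    pdev->device == p->chip_device &&
                    pdev->subsystem_vendor == p->subsys_vendor &&
                    pdev->subsystem_device == p->subsys_device &&
                    pdev->revision == p->revision)
                        return true;
        }
        return false;
}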
@@ -1273,7 +1274,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
case CHIP_VEGA20:
break;
case CHIP_RAVEN:
- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
+ if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
+ (adev->apu_flags & AMD_APU_IS_PICASSO)) &&
((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
(adev->gfx.rlc_feature_version < 1) ||
@@ -1616,9 +1618,9 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
chip_name = "vega20";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -2118,7 +2120,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
gb_addr_config = RAVEN2_GB_ADDR_CONFIG_GOLDEN;
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
@@ -2195,6 +2197,7 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
int r;
unsigned irq_type;
struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
+ unsigned int hw_prio;
ring = &adev->gfx.compute_ring[ring_id];
@@ -2213,10 +2216,11 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
+ ring->pipe;
-
+ hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
+ AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
/* type-2 packets are deprecated on MEC, use type-3 instead */
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, irq_type);
+ &adev->gfx.eop_irq, irq_type, hw_prio);
if (r)
return r;
@@ -2310,7 +2314,9 @@ static int gfx_v9_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
r = amdgpu_ring_init(adev, ring, 1024,
- &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
+ &adev->gfx.eop_irq,
+ AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -2528,7 +2534,7 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
break;
default:
break;
- };
+ }
}
static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
@@ -2963,8 +2969,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
if (adev->asic_type == CHIP_VEGA12 ||
- (adev->asic_type == CHIP_RAVEN &&
- adev->rev_id >= 8))
+ (adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
}
@@ -3100,16 +3105,11 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (!enable) {
- for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- adev->gfx.gfx_ring[i].sched.ready = false;
- }
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
udelay(50);
}
@@ -3305,15 +3305,11 @@ static int gfx_v9_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v9_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
- int i;
-
if (enable) {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL, 0);
} else {
WREG32_SOC15_RLC(GC, 0, mmCP_MEC_CNTL,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
- for (i = 0; i < adev->gfx.num_compute_rings; i++)
- adev->gfx.compute_ring[i].sched.ready = false;
adev->gfx.kiq.ring.sched.ready = false;
}
udelay(50);
@@ -3383,11 +3379,8 @@ static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *m
if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
- ring->has_high_prio = true;
mqd->cp_hqd_queue_priority =
AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
- } else {
- ring->has_high_prio = false;
}
}
}
@@ -4054,13 +4047,18 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
{
signed long r, cnt = 0;
unsigned long flags;
- uint32_t seq;
+ uint32_t seq, reg_val_offs = 0;
+ uint64_t value = 0;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *ring = &kiq->ring;
BUG_ON(!ring->funcs->emit_rreg);
spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
+ pr_err("critical bug! too many kiq readers\n");
+ goto failed_unlock;
+ }
amdgpu_ring_alloc(ring, 32);
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 9 | /* src: register*/
@@ -4070,10 +4068,13 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
- amdgpu_fence_emit_polling(ring, &seq);
+ reg_val_offs * 4));
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r)
+ goto failed_undo;
+
amdgpu_ring_commit(ring);
spin_unlock_irqrestore(&kiq->ring_lock, flags);
@@ -4099,10 +4100,19 @@ static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
- return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
- (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL;
+ mb();
+ value = (uint64_t)adev->wb.wb[reg_val_offs] |
+ (uint64_t)adev->wb.wb[reg_val_offs + 1] << 32ULL;
+ amdgpu_device_wb_free(adev, reg_val_offs);
+ return value;
+failed_undo:
+ amdgpu_ring_undo(ring);
+failed_unlock:
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
+ if (reg_val_offs)
+ amdgpu_device_wb_free(adev, reg_val_offs);
pr_err("failed to read gpu clock\n");
return ~0;
}
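The labels added above unwind in strict reverse order of acquisition: the ring dwords are undone before the lock is dropped, and the writeback slot is freed last because it is valid on every failure path that allocated it. Schematically:

        /* acquisition order: lock -> wb slot -> ring dwords -> fence
         * release order:     undo dwords -> unlock -> free wb slot */
failed_undo:
        amdgpu_ring_undo(ring);                         /* give back ring space */
failed_unlock:
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
        if (reg_val_offs)
                amdgpu_device_wb_free(adev, reg_val_offs); /* give back wb slot */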
@@ -4487,7 +4497,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* allocate an indirect buffer to put the commands in */
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, total_size, &ib);
+ r = amdgpu_ib_get(adev, NULL, total_size,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
return r;
@@ -4958,14 +4969,21 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
{
- u32 data;
+ u32 reg, data;
- data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+ reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ data = RREG32_NO_KIQ(reg);
+ else
+ data = RREG32(reg);
data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
- WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ WREG32_SOC15_NO_KIQ(GC, 0, mmRLC_SPM_MC_CNTL, data);
+ else
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
}
static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
@@ -5023,10 +5041,9 @@ static int gfx_v9_0_set_powergating_state(void *handle,
switch (adev->asic_type) {
case CHIP_RAVEN:
case CHIP_RENOIR:
- if (!enable) {
+ if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- }
+
if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
gfx_v9_0_enable_sck_slow_down_on_power_up(adev, true);
gfx_v9_0_enable_sck_slow_down_on_power_down(adev, true);
@@ -5050,12 +5067,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
amdgpu_gfx_off_ctrl(adev, true);
break;
case CHIP_VEGA12:
- if (!enable) {
- amdgpu_gfx_off_ctrl(adev, false);
- cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
- } else {
- amdgpu_gfx_off_ctrl(adev, true);
- }
+ amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
break;
@@ -5426,10 +5438,13 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
}
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+static void gfx_v9_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start,
+ bool secure)
{
+ uint32_t v = secure ? FRAME_TMZ : 0;
+
amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
- amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+ amdgpu_ring_write(ring, v | FRAME_CMD(start ? 0 : 1));
}
static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
@@ -5439,8 +5454,6 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
if (amdgpu_sriov_vf(ring->adev))
gfx_v9_0_ring_emit_ce_meta(ring);
- gfx_v9_0_ring_emit_tmz(ring, true);
-
dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
if (flags & AMDGPU_HAVE_CTX_SWITCH) {
/* set load_global_config & load_global_uconfig */
@@ -5491,10 +5504,10 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
}
-static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
+static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg,
+ uint32_t reg_val_offs)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_kiq *kiq = &adev->gfx.kiq;
amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
amdgpu_ring_write(ring, 0 | /* src: register*/
@@ -5503,9 +5516,9 @@ static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
amdgpu_ring_write(ring, reg);
amdgpu_ring_write(ring, 0);
amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
- kiq->reg_val_offs * 4));
+ reg_val_offs * 4));
}
static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
@@ -6406,15 +6419,15 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6426,16 +6439,16 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- vml2_walker_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- vml2_walker_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6446,8 +6459,9 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_2m_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_2m_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
}
@@ -6458,15 +6472,17 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = (data & 0x00006000L) >> 0xd;
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
- atc_l2_cache_4k_mems[i], sec_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "SEC %d\n", i, atc_l2_cache_4k_mems[i],
+ sec_count);
err_data->ce_count += sec_count;
}
ded_count = (data & 0x00018000L) >> 0xf;
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
- atc_l2_cache_4k_mems[i], ded_count);
+ dev_info(adev->dev, "Instance[%d]: SubBlock %s, "
+ "DED %d\n", i, atc_l2_cache_4k_mems[i],
+ ded_count);
err_data->ue_count += ded_count;
}
}
@@ -6479,7 +6495,8 @@ static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_0_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id, uint32_t value,
uint32_t *sec_count, uint32_t *ded_count)
{
@@ -6496,7 +6513,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
gfx_v9_0_ras_fields[i].sec_count_mask) >>
gfx_v9_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], SEC %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
sec_cnt);
@@ -6507,7 +6525,8 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
gfx_v9_0_ras_fields[i].ded_count_mask) >>
gfx_v9_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ dev_info(adev->dev, "GFX SubBlock %s, "
+ "Instance[%d][%d], DED %d\n",
gfx_v9_0_ras_fields[i].name,
se_id, inst_id,
ded_cnt);
@@ -6596,9 +6615,10 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i]));
if (reg_value)
- gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i],
- j, k, reg_value,
- &sec_count, &ded_count);
+ gfx_v9_0_ras_error_count(adev,
+ &gfx_v9_0_edc_counter_regs[i],
+ j, k, reg_value,
+ &sec_count, &ded_count);
}
}
}
@@ -6614,6 +6634,25 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring)
+{
+ const unsigned int cp_coher_cntl =
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(1) |
+ PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(1);
+
+ /* ACQUIRE_MEM - make one or more surfaces valid for use by the subsequent operations */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_ACQUIRE_MEM, 5));
+ amdgpu_ring_write(ring, cp_coher_cntl); /* CP_COHER_CNTL */
+ amdgpu_ring_write(ring, 0xffffffff); /* CP_COHER_SIZE */
+ amdgpu_ring_write(ring, 0xffffff); /* CP_COHER_SIZE_HI */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE */
+ amdgpu_ring_write(ring, 0); /* CP_COHER_BASE_HI */
+ amdgpu_ring_write(ring, 0x0000000A); /* POLL_INTERVAL */
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -6660,7 +6699,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
3 + /* CNTX_CTRL */
5 + /* HDP_INVL */
8 + 8 + /* FENCE x2 */
- 2, /* SWITCH_BUFFER */
+ 2 + /* SWITCH_BUFFER */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_gfx */
.emit_ib = gfx_v9_0_ring_emit_ib_gfx,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6676,11 +6716,12 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = {
.emit_cntxcntl = gfx_v9_ring_emit_cntxcntl,
.init_cond_exec = gfx_v9_0_ring_emit_init_cond_exec,
.patch_cond_exec = gfx_v9_0_ring_emit_patch_cond_exec,
- .emit_tmz = gfx_v9_0_ring_emit_tmz,
+ .emit_frame_cntl = gfx_v9_0_ring_emit_frame_cntl,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v9_0_ring_soft_recovery,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
@@ -6700,7 +6741,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
- 8 + 8 + 8, /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
+ 7, /* gfx_v9_0_emit_mem_sync */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence,
@@ -6715,6 +6757,7 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
+ .emit_mem_sync = gfx_v9_0_emit_mem_sync,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
@@ -6838,7 +6881,7 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index dce945ef21a5..46351db36922 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -732,7 +732,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_walker_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -740,7 +741,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_walker_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -752,14 +754,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
utcl2_router_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
utcl2_router_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -772,7 +776,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_2m_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -780,7 +785,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_2m_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -793,7 +799,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
atc_l2_cache_4k_mems[i], sec_count);
err_data->ce_count += sec_count;
}
@@ -801,7 +808,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL,
DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
atc_l2_cache_4k_mems[i], ded_count);
err_data->ue_count += ded_count;
}
@@ -816,7 +824,8 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
return 0;
}
-static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
+static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t se_id, uint32_t inst_id,
uint32_t value, uint32_t *sec_count,
uint32_t *ded_count)
@@ -833,7 +842,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >>
gfx_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], SEC %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
sec_cnt);
*sec_count += sec_cnt;
@@ -842,7 +852,8 @@ static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg,
ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >>
gfx_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n",
+ dev_info(adev->dev,
+ "GFX SubBlock %s, Instance[%d][%d], DED %d\n",
gfx_v9_4_ras_fields[i].name, se_id, inst_id,
ded_cnt);
*ded_count += ded_cnt;
@@ -876,7 +887,7 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
gfx_v9_4_edc_counter_regs[i]));
if (reg_value)
- gfx_v9_4_ras_error_count(
+ gfx_v9_4_ras_error_count(adev,
&gfx_v9_4_edc_counter_regs[i],
j, k, reg_value, &sec_count,
&ded_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 1a2f18b908fe..6682b843bafe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -80,7 +80,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the
* vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR.
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 9775eca6fe43..ba2b7ac0c02d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -170,6 +170,9 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+ REG_GET_FIELD(status,
+ GCVM_L2_PROTECTION_FAULT_STATUS, CID));
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -369,7 +372,8 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* translation. Avoid this by doing the invalidation from the SDMA
* itself.
*/
- r = amdgpu_job_alloc_with_ib(adev, 16 * 4, &job);
+ r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+ &job);
if (r)
goto error_alloc;
@@ -423,7 +427,13 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ return -ETIME;
+ }
+
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
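amdgpu_fence_emit_polling now takes a timeout and can fail, so the invalidate request is only committed once the fence was actually emitted; the same emit-or-undo idiom appears in the gmc v9 hunk below. Condensed from the hunk above:

        spin_lock(&adev->gfx.kiq.ring_lock);
        amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
        kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r) {
                amdgpu_ring_undo(ring);         /* nothing committed yet */
                spin_unlock(&adev->gfx.kiq.ring_lock);
                return -ETIME;
        }
        amdgpu_ring_commit(ring);
        spin_unlock(&adev->gfx.kiq.ring_lock);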
@@ -676,17 +686,23 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
*/
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
- /* Could aper size report 0 ? */
- adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
- adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
+ int r;
/* size in MB on si */
adev->gmc.mc_vram_size =
adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
- adev->gmc.visible_vram_size = adev->gmc.aper_size;
+
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_device_resize_fb_bar(adev);
+ if (r)
+ return r;
+ }
+ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
+ adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
/* In case the PCI BAR is larger than the actual amount of vram */
+ adev->gmc.visible_vram_size = adev->gmc.aper_size;
if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
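The reordering is the point of this hunk: resizing the FB BAR can change both its bus address and its length, so the aperture must be sampled only after the (dGPU-only) resize. Condensed, with min() used for brevity:

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);  /* may move/grow BAR0 */
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
        adev->gmc.visible_vram_size = min(adev->gmc.aper_size,
                                          adev->gmc.real_vram_size);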
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index b205039350b6..a75e472b4a81 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -61,17 +61,6 @@ MODULE_FIRMWARE("amdgpu/si58_mc.bin");
#define MC_SEQ_MISC0__MT__HBM 0x60000000
#define MC_SEQ_MISC0__MT__DDR3 0xB0000000
-
-static const u32 crtc_offsets[6] =
-{
- SI_CRTC0_REGISTER_OFFSET,
- SI_CRTC1_REGISTER_OFFSET,
- SI_CRTC2_REGISTER_OFFSET,
- SI_CRTC3_REGISTER_OFFSET,
- SI_CRTC4_REGISTER_OFFSET,
- SI_CRTC5_REGISTER_OFFSET
-};
-
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
u32 blackout;
@@ -858,7 +847,7 @@ static int gmc_v6_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
if (r) {
- dev_warn(adev->dev, "amdgpu: No suitable DMA available.\n");
+ dev_warn(adev->dev, "No suitable DMA available.\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(44);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 9da9596a3638..bcd4baecfe11 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -762,6 +762,7 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (CIK).
*/
@@ -1019,7 +1020,7 @@ static int gmc_v7_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- pr_warn("amdgpu: No suitable DMA available\n");
+ pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 27d83204fa2b..26976e50e2a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1005,6 +1005,7 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
* @adev: amdgpu_device pointer
* @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
* @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
+ * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
*
* Print human readable fault information (VI).
*/
@@ -1144,7 +1145,7 @@ static int gmc_v8_0_sw_init(void *handle)
r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
- pr_warn("amdgpu: No suitable DMA available\n");
+ pr_warn("No suitable DMA available\n");
return r;
}
adev->need_swiotlb = drm_need_swiotlb(40);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 8606f877478f..11e93a82131d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -362,6 +362,9 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev,
"VM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
+ dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
+ REG_GET_FIELD(status,
+ VM_L2_PROTECTION_FAULT_STATUS, CID));
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
REG_GET_FIELD(status,
VM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
@@ -438,9 +441,8 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
return ((vmhub == AMDGPU_MMHUB_0 ||
vmhub == AMDGPU_MMHUB_1) &&
(!amdgpu_sriov_vf(adev)) &&
- (!(adev->asic_type == CHIP_RAVEN &&
- adev->rev_id < 0x8 &&
- adev->pdev->device == 0x15d8)));
+ (!(!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+ (adev->apu_flags & AMD_APU_IS_PICASSO))));
}
static bool gmc_v9_0_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
@@ -618,7 +620,13 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
- amdgpu_fence_emit_polling(ring, &seq);
+ r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+ if (r) {
+ amdgpu_ring_undo(ring);
+ spin_unlock(&adev->gfx.kiq.ring_lock);
+ return -ETIME;
+ }
+
amdgpu_ring_commit(ring);
spin_unlock(&adev->gfx.kiq.ring_lock);
r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
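
The gmc_v9_0 TLB-flush hunk makes the KIQ path recover from a failed polling-fence emit: if no sequence number can be obtained within MAX_KIQ_REG_WAIT, the packets queued on the ring are rolled back rather than committed. The full lock/alloc/emit/undo/commit/wait shape, restated as a sketch ("ndw", the dword reservation, is illustrative — the allocation happens just above the excerpted hunk):

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(ring, ndw);
	if (r) {
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return r;
	}
	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r) {
		amdgpu_ring_undo(ring);		/* discard uncommitted packets */
		spin_unlock(&adev->gfx.kiq.ring_lock);
		return -ETIME;
	}
	amdgpu_ring_commit(ring);		/* ring the doorbell */
	spin_unlock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
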
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
index 0debfd9f428c..b10c95cad9a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
@@ -480,7 +480,8 @@ int jpeg_v1_0_sw_init(void *handle)
ring = &adev->jpeg.inst->ring_dec;
sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 6173951db7b4..e67d09cb1b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -106,7 +106,8 @@ static int jpeg_v2_0_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1;
sprintf(ring->name, "jpeg_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -169,14 +170,11 @@ static int jpeg_v2_0_hw_init(void *handle)
static int jpeg_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->jpeg.inst->ring_dec;
if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
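
The jpeg hunks track two series-wide changes: amdgpu_ring_init() grows an explicit ring-priority parameter, and hw_fini stops clearing ring->sched.ready by hand (presumably because ring teardown is now handled in common code, which also leaves the local ring variable unused). The new call shape:

	/* 512 = ring size in dwords; the final argument selects the
	 * scheduler priority for this ring, here the default.
	 */
	r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst->irq,
			     0, AMDGPU_RING_PRIO_DEFAULT);
	if (r)
		return r;
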
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c04c2078a7c1..713c32560445 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -118,7 +118,8 @@ static int jpeg_v2_5_sw_init(void *handle)
ring->use_doorbell = true;
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + 8 * i;
sprintf(ring->name, "jpeg_dec_%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->jpeg.inst[i].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -267,7 +268,6 @@ static void jpeg_v2_5_disable_clock_gating(struct amdgpu_device* adev, int inst)
data = RREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE);
data &= ~(JPEG_CGC_GATE__JPEG_DEC_MASK
| JPEG_CGC_GATE__JPEG2_DEC_MASK
- | JPEG_CGC_GATE__JPEG_ENC_MASK
| JPEG_CGC_GATE__JMCIF_MASK
| JPEG_CGC_GATE__JRBBM_MASK);
WREG32_SOC15(JPEG, inst, mmJPEG_CGC_GATE, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 396c2a624de0..405767208a4d 100644..100755
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -96,7 +96,7 @@ static void mmhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
- if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
/*
* Raven2 has a HW issue that it is unable to use the vram which
* is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
@@ -690,7 +690,8 @@ static const struct soc15_reg_entry mmhub_v1_0_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2_VG20), 0, 0, 0},
};
-static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
+static int mmhub_v1_0_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
{
uint32_t i;
@@ -704,7 +705,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v1_0_ras_fields[i].sec_count_mask) >>
mmhub_v1_0_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev,
+ "MMHUB SubBlock %s, SEC %d\n",
mmhub_v1_0_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -714,7 +716,8 @@ static int mmhub_v1_0_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v1_0_ras_fields[i].ded_count_mask) >>
mmhub_v1_0_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev,
+ "MMHUB SubBlock %s, DED %d\n",
mmhub_v1_0_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -739,7 +742,8 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v1_0_get_ras_error_count(&mmhub_v1_0_edc_cnt_regs[i],
+ mmhub_v1_0_get_ras_error_count(adev,
+ &mmhub_v1_0_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
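
mmhub_v1_0_get_ras_error_count() now takes the device pointer so the SEC/DED messages go through dev_info(), which names the reporting GPU on multi-adapter systems, instead of the global DRM_INFO(). The counting itself is a mask-and-shift over each EDC register against the mmhub_v1_0_ras_fields table; sketched below, with "field" as shorthand for &mmhub_v1_0_ras_fields[i]:

	/* SEC = single-error-corrected count for this sub-block */
	uint32_t sec_cnt = (value & field->sec_count_mask) >>
			   field->sec_count_shift;

	if (sec_cnt) {
		dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
			 field->name, sec_cnt);
		*sec_count += sec_cnt;
	}
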
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 37dbe0f2142f..83b453f5d717 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -26,7 +26,7 @@
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
-#define AI_MAILBOX_POLL_FLR_TIMEDOUT 500
+#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -46,7 +46,8 @@ enum idh_event {
IDH_SUCCESS,
IDH_FAIL,
IDH_QUERY_ALIVE,
- IDH_EVENT_MAX
+
+ IDH_TEXT_MESSAGE = 255,
};
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 237fa5e16b7c..ce2bf1fb79ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -30,7 +30,6 @@
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"
-#include "mxgpu_ai.h"
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
@@ -53,8 +52,7 @@ static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
*/
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
- return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}
@@ -63,8 +61,7 @@ static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
{
u32 reg;
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0));
+ reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
if (reg != event)
return -ENOENT;
@@ -110,7 +107,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
timeout -= 10;
} while (timeout > 1);
- pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
return -ETIME;
}
@@ -118,7 +114,6 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
enum idh_request req, u32 data1, u32 data2, u32 data3)
{
- u32 reg;
int r;
uint8_t trn;
@@ -137,19 +132,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
}
} while (trn);
- reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0));
- reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0,
- MSGBUF_DATA, req);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0),
- reg);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1),
- data1);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2),
- data2);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3),
- data3);
-
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
+ WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
xgpu_nv_mailbox_set_valid(adev, true);
/* start to poll ack */
@@ -164,23 +150,48 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
int r;
+ enum idh_event event = -1;
xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
- /* start to check msg if request is idh_req_gpu_init_access */
- if (req == IDH_REQ_GPU_INIT_ACCESS ||
- req == IDH_REQ_GPU_FINI_ACCESS ||
- req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ switch (req) {
+ case IDH_REQ_GPU_INIT_ACCESS:
+ case IDH_REQ_GPU_FINI_ACCESS:
+ case IDH_REQ_GPU_RESET_ACCESS:
+ event = IDH_READY_TO_ACCESS_GPU;
+ break;
+ case IDH_REQ_GPU_INIT_DATA:
+ event = IDH_REQ_GPU_INIT_DATA_READY;
+ break;
+ default:
+ break;
+ }
+
+ if (event != -1) {
+ r = xgpu_nv_poll_msg(adev, event);
if (r) {
- pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
- return r;
+			if (req != IDH_REQ_GPU_INIT_DATA) {
+				pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);
+				return r;
+			} else {
+				/* host doesn't support REQ_GPU_INIT_DATA handshake */
+				adev->virt.req_init_data_ver = 0;
+			}
+ } else {
+			if (req == IDH_REQ_GPU_INIT_DATA) {
+ adev->virt.req_init_data_ver =
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
+
+ /* assume V1 in case host doesn't set version number */
+ if (adev->virt.req_init_data_ver < 1)
+ adev->virt.req_init_data_ver = 1;
+ }
}
+
/* Retrieve checksum from mailbox2 */
if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
- RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
- mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2));
+ RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
}
}
@@ -213,6 +224,11 @@ static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
+static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
+{
+ return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -226,11 +242,14 @@ static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+	/* ACK_INT_EN is bit 1 of MAILBOX_INT_CNTL (field macro dropped above) */
+	if (state == AMDGPU_IRQ_STATE_ENABLE)
+		tmp |= 2;
+	else
+		tmp &= ~2;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -282,11 +301,14 @@ static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL));
+ u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
+
+	/* VALID_INT_EN is bit 0 of MAILBOX_INT_CNTL (field macro dropped above) */
+	if (state == AMDGPU_IRQ_STATE_ENABLE)
+		tmp |= 1;
+	else
+		tmp &= ~1;
- tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN,
- (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
- WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp);
+ WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
return 0;
}
@@ -378,6 +400,7 @@ void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
.req_full_gpu = xgpu_nv_request_full_gpu_access,
.rel_full_gpu = xgpu_nv_release_full_gpu_access,
+ .req_init_data = xgpu_nv_request_init_data,
.reset_gpu = xgpu_nv_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_nv_mailbox_trans_msg,
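
The reworked xgpu_nv_send_access_requests() first maps each request onto the event the host is expected to answer with, then polls for it; only the new IDH_REQ_GPU_INIT_DATA request tolerates a poll timeout, since older hosts do not implement that handshake. A condensed view of the flow (eliding the timeout fallback that forces req_init_data_ver to 0):

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);	/* fill TRN_DW0..3 */
	if (event != -1 && !xgpu_nv_poll_msg(adev, event)) {
		/* reply payload arrives in the RCV mailbox dwords */
		if (req == IDH_REQ_GPU_INIT_DATA) {
			adev->virt.req_init_data_ver =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);
			/* hosts predating the version dword report 0: treat as v1 */
			if (adev->virt.req_init_data_ver < 1)
				adev->virt.req_init_data_ver = 1;
		}
	}
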
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 99b15f6865cb..52605e14a1a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -25,8 +25,32 @@
#define __MXGPU_NV_H__
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000
-#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500
+#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
+#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
+
+enum idh_request {
+ IDH_REQ_GPU_INIT_ACCESS = 1,
+ IDH_REL_GPU_INIT_ACCESS,
+ IDH_REQ_GPU_FINI_ACCESS,
+ IDH_REL_GPU_FINI_ACCESS,
+ IDH_REQ_GPU_RESET_ACCESS,
+ IDH_REQ_GPU_INIT_DATA,
+
+ IDH_LOG_VF_ERROR = 200,
+};
+
+enum idh_event {
+ IDH_CLR_MSG_BUF = 0,
+ IDH_READY_TO_ACCESS_GPU,
+ IDH_FLR_NOTIFICATION,
+ IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
+ IDH_QUERY_ALIVE,
+ IDH_REQ_GPU_INIT_DATA_READY,
+
+ IDH_TEXT_MESSAGE = 255,
+};
extern const struct amdgpu_virt_ops xgpu_nv_virt_ops;
@@ -35,7 +59,21 @@ int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev);
int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev);
void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev);
-#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4)
-#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1)
+#define mmMAILBOX_CONTROL 0xE5E
+
+#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (mmMAILBOX_CONTROL * 4)
+#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE + 1)
+
+#define mmMAILBOX_MSGBUF_TRN_DW0 0xE56
+#define mmMAILBOX_MSGBUF_TRN_DW1 0xE57
+#define mmMAILBOX_MSGBUF_TRN_DW2 0xE58
+#define mmMAILBOX_MSGBUF_TRN_DW3 0xE59
+
+#define mmMAILBOX_MSGBUF_RCV_DW0 0xE5A
+#define mmMAILBOX_MSGBUF_RCV_DW1 0xE5B
+#define mmMAILBOX_MSGBUF_RCV_DW2 0xE5C
+#define mmMAILBOX_MSGBUF_RCV_DW3 0xE5D
+
+#define mmMAILBOX_INT_CNTL 0xE5F
#endif
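
The header stops deriving the mailbox locations from PF-only NBIO register symbols and hardcodes the VF-visible DWORD indices instead. The *_OFFSET_BYTE values then follow from plain arithmetic — a DWORD index times 4 gives the byte address, and the receive control byte sits one past the transmit one:

/* illustration of the arithmetic only; these names are hypothetical */
#define EXAMPLE_MAILBOX_CONTROL_DW	0xE5E
#define EXAMPLE_TRN_BYTE_ADDR		(EXAMPLE_MAILBOX_CONTROL_DW * 4)	/* = 0x3978 */
#define EXAMPLE_RCV_BYTE_ADDR		(EXAMPLE_TRN_BYTE_ADDR + 1)		/* next byte up */
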
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
index f13dc6cc158f..713ee66a4d3e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.h
@@ -43,7 +43,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
- IDH_EVENT_MAX
+
+ IDH_TEXT_MESSAGE = 255
};
extern const struct amdgpu_virt_ops xgpu_vi_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index e08245a446fc..f97857ed3c7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -49,8 +49,48 @@ static void navi10_ih_enable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
adev->irq.ih.enabled = true;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ adev->irq.ih1.enabled = true;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ adev->irq.ih2.enabled = true;
+ }
}
/**
@@ -66,12 +106,61 @@ static void navi10_ih_disable_interrupts(struct amdgpu_device *adev)
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+
/* set rptr, wptr to 0 */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
adev->irq.ih.enabled = false;
adev->irq.ih.rptr = 0;
+
+ if (adev->irq.ih1.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING1,
+ RB_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ adev->irq.ih1.enabled = false;
+ adev->irq.ih1.rptr = 0;
+ }
+
+ if (adev->irq.ih2.ring_size) {
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL_RING2,
+ RB_ENABLE, 0);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ adev->irq.ih2.enabled = false;
+ adev->irq.ih2.rptr = 0;
+ }
+
}
static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl)
@@ -97,6 +186,43 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
return ih_rb_cntl;
}
+static uint32_t navi10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
+static void navi10_ih_reroute_ih(struct amdgpu_device *adev)
+{
+ uint32_t tmp;
+
+ /* Reroute to IH ring 1 for VMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x12);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+
+ /* Reroute IH ring 1 for UMC */
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_INDEX, 0x1B);
+ tmp = RREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+ WREG32_SOC15(OSSSYS, 0, mmIH_CLIENT_CFG_DATA, tmp);
+}
+
/**
* navi10_ih_irq_init - init and enable the interrupt ring
*
@@ -111,7 +237,7 @@ static uint32_t navi10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih = &adev->irq.ih;
- u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken;
+ u32 ih_rb_cntl, ih_chicken;
u32 tmp;
/* disable irqs */
@@ -127,6 +253,15 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM,
!!adev->irq.msi_enabled);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) {
+ DRM_ERROR("PSP program IH_RB_CNTL failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
+ }
+ navi10_ih_reroute_ih(adev);
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih->use_bus_addr) {
@@ -137,8 +272,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
}
}
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl);
-
/* set the writeback address whether it's enabled or not */
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
lower_32_bits(ih->wptr_addr));
@@ -149,22 +282,68 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
- ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
- if (ih->use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, OFFSET,
- ih->doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, ENABLE, 1);
- } else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, ENABLE, 0);
- }
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+ navi10_ih_doorbell_rptr(ih));
adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell,
ih->doorbell_index);
+ ih = &adev->irq.ih1;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING1, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING1,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING1,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING1 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+ navi10_ih_doorbell_rptr(ih));
+ }
+
+ ih = &adev->irq.ih2;
+ if (ih->ring_size) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_RING2, ih->gpu_addr >> 8);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
+ (ih->gpu_addr >> 40) & 0xff);
+
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ ih_rb_cntl = navi10_ih_rb_cntl(ih, ih_rb_cntl);
+
+ if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
+ if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL_RING2,
+ ih_rb_cntl)) {
+ DRM_ERROR("program IH_RB_CNTL_RING2 failed!\n");
+ return -ETIMEDOUT;
+ }
+ } else {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
+ }
+ /* set rptr, wptr to 0 */
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+ navi10_ih_doorbell_rptr(ih));
+ }
+
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
@@ -217,7 +396,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_RING2);
+ else
+ BUG();
+
wptr = RREG32_NO_KIQ(reg);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@@ -233,7 +420,15 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
wptr, ih->rptr, tmp);
ih->rptr = tmp;
- reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ if (ih == &adev->irq.ih)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL);
+ else if (ih == &adev->irq.ih1)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL_RING2);
+ else
+ BUG();
+
tmp = RREG32_NO_KIQ(reg);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(reg, tmp);
@@ -333,8 +528,52 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
navi10_ih_irq_rearm(adev, ih);
- } else
+ } else if (ih == &adev->irq.ih) {
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
+ } else if (ih == &adev->irq.ih1) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, ih->rptr);
+ } else if (ih == &adev->irq.ih2) {
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, ih->rptr);
+ }
+}
+
+/**
+ * navi10_ih_self_irq - dispatch work for ring 1 and 2
+ *
+ * @adev: amdgpu_device pointer
+ * @source: irq source
+ * @entry: IV with WPTR update
+ *
+ * Update the WPTR from the IV and schedule work to handle the entries.
+ */
+static int navi10_ih_self_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t wptr = cpu_to_le32(entry->src_data[0]);
+
+ switch (entry->ring_id) {
+ case 1:
+ *adev->irq.ih1.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih1_work);
+ break;
+ case 2:
+ *adev->irq.ih2.wptr_cpu = wptr;
+ schedule_work(&adev->irq.ih2_work);
+ break;
+	default:
+		break;
+ }
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs navi10_ih_self_irq_funcs = {
+ .process = navi10_ih_self_irq,
+};
+
+static void navi10_ih_set_self_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->irq.self_irq.num_types = 0;
+ adev->irq.self_irq.funcs = &navi10_ih_self_irq_funcs;
}
static int navi10_ih_early_init(void *handle)
@@ -342,6 +581,7 @@ static int navi10_ih_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
navi10_ih_set_interrupt_funcs(adev);
+ navi10_ih_set_self_irq_funcs(adev);
return 0;
}
@@ -351,6 +591,12 @@ static int navi10_ih_sw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool use_bus_addr;
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_IH, 0,
+ &adev->irq.self_irq);
+ if (r)
+ return r;
+
/* use gpu virtual address for ih ring
* until ih_checken is programmed to allow
* use bus address for ih ring by psp bl */
@@ -363,6 +609,20 @@ static int navi10_ih_sw_init(void *handle)
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
r = amdgpu_irq_init(adev);
return r;
@@ -373,6 +633,8 @@ static int navi10_ih_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_irq_fini(adev);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih2);
+ amdgpu_ih_ring_fini(adev, &adev->irq.ih1);
amdgpu_ih_ring_fini(adev, &adev->irq.ih);
return 0;
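
Every IH_RB_CNTL* write in navi10_ih.c is now guarded: a pre-Navi10 SR-IOV VF may only program these registers through the PSP (psp_reg_program()), while everyone else writes them directly. The same if/else appears six times; a single wrapper could carry it, sketched here (helper name and exact signature are ours, not the kernel's):

static int navi10_ih_prog_rb_cntl(struct amdgpu_device *adev,
				  enum psp_reg_prog_id psp_id,
				  u32 reg_offset, u32 val)
{
	if (amdgpu_sriov_vf(adev) && adev->asic_type < CHIP_NAVI10) {
		/* the VF cannot touch the register; route through the PSP */
		if (psp_reg_program(&adev->psp, psp_id, val)) {
			DRM_ERROR("PSP program of IH_RB_CNTL reg failed!\n");
			return -ETIMEDOUT;
		}
		return 0;
	}
	WREG32(reg_offset, val);	/* reg_offset precomputed by the caller */
	return 0;
}
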
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
index 074a9a09c0a7..a5b60c9a2418 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_sdma_pkt_open.h
@@ -73,6 +73,22 @@
#define SDMA_OP_AQL_COPY 0
#define SDMA_OP_AQL_BARRIER_OR 0
+#define SDMA_GCR_RANGE_IS_PA (1 << 18)
+#define SDMA_GCR_SEQ(x) (((x) & 0x3) << 16)
+#define SDMA_GCR_GL2_WB (1 << 15)
+#define SDMA_GCR_GL2_INV (1 << 14)
+#define SDMA_GCR_GL2_DISCARD (1 << 13)
+#define SDMA_GCR_GL2_RANGE(x) (((x) & 0x3) << 11)
+#define SDMA_GCR_GL2_US (1 << 10)
+#define SDMA_GCR_GL1_INV (1 << 9)
+#define SDMA_GCR_GLV_INV (1 << 8)
+#define SDMA_GCR_GLK_INV (1 << 7)
+#define SDMA_GCR_GLK_WB (1 << 6)
+#define SDMA_GCR_GLM_INV (1 << 5)
+#define SDMA_GCR_GLM_WB (1 << 4)
+#define SDMA_GCR_GL1_RANGE(x) (((x) & 0x3) << 2)
+#define SDMA_GCR_GLI_INV(x) (((x) & 0x3) << 0)
+
/*define for op field*/
#define SDMA_PKT_HEADER_op_offset 0
#define SDMA_PKT_HEADER_op_mask 0x000000FF
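
The new SDMA_GCR_* defines describe the control word of the SDMA GCR (global cache request) packet: single-bit writeback/invalidate enables for each cache level plus a few small range/sequence fields packed by the (x & mask) << shift helpers. A plausible full-flush combination, composed from the defines (the exact set used by the SDMA ring code is not part of this diff):

	/* write back + invalidate GL2/GLM/GLK, invalidate GLV/GL1/GLI */
	u32 gcr_cntl = SDMA_GCR_GL2_WB | SDMA_GCR_GL2_INV |
		       SDMA_GCR_GLM_WB | SDMA_GCR_GLM_INV |
		       SDMA_GCR_GLK_WB | SDMA_GCR_GLK_INV |
		       SDMA_GCR_GLV_INV | SDMA_GCR_GL1_INV |
		       SDMA_GCR_GLI_INV(1);
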
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index f3a3fe746222..cbcf04578b99 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -290,23 +290,6 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-static void nbio_v2_3_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -338,6 +321,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = {
.get_clockgating_state = nbio_v2_3_get_clockgating_state,
.ih_control = nbio_v2_3_ih_control,
.init_registers = nbio_v2_3_init_registers,
- .detect_hw_virt = nbio_v2_3_detect_hw_virt,
.remap_hdp_registers = nbio_v2_3_remap_hdp_registers,
};
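
The per-NBIO detect_hw_virt callbacks removed here and in the following nbio hunks all decoded the same identifier register, so the series consolidates them in common code. What the decode established, recapped from the deleted lines (bit 0: this function is a VF; bit 31: SR-IOV is enabled; an all-zero read inside a VM implies passthrough):

	uint32_t reg = RREG32_SOC15(NBIO, 0,
				    mmRCC_DEV0_EPF0_RCC_IOV_FUNC_IDENTIFIER);

	if (reg & 1)			/* we are a virtual function */
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
	if (reg & 0x80000000)		/* SR-IOV is enabled on the device */
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
	if (!reg && is_virtual_machine())
		adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;	/* passthrough, not SR-IOV */
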
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 635d9e1fc0a3..7b2fb050407d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -241,23 +241,6 @@ const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
};
-static void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_PF_0_0_RCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v6_1_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
@@ -294,5 +277,4 @@ const struct amdgpu_nbio_funcs nbio_v6_1_funcs = {
.get_clockgating_state = nbio_v6_1_get_clockgating_state,
.ih_control = nbio_v6_1_ih_control,
.init_registers = nbio_v6_1_init_registers,
- .detect_hw_virt = nbio_v6_1_detect_hw_virt,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
index d6cbf26074bc..d34628e113fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c
@@ -280,12 +280,6 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
-static void nbio_v7_0_detect_hw_virt(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{
@@ -310,6 +304,5 @@ const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {
.get_clockgating_state = nbio_v7_0_get_clockgating_state,
.ih_control = nbio_v7_0_ih_control,
.init_registers = nbio_v7_0_init_registers,
- .detect_hw_virt = nbio_v7_0_detect_hw_virt,
.remap_hdp_registers = nbio_v7_0_remap_hdp_registers,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 149d386590df..e629156173d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -185,7 +185,7 @@ static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 4);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
@@ -292,23 +292,6 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};
-static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
-{
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
- if (reg & 1)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
-
- if (reg & 0x80000000)
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
-
- if (!reg) {
- if (is_virtual_machine()) /* passthrough mode exclus sriov mod */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{
@@ -340,14 +323,20 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
obj->err_data.ce_count += err_data.ce_count;
if (err_data.ce_count)
- DRM_INFO("%ld correctable errors detected in %s block\n",
- obj->err_data.ce_count, adev->nbio.ras_if->name);
+ dev_info(adev->dev, "%ld correctable hardware "
+ "errors detected in %s block, "
+ "no user action is needed.\n",
+ obj->err_data.ce_count,
+ adev->nbio.ras_if->name);
if (err_data.ue_count)
- DRM_INFO("%ld uncorrectable errors detected in %s block\n",
- obj->err_data.ue_count, adev->nbio.ras_if->name);
+ dev_info(adev->dev, "%ld uncorrectable hardware "
+ "errors detected in %s block\n",
+ obj->err_data.ue_count,
+ adev->nbio.ras_if->name);
- DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
+ dev_info(adev->dev, "RAS controller interrupt triggered "
+ "by NBIF error\n");
/* ras_controller_int is dedicated for nbif ras error,
* not the global interrupt for sync flood
@@ -561,7 +550,6 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
.get_clockgating_state = nbio_v7_4_get_clockgating_state,
.ih_control = nbio_v7_4_ih_control,
.init_registers = nbio_v7_4_init_registers,
- .detect_hw_virt = nbio_v7_4_detect_hw_virt,
.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 033cbbca2072..6655dd2009b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -351,8 +351,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
struct smu_context *smu = &adev->smu;
if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
ret = smu_baco_enter(smu);
if (ret)
return ret;
@@ -360,8 +358,6 @@ static int nv_asic_reset(struct amdgpu_device *adev)
if (ret)
return ret;
} else {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
ret = nv_asic_mode1_reset(adev);
}
@@ -457,18 +453,19 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
{
int r;
- /* Set IP register base before any HW register access */
- r = nv_reg_base_init(adev);
- if (r)
- return r;
-
adev->nbio.funcs = &nbio_v2_3_funcs;
adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- adev->nbio.funcs->detect_hw_virt(adev);
-
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
adev->virt.ops = &xgpu_nv_virt_ops;
+ /* try send GPU_INIT_DATA request to host */
+ amdgpu_virt_request_init_data(adev);
+ }
+
+ /* Set IP register base before any HW register access */
+ r = nv_reg_base_init(adev);
+ if (r)
+ return r;
switch (adev->asic_type) {
case CHIP_NAVI10:
@@ -501,8 +498,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- !amdgpu_sriov_vf(adev))
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
@@ -552,13 +548,6 @@ static bool nv_need_full_reset(struct amdgpu_device *adev)
return true;
}
-static void nv_get_pcie_usage(struct amdgpu_device *adev,
- uint64_t *count0,
- uint64_t *count1)
-{
- /*TODO*/
-}
-
static bool nv_need_reset_on_init(struct amdgpu_device *adev)
{
#if 0
@@ -633,7 +622,6 @@ static const struct amdgpu_asic_funcs nv_asic_funcs =
.invalidate_hdp = &nv_invalidate_hdp,
.init_doorbell_index = &nv_init_doorbell_index,
.need_full_reset = &nv_need_full_reset,
- .get_pcie_usage = &nv_get_pcie_usage,
.need_reset_on_init = &nv_need_reset_on_init,
.get_pcie_replay_count = &nv_get_pcie_replay_count,
.supports_baco = &nv_asic_supports_baco,
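
nv_set_ip_blocks() now installs the virtualization ops and issues the GPU_INIT_DATA request before nv_reg_base_init(), since under SR-IOV the host-provided init data can influence register-base discovery. The new .req_init_data hook is reached through the usual ops indirection; a sketch of the dispatch (the wrapper body here is illustrative):

void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	/* ends up in xgpu_nv_request_init_data() on Navi VFs */
	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);
}
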
diff --git a/drivers/gpu/drm/amd/amdgpu/nvd.h b/drivers/gpu/drm/amd/amdgpu/nvd.h
index 1de984647dbb..fd6b58243b03 100644
--- a/drivers/gpu/drm/amd/amdgpu/nvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/nvd.h
@@ -256,6 +256,54 @@
#define PACKET3_BLK_CNTX_UPDATE 0x53
#define PACKET3_INCR_UPDT_STATE 0x55
#define PACKET3_ACQUIRE_MEM 0x58
+/* 1. HEADER
+ * 2. COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3. COHER_SIZE [31:0]
+ * 4. COHER_SIZE_HI [7:0]
+ * 5. COHER_BASE_LO [31:0]
+ * 6. COHER_BASE_HI [23:0]
+ * 7. POLL_INTERVAL [15:0]
+ * 8. GCR_CNTL [18:0]
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLI_INV(x) ((x) << 0)
+ /*
+ * 0:NOP
+ * 1:ALL
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(x) ((x) << 2)
+ /*
+ * 0:ALL
+ * 1:reserved
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_WB(x) ((x) << 4)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLM_INV(x) ((x) << 5)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_WB(x) ((x) << 6)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLK_INV(x) ((x) << 7)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GLV_INV(x) ((x) << 8)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(x) ((x) << 9)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_US(x) ((x) << 10)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(x) ((x) << 11)
+ /*
+ * 0:ALL
+ * 1:VOL
+ * 2:RANGE
+ * 3:FIRST_LAST
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_DISCARD(x) ((x) << 13)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(x) ((x) << 14)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(x) ((x) << 15)
+#define PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(x) ((x) << 16)
+ /*
+ * 0: PARALLEL
+ * 1: FORWARD
+ * 2: REVERSE
+ */
+#define PACKET3_ACQUIRE_MEM_GCR_RANGE_IS_PA (1 << 18)
#define PACKET3_REWIND 0x59
#define PACKET3_INTERRUPT 0x5A
#define PACKET3_GEN_PDEPTE 0x5B
@@ -306,6 +354,7 @@
#define PACKET3_GET_LOD_STATS 0x8E
#define PACKET3_DRAW_MULTI_PREAMBLE 0x8F
#define PACKET3_FRAME_CONTROL 0x90
+# define FRAME_TMZ (1 << 0)
# define FRAME_CMD(x) ((x) << 28)
/*
* x=0: tmz_begin
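
nvd.h documents the gfx10 PACKET3_ACQUIRE_MEM payload and adds shift helpers for its final GCR_CNTL dword. Building, say, a ranged GL1/GL2 invalidate with GL2 writeback from those helpers (field values per the comments above):

	uint32_t gcr_cntl =
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_INV(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_WB(1) |
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL1_RANGE(2) |	/* 2 = RANGE */
		PACKET3_ACQUIRE_MEM_GCR_CNTL_GL2_RANGE(2) |	/* 2 = RANGE */
		PACKET3_ACQUIRE_MEM_GCR_CNTL_SEQ(1);		/* 1 = FORWARD */
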
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 7539104175e8..d7f92634eba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -50,15 +50,14 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RAVEN:
- if (adev->rev_id >= 0x8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -66,22 +65,10 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
goto out;
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
-
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -126,8 +113,6 @@ out:
dev_err(adev->dev,
"psp v10.0: Failed to load firmware \"%s\"\n",
fw_name);
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
}
return err;
@@ -230,129 +215,6 @@ static int psp_v10_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v10_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v10_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v10_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (!ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
-
static int psp_v10_0_mode1_reset(struct psp_context *psp)
{
DRM_INFO("psp mode 1 reset not supported now! \n");
@@ -379,7 +241,6 @@ static const struct psp_funcs psp_v10_0_funcs = {
.ring_create = psp_v10_0_ring_create,
.ring_stop = psp_v10_0_ring_stop,
.ring_destroy = psp_v10_0_ring_destroy,
- .compare_sram_data = psp_v10_0_compare_sram_data,
.mode1_reset = psp_v10_0_mode1_reset,
.ring_get_wptr = psp_v10_0_ring_get_wptr,
.ring_set_wptr = psp_v10_0_ring_set_wptr,
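
psp_v10_0 (and psp_v11_0 below) replace their open-coded firmware loading with the shared psp_init_asd_microcode()/psp_init_sos_microcode() helpers. Judging by the deleted v10 block, the ASD helper folds together roughly the following (locals elided; field names as in the removed code):

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
	err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
	if (!err)
		err = amdgpu_ucode_validate(adev->psp.asd_fw);
	if (err) {
		release_firmware(adev->psp.asd_fw);	/* safe on NULL */
		adev->psp.asd_fw = NULL;
		return err;
	}
	hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
	adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
	adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
	adev->psp.asd_start_addr = (uint8_t *)hdr +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes);
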
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0afd610a1263..1de89cc3c355 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -75,10 +75,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
const char *chip_name;
char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *sos_hdr;
- const struct psp_firmware_header_v1_1 *sos_hdr_v1_1;
- const struct psp_firmware_header_v1_2 *sos_hdr_v1_2;
- const struct psp_firmware_header_v1_0 *asd_hdr;
const struct ta_firmware_header_v1_0 *ta_hdr;
DRM_DEBUG("\n");
@@ -103,66 +99,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
+ err = psp_init_sos_microcode(psp, chip_name);
if (err)
- goto out;
+ return err;
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
-
- sos_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- amdgpu_ucode_print_psp_hdr(&sos_hdr->header);
-
- switch (sos_hdr->header.header_version_major) {
- case 1:
- adev->psp.sos_fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(sos_hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(sos_hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(sos_hdr->sos_offset_bytes);
- adev->psp.sys_start_addr = (uint8_t *)sos_hdr +
- le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr->sos_offset_bytes);
- if (sos_hdr->header.header_version_minor == 1) {
- sos_hdr_v1_1 = (const struct psp_firmware_header_v1_1 *)adev->psp.sos_fw->data;
- adev->psp.toc_bin_size = le32_to_cpu(sos_hdr_v1_1->toc_size_bytes);
- adev->psp.toc_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->toc_offset_bytes);
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_1->kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_1->kdb_offset_bytes);
- }
- if (sos_hdr->header.header_version_minor == 2) {
- sos_hdr_v1_2 = (const struct psp_firmware_header_v1_2 *)adev->psp.sos_fw->data;
- adev->psp.kdb_bin_size = le32_to_cpu(sos_hdr_v1_2->kdb_size_bytes);
- adev->psp.kdb_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(sos_hdr_v1_2->kdb_offset_bytes);
- }
- break;
- default:
- dev_err(adev->dev,
- "Unsupported psp sos firmware\n");
- err = -EINVAL;
- goto out;
- }
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out1;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out1;
-
- asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
- le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
+ return err;
switch (adev->asic_type) {
case CHIP_VEGA20:
@@ -194,6 +137,8 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
+ if (amdgpu_sriov_vf(adev))
+ break;
snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name);
err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev);
if (err) {
@@ -229,15 +174,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
out2:
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
-out1:
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
-out:
- dev_err(adev->dev,
- "psp v11.0: Failed to load firmware \"%s\"\n", fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
-
return err;
}
@@ -283,11 +219,8 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp)
/* Check tOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- if (psp_v11_0_is_sos_alive(psp)) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- }
ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
@@ -319,11 +252,8 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp)
/* Check sOS sign of life register to confirm sys driver and sOS
* are already been loaded.
*/
- if (psp_v11_0_is_sos_alive(psp)) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (psp_v11_0_is_sos_alive(psp))
return 0;
- }
ret = psp_v11_0_wait_for_bootloader(psp);
if (ret)
@@ -446,13 +376,6 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
return 0;
}
-static bool psp_v11_0_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- return false;
-}
-
static int psp_v11_0_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -460,7 +383,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
/* Write the ring destroy command*/
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
@@ -471,7 +394,7 @@ static int psp_v11_0_ring_stop(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
@@ -489,7 +412,7 @@ static int psp_v11_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
ret = psp_v11_0_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n");
@@ -567,138 +490,6 @@ static int psp_v11_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v11_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- if (adev->asic_type < CHIP_NAVI10) {
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- } else {
- *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_ADDR_NV10;
- *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmRLC_GPM_UCODE_DATA_NV10;
- }
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- if (adev->asic_type < CHIP_NAVI10) {
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- } else {
- *sram_addr_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_ADDR_NV10;
- *sram_data_reg_offset = adev->reg_offset[GC_HWIP][0][1] + mmSDMA0_UCODE_DATA_NV10;
- }
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v11_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v11_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static int psp_v11_0_mode1_reset(struct psp_context *psp)
{
int ret;
@@ -733,181 +524,6 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp)
return 0;
}
-/* TODO: Fill in follow functions once PSP firmware interface for XGMI is ready.
- * For now, return success and hack the hive_id so high level code can
- * start testing
- */
-static int psp_v11_0_xgmi_get_topology_info(struct psp_context *psp,
- int number_devices, struct psp_xgmi_topology_info *topology)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
- struct ta_xgmi_cmd_get_topology_info_output *topology_info_output;
- int i;
- int ret;
-
- if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
- return -EINVAL;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- /* Fill in the shared memory with topology information as input */
- topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO;
- topology_info_input->num_nodes = number_devices;
-
- for (i = 0; i < topology_info_input->num_nodes; i++) {
- topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
- topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
- topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
- }
-
- /* Invoke xgmi ta to get the topology information */
- ret = psp_xgmi_invoke(psp, TA_COMMAND_XGMI__GET_GET_TOPOLOGY_INFO);
- if (ret)
- return ret;
-
- /* Read the output topology information from the shared memory */
- topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
- topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
- for (i = 0; i < topology->num_nodes; i++) {
- topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
- topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
- topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
- topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
- }
-
- return 0;
-}
-
-static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
- int number_devices, struct psp_xgmi_topology_info *topology)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
- int i;
-
- if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
- return -EINVAL;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__SET_TOPOLOGY_INFO;
- topology_info_input->num_nodes = number_devices;
-
- for (i = 0; i < topology_info_input->num_nodes; i++) {
- topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
- topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = 1;
- topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
- }
-
- /* Invoke xgmi ta to set topology information */
- return psp_xgmi_invoke(psp, TA_COMMAND_XGMI__SET_TOPOLOGY_INFO);
-}
-
-static int psp_v11_0_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- int ret;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
-
- /* Invoke xgmi ta to get hive id */
- ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
- if (ret)
- return ret;
-
- *hive_id = xgmi_cmd->xgmi_out_message.get_hive_id.hive_id;
-
- return 0;
-}
-
-static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
-{
- struct ta_xgmi_shared_memory *xgmi_cmd;
- int ret;
-
- xgmi_cmd = (struct ta_xgmi_shared_memory*)psp->xgmi_context.xgmi_shared_buf;
- memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
-
- xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
-
- /* Invoke xgmi ta to get the node id */
- ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
- if (ret)
- return ret;
-
- *node_id = xgmi_cmd->xgmi_out_message.get_node_id.node_id;
-
- return 0;
-}
-
-static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
- struct ta_ras_trigger_error_input *info)
-{
- struct ta_ras_shared_memory *ras_cmd;
- int ret;
-
- if (!psp->ras.ras_initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
- ras_cmd->ras_in_message.trigger_error = *info;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret)
- return -EINVAL;
-
-	/* If err_event_athub occurs, the error injection was successful;
-	   however, the return status from the TA is no longer reliable */
- if (amdgpu_ras_intr_triggered())
- return 0;
-
- return ras_cmd->ras_status;
-}
-
-static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
-{
-#if 0
-	// not supported yet.
- struct ta_ras_shared_memory *ras_cmd;
- int ret;
-
- if (!psp->ras.ras_initialized)
- return -EINVAL;
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
- memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
-
- ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
- ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
-
- ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
- if (ret)
- return -EINVAL;
-
- return ras_cmd->ras_status;
-#else
- return -EINVAL;
-#endif
-}
-
-static int psp_v11_0_rlc_autoload_start(struct psp_context *psp)
-{
- return psp_rlc_autoload_start(psp);
-}
-
static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg)
{
int ret;
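
The XGMI and RAS wrappers deleted above all follow the same trusted-application calling convention: clear the TA shared buffer, fill in a command id plus input payload, invoke the TA, then read the output payload back from the same buffer. That uniformity is what lets them be hoisted into common PSP code. Below is a minimal user-space model of the pattern, assuming a simplified command layout and a stubbed invoke; ta_invoke() stands in for psp_xgmi_invoke(), the command id value is assumed, and the real structs live in ta_xgmi_if.h.

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>

	struct ta_cmd {
		uint32_t cmd_id;
		uint32_t status;
		uint64_t hive_id;		/* output for GET_HIVE_ID */
	};

	static struct ta_cmd shared_buf;	/* stands in for xgmi_shared_buf */

	static int ta_invoke(uint32_t cmd_id)
	{
		/* a real implementation rings the PSP; here we fake a reply */
		shared_buf.status = 0;
		shared_buf.hive_id = 0x1234abcdull;
		return 0;
	}

	static int get_hive_id(uint64_t *hive_id)
	{
		memset(&shared_buf, 0, sizeof(shared_buf));
		shared_buf.cmd_id = 0x04;	/* TA_COMMAND_XGMI__GET_HIVE_ID, assumed value */
		if (ta_invoke(shared_buf.cmd_id))
			return -1;
		*hive_id = shared_buf.hive_id;
		return 0;
	}

	int main(void)
	{
		uint64_t id;

		if (!get_hive_id(&id))
			printf("hive id = 0x%llx\n", (unsigned long long)id);
		return 0;
	}
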
@@ -1099,7 +715,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -1111,7 +727,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v11_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
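
The two hunks above swap the per-version psp_v11_0_support_vmr_ring() predicate for a plain SR-IOV check. Judging from the psp_v12_0 helper removed later in this patch, the old predicate additionally gated on the sOS firmware version, so the behavioral change is limited to VFs running sOS older than the threshold. A sketch of the before/after logic, as standalone C:

	#include <stdbool.h>
	#include <stdint.h>

	/* Standalone model; sriov_vf and sos_fw_version stand in for
	 * amdgpu_sriov_vf(adev) and psp->sos_fw_version.  The 0x80045
	 * threshold is taken from the psp_v12_0 helper removed below. */
	static bool support_vmr_ring_old(bool sriov_vf, uint32_t sos_fw_version)
	{
		return sriov_vf && sos_fw_version > 0x80045;
	}

	static bool support_vmr_ring_new(bool sriov_vf)
	{
		/* a VF now always drives the ring via C2PMSG_101/102 */
		return sriov_vf;
	}

	int main(void)
	{
		/* the predicates disagree only for a VF on old sOS firmware */
		return support_vmr_ring_old(true, 0x80044) ==
		       support_vmr_ring_new(true);
	}
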
@@ -1203,16 +819,7 @@ static const struct psp_funcs psp_v11_0_funcs = {
.ring_create = psp_v11_0_ring_create,
.ring_stop = psp_v11_0_ring_stop,
.ring_destroy = psp_v11_0_ring_destroy,
- .compare_sram_data = psp_v11_0_compare_sram_data,
.mode1_reset = psp_v11_0_mode1_reset,
- .xgmi_get_topology_info = psp_v11_0_xgmi_get_topology_info,
- .xgmi_set_topology_info = psp_v11_0_xgmi_set_topology_info,
- .xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
- .xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
- .support_vmr_ring = psp_v11_0_support_vmr_ring,
- .ras_trigger_error = psp_v11_0_ras_trigger_error,
- .ras_cure_posion = psp_v11_0_ras_cure_posion,
- .rlc_autoload_start = psp_v11_0_rlc_autoload_start,
.mem_training_init = psp_v11_0_memory_training_init,
.mem_training_fini = psp_v11_0_memory_training_fini,
.mem_training = psp_v11_0_memory_training,
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index 58d8b6d732e8..6c9614f77d33 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -45,11 +45,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
- char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *asd_hdr;
-
- DRM_DEBUG("\n");
switch (adev->asic_type) {
case CHIP_RENOIR:
@@ -59,28 +55,7 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out1;
-
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
- if (err)
- goto out1;
-
- asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
- le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
-
- return 0;
-
-out1:
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
-
+ err = psp_init_asd_microcode(psp, chip_name);
return err;
}
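
The deleted lines above are the usual request_firmware()/amdgpu_ucode_validate()/header-parse sequence, now folded into the common psp_init_asd_microcode() helper. A user-space sketch of the shape of that consolidation, with the firmware loader stubbed out; the real helper in amdgpu_psp.c caches version, feature version, size, and start address much as the removed lines did, and the details here are illustrative:

	#include <stdio.h>

	struct asd_fw { unsigned version, feature, size; };

	/* stands in for request_firmware() + amdgpu_ucode_validate() */
	static int load_fw(const char *name, struct asd_fw *out)
	{
		printf("loading %s\n", name);
		out->version = 0x21000024;	/* placeholder header fields */
		out->feature = 0x24;
		out->size    = 0x4000;
		return 0;
	}

	static int psp_init_asd_microcode_model(const char *chip_name,
						struct asd_fw *fw)
	{
		char fw_name[30];

		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
		return load_fw(fw_name, fw);
	}

	int main(void)
	{
		struct asd_fw fw;

		return psp_init_asd_microcode_model("renoir", &fw);
	}
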
@@ -95,11 +70,8 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp)
	 * have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (sol_reg)
return 0;
- }
	/* Wait for the bootloader to signify that it is ready, with bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -228,13 +200,6 @@ static int psp_v12_0_ring_init(struct psp_context *psp,
return 0;
}
-static bool psp_v12_0_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev) && psp->sos_fw_version > 0x80045)
- return true;
- return false;
-}
-
static int psp_v12_0_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -243,7 +208,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(psp->adev)) {
/* Write low address of the ring to C2PMSG_102 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg);
@@ -295,7 +260,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
struct amdgpu_device *adev = psp->adev;
	/* Write the ring destroy command */
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
else
@@ -306,7 +271,7 @@ static int psp_v12_0_ring_stop(struct psp_context *psp,
mdelay(20);
/* Wait for response flag (bit 31) */
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
0x80000000, 0x80000000, false);
else
@@ -334,128 +299,6 @@ static int psp_v12_0_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v12_0_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch (ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v12_0_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v12_0_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static int psp_v12_0_mode1_reset(struct psp_context *psp)
{
int ret;
@@ -495,7 +338,7 @@ static uint32_t psp_v12_0_ring_get_wptr(struct psp_context *psp)
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -507,7 +350,7 @@ static void psp_v12_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v12_0_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
} else
@@ -522,7 +365,6 @@ static const struct psp_funcs psp_v12_0_funcs = {
.ring_create = psp_v12_0_ring_create,
.ring_stop = psp_v12_0_ring_stop,
.ring_destroy = psp_v12_0_ring_destroy,
- .compare_sram_data = psp_v12_0_compare_sram_data,
.mode1_reset = psp_v12_0_mode1_reset,
.ring_get_wptr = psp_v12_0_ring_get_wptr,
.ring_set_wptr = psp_v12_0_ring_set_wptr,
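
compare_sram_data and its sram_map companion are deleted identically from psp v11.0, v12.0, and v3.1 in this patch. They verified a loaded microcode image by reading it back one dword at a time through the block's UCODE_ADDR/UCODE_DATA indirect-register pair. A standalone model of that readback loop, with the register pair faked by an array and the hardware's auto-incrementing data reads assumed:

	#include <stdint.h>
	#include <stdbool.h>
	#include <string.h>
	#include <stdio.h>

	/* Fake indirect-access pair: writing the address register selects
	 * the word that subsequent data reads return, and each data read
	 * auto-increments, mimicking *_UCODE_ADDR/*_UCODE_DATA. */
	static uint32_t sram[64];
	static uint32_t addr_reg;

	static void wreg_addr(uint32_t v) { addr_reg = v; }
	static uint32_t rreg_data(void)   { return sram[addr_reg++]; }

	static bool compare_sram_data(const uint32_t *ucode,
				      unsigned size_bytes, uint32_t sram_offset)
	{
		wreg_addr(sram_offset);
		while (size_bytes) {
			if (*ucode++ != rreg_data())
				return false;
			size_bytes -= 4;	/* one dword per read */
		}
		return true;
	}

	int main(void)
	{
		uint32_t img[4] = { 1, 2, 3, 4 };

		memcpy(sram, img, sizeof(img));
		printf("%s\n",
		       compare_sram_data(img, sizeof(img), 0) ? "match" : "mismatch");
		return 0;
	}
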
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 735c43c7daab..f2e725f72d2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -50,9 +50,6 @@ MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
#define smnMP1_FIRMWARE_FLAGS 0x3010028
-static uint32_t sos_old_versions[] = {1517616, 1510592, 1448594, 1446554};
-
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp);
static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type);
@@ -60,9 +57,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
const char *chip_name;
- char fw_name[30];
int err = 0;
- const struct psp_firmware_header_v1_0 *hdr;
DRM_DEBUG("\n");
@@ -76,55 +71,15 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sos.bin", chip_name);
- err = request_firmware(&adev->psp.sos_fw, fw_name, adev->dev);
- if (err)
- goto out;
-
- err = amdgpu_ucode_validate(adev->psp.sos_fw);
+ err = psp_init_sos_microcode(psp, chip_name);
if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.sos_fw->data;
- adev->psp.sos_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.sos_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.sos_bin_size = le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_bin_size = le32_to_cpu(hdr->header.ucode_size_bytes) -
- le32_to_cpu(hdr->sos_size_bytes);
- adev->psp.sys_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
- adev->psp.sos_start_addr = (uint8_t *)adev->psp.sys_start_addr +
- le32_to_cpu(hdr->sos_offset_bytes);
-
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_asd.bin", chip_name);
- err = request_firmware(&adev->psp.asd_fw, fw_name, adev->dev);
- if (err)
- goto out;
+ return err;
- err = amdgpu_ucode_validate(adev->psp.asd_fw);
+ err = psp_init_asd_microcode(psp, chip_name);
if (err)
- goto out;
-
- hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd_fw_version = le32_to_cpu(hdr->header.ucode_version);
- adev->psp.asd_feature_version = le32_to_cpu(hdr->ucode_feature_version);
- adev->psp.asd_ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
- adev->psp.asd_start_addr = (uint8_t *)hdr +
- le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ return err;
return 0;
-out:
- if (err) {
- dev_err(adev->dev,
- "psp v3.1: Failed to load firmware \"%s\"\n",
- fw_name);
- release_firmware(adev->psp.sos_fw);
- adev->psp.sos_fw = NULL;
- release_firmware(adev->psp.asd_fw);
- adev->psp.asd_fw = NULL;
- }
-
- return err;
}
static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
@@ -168,41 +123,19 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp)
return ret;
}
-static bool psp_v3_1_match_version(struct amdgpu_device *adev, uint32_t ver)
-{
- int i;
-
- if (ver == adev->psp.sos_fw_version)
- return true;
-
- /*
-	 * Double-check against the last four legacy versions.
-	 * If one matches, it is still the right version.
- */
- for (i = 0; i < ARRAY_SIZE(sos_old_versions); i++) {
- if (sos_old_versions[i] == adev->psp.sos_fw_version)
- return true;
- }
-
- return false;
-}
-
static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
{
int ret;
unsigned int psp_gfxdrv_command_reg = 0;
struct amdgpu_device *adev = psp->adev;
- uint32_t sol_reg, ver;
+ uint32_t sol_reg;
/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
*/
sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
- if (sol_reg) {
- psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- printk("sos fw version = 0x%x.\n", psp->sos_fw_version);
+ if (sol_reg)
return 0;
- }
	/* Wait for the bootloader to signify that it is ready, with bit 31 of C2PMSG_35 set to 1 */
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
@@ -227,11 +160,6 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp)
ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81),
RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81),
0, true);
-
- ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58);
- if (!psp_v3_1_match_version(adev, ver))
- DRM_WARN("SOS version doesn't match\n");
-
return ret;
}
@@ -302,7 +230,7 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
psp_v3_1_reroute_ih(psp);
- if (psp_v3_1_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
ret = psp_v3_1_ring_stop(psp, ring_type);
if (ret) {
DRM_ERROR("psp_v3_1_ring_stop_sriov failed!\n");
@@ -360,34 +288,26 @@ static int psp_v3_1_ring_stop(struct psp_context *psp,
enum psp_ring_type ring_type)
{
int ret = 0;
- unsigned int psp_ring_reg = 0;
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp)) {
- /* Write the Destroy GPCOM ring command to C2PMSG_101 */
- psp_ring_reg = GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, psp_ring_reg);
-
- /* there might be handshake issue which needs delay */
- mdelay(20);
-
- /* Wait for response flag (bit 31) in C2PMSG_101 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
- 0x80000000, 0x80000000, false);
- } else {
- /* Write the ring destroy command to C2PMSG_64 */
- psp_ring_reg = 3 << 16;
- WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, psp_ring_reg);
+	/* Write the ring destroy command */
+ if (amdgpu_sriov_vf(adev))
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
+ GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING);
+ else
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64,
+ GFX_CTRL_CMD_ID_DESTROY_RINGS);
- /* there might be handshake issue which needs delay */
- mdelay(20);
+	/* there might be a handshake issue with the hardware which needs a delay */
+ mdelay(20);
- /* Wait for response flag (bit 31) in C2PMSG_64 */
- ret = psp_wait_for(psp,
- SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
- 0x80000000, 0x80000000, false);
- }
+ /* Wait for response flag (bit 31) */
+ if (amdgpu_sriov_vf(adev))
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101),
+ 0x80000000, 0x80000000, false);
+ else
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x80000000, false);
return ret;
}
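
Both branches of the rewritten ring_stop finish by polling bit 31 of a C2PMSG register through psp_wait_for(). A user-space model of that poll, assuming a simple retry-with-delay loop; the real helper in amdgpu_psp.c uses adev->usec_timeout and udelay(), so the retry count and sleep here are placeholders:

	#include <stdint.h>
	#include <stdio.h>
	#include <errno.h>
	#include <unistd.h>

	static uint32_t c2pmsg;		/* stands in for the MMIO register */

	/* poll until (reg & mask) == val or give up */
	static int psp_wait_for_model(uint32_t mask, uint32_t val, int retries)
	{
		while (retries--) {
			if ((c2pmsg & mask) == val)
				return 0;
			usleep(1000);
		}
		return -ETIMEDOUT;
	}

	int main(void)
	{
		c2pmsg = 0x80000000;	/* firmware sets bit 31 when done */
		printf("wait: %d\n",
		       psp_wait_for_model(0x80000000, 0x80000000, 10));
		return 0;
	}
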
@@ -410,128 +330,6 @@ static int psp_v3_1_ring_destroy(struct psp_context *psp,
return ret;
}
-static int
-psp_v3_1_sram_map(struct amdgpu_device *adev,
- unsigned int *sram_offset, unsigned int *sram_addr_reg_offset,
- unsigned int *sram_data_reg_offset,
- enum AMDGPU_UCODE_ID ucode_id)
-{
- int ret = 0;
-
- switch(ucode_id) {
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SMC:
- *sram_offset = 0;
- *sram_addr_reg_offset = 0;
- *sram_data_reg_offset = 0;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_CP_CE:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_CE_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_PFP:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_PFP_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_ME:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_ME_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC1:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_MEC_ME1_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_CP_MEC2:
- *sram_offset = 0x10000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_HYP_MEC2_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_RLC_G:
- *sram_offset = 0x2000;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_UCODE_DATA);
- break;
-
- case AMDGPU_UCODE_ID_SDMA0:
- *sram_offset = 0x0;
- *sram_addr_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_ADDR);
- *sram_data_reg_offset = SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_UCODE_DATA);
- break;
-
-/* TODO: needs to confirm */
-#if 0
- case AMDGPU_UCODE_ID_SDMA1:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_UVD:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-
- case AMDGPU_UCODE_ID_VCE:
- *sram_offset = ;
- *sram_addr_reg_offset = ;
- break;
-#endif
-
- case AMDGPU_UCODE_ID_MAXIMUM:
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static bool psp_v3_1_compare_sram_data(struct psp_context *psp,
- struct amdgpu_firmware_info *ucode,
- enum AMDGPU_UCODE_ID ucode_type)
-{
- int err = 0;
- unsigned int fw_sram_reg_val = 0;
- unsigned int fw_sram_addr_reg_offset = 0;
- unsigned int fw_sram_data_reg_offset = 0;
- unsigned int ucode_size;
- uint32_t *ucode_mem = NULL;
- struct amdgpu_device *adev = psp->adev;
-
- err = psp_v3_1_sram_map(adev, &fw_sram_reg_val, &fw_sram_addr_reg_offset,
- &fw_sram_data_reg_offset, ucode_type);
- if (err)
- return false;
-
- WREG32(fw_sram_addr_reg_offset, fw_sram_reg_val);
-
- ucode_size = ucode->ucode_size;
- ucode_mem = (uint32_t *)ucode->kaddr;
- while (ucode_size) {
- fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
-
- if (*ucode_mem != fw_sram_reg_val)
- return false;
-
- ucode_mem++;
- /* 4 bytes */
- ucode_size -= 4;
- }
-
- return true;
-}
-
static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -575,20 +373,12 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp)
return 0;
}
-static bool psp_v3_1_support_vmr_ring(struct psp_context *psp)
-{
- if (amdgpu_sriov_vf(psp->adev))
- return true;
-
- return false;
-}
-
static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
{
uint32_t data;
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp))
+ if (amdgpu_sriov_vf(adev))
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
else
data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
@@ -599,7 +389,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
{
struct amdgpu_device *adev = psp->adev;
- if (psp_v3_1_support_vmr_ring(psp)) {
+ if (amdgpu_sriov_vf(adev)) {
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
/* send interrupt to PSP for SRIOV ring write pointer update */
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
@@ -616,10 +406,8 @@ static const struct psp_funcs psp_v3_1_funcs = {
.ring_create = psp_v3_1_ring_create,
.ring_stop = psp_v3_1_ring_stop,
.ring_destroy = psp_v3_1_ring_destroy,
- .compare_sram_data = psp_v3_1_compare_sram_data,
.smu_reload_quirk = psp_v3_1_smu_reload_quirk,
.mode1_reset = psp_v3_1_mode1_reset,
- .support_vmr_ring = psp_v3_1_support_vmr_ring,
.ring_get_wptr = psp_v3_1_ring_get_wptr,
.ring_set_wptr = psp_v3_1_ring_set_wptr,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index 7d509a40076f..5f304d61999e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -355,8 +355,6 @@ static void sdma_v2_4_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -614,7 +612,8 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -874,7 +873,8 @@ static int sdma_v2_4_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1200,7 +1200,8 @@ static void sdma_v2_4_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v2_4_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index b6109a99fc43..c59f6f6f4c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -529,8 +529,6 @@ static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
}
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -886,7 +884,8 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -1158,7 +1157,8 @@ static int sdma_v3_0_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1638,7 +1638,8 @@ static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 5f3a5ee2a3f4..33501c6c7189 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -115,17 +115,21 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_4_1[] = {
@@ -174,6 +178,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -203,6 +208,7 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -222,27 +228,35 @@ static const struct soc15_reg_golden golden_settings_sdma_arct[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA2, 0, mmSDMA2_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA3, 0, mmSDMA3_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA4, 0, mmSDMA4_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA5, 0, mmSDMA5_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA6, 0, mmSDMA6_UTCL1_TIMEOUT, 0xffffffff, 0x00010001),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG, 0x0000773f, 0x00004002),
- SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002)
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002),
+ SOC15_REG_GOLDEN_VALUE(SDMA7, 0, mmSDMA7_UTCL1_TIMEOUT, 0xffffffff, 0x00010001)
};
static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
@@ -472,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
soc15_program_register_sequence(adev,
golden_settings_sdma_rv2,
ARRAY_SIZE(golden_settings_sdma_rv2));
@@ -561,9 +575,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
chip_name = "vega20";
break;
case CHIP_RAVEN:
- if (adev->rev_id >= 8)
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
chip_name = "picasso";
else
chip_name = "raven";
@@ -923,8 +937,6 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
@@ -971,8 +983,6 @@ static void sdma_v4_0_page_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_PAGE_IB_CNTL,
IB_ENABLE, 0);
WREG32_SDMA(i, mmSDMA0_PAGE_IB_CNTL, ib_cntl);
-
- sdma[i]->sched.ready = false;
}
}
@@ -1539,7 +1549,8 @@ static int sdma_v4_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -1840,7 +1851,7 @@ static int sdma_v4_0_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
/* doorbell size is 2 dwords, get DWORD offset */
@@ -1848,7 +1859,8 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "sdma%d", i);
r = amdgpu_ring_init(adev, ring, 1024, &adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -1866,7 +1878,8 @@ static int sdma_v4_0_sw_init(void *handle)
sprintf(ring->name, "page%d", i);
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
- AMDGPU_SDMA_IRQ_INSTANCE0 + i);
+ AMDGPU_SDMA_IRQ_INSTANCE0 + i,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -2445,10 +2458,12 @@ static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v4_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index ebfd2cdf4e65..b544baf306f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -88,6 +88,29 @@ static const struct soc15_reg_golden golden_settings_sdma_5[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_UTCL1_PAGE, 0x00ffffff, 0x000c5c00)
};
+static const struct soc15_reg_golden golden_settings_sdma_5_sriov[] = {
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_PAGE_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC2_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC4_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC5_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC6_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+ SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
+};
+
static const struct soc15_reg_golden golden_settings_sdma_nv10[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA0_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSDMA1_RLC3_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -141,9 +164,14 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
break;
case CHIP_NAVI12:
- soc15_program_register_sequence(adev,
- golden_settings_sdma_5,
- (const u32)ARRAY_SIZE(golden_settings_sdma_5));
+ if (amdgpu_sriov_vf(adev))
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5_sriov,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5_sriov));
+ else
+ soc15_program_register_sequence(adev,
+ golden_settings_sdma_5,
+ (const u32)ARRAY_SIZE(golden_settings_sdma_5));
soc15_program_register_sequence(adev,
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
@@ -382,6 +410,18 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
unsigned vmid = AMDGPU_JOB_GET_VMID(job);
uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid);
+ /* Invalidate L2, because if we don't do it, we might get stale cache
+ * lines from previous IBs.
+ */
+ amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, (SDMA_GCR_GL2_INV |
+ SDMA_GCR_GL2_WB |
+ SDMA_GCR_GLM_INV |
+ SDMA_GCR_GLM_WB) << 16);
+ amdgpu_ring_write(ring, 0xffffff80);
+ amdgpu_ring_write(ring, 0xffff);
+
	/* An IB packet must end on an 8 DW boundary -- the next dword
	 * must be on an 8-dword boundary. Our IB packet below is 6
* dwords long, thus add x number of NOPs, such that, in
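
The GCR_REQ packet added above is five dwords (header plus four payload dwords), which is why .emit_ib_size grows from 7 + 6 to 5 + 7 + 6 later in this patch. The comment cut off at the hunk boundary describes the NOP padding that keeps the 6-dword INDIRECT_BUFFER packet ending on an 8-dword boundary; the arithmetic, as a standalone sketch (this modulo form is equivalent to a (2 - wptr) & 7 style computation):

	#include <stdio.h>

	int main(void)
	{
		unsigned wptr = 0;	/* ring write pointer, in dwords */

		wptr += 5;		/* GCR_REQ: header + 4 payload dwords */

		/* Pad with NOPs so the 6-dword IB packet ends on an 8-dword
		 * boundary: choose nops with (wptr + nops + 6) % 8 == 0. */
		unsigned nops = (8 - ((wptr + 6) & 7)) & 7;

		printf("emit %u NOPs, IB packet occupies dwords %u..%u\n",
		       nops, wptr + nops, wptr + nops + 5);
		return 0;
	}
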
@@ -502,9 +542,6 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
}
-
- sdma0->sched.ready = false;
- sdma1->sched.ready = false;
}
/**
@@ -529,7 +566,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
*/
static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
- u32 f32_cntl, phase_quantum = 0;
+ u32 f32_cntl = 0, phase_quantum = 0;
int i;
if (amdgpu_sdma_phase_quantum) {
@@ -557,9 +594,12 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
}
for (i = 0; i < adev->sdma.num_instances; i++) {
- f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
- AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
+ AUTO_CTXSW_ENABLE, enable ? 1 : 0);
+ }
+
if (enable && amdgpu_sdma_phase_quantum) {
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
phase_quantum);
@@ -568,7 +608,8 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
phase_quantum);
}
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl);
}
}
@@ -591,6 +632,9 @@ static void sdma_v5_0_enable(struct amdgpu_device *adev, bool enable)
sdma_v5_0_rlc_stop(adev);
}
+ if (amdgpu_sriov_vf(adev))
+ return;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
f32_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_F32_CNTL));
f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1);
@@ -623,7 +667,8 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
ring = &adev->sdma.instance[i].ring;
wb_offset = (ring->rptr_offs * 4);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL), 0);
/* Set ring buffer size in dwords */
rb_bufsz = order_base_2(ring->ring_size / 4);
@@ -699,26 +744,28 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
/* set minor_ptr_update to 0 after wptr programed */
WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
- /* set utc l1 enable flag always to 1 */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
-
- /* enable MCBP */
- temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
-
- /* Set up RESP_MODE to non-copy addresses */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
- temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
-
- /* program default cache read and write policy */
- temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
- /* clean read policy and write policy bits */
- temp &= 0xFF0FFF;
- temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
- WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* set utc l1 enable flag always to 1 */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, UTC_L1_ENABLE, 1);
+
+ /* enable MCBP */
+ temp = REG_SET_FIELD(temp, SDMA0_CNTL, MIDCMD_PREEMPT_ENABLE, 1);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_CNTL), temp);
+
+ /* Set up RESP_MODE to non-copy addresses */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL));
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, RESP_MODE, 3);
+ temp = REG_SET_FIELD(temp, SDMA0_UTCL1_CNTL, REDO_DELAY, 9);
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_CNTL), temp);
+
+ /* program default cache read and write policy */
+ temp = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE));
+ /* clean read policy and write policy bits */
+ temp &= 0xFF0FFF;
+ temp |= ((CACHE_READ_POLICY_L2__DEFAULT << 12) | (CACHE_WRITE_POLICY_L2__DEFAULT << 14));
+ WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_UTCL1_PAGE), temp);
+ }
if (!amdgpu_sriov_vf(adev)) {
/* unhalt engine */
@@ -948,7 +995,8 @@ static int sdma_v5_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r) {
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
goto err0;
@@ -1224,7 +1272,7 @@ static int sdma_v5_0_sw_init(void *handle)
ring->ring_obj = NULL;
ring->use_doorbell = true;
- DRM_INFO("use_doorbell being set to: [%s]\n",
+ DRM_DEBUG("SDMA %d use_doorbell being set to: [%s]\n", i,
ring->use_doorbell?"true":"false");
ring->doorbell_index = (i == 0) ?
@@ -1236,7 +1284,8 @@ static int sdma_v5_0_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -1387,14 +1436,16 @@ static int sdma_v5_0_set_trap_irq_state(struct amdgpu_device *adev,
{
u32 sdma_cntl;
- u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
- sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
- sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
+ if (!amdgpu_sriov_vf(adev)) {
+ u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+ sdma_v5_0_get_reg_offset(adev, 0, mmSDMA0_CNTL) :
+ sdma_v5_0_get_reg_offset(adev, 1, mmSDMA0_CNTL);
- sdma_cntl = RREG32(reg_offset);
- sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
- state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32(reg_offset, sdma_cntl);
+ sdma_cntl = RREG32(reg_offset);
+ sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_cntl);
+ }
return 0;
}
@@ -1595,7 +1646,7 @@ static const struct amdgpu_ring_funcs sdma_v5_0_ring_funcs = {
SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 6 * 2 +
10 + 10 + 10, /* sdma_v5_0_ring_emit_fence x3 for user fence, vm fence */
- .emit_ib_size = 7 + 6, /* sdma_v5_0_ring_emit_ib */
+ .emit_ib_size = 5 + 7 + 6, /* sdma_v5_0_ring_emit_ib */
.emit_ib = sdma_v5_0_ring_emit_ib,
.emit_fence = sdma_v5_0_ring_emit_fence,
.emit_pipeline_sync = sdma_v5_0_ring_emit_pipeline_sync,
@@ -1655,10 +1706,12 @@ static void sdma_v5_0_set_irq_funcs(struct amdgpu_device *adev)
static void sdma_v5_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
- SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
+ SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
+ SDMA_PKT_COPY_LINEAR_HEADER_TMZ(tmz ? 1 : 0);
ib->ptr[ib->length_dw++] = byte_count - 1;
ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
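
Every SDMA generation's emit_copy_buffer gains a bool tmz parameter in this patch. On SDMA 4.x and 5.x it sets the TMZ bit in the COPY_LINEAR packet header so the engine treats the transfer as protected; the older engines below (sdma_v2_4, sdma_v3_0, si_dma) take the parameter but leave their packets unchanged. A sketch of the header assembly; the field positions here are simplified assumptions modeled on the SDMA packet headers, not copied from the generated packet-open headers:

	#include <stdint.h>
	#include <stdio.h>

	/* assumed field layout: op 0:7, sub-op 8:15, TMZ bit 18 */
	#define SDMA_OP_COPY		1
	#define SDMA_SUBOP_COPY_LINEAR	0
	#define HDR_OP(x)	((x) & 0xff)
	#define HDR_SUB_OP(x)	(((x) & 0xff) << 8)
	#define HDR_TMZ(x)	(((x) & 1) << 18)

	static uint32_t copy_linear_header(int tmz)
	{
		return HDR_OP(SDMA_OP_COPY) |
		       HDR_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
		       HDR_TMZ(tmz ? 1 : 0);
	}

	int main(void)
	{
		printf("plain: 0x%08x  tmz: 0x%08x\n",
		       copy_linear_header(0), copy_linear_header(1));
		return 0;
	}
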
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 4d415bfdb42f..153db3f763bc 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -1249,12 +1249,6 @@ static int si_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
return 0;
}
-static void si_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- if (is_virtual_machine()) /* passthrough mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
-}
-
static void si_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
if (!ring || !ring->funcs->emit_wreg) {
@@ -2165,8 +2159,6 @@ static const struct amdgpu_ip_block_version si_common_ip_block =
int si_set_ip_blocks(struct amdgpu_device *adev)
{
- si_detect_hw_virtualization(adev);
-
switch (adev->asic_type) {
case CHIP_VERDE:
case CHIP_TAHITI:
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index 42d5601b6bf3..7d2bbcbe547b 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -124,7 +124,6 @@ static void si_dma_stop(struct amdgpu_device *adev)
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, false);
- ring->sched.ready = false;
}
}
@@ -267,7 +266,8 @@ static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
tmp = 0xCAFEDEAD;
adev->wb.wb[index] = cpu_to_le32(tmp);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
+ r = amdgpu_ib_get(adev, NULL, 256,
+ AMDGPU_IB_POOL_DIRECT, &ib);
if (r)
goto err0;
@@ -504,7 +504,8 @@ static int si_dma_sw_init(void *handle)
&adev->sdma.trap_irq,
(i == 0) ?
AMDGPU_SDMA_IRQ_INSTANCE0 :
- AMDGPU_SDMA_IRQ_INSTANCE1);
+ AMDGPU_SDMA_IRQ_INSTANCE1,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -775,7 +776,8 @@ static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
uint64_t src_offset,
uint64_t dst_offset,
- uint32_t byte_count)
+ uint32_t byte_count,
+ bool tmz)
{
ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
1, 0, 0, byte_count);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 0860e85a2d35..c00ba4b23c9a 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -345,26 +345,6 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-#if 0
-static const struct si_dte_data dte_data_tahiti_le =
-{
- { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
- { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
- 0x5,
- 0xAFC8,
- 0x64,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
- { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
- { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
- 85,
- true
-};
-#endif
-
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index a40499d51c93..c7c9e07962b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -564,19 +564,16 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
static int soc15_asic_reset(struct amdgpu_device *adev)
{
/* original raven doesn't have full asic reset */
- if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
+ !(adev->apu_flags & AMD_APU_IS_RAVEN2))
return 0;
switch (soc15_asic_reset_method(adev)) {
case AMD_RESET_METHOD_BACO:
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
return soc15_asic_baco_reset(adev);
case AMD_RESET_METHOD_MODE2:
return amdgpu_dpm_mode2_reset(adev);
default:
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
return soc15_asic_mode1_reset(adev);
}
}
@@ -712,7 +709,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
adev->df.funcs = &df_v1_7_funcs;
adev->rev_id = soc15_get_rev_id(adev);
- adev->nbio.funcs->detect_hw_virt(adev);
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_ai_virt_ops;
@@ -1134,16 +1130,23 @@ static int soc15_common_early_init(void *handle)
break;
case CHIP_RAVEN:
adev->asic_funcs = &soc15_asic_funcs;
+ if (adev->pdev->device == 0x15dd)
+ adev->apu_flags |= AMD_APU_IS_RAVEN;
+ if (adev->pdev->device == 0x15d8)
+ adev->apu_flags |= AMD_APU_IS_PICASSO;
if (adev->rev_id >= 0x8)
+ adev->apu_flags |= AMD_APU_IS_RAVEN2;
+
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->external_rev_id = adev->rev_id + 0x79;
- else if (adev->pdev->device == 0x15d8)
+ else if (adev->apu_flags & AMD_APU_IS_PICASSO)
adev->external_rev_id = adev->rev_id + 0x41;
else if (adev->rev_id == 1)
adev->external_rev_id = adev->rev_id + 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
- if (adev->rev_id >= 0x8) {
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1161,7 +1164,7 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_VCN_MGCG;
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
- } else if (adev->pdev->device == 0x15d8) {
+ } else if (adev->apu_flags & AMD_APU_IS_PICASSO) {
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1222,11 +1225,12 @@ static int soc15_common_early_init(void *handle)
AMD_CG_SUPPORT_IH_CG |
AMD_CG_SUPPORT_VCN_MGCG |
AMD_CG_SUPPORT_JPEG_MGCG;
- adev->pg_flags = 0;
+ adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x32;
break;
case CHIP_RENOIR:
adev->asic_funcs = &soc15_asic_funcs;
+ adev->apu_flags |= AMD_APU_IS_RENOIR;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
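
The hunk above replaces scattered pdev->device and rev_id tests with apu_flags computed once during early init; the sdma and reset code earlier in the patch then tests AMD_APU_IS_RAVEN2 or AMD_APU_IS_PICASSO instead of re-deriving the answer. A sketch of the classification using the device ids and rev threshold from the hunk; the flag bit values here are assumptions, as the real enum lives in amd_shared.h:

	#include <stdint.h>
	#include <stdio.h>

	#define AMD_APU_IS_RAVEN	(1u << 0)	/* assumed bit positions */
	#define AMD_APU_IS_RAVEN2	(1u << 1)
	#define AMD_APU_IS_PICASSO	(1u << 2)

	static uint32_t classify_apu(uint16_t device, uint8_t rev_id)
	{
		uint32_t flags = 0;

		if (device == 0x15dd)
			flags |= AMD_APU_IS_RAVEN;
		if (device == 0x15d8)
			flags |= AMD_APU_IS_PICASSO;
		if (rev_id >= 0x8)
			flags |= AMD_APU_IS_RAVEN2;
		return flags;
	}

	int main(void)
	{
		printf("0x15dd rev 0x9 -> 0x%x\n", classify_apu(0x15dd, 0x9));
		return 0;
	}
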
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index c893c645a4b2..56d02aa690a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -35,6 +35,9 @@
#define RREG32_SOC15(ip, inst, reg) \
RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+#define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
+ RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+
#define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15d.h b/drivers/gpu/drm/amd/amdgpu/soc15d.h
index edfe50821cd9..799925d22fc8 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15d.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15d.h
@@ -253,7 +253,30 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
+/* 1. HEADER
+ * 2. COHER_CNTL [30:0]
+ * 2.1 ENGINE_SEL [31:31]
+ * 3. COHER_SIZE [31:0]
+ * 4. COHER_SIZE_HI [7:0]
+ * 5. COHER_BASE_LO [31:0]
+ * 6. COHER_BASE_HI [23:0]
+ * 7. POLL_INTERVAL [15:0]
+ */
+/* COHER_CNTL fields for CP_COHER_CNTL */
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_NC_ACTION_ENA(x) ((x) << 3)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WC_ACTION_ENA(x) ((x) << 4)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_INV_METADATA_ACTION_ENA(x) ((x) << 5)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_VOL_ACTION_ENA(x) ((x) << 15)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_WB_ACTION_ENA(x) ((x) << 18)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TCL1_ACTION_ENA(x) ((x) << 22)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_TC_ACTION_ENA(x) ((x) << 23)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_CB_ACTION_ENA(x) ((x) << 25)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_DB_ACTION_ENA(x) ((x) << 26)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_ACTION_ENA(x) ((x) << 27)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_VOL_ACTION_ENA(x) ((x) << 28)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_ICACHE_ACTION_ENA(x) ((x) << 29)
+#define PACKET3_ACQUIRE_MEM_CP_COHER_CNTL_SH_KCACHE_WB_ACTION_ENA(x) ((x) << 30)
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
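
Per the layout comment added above, ACQUIRE_MEM is a header followed by six payload dwords. A sketch assembling the payload with the new COHER_CNTL field macros; the shift values are copied from the hunk, while the header encoding via PACKET3() and the particular flag selection are illustrative rather than a specific kernel call site:

	#include <stdint.h>

	/* field shifts copied from the hunk above */
	#define TC_ACTION_ENA(x)	((x) << 23)
	#define TC_WB_ACTION_ENA(x)	((x) << 18)
	#define SH_KCACHE_ACTION_ENA(x)	((x) << 27)
	#define SH_ICACHE_ACTION_ENA(x)	((x) << 29)

	static void build_acquire_mem(uint32_t *pkt, uint32_t header)
	{
		pkt[0] = header;			/* 1. HEADER (PACKET3) */
		pkt[1] = TC_ACTION_ENA(1) |		/* 2. COHER_CNTL */
			 TC_WB_ACTION_ENA(1) |
			 SH_KCACHE_ACTION_ENA(1) |
			 SH_ICACHE_ACTION_ENA(1);
		pkt[2] = 0xffffffff;			/* 3. COHER_SIZE */
		pkt[3] = 0xff;				/* 4. COHER_SIZE_HI [7:0] */
		pkt[4] = 0;				/* 5. COHER_BASE_LO */
		pkt[5] = 0;				/* 6. COHER_BASE_HI */
		pkt[6] = 0x0000000a;			/* 7. POLL_INTERVAL */
	}

	int main(void)
	{
		uint32_t pkt[7];

		build_acquire_mem(pkt, 0);	/* header value omitted here */
		return 0;
	}
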
@@ -286,6 +309,7 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_FRAME_CONTROL 0x90
+# define FRAME_TMZ (1 << 0)
# define FRAME_CMD(x) ((x) << 28)
/*
* x=0: tmz_begin
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index ca7d05993ca2..745ed0fba1ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -24,6 +24,8 @@
#ifndef _TA_RAS_IF_H
#define _TA_RAS_IF_H
+#define RAS_TA_HOST_IF_VER 0
+
/* Responses have bit 31 set */
#define RSP_ID_MASK (1U << 31)
#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
@@ -36,18 +38,24 @@ enum ras_command {
TA_RAS_COMMAND__TRIGGER_ERROR,
};
-enum ta_ras_status {
- TA_RAS_STATUS__SUCCESS = 0x00,
- TA_RAS_STATUS__RESET_NEEDED = 0x01,
- TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
- TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
- TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
- TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05,
- TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0x06,
- TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0x07,
- TA_RAS_STATUS__ERROR_TIMEOUT = 0x08,
- TA_RAS_STATUS__ERROR_BLOCK_DISABLED = 0x09,
- TA_RAS_STATUS__ERROR_GENERIC = 0x10,
+enum ta_ras_status
+{
+ TA_RAS_STATUS__SUCCESS = 0x00,
+ TA_RAS_STATUS__RESET_NEEDED = 0xA001,
+ TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
+ TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
+ TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0xA004,
+ TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0xA005,
+ TA_RAS_STATUS__ERROR_ASD_READ_WRITE = 0xA006,
+ TA_RAS_STATUS__ERROR_TOGGLE_DF_CSTATE = 0xA007,
+ TA_RAS_STATUS__ERROR_TIMEOUT = 0xA008,
+	TA_RAS_STATUS__ERROR_BLOCK_DISABLED		= 0xA009,
+ TA_RAS_STATUS__ERROR_GENERIC = 0xA00A,
+ TA_RAS_STATUS__ERROR_RAS_MMHUB_INIT = 0xA00B,
+ TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
+ TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
+ TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F
};
enum ta_ras_block {
@@ -97,22 +105,39 @@ struct ta_ras_trigger_error_input {
uint64_t value; // method of error injection, i.e. persistent, coherent etc.
};
+struct ta_ras_output_flags
+{
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
+};
+
/* Common input structure for RAS callbacks */
/**********************************************************/
union ta_ras_cmd_input {
struct ta_ras_enable_features_input enable_features;
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
+
+ uint32_t reserve_pad[256];
+};
+
+union ta_ras_cmd_output
+{
+ struct ta_ras_output_flags flags;
+
+ uint32_t reserve_pad[256];
};
/* Shared Memory structures */
/**********************************************************/
struct ta_ras_shared_memory {
- uint32_t cmd_id;
- uint32_t resp_id;
- enum ta_ras_status ras_status;
- uint32_t reserved;
- union ta_ras_cmd_input ras_in_message;
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ uint32_t ras_status;
+ uint32_t if_version;
+ union ta_ras_cmd_input ras_in_message;
+ union ta_ras_cmd_output ras_out_message;
};
#endif // _TA_RAS_IF_H
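
The revision above widens ras_status from the enum to a raw uint32_t (the TA now reports the 0xA0xx codes listed), adds if_version, and pads both message unions to 1 KiB so host and TA builds can grow the structures independently. A minimal host-side sketch under the new layout, assuming psp_ras_invoke() and the ras_shared_buf mapping in psp_context, and assuming the enable_features input carries block_id/error_type as declared earlier in this header; the wrapper function is hypothetical:

/*
 * Hedged sketch: fill the shared buffer for an ENABLE_FEATURES call
 * under the reworked layout. psp_ras_invoke() and ras_shared_buf are
 * assumed from amdgpu_psp; the wrapper itself is illustrative only.
 */
static int example_ras_enable(struct psp_context *psp,
			      enum ta_ras_block block,
			      enum ta_ras_error_type type)
{
	struct ta_ras_shared_memory *cmd = psp->ras.ras_shared_buf;
	int ret;

	memset(cmd, 0, sizeof(*cmd));
	cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
	cmd->if_version = RAS_TA_HOST_IF_VER;	/* new in this revision */
	cmd->ras_in_message.enable_features.block_id = block;
	cmd->ras_in_message.enable_features.error_type = type;

	ret = psp_ras_invoke(psp, cmd->cmd_id);
	if (ret)
		return ret;

	/* ras_status is a raw u32 now; compare against the 0xA0xx codes */
	return cmd->ras_status == TA_RAS_STATUS__SUCCESS ? 0 : -EIO;
}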
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 14d346321a5f..418cf097c918 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -56,24 +56,43 @@ const uint32_t
static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev)
{
- WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+ rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 1);
+
+ WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}
static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev)
{
- WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
+
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
+ mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
+
+ rsmu_umc_val = REG_SET_FIELD(rsmu_umc_val,
+ RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN, 0);
+
+ WREG32_PCIE(rsmu_umc_addr * 4, rsmu_umc_val);
}
static uint32_t umc_v6_1_get_umc_index_mode_state(struct amdgpu_device *adev)
{
- uint32_t rsmu_umc_index;
+ uint32_t rsmu_umc_addr, rsmu_umc_val;
- rsmu_umc_index = RREG32_SOC15(RSMU, 0,
+ rsmu_umc_addr = SOC15_REG_OFFSET(RSMU, 0,
mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
+ rsmu_umc_val = RREG32_PCIE(rsmu_umc_addr * 4);
- return REG_GET_FIELD(rsmu_umc_index,
+ return REG_GET_FIELD(rsmu_umc_val,
RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
RSMU_UMC_INDEX_MODE_EN);
}
@@ -85,6 +104,81 @@ static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev,
return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst;
}
+static void umc_v6_1_clear_error_count_per_channel(struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_err_cnt_addr;
+ uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr;
+
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ /* UMC 6_1_2 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCntSel_ARCT);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCnt_ARCT);
+ } else {
+ /* UMC 6_1_1 registers */
+ ecc_err_cnt_sel_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCntSel);
+ ecc_err_cnt_addr =
+ SOC15_REG_OFFSET(UMC, 0,
+ mmUMCCH0_0_EccErrCnt);
+ }
+
+ /* select the lower chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 0);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear lower chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_1_CE_CNT_INIT);
+
+ /* select the higher chip */
+ ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr +
+ umc_reg_offset) * 4);
+ ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel,
+ UMCCH0_0_EccErrCntSel,
+ EccErrCntCsSel, 1);
+ WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4,
+ ecc_err_cnt_sel);
+
+ /* clear higher chip error count */
+ WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4,
+ UMC_V6_1_CE_CNT_INIT);
+}
+
+static void umc_v6_1_clear_error_count(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+ uint32_t rsmu_umc_index_state =
+ umc_v6_1_get_umc_index_mode_state(adev);
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_disable_umc_index_mode(adev);
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_6_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+
+ umc_v6_1_clear_error_count_per_channel(adev,
+ umc_reg_offset);
+ }
+
+ if (rsmu_umc_index_state)
+ umc_v6_1_enable_umc_index_mode(adev);
+}
+
static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
uint32_t umc_reg_offset,
unsigned long *error_count)
@@ -117,23 +211,21 @@ static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev,
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 0);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
- /* clear the lower chip err count */
- WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* select the higher chip and check the err counter */
ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel,
EccErrCntCsSel, 1);
WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel);
+
ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4);
*error_count +=
(REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) -
UMC_V6_1_CE_CNT_INIT);
- /* clear the higher chip err count */
- WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT);
/* check for SRAM correctable error
MCUMC_STATUS is a 64 bit register */
@@ -209,6 +301,8 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
+
+ umc_v6_1_clear_error_count(adev);
}
static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
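
Two changes in this file: the RSMU index-mode toggles go through the PCIE indirect accessors instead of WREG32_FIELD15, and the counter-clearing writes move out of the per-read query path into umc_v6_1_clear_error_count(), which query_ras_error_count now runs once after a full pass so the counters are reset as a batch. Note the addressing detail: SOC15_REG_OFFSET() returns a dword offset while RREG32_PCIE()/WREG32_PCIE() take byte addresses, hence the "* 4". The read-modify-write idiom, condensed into one sketch (the helper name is illustrative):

/*
 * Hedged sketch of the accessor idiom used above: dword register
 * offset, converted to a byte address for the PCIE indirect path.
 */
static void example_set_umc_index_mode(struct amdgpu_device *adev, bool en)
{
	uint32_t addr = SOC15_REG_OFFSET(RSMU, 0,
			mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU);
	uint32_t val = RREG32_PCIE(addr * 4);	/* dword offset * 4 = bytes */

	val = REG_SET_FIELD(val, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU,
			    RSMU_UMC_INDEX_MODE_EN, en ? 1 : 0);
	WREG32_PCIE(addr * 4, val);
}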
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 82abd8e728ab..3cafba726587 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -118,7 +118,8 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -210,13 +211,10 @@ done:
static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v4_2_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
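
From here on, every UVD/VCE/VCN block passes an explicit hardware priority to amdgpu_ring_init(), and the hw_fini callbacks stop clearing ring->sched.ready by hand, that flag now being owned by the common ring teardown. The updated prototype as implied by the callers in this diff; the parameter names are inferred, not quoted from the header:

int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio);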
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 0fa8aae2d78e..a566ff926e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -116,7 +116,8 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -208,13 +209,10 @@ done:
static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v5_0_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index e0aadcaf6c8b..0a880bc101b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -216,7 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -279,7 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -416,7 +418,8 @@ static int uvd_v6_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -428,7 +431,9 @@ static int uvd_v6_0_sw_init(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
ring = &adev->uvd.inst->ring_enc[i];
sprintf(ring->name, "uvd_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -535,13 +540,10 @@ done:
static int uvd_v6_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->uvd.inst->ring;
if (RREG32(mmUVD_STATUS) != 0)
uvd_v6_0_stop(adev);
- ring->sched.ready = false;
-
return 0;
}
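
The encode ring test helpers above now name the IB pool they allocate from; direct submission paths use AMDGPU_IB_POOL_DIRECT so they keep working while the scheduler-fed pools are quiesced, e.g. during reset. The prototype implied by these callers; the enum type name is an assumption based on the pool constant visible here:

int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned int size,
			     enum amdgpu_ib_pool_type pool_type,
			     struct amdgpu_job **job);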
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 0995378d8263..7a55457e6f9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -224,7 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -286,7 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
uint64_t addr;
int i, r;
- r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
+ r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+ AMDGPU_IB_POOL_DIRECT, &job);
if (r)
return r;
@@ -450,7 +452,9 @@ static int uvd_v7_0_sw_init(void *handle)
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd_%d", ring->me);
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -469,7 +473,9 @@ static int uvd_v7_0_sw_init(void *handle)
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.uvd_ring2_3 * 2 + 1;
}
- r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->uvd.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -598,7 +604,6 @@ done:
static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (!amdgpu_sriov_vf(adev))
uvd_v7_0_stop(adev);
@@ -607,12 +612,6 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
- if (adev->uvd.harvest_config & (1 << i))
- continue;
- adev->uvd.inst[i].ring.sched.ready = false;
- }
-
return 0;
}
@@ -1694,7 +1693,7 @@ static int uvd_v7_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
+ bool enable = (state == AMD_CG_STATE_GATE);
uvd_v7_0_set_bypass_mode(adev, enable);
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index b6837fcfdba7..0e2945baf0f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -434,7 +434,8 @@ static int vce_v2_0_sw_init(void *handle)
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512,
- &adev->vce.irq, 0);
+ &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 217db187207c..6d9108fa22e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -442,7 +442,8 @@ static int vce_v3_0_sw_init(void *handle)
for (i = 0; i < adev->vce.num_rings; i++) {
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 3fd102efb7af..a0fb119240f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -476,7 +476,8 @@ static int vce_v4_0_sw_init(void *handle)
else
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
- r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -539,7 +540,6 @@ static int vce_v4_0_hw_init(void *handle)
static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
@@ -549,9 +549,6 @@ static int vce_v4_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->vce.num_rings; i++)
- adev->vce.ring[i].sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 09b0572b838d..1ad79155ed00 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -127,7 +127,8 @@ static int vcn_v1_0_sw_init(void *handle)
ring = &adev->vcn.inst->ring_dec;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -145,7 +146,8 @@ static int vcn_v1_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -227,14 +229,11 @@ done:
static int vcn_v1_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
RREG32_SOC15(VCN, 0, mmUVD_STATUS))
vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index ec8091a661df..90ed773695ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -92,6 +92,7 @@ static int vcn_v2_0_sw_init(void *handle)
struct amdgpu_ring *ring;
int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared;
/* VCN DEC TRAP */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
@@ -133,7 +134,8 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.vcn.vcn_ring0_1 << 1;
sprintf(ring->name, "vcn_dec");
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -163,7 +165,8 @@ static int vcn_v2_0_sw_init(void *handle)
else
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
@@ -174,6 +177,8 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
+ fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
return 0;
}
@@ -188,6 +193,9 @@ static int vcn_v2_0_sw_fini(void *handle)
{
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
+
+ fw_shared->present_flag_0 = 0;
amdgpu_virt_free_mm_table(adev);
@@ -223,6 +231,10 @@ static int vcn_v2_0_hw_init(void *handle)
if (r)
goto done;
+ /* disable vcn decode for sriov */
+ if (amdgpu_sriov_vf(adev))
+ ring->sched.ready = false;
+
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
r = amdgpu_ring_test_helper(ring);
@@ -248,21 +260,12 @@ done:
static int vcn_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
- int i;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
vcn_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
- ring->sched.ready = false;
-
- for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- ring = &adev->vcn.inst->ring_enc[i];
- ring->sched.ready = false;
- }
-
return 0;
}
@@ -359,6 +362,15 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ /* non-cache window */
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
+
WREG32_SOC15(UVD, 0, mmUVD_GFX10_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}
@@ -442,13 +454,16 @@ static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirec
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(0, SOC15_DPG_MODE_OFFSET_2_0(
@@ -773,6 +788,7 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
@@ -872,6 +888,12 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
+
/* set the write pointer delay */
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR_CNTL, 0);
@@ -894,11 +916,16 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
return 0;
}
static int vcn_v2_0_start(struct amdgpu_device *adev)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec;
uint32_t rb_bufsz, tmp;
uint32_t lmi_swap_cntl;
@@ -1033,6 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* program the RB_BASE for ring buffer */
WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
@@ -1045,20 +1073,25 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
ring->wptr = RREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR);
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[0];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[1];
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
return 0;
}
@@ -1180,6 +1213,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
@@ -1189,23 +1223,38 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
/* Restore */
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[0];
+ ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst->ring_enc[1];
+ ring->wptr = 0;
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_LO2, ring->gpu_addr);
WREG32_SOC15(UVD, 0, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
WREG32_SOC15(UVD, 0, mmUVD_RB_SIZE2, ring->ring_size / 4);
WREG32_SOC15(UVD, 0, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
WREG32_SOC15(UVD, 0, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_WPTR,
RREG32_SOC15(UVD, 0, mmUVD_SCRATCH2) & 0x7FFFFFFF);
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
SOC15_WAIT_ON_RREG(UVD, 0, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON,
@@ -1796,7 +1845,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
- struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
struct mmsch_v2_0_cmd_end end = { {0} };
struct mmsch_v2_0_init_header *header;
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
@@ -1806,8 +1854,6 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type =
MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
- direct_poll.cmd_header.command_type =
- MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
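
VCN 2.0 gains a firmware-shared window in this file: sw_init publishes AMDGPU_VCN_MULTI_QUEUE_FLAG in present_flag_0, mc_resume points the non-cache window registers at fw_shared_gpu_addr, and each ring-pointer reprogramming is bracketed by a DPG stall plus a FW_QUEUE_RING_RESET flag so the firmware never samples half-updated WPTR/RPTR state (the unused MMSCH direct_poll locals are dropped along the way). The recurring handshake, condensed into a sketch where reprogram_ring_regs() stands in for the WREG32_SOC15 sequences above:

/* Hedged sketch of the stall/reset/unstall bracket repeated per queue. */
WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
	 UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
	 ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);	/* stall DPG */

fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
reprogram_ring_regs();	/* placeholder for the register writes above */
fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

WREG32_P(SOC15_REG_OFFSET(UVD, 0, mmUVD_POWER_STATUS),
	 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);	/* unstall */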
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index c6363f5ad564..3c6eafb62ee6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -86,7 +86,7 @@ static int vcn_v2_5_early_init(void *handle)
adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
adev->vcn.harvest_config |= 1 << i;
}
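
Starting with the harvest check just above, vcn_v2_5.c switches every register access from the UVD IP alias to VCN and repeats the fw_shared and ring-priority plumbing seen in vcn_v2_0.c. The mmUVD_* register names are unchanged; what changes is which per-instance offset table the macro indexes, which matters on multi-instance Arcturus. For reference, the macro as defined in soc15_common.h at the time:

#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)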
@@ -165,6 +165,8 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+ volatile struct amdgpu_fw_shared *fw_shared;
+
if (adev->vcn.harvest_config & (1 << j))
continue;
adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
@@ -175,15 +177,15 @@ static int vcn_v2_5_sw_init(void *handle)
adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(UVD, j, mmUVD_SCRATCH9);
+ adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA0);
+ adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_DATA1);
+ adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(UVD, j, mmUVD_GPCOM_VCPU_CMD);
+ adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
- adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(UVD, j, mmUVD_NO_OP);
+ adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
ring = &adev->vcn.inst[j].ring_dec;
ring->use_doorbell = true;
@@ -191,7 +193,8 @@ static int vcn_v2_5_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
(amdgpu_sriov_vf(adev) ? 2*j : 8*j);
sprintf(ring->name, "vcn_dec_%d", j);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
+ 0, AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
@@ -203,10 +206,15 @@ static int vcn_v2_5_sw_init(void *handle)
(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
- r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0);
+ r = amdgpu_ring_init(adev, ring, 512,
+ &adev->vcn.inst[j].irq, 0,
+ AMDGPU_RING_PRIO_DEFAULT);
if (r)
return r;
}
+
+ fw_shared = adev->vcn.inst[j].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);
}
if (amdgpu_sriov_vf(adev)) {
@@ -230,8 +238,16 @@ static int vcn_v2_5_sw_init(void *handle)
*/
static int vcn_v2_5_sw_fini(void *handle)
{
- int r;
+ int i, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ volatile struct amdgpu_fw_shared *fw_shared;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ }
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
@@ -308,25 +324,16 @@ done:
static int vcn_v2_5_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct amdgpu_ring *ring;
- int i, j;
+ int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- ring = &adev->vcn.inst[i].ring_dec;
if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
RREG32_SOC15(VCN, i, mmUVD_STATUS)))
vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
-
- ring->sched.ready = false;
-
- for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
- ring = &adev->vcn.inst[i].ring_enc[j];
- ring->sched.ready = false;
- }
}
return 0;
@@ -392,38 +399,47 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
continue;
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
offset = 0;
} else {
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET0,
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE0, size);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);
/* cache window 1: stack */
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);
/* cache window 2: context */
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
- WREG32_SOC15(UVD, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);
+
+ /* non-cache window */
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
+ lower_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
+ upper_32_bits(adev->vcn.inst[i].fw_shared_gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
+ WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
}
}
@@ -436,88 +452,91 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
}
offset = 0;
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
offset = size;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET0),
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
}
if (!indirect)
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
else
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);
/* cache window 1: stack */
if (!indirect) {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
} else {
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
}
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);
/* cache window 2: context */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);
/* non-cache window */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect);
+ VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
+ AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -671,19 +690,19 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__MMSCH_MODE_MASK);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
+ VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);
/* turn off clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
+ VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);
/* turn on SUVD clock gating */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
+ VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);
/* turn on sw mode in UVD_SUVD_CGC_CTRL */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
+ VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}
/**
@@ -750,17 +769,18 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
struct amdgpu_ring *ring;
uint32_t rb_bufsz, tmp;
/* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 1,
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* enable dynamic power gating mode */
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_POWER_STATUS, tmp);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);
if (indirect)
adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t*)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
@@ -773,11 +793,11 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* disable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
+ VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);
/* setup mmUVD_LMI_CTRL */
tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
@@ -789,28 +809,28 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
0x00100000L);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
+ VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_CNTL),
+ VCN, 0, mmUVD_MPC_CNTL),
0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUXA0),
+ VCN, 0, mmUVD_MPC_SET_MUXA0),
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUXB0),
+ VCN, 0, mmUVD_MPC_SET_MUXB0),
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MPC_SET_MUX),
+ VCN, 0, mmUVD_MPC_SET_MUX),
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
@@ -818,26 +838,26 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
+ VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
+ VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);
/* enable LMI MC and UMC channels */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
+ VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
/* unblock VCPU register access */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
+ VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);
tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
+ VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);
/* enable master interrupt */
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0(
- UVD, 0, mmUVD_MASTINT_EN),
+ VCN, 0, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
if (indirect)
@@ -853,30 +873,41 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);
+
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* set the write pointer delay */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);
/* set the wb address */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
(upper_32_bits(ring->gpu_addr) >> 2));
/* program the RB_BASE for ring buffer */
- WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);
- WREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2, 0);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);
- ring->wptr = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
+ ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
return 0;
}
@@ -898,12 +929,12 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* disable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
/* set uvd status busy */
- tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
- WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
+ WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
}
if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
@@ -916,44 +947,44 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << i))
continue;
/* enable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);
/* disable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* setup mmUVD_LMI_CTRL */
- tmp = RREG32_SOC15(UVD, i, mmUVD_LMI_CTRL);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
tmp &= ~0xff;
- WREG32_SOC15(UVD, i, mmUVD_LMI_CTRL, tmp | 0x8|
+ WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8|
UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);
/* setup mmUVD_MPC_CNTL */
- tmp = RREG32_SOC15(UVD, i, mmUVD_MPC_CNTL);
+ tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);
/* setup UVD_MPC_SET_MUXA0 */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXA0,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));
/* setup UVD_MPC_SET_MUXB0 */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUXB0,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));
/* setup mmUVD_MPC_SET_MUX */
- WREG32_SOC15(UVD, i, mmUVD_MPC_SET_MUX,
+ WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
@@ -962,30 +993,31 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
vcn_v2_5_mc_resume(adev);
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
if (adev->vcn.harvest_config & (1 << i))
continue;
/* VCN global tiling registers */
- WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
- WREG32_SOC15(UVD, i, mmUVD_GFX8_ADDR_CONFIG,
+ WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
adev->gfx.config.gb_addr_config);
/* enable LMI MC and UMC channels */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_LMI_CTRL2), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
/* unblock VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
for (k = 0; k < 10; ++k) {
uint32_t status;
for (j = 0; j < 100; ++j) {
- status = RREG32_SOC15(UVD, i, mmUVD_STATUS);
+ status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
if (status & 2)
break;
if (amdgpu_emu_mode == 1)
@@ -998,11 +1030,11 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
break;
DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~UVD_VCPU_CNTL__BLK_RST_MASK);
mdelay(10);
@@ -1015,15 +1047,15 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
}
/* enable master interrupt */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_MASTINT_EN),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK,
~UVD_MASTINT_EN__VCPU_EN_MASK);
/* clear the busy bit of VCN_STATUS */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_VMID, 0);
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);
ring = &adev->vcn.inst[i].ring_dec;
/* force RBC into idle state */
@@ -1033,33 +1065,40 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, tmp);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
+ fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
/* program the RB_BASE for ring buffer */
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
lower_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
+ WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
upper_32_bits(ring->gpu_addr));
/* Initialize the ring buffer's read and write pointers */
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR, 0);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);
- ring->wptr = RREG32_SOC15(UVD, i, mmUVD_RBC_RB_RPTR);
- WREG32_SOC15(UVD, i, mmUVD_RBC_RB_WPTR,
+ ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
+ WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
lower_32_bits(ring->wptr));
- ring = &adev->vcn.inst[i].ring_enc[0];
- WREG32_SOC15(UVD, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
+ ring = &adev->vcn.inst[i].ring_enc[0];
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[i].ring_enc[1];
- WREG32_SOC15(UVD, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
}
return 0;
@@ -1079,33 +1118,33 @@ static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of
* memory descriptor location
*/
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
/* 2, update vmid of descriptor */
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
/* use domain0 for MM scheduler */
data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);
/* 3, notify mmsch about the size of this descriptor */
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);
/* 4, set resp to zero */
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
/*
* 5, kick off the initialization and wait until
* VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
*/
- WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+ WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop = 10;
while ((data & 0x10000002) != 0x10000002) {
udelay(100);
- data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
loop--;
if (!loop)
break;
@@ -1128,14 +1167,12 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
uint32_t table_size = 0;
struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
- struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } };
struct mmsch_v1_0_cmd_end end = { { 0 } };
uint32_t *init_table = adev->virt.mm_table.cpu_addr;
struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;
direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
- direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING;
end.cmd_header.command_type = MMSCH_COMMAND__END;
header->version = MMSCH_VERSION;
@@ -1150,93 +1187,93 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
table_size = 0;
MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
} else {
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr));
offset = size;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
}
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
size);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
AMDGPU_VCN_STACK_SIZE);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
AMDGPU_VCN_STACK_SIZE));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
0);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
AMDGPU_VCN_CONTEXT_SIZE);
ring = &adev->vcn.inst[i].ring_enc[0];
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
upper_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
ring->ring_size / 4);
ring = &adev->vcn.inst[i].ring_dec;
ring->wptr = 0;
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
lower_32_bits(ring->gpu_addr));
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i,
+ SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
upper_32_bits(ring->gpu_addr));
@@ -1248,7 +1285,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
MMSCH_V1_0_INSERT_DIRECT_WT(
- SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+ SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);
/* add end packet */
memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
@@ -1269,24 +1306,24 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
uint32_t tmp;
/* Wait for power status to be 1 */
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* wait for read ptr to be equal to write ptr */
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF, ret_code);
- tmp = RREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
+ tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF, ret_code);
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
/* disable dynamic power gating mode */
- WREG32_P(SOC15_REG_OFFSET(UVD, inst_idx, mmUVD_POWER_STATUS), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
return 0;
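
The stop path above gates power-down on the rings being drained: for each ring it samples the write pointer once, then waits for the hardware read pointer to catch up. A hedged sketch of that idle test, with read_rptr() standing in for the RREG32_SOC15(...RPTR*) reads that SOC15_WAIT_ON_RREG polls:

#include <linux/types.h>

/* A ring is considered drained once the engine's read pointer has
 * consumed everything up to the sampled write pointer.
 */
static bool vcn_ring_drained(u32 wptr, u32 (*read_rptr)(void))
{
        return read_rptr() == wptr;
}
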
@@ -1330,17 +1367,17 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
return r;
/* block VCPU register access */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_RB_ARB_CTRL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);
/* reset VCPU */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
UVD_VCPU_CNTL__BLK_RST_MASK,
~UVD_VCPU_CNTL__BLK_RST_MASK);
/* disable VCPU clock */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CNTL), 0,
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
~(UVD_VCPU_CNTL__CLK_EN_MASK));
/* clear status */
@@ -1349,7 +1386,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev)
vcn_v2_5_enable_clock_gating(adev);
/* enable register anti-hang mechanism */
- WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS),
+ WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
}
@@ -1365,55 +1402,69 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
{
struct amdgpu_ring *ring;
uint32_t reg_data = 0;
- int ret_code;
+ int ret_code = 0;
/* pause/unpause if state is changed */
if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
- reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
+ reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
- ret_code = 0;
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
if (!ret_code) {
+ volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+
/* pause DPG */
reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
/* wait for ACK */
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_DPG_PAUSE,
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, ret_code);
+ /* Stall DPG before WPTR/RPTR reset */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
+ ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
/* Restore */
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[0];
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
-
+ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
ring = &adev->vcn.inst[inst_idx].ring_enc[1];
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
- WREG32_SOC15(UVD, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
-
- WREG32_SOC15(UVD, inst_idx, mmUVD_RBC_RB_WPTR,
- RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
-
- SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
+ ring->wptr = 0;
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
+
+ /* Unstall DPG */
+ WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
+ 0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
+
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
- /* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
- WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
+ SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
+ UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
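
Relative to the old code, the restore path above adds two safeguards: DPG power-up is stalled for the whole window, and each ring is flagged as in reset via the firmware-shared FW_QUEUE_RING_RESET bit while its registers are rewritten with wptr forced back to zero. A condensed model of that ordering, folded to a single ring for brevity (the callbacks are hypothetical stand-ins for the WREG32 calls; FW_QUEUE_RING_RESET is the driver's flag from amdgpu_vcn.h):

struct enc_ring_model {
        u64 gpu_addr;
        u32 ring_size;
        u64 wptr;
};

static void dpg_restore_ring(volatile u32 *queue_mode,
                             struct enc_ring_model *ring,
                             void (*stall_dpg)(bool),
                             void (*program_ring)(const struct enc_ring_model *))
{
        stall_dpg(true);                        /* block DPG power-up first */
        *queue_mode |= FW_QUEUE_RING_RESET;     /* firmware: ring reset in flight */
        ring->wptr = 0;                         /* ring restarts empty */
        program_ring(ring);                     /* BASE_LO/HI, SIZE, RPTR, WPTR */
        *queue_mode &= ~FW_QUEUE_RING_RESET;    /* reset window closed */
        stall_dpg(false);                       /* let DPG power-up proceed */
}
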
@@ -1432,7 +1483,7 @@ static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}
/**
@@ -1449,7 +1500,7 @@ static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}
/**
@@ -1463,15 +1514,11 @@ static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
- WREG32_SOC15(UVD, ring->me, mmUVD_SCRATCH2,
- lower_32_bits(ring->wptr) | 0x80000000);
-
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}
}
@@ -1517,9 +1564,9 @@ static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_RPTR2);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}
/**
@@ -1537,12 +1584,12 @@ static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
} else {
if (ring->use_doorbell)
return adev->wb.wb[ring->wptr_offs];
else
- return RREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2);
+ return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
}
}
@@ -1562,14 +1609,14 @@ static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
}
} else {
if (ring->use_doorbell) {
adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
} else {
- WREG32_SOC15(UVD, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
+ WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
}
}
}
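
All of the set_wptr helpers above share one publish pattern: with a doorbell, mirror the 32-bit write pointer into the writeback slot and then ring the doorbell; without one, fall back to an MMIO register write. A generic sketch (wb_slot models adev->wb.wb[ring->wptr_offs]; the two callbacks are hypothetical stand-ins for WDOORBELL32 and WREG32_SOC15):

#include <linux/kernel.h>
#include <linux/types.h>

static void publish_wptr(u64 wptr, bool use_doorbell, u32 *wb_slot,
                         void (*ring_doorbell)(u32),
                         void (*write_mmio_wptr)(u32))
{
        if (use_doorbell) {
                *wb_slot = lower_32_bits(wptr); /* CPU-visible mirror first */
                ring_doorbell(lower_32_bits(wptr));
        } else {
                write_mmio_wptr(lower_32_bits(wptr));
        }
}
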
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 78b35901643b..af8986a55354 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -448,27 +448,6 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
return true;
}
-static void vi_detect_hw_virtualization(struct amdgpu_device *adev)
-{
- uint32_t reg = 0;
-
- if (adev->asic_type == CHIP_TONGA ||
- adev->asic_type == CHIP_FIJI) {
- reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
- /* bit0: 0 means pf and 1 means vf */
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;
- /* bit31: 0 means disable IOV and 1 means enable */
- if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
- adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;
- }
-
- if (reg == 0) {
- if (is_virtual_machine()) /* passthrough mode exclus sr-iov mode */
- adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
- }
-}
-
static const struct amdgpu_allowed_register_entry vi_allowed_read_registers[] = {
{mmGRBM_STATUS},
{mmGRBM_STATUS2},
@@ -765,8 +744,6 @@ static int vi_asic_reset(struct amdgpu_device *adev)
int r;
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
- if (!adev->in_suspend)
- amdgpu_inc_vram_lost(adev);
r = amdgpu_dpm_baco_reset(adev);
} else {
r = vi_asic_pci_config_reset(adev);
@@ -1730,9 +1707,6 @@ static const struct amdgpu_ip_block_version vi_common_ip_block =
int vi_set_ip_blocks(struct amdgpu_device *adev)
{
- /* in early init stage, vbios code won't work */
- vi_detect_hw_virtualization(adev);
-
if (amdgpu_sriov_vf(adev))
adev->virt.ops = &xgpu_vi_virt_ops;
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index 19ddd2312e00..7a01e6133798 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -332,7 +332,7 @@
# define PACKET3_DMA_DATA_CMD_SAIC (1 << 28)
# define PACKET3_DMA_DATA_CMD_DAIC (1 << 29)
# define PACKET3_DMA_DATA_CMD_RAW_WAIT (1 << 30)
-#define PACKET3_AQUIRE_MEM 0x58
+#define PACKET3_ACQUIRE_MEM 0x58
#define PACKET3_REWIND 0x59
#define PACKET3_LOAD_UCONFIG_REG 0x5E
#define PACKET3_LOAD_SH_REG 0x5F
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 0ec5f25adf56..cf0017f4d9d5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -215,6 +215,7 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
}
q_properties->is_interop = false;
+ q_properties->is_gws = false;
q_properties->queue_percent = args->queue_percentage;
q_properties->priority = args->queue_priority;
q_properties->queue_address = args->ring_base_address;
@@ -1322,6 +1323,10 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
goto err_free;
}
+ /* Update the VRAM usage count */
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM)
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + args->size);
+
mutex_unlock(&p->mutex);
args->handle = MAKE_HANDLE(args->gpu_id, idr_handle);
@@ -1337,7 +1342,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return err;
@@ -1351,6 +1356,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
void *mem;
struct kfd_dev *dev;
int ret;
+ uint64_t size = 0;
dev = kfd_device_by_id(GET_GPU_ID(args->handle));
if (!dev)
@@ -1373,7 +1379,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
}
ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd,
- (struct kgd_mem *)mem);
+ (struct kgd_mem *)mem, &size);
/* If freeing the buffer failed, leave the handle in place for
* clean-up during process tear-down.
@@ -1382,6 +1388,8 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
kfd_process_device_remove_obj_handle(
pdd, GET_IDR_HANDLE(args->handle));
+ WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+
err_unlock:
mutex_unlock(&p->mutex);
return ret;
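
The two hunks above keep a per-process, per-device VRAM counter: the alloc ioctl adds args->size, and the free ioctl subtracts the size reported back through the new third argument of amdgpu_amdkfd_gpuvm_free_memory_of_gpu(). Updates happen under p->mutex but use WRITE_ONCE() so the sysfs reader added later in kfd_process.c can use READ_ONCE() without the lock. A reduced sketch of that pairing (helper names are illustrative):

static void pdd_vram_account(struct kfd_process_device *pdd,
                             u64 size, bool alloc)
{
        /* caller holds p->mutex; the lock-free reader below sees
         * either the old or the new value of the 64-bit word
         */
        WRITE_ONCE(pdd->vram_usage,
                   alloc ? pdd->vram_usage + size : pdd->vram_usage - size);
}

static u64 pdd_vram_peek(struct kfd_process_device *pdd)
{
        return READ_ONCE(pdd->vram_usage);      /* lock-free show path */
}
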
@@ -1584,6 +1592,45 @@ copy_from_user_failed:
return err;
}
+static int kfd_ioctl_alloc_queue_gws(struct file *filep,
+ struct kfd_process *p, void *data)
+{
+ int retval;
+ struct kfd_ioctl_alloc_queue_gws_args *args = data;
+ struct queue *q;
+ struct kfd_dev *dev;
+
+ mutex_lock(&p->mutex);
+ q = pqm_get_user_queue(&p->pqm, args->queue_id);
+
+ if (q) {
+ dev = q->device;
+ } else {
+ retval = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (!dev->gws) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
+ retval = -ENODEV;
+ goto out_unlock;
+ }
+
+ retval = pqm_set_gws(&p->pqm, args->queue_id, args->num_gws ? dev->gws : NULL);
+ mutex_unlock(&p->mutex);
+
+ args->first_gws = 0;
+ return retval;
+
+out_unlock:
+ mutex_unlock(&p->mutex);
+ return retval;
+}
+
static int kfd_ioctl_get_dmabuf_info(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -1687,7 +1734,7 @@ static int kfd_ioctl_import_dmabuf(struct file *filep,
return 0;
err_free:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, (struct kgd_mem *)mem, NULL);
err_unlock:
mutex_unlock(&p->mutex);
return r;
@@ -1786,6 +1833,8 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_IMPORT_DMABUF,
kfd_ioctl_import_dmabuf, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_ALLOC_QUEUE_GWS,
+ kfd_ioctl_alloc_queue_gws, 0),
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
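
The new AMDKFD_IOC_ALLOC_QUEUE_GWS entry lets userspace bind the device's global wave sync block to an already-created queue (num_gws != 0) or unbind it (num_gws == 0); first_gws is currently always returned as 0. A hedged userspace sketch — the args fields match what the handler above reads, while the exact struct layout and ioctl number come from the UAPI kfd_ioctl.h:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "kfd_ioctl.h"          /* AMDKFD_IOC_ALLOC_QUEUE_GWS + args struct */

static int kfd_queue_set_gws(int kfd_fd, uint32_t queue_id, uint32_t num_gws)
{
        struct kfd_ioctl_alloc_queue_gws_args args;

        memset(&args, 0, sizeof(args));
        args.queue_id = queue_id;
        args.num_gws = num_gws;         /* 0 detaches GWS from the queue */

        if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_QUEUE_GWS, &args) == -1)
                return -1;              /* ENODEV without HWS/GWS, EINVAL for bad qid */

        return (int)args.first_gws;     /* always 0 in this implementation */
}
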
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index de9f68d5c312..1009a3b8dcc2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -502,7 +502,7 @@ int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
num_nodes = crat_table->num_domains;
image_len = crat_table->length;
- pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
+ pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
for (node_id = 0; node_id < num_nodes; node_id++) {
top_dev = kfd_create_topology_device(device_list);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 05bc6d96ec52..0491ab2b4a9b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -569,6 +569,23 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
}
+static int kfd_gws_init(struct kfd_dev *kfd)
+{
+ int ret = 0;
+
+ if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
+ return 0;
+
+ if (hws_gws_support
+ || (kfd->device_info->asic_family >= CHIP_VEGA10
+ && kfd->device_info->asic_family <= CHIP_RAVEN
+ && kfd->mec2_fw_version >= 0x1b3))
+ ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
+ amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);
+
+ return ret;
+}
+
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
@@ -578,6 +595,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
+ kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
+ KGD_ENGINE_MEC2);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -598,13 +617,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
- /* Allocate global GWS that is shared by all KFD processes */
- if (hws_gws_support && amdgpu_amdkfd_alloc_gws(kfd->kgd,
- amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws)) {
- dev_err(kfd_device, "Could not allocate %d gws\n",
- amdgpu_amdkfd_get_num_gws(kfd->kgd));
- goto out;
- }
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
@@ -662,6 +674,15 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto device_queue_manager_error;
}
+ /* If supported on this device, allocate global GWS that is shared
+ * by all KFD processes
+ */
+ if (kfd_gws_init(kfd)) {
+ dev_err(kfd_device, "Could not allocate %d gws\n",
+ amdgpu_amdkfd_get_num_gws(kfd->kgd));
+ goto gws_error;
+ }
+
if (kfd_iommu_device_init(kfd)) {
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
@@ -691,6 +712,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
+gws_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
kfd_interrupt_exit(kfd);
@@ -701,7 +723,7 @@ kfd_doorbell_error:
kfd_gtt_sa_init_error:
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
- if (hws_gws_support)
+ if (kfd->gws)
amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
@@ -720,7 +742,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
- if (hws_gws_support)
+ if (kfd->gws)
amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 77ea0f0cb163..e9c4867abeff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -505,8 +505,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_vmid(dqm, qpd, q);
}
qpd->queue_count--;
- if (q->properties.is_active)
+ if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
+ }
return retval;
}
@@ -583,6 +588,20 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (!q->properties.is_active && prev_active)
decrement_queue_count(dqm, q->properties.type);
+ if (q->gws && !q->properties.is_gws) {
+ if (q->properties.is_active) {
+ dqm->gws_queue_count++;
+ pdd->qpd.mapped_gws_queue = true;
+ }
+ q->properties.is_gws = true;
+ } else if (!q->gws && q->properties.is_gws) {
+ if (q->properties.is_active) {
+ dqm->gws_queue_count--;
+ pdd->qpd.mapped_gws_queue = false;
+ }
+ q->properties.is_gws = false;
+ }
+
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
@@ -631,6 +650,10 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.type)];
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -744,6 +767,10 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
q->properties.type)];
q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count++;
+ qpd->mapped_gws_queue = true;
+ }
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -913,6 +940,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
dqm->active_cp_queue_count = 0;
+ dqm->gws_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
@@ -1061,7 +1089,9 @@ static int set_sched_resources(struct device_queue_manager *dqm)
break;
}
- res.queue_mask |= (1ull << i);
+ res.queue_mask |= 1ull
+ << amdgpu_queue_mask_bit_to_set_resource_bit(
+ (struct amdgpu_device *)dqm->dev->kgd, i);
}
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
@@ -1082,7 +1112,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->processes_count = 0;
dqm->active_cp_queue_count = 0;
-
+ dqm->gws_queue_count = 0;
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1432,6 +1462,10 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
}
/*
@@ -1650,8 +1684,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- if (q->properties.is_active)
+ if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
+ if (q->properties.is_gws) {
+ dqm->gws_queue_count--;
+ qpd->mapped_gws_queue = false;
+ }
+ }
dqm->total_queue_count--;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 50d919f814e9..4afa015c69b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -182,6 +182,7 @@ struct device_queue_manager {
unsigned int processes_count;
unsigned int active_queue_count;
unsigned int active_cp_queue_count;
+ unsigned int gws_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 15476fca8fa6..a9583b95fcc1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -901,7 +901,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memset(&memory_exception_data, 0, sizeof(memory_exception_data));
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma(mm, address);
memory_exception_data.gpu_id = dev->id;
@@ -924,7 +924,7 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NoExecute = 0;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
pr_debug("notpresent %d, noexecute %d, readonly %d\n",
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index e05d75ecda21..fce6ccabe38b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -37,7 +37,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
if (vmid < dev->vm_info.first_vmid_kfd ||
vmid > dev->vm_info.last_vmid_kfd)
- return 0;
+ return false;
source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
@@ -69,7 +69,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
/* If there is no valid PASID, it's likely a bug */
if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
- return 0;
+ return false;
/* Interrupt types we care about: various signals and faults.
* They will be forwarded to a work queue (see below).
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 8d871514671e..7c8786b9eb0a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -192,7 +192,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
- PCI_BUS_NUM(pdev->devfn),
+ pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
pasid,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index bae706462f96..a2b77d1df854 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -126,6 +126,7 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.queue_size = queue_size;
prop.is_interop = false;
+ prop.is_gws = false;
prop.priority = 1;
prop.queue_percent = 100;
prop.type = type;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index efdb75e7677b..685ca82d42fe 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -41,7 +41,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
unsigned int *rlib_size,
bool *over_subscription)
{
- unsigned int process_count, queue_count, compute_queue_count;
+ unsigned int process_count, queue_count, compute_queue_count, gws_queue_count;
unsigned int map_queue_size;
unsigned int max_proc_per_quantum = 1;
struct kfd_dev *dev = pm->dqm->dev;
@@ -49,6 +49,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
process_count = pm->dqm->processes_count;
queue_count = pm->dqm->active_queue_count;
compute_queue_count = pm->dqm->active_cp_queue_count;
+ gws_queue_count = pm->dqm->gws_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -61,7 +62,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_cp_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm) ||
+ gws_queue_count > 1) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2de01009f1b6..bdca9dc5f118 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -43,7 +43,7 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.pasid = qpd->pqm->process->pasid;
packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
- packet->bitfields14.num_gws = qpd->num_gws;
+ packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 4a3049841086..fee60921fccf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -40,6 +40,7 @@
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
+#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>
@@ -282,6 +283,7 @@ struct kfd_dev {
/* Firmware versions */
uint16_t mec_fw_version;
+ uint16_t mec2_fw_version;
uint16_t sdma_fw_version;
/* Maximum process number mapped to HW scheduler */
@@ -410,6 +412,10 @@ enum KFD_QUEUE_PRIORITY {
* @is_active: Defines if the queue is active or not. @is_active and
* @is_evicted are protected by the DQM lock.
*
+ * @is_gws: Defines if the queue has been updated to be GWS-capable or not.
+ * @is_gws should be protected by the DQM lock, since changing it affects the
+ * DQM bookkeeping for the number of GWS queues.
+ *
* @vmid: If the scheduling mode is no cp scheduling the field defines the vmid
* of the queue.
*
@@ -432,6 +438,7 @@ struct queue_properties {
bool is_interop;
bool is_evicted;
bool is_active;
+ bool is_gws;
/* Not relevant for user mode queues in cp scheduling */
unsigned int vmid;
/* Relevant only for sdma queues*/
@@ -563,6 +570,14 @@ struct qcm_process_device {
*/
bool reset_wavefronts;
+ /* This flag tells us if this process has a GWS-capable
+ * queue that will be mapped into the runlist. It's
+ * possible to request a GWS BO, but not have the queue
+ * currently mapped, and this changes how the MAP_PROCESS
+ * PM4 packet is configured.
+ */
+ bool mapped_gws_queue;
+
/*
* All the memory management data should be here too
*/
@@ -615,6 +630,8 @@ enum kfd_pdd_bound {
PDD_BOUND_SUSPENDED,
};
+#define MAX_VRAM_FILENAME_LEN 11
+
/* Data that is per-process-per device. */
struct kfd_process_device {
/*
@@ -657,6 +674,11 @@ struct kfd_process_device {
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
+
+ /* VRAM usage */
+ uint64_t vram_usage;
+ struct attribute attr_vram;
+ char vram_filename[MAX_VRAM_FILENAME_LEN];
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -923,6 +945,8 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+ unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
@@ -1050,10 +1074,10 @@ void kfd_dec_compute_active(struct kfd_dev *dev);
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
-#if defined(CONFIG_CGROUP_DEVICE)
+#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = kfd->ddev;
- return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,
DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index fe0cd49d4ea7..d27221ddcdeb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -79,18 +79,22 @@ static struct kfd_procfs_tree procfs;
static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
- int val = 0;
-
if (strcmp(attr->name, "pasid") == 0) {
struct kfd_process *p = container_of(attr, struct kfd_process,
attr_pasid);
- val = p->pasid;
+
+ return snprintf(buffer, PAGE_SIZE, "%d\n", p->pasid);
+ } else if (strncmp(attr->name, "vram_", 5) == 0) {
+ struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
+ attr_vram);
+ if (pdd)
+ return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
} else {
pr_err("Invalid attribute");
return -EINVAL;
}
- return snprintf(buffer, PAGE_SIZE, "%d\n", val);
+ return 0;
}
static void kfd_procfs_kobj_release(struct kobject *kobj)
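
With kfd_procfs_add_vram_usage() below creating one vram_<gpuid> attribute per device, the counter surfaces as a decimal byte count under the process's kfd sysfs node, conventionally /sys/class/kfd/kfd/proc/<pid>/vram_<gpuid> (the root path is an assumption; the patch itself only defines the leaf name). A small reader sketch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static int read_vram_usage(const char *path, uint64_t *bytes)
{
        FILE *f = fopen(path, "r");
        int ok;

        if (!f)
                return -1;
        ok = (fscanf(f, "%" SCNu64, bytes) == 1);       /* "%llu\n" writer above */
        fclose(f);
        return ok ? 0 : -1;
}
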
@@ -206,6 +210,34 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
+int kfd_procfs_add_vram_usage(struct kfd_process *p)
+{
+ int ret = 0;
+ struct kfd_process_device *pdd;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!p->kobj)
+ return -EFAULT;
+
+ /* Create proc/<pid>/vram_<gpuid> file for each GPU */
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+ snprintf(pdd->vram_filename, MAX_VRAM_FILENAME_LEN, "vram_%u",
+ pdd->dev->id);
+ pdd->attr_vram.name = pdd->vram_filename;
+ pdd->attr_vram.mode = KFD_SYSFS_FILE_MODE;
+ sysfs_attr_init(&pdd->attr_vram);
+ ret = sysfs_create_file(p->kobj, &pdd->attr_vram);
+ if (ret)
+ pr_warn("Creating vram usage for gpu id %d failed",
+ (int)pdd->dev->id);
+ }
+
+ return ret;
+}
+
+
void kfd_procfs_del_queue(struct queue *q)
{
if (!q)
@@ -248,7 +280,7 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
struct kfd_dev *dev = pdd->dev;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -312,7 +344,7 @@ sync_memory_failed:
return err;
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -411,6 +443,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
+
+ ret = kfd_procfs_add_vram_usage(process);
+ if (ret)
+ pr_warn("Creating vram usage file for pid %d failed",
+ (int)process->lead_thread->pid);
}
out:
if (!IS_ERR(process))
@@ -488,7 +525,7 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
peer_pdd->dev->kgd, mem, peer_pdd->vm);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@@ -551,6 +588,7 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+ struct kfd_process_device *pdd;
/* Remove the procfs files */
if (p->kobj) {
@@ -558,6 +596,10 @@ static void kfd_process_wq_release(struct work_struct *work)
kobject_del(p->kobj_queues);
kobject_put(p->kobj_queues);
p->kobj_queues = NULL;
+
+ list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -858,10 +900,12 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->qpd.dqm = dev->dqm;
pdd->qpd.pqm = &p->pqm;
pdd->qpd.evicted = 0;
+ pdd->qpd.mapped_gws_queue = false;
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
pdd->runtime_inuse = false;
+ pdd->vram_usage = 0;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -1078,7 +1122,7 @@ struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
return p;
}
-/* process_evict_queues - Evict all user queues of a process
+/* kfd_process_evict_queues - Evict all user queues of a process
*
* Eviction is reference-counted per process-device. This means multiple
* evictions from different sources can be nested safely.
@@ -1118,7 +1162,7 @@ fail:
return r;
}
-/* process_restore_queues - Restore all user queues of a process */
+/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
struct kfd_process_device *pdd;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 084c35f55d59..eb1635ac8988 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -476,6 +476,15 @@ struct kernel_queue *pqm_get_kernel_queue(
return NULL;
}
+struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
+ unsigned int qid)
+{
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ return pqn ? pqn->q : NULL;
+}
+
int pqm_get_wave_state(struct process_queue_manager *pqm,
unsigned int qid,
void __user *ctl_stack,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index aa0bfa78a667..bb77f7af2b6d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.device_id);
sysfs_show_32bit_prop(buffer, "location_id",
dev->node_props.location_id);
+ sysfs_show_32bit_prop(buffer, "domain",
+ dev->node_props.domain);
sysfs_show_32bit_prop(buffer, "drm_render_minor",
dev->node_props.drm_render_minor);
sysfs_show_64bit_prop(buffer, "hive_id",
@@ -787,7 +789,6 @@ static int kfd_topology_update_sysfs(void)
{
int ret;
- pr_info("Creating topology SYSFS entries\n");
if (!sys_props.kobj_topology) {
sys_props.kobj_topology =
kfd_alloc_struct(sys_props.kobj_topology);
@@ -1048,7 +1049,6 @@ int kfd_topology_init(void)
sys_props.generation_count++;
kfd_update_system_properties();
kfd_debug_print_topology();
- pr_info("Finished initializing topology\n");
} else
pr_err("Failed to update topology in sysfs ret=%d\n", ret);
@@ -1303,7 +1303,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.vendor_id = gpu->pdev->vendor;
dev->node_props.device_id = gpu->pdev->device;
+ dev->node_props.capability |=
+ ((amdgpu_amdkfd_get_asic_rev_id(dev->gpu->kgd) <<
+ HSA_CAP_ASIC_REVISION_SHIFT) &
+ HSA_CAP_ASIC_REVISION_MASK);
dev->node_props.location_id = pci_dev_id(gpu->pdev);
+ dev->node_props.domain = pci_domain_nr(gpu->pdev->bus);
dev->node_props.max_engine_clk_fcompute =
amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd);
dev->node_props.max_engine_clk_ccompute =
@@ -1317,7 +1322,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
gpu->device_info->num_xgmi_sdma_engines;
dev->node_props.num_sdma_queues_per_engine =
gpu->device_info->num_sdma_queues_per_engine;
- dev->node_props.num_gws = (hws_gws_support &&
+ dev->node_props.num_gws = (dev->gpu->gws &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 46eeecaf1b68..326d9b26b7aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -41,7 +41,6 @@
#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK 0x00003000
#define HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT 12
-#define HSA_CAP_RESERVED 0xffffc000
#define HSA_CAP_DOORBELL_TYPE_PRE_1_0 0x0
#define HSA_CAP_DOORBELL_TYPE_1_0 0x1
@@ -51,6 +50,10 @@
#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+#define HSA_CAP_ASIC_REVISION_MASK 0x03c00000
+#define HSA_CAP_ASIC_REVISION_SHIFT 22
+
+#define HSA_CAP_RESERVED 0xfc078000
struct kfd_node_properties {
uint64_t hive_id;
@@ -77,6 +80,7 @@ struct kfd_node_properties {
uint32_t vendor_id;
uint32_t device_id;
uint32_t location_id;
+ uint32_t domain;
uint32_t max_engine_clk_fcompute;
uint32_t max_engine_clk_ccompute;
int32_t drm_render_minor;
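
kfd_topology_add_device() now packs the ASIC revision into bits 22-25 of the node's capability word using the new mask/shift pair, so a topology consumer recovers it with the usual mask-and-shift. For example:

#include <linux/types.h>

static inline u32 hsa_cap_asic_revision(u32 capability)
{
        return (capability & HSA_CAP_ASIC_REVISION_MASK) >>
               HSA_CAP_ASIC_REVISION_SHIFT;
}
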
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 87858bc57e64..1911a34cc060 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -21,16 +21,12 @@ config DRM_AMD_DC_HDCP
bool "Enable HDCP support in DC"
depends on DRM_AMD_DC
help
- Choose this option
- if you want to support
- HDCP authentication
+ Choose this option if you want to support HDCP authentication.
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
help
- Choose this option
- if you want to hit
- kdgb_break in assert.
+ Choose this option if you want to hit kgdb_break in assert.
endmenu
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f7c5cdc10a70..7ced9f87be97 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -30,7 +30,7 @@
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
#include "dc/inc/hw/dmcu.h"
#include "dc/inc/hw/abm.h"
#include "dc/dc_dmub_srv.h"
@@ -441,7 +441,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
/**
* dm_crtc_high_irq() - Handles CRTC interrupt
- * @interrupt_params: ignored
+ * @interrupt_params: used for determining the CRTC instance
*
* Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK
* event handler.
@@ -455,70 +455,6 @@ static void dm_crtc_high_irq(void *interrupt_params)
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
- if (acrtc) {
- acrtc_state = to_dm_crtc_state(acrtc->base.state);
-
- DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
- acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
-
- /* Core vblank handling at start of front-porch is only possible
- * in non-vrr mode, as only there vblank timestamping will give
- * valid results while done in front-porch. Otherwise defer it
- * to dm_vupdate_high_irq after end of front-porch.
- */
- if (!amdgpu_dm_vrr_active(acrtc_state))
- drm_crtc_handle_vblank(&acrtc->base);
-
- /* Following stuff must happen at start of vblank, for crc
- * computation and below-the-range btr support in vrr mode.
- */
- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
- if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
- acrtc_state->vrr_params.supported &&
- acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
- spin_lock_irqsave(&adev->ddev->event_lock, flags);
- mod_freesync_handle_v_update(
- adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
-
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
- spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- }
- }
-}
-
-#if defined(CONFIG_DRM_AMD_DC_DCN)
-/**
- * dm_dcn_crtc_high_irq() - Handles VStartup interrupt for DCN generation ASICs
- * @interrupt params - interrupt parameters
- *
- * Notify DRM's vblank event handler at VSTARTUP
- *
- * Unlike DCE hardware, we trigger the handler at VSTARTUP. at which:
- * * We are close enough to VUPDATE - the point of no return for hw
- * * We are in the fixed portion of variable front porch when vrr is enabled
- * * We are before VUPDATE, where double-buffered vrr registers are swapped
- *
- * It is therefore the correct place to signal vblank, send user flip events,
- * and update VRR.
- */
-static void dm_dcn_crtc_high_irq(void *interrupt_params)
-{
- struct common_irq_params *irq_params = interrupt_params;
- struct amdgpu_device *adev = irq_params->adev;
- struct amdgpu_crtc *acrtc;
- struct dm_crtc_state *acrtc_state;
- unsigned long flags;
-
- acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
-
if (!acrtc)
return;
@@ -528,22 +464,35 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
amdgpu_dm_vrr_active(acrtc_state),
acrtc_state->active_planes);
+ /**
+ * Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!amdgpu_dm_vrr_active(acrtc_state))
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /**
+ * Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
- drm_crtc_handle_vblank(&acrtc->base);
+
+ /* BTR updates need to happen before VUPDATE on Vega and above. */
+ if (adev->family < AMDGPU_FAMILY_AI)
+ return;
spin_lock_irqsave(&adev->ddev->event_lock, flags);
- if (acrtc_state->vrr_params.supported &&
+ if (acrtc_state->stream && acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
- mod_freesync_handle_v_update(
- adev->dm.freesync_module,
- acrtc_state->stream,
- &acrtc_state->vrr_params);
+ mod_freesync_handle_v_update(adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
- dc_stream_adjust_vmin_vmax(
- adev->dm.dc,
- acrtc_state->stream,
- &acrtc_state->vrr_params.adjust);
+ dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
}
/*
@@ -556,7 +505,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
* avoid race conditions between flip programming and completion,
* which could cause too early flip completion events.
*/
- if (acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
+ if (adev->family >= AMDGPU_FAMILY_RV &&
+ acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
acrtc_state->active_planes == 0) {
if (acrtc->event) {
drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
@@ -568,7 +518,6 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
-#endif
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
@@ -825,8 +774,9 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_inst_const_size);
}
- memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
- fw_bss_data_size);
+ if (fw_bss_data_size)
+ memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
+ fw_bss_data, fw_bss_data_size);
/* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
@@ -968,6 +918,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
+ if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
+ adev->dm.dc->debug.force_single_disp_pipe_split = false;
+ adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
+ adev->dm.dc->debug.disable_stutter = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
+ adev->dm.dc->debug.disable_dsc = true;
+
+ if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
+ adev->dm.dc->debug.disable_clock_gate = true;
+
r = dm_dmub_hw_init(adev);
if (r) {
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1265,6 +1232,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
le32_to_cpu(hdr->inst_const_bytes);
+ region_params.fw_inst_const =
+ adev->dm.dmub_fw->data +
+ le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
+ PSP_HEADER_BYTES;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@@ -1384,8 +1355,13 @@ static int dm_late_init(void *handle)
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
int i;
- struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
- bool ret = false;
+ struct dmcu *dmcu = NULL;
+ bool ret;
+
+ if (!adev->dm.fw_dmcu)
+ return detect_mst_link_for_all_connectors(adev->ddev);
+
+ dmcu = adev->dm.dc->res_pool->dmcu;
for (i = 0; i < 16; i++)
linear_lut[i] = 0xFFFF * i / 15;
@@ -1401,13 +1377,10 @@ static int dm_late_init(void *handle)
*/
params.min_abm_backlight = 0x28F;
- /* todo will enable for navi10 */
- if (adev->asic_type <= CHIP_RAVEN) {
- ret = dmcu_load_iram(dmcu, params);
+ ret = dmcu_load_iram(dmcu, params);
- if (!ret)
- return -EINVAL;
- }
+ if (!ret)
+ return -EINVAL;
return detect_mst_link_for_all_connectors(adev->ddev);
}
@@ -1562,12 +1535,115 @@ static int dm_hw_fini(void *handle)
return 0;
}
+
+static int dm_enable_vblank(struct drm_crtc *crtc);
+static void dm_disable_vblank(struct drm_crtc *crtc);
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+ struct dc_state *state, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc;
+ int rc = -EBUSY;
+ int i = 0;
+
+ for (i = 0; i < state->stream_count; i++) {
+ acrtc = get_crtc_by_otg_inst(
+ adev, state->stream_status[i].primary_otg_inst);
+
+ if (acrtc && state->stream_status[i].plane_count != 0) {
+ irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+ DRM_DEBUG("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+ if (rc)
+ DRM_WARN("Failed to %s pflip interrupts\n",
+ enable ? "enable" : "disable");
+
+ if (enable) {
+ rc = dm_enable_vblank(&acrtc->base);
+ if (rc)
+ DRM_WARN("Failed to enable vblank interrupts\n");
+ } else {
+ dm_disable_vblank(&acrtc->base);
+ }
+
+ }
+ }
+
+}
+
+enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
+{
+ struct dc_state *context = NULL;
+ enum dc_status res = DC_ERROR_UNEXPECTED;
+ int i;
+ struct dc_stream_state *del_streams[MAX_PIPES];
+ int del_streams_count = 0;
+
+ memset(del_streams, 0, sizeof(del_streams));
+
+ context = dc_create_state(dc);
+ if (context == NULL)
+ goto context_alloc_fail;
+
+ dc_resource_state_copy_construct_current(dc, context);
+
+ /* First remove from context all streams */
+ for (i = 0; i < context->stream_count; i++) {
+ struct dc_stream_state *stream = context->streams[i];
+
+ del_streams[del_streams_count++] = stream;
+ }
+
+ /* Remove all planes for removed streams and then remove the streams */
+ for (i = 0; i < del_streams_count; i++) {
+ if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
+ if (res != DC_OK)
+ goto fail;
+ }
+
+
+ res = dc_validate_global_state(dc, context, false);
+
+ if (res != DC_OK) {
+ DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
+ goto fail;
+ }
+
+ res = dc_commit_state(dc, context);
+
+fail:
+ dc_release_state(context);
+
+context_alloc_fail:
+ return res;
+}
+
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ if (adev->in_gpu_reset) {
+ mutex_lock(&dm->dc_lock);
+ dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+
+ amdgpu_dm_commit_zero_streams(dm->dc);
+
+ amdgpu_dm_irq_suspend(adev);
+
+ return ret;
+ }
+
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
@@ -1578,7 +1654,7 @@ static int dm_suspend(void *handle)
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
- return ret;
+ return 0;
}
static struct amdgpu_dm_connector *
@@ -1682,6 +1758,46 @@ static void emulated_link_detect(struct dc_link *link)
}
+static void dm_gpureset_commit_state(struct dc_state *dc_state,
+ struct amdgpu_display_manager *dm)
+{
+ struct {
+ struct dc_surface_update surface_updates[MAX_SURFACES];
+ struct dc_plane_info plane_infos[MAX_SURFACES];
+ struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
+ struct dc_stream_update stream_update;
+ } * bundle;
+ int k, m;
+
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
+
+ if (!bundle) {
+ dm_error("Failed to allocate update bundle\n");
+ goto cleanup;
+ }
+
+ for (k = 0; k < dc_state->stream_count; k++) {
+ bundle->stream_update.stream = dc_state->streams[k];
+
+ for (m = 0; m < dc_state->stream_status->plane_count; m++) {
+ bundle->surface_updates[m].surface =
+ dc_state->stream_status->plane_states[m];
+ bundle->surface_updates[m].surface->force_full_update =
+ true;
+ }
+ dc_commit_updates_for_stream(
+ dm->dc, bundle->surface_updates,
+ dc_state->stream_status->plane_count,
+ dc_state->streams[k], &bundle->stream_update, dc_state);
+ }
+
+cleanup:
+ kfree(bundle);
+
+ return;
+}
+
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -1698,8 +1814,44 @@ static int dm_resume(void *handle)
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
- int i, r;
+ struct dc_state *dc_state;
+ int i, r, j;
+
+ if (adev->in_gpu_reset) {
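+ /* GPU-reset path, paired with dm_suspend(): replay the DC state
+ * cached at suspend time, then release the dc_lock taken there. */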
+ dc_state = dm->cached_dc_state;
+
+ r = dm_dmub_hw_init(adev);
+ if (r)
+ DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+ dc_resume(dm->dc);
+
+ amdgpu_dm_irq_resume_early(adev);
+
+ for (i = 0; i < dc_state->stream_count; i++) {
+ dc_state->streams[i]->mode_changed = true;
+ for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+ dc_state->stream_status[i].plane_states[j]->update_flags.raw
+ = 0xffffffff;
+ }
+ }
+
+ WARN_ON(!dc_commit_state(dm->dc, dc_state));
+
+ dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+ dc_release_state(dm->cached_dc_state);
+ dm->cached_dc_state = NULL;
+
+ amdgpu_dm_irq_resume_late(adev);
+
+ mutex_unlock(&dm->dc_lock);
+
+ return 0;
+ }
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);
@@ -2008,17 +2160,22 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
- drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+ if (aconnector->dc_link->aux_mode) {
+ drm_dp_cec_unset_edid(
+ &aconnector->dm_dp_aux.aux);
+ }
} else {
aconnector->edid =
- (struct edid *) sink->dc_edid.raw_edid;
-
+ (struct edid *)sink->dc_edid.raw_edid;
drm_connector_update_edid_property(connector,
- aconnector->edid);
- drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
- aconnector->edid);
+ aconnector->edid);
+
+ if (aconnector->dc_link->aux_mode)
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
}
+
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
update_connector_ext_caps(aconnector);
} else {
@@ -2440,8 +2597,36 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
+ amdgpu_dm_irq_register_interrupt(
+ adev, &int_params, dm_crtc_high_irq, c_irq_params);
+ }
+
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at the end of each vblank, regardless of the state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
amdgpu_dm_irq_register_interrupt(adev, &int_params,
- dm_dcn_crtc_high_irq, c_irq_params);
+ dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
@@ -3031,9 +3216,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
- if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
- dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-
/* No userspace support. */
dm->dc->debug.disable_tri_buf = true;
@@ -3304,7 +3486,7 @@ static int fill_dc_scaling_info(const struct drm_plane_state *state,
}
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
- uint64_t *tiling_flags)
+ uint64_t *tiling_flags, bool *tmz_surface)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
@@ -3319,6 +3501,9 @@ static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
if (tiling_flags)
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
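+ /* amdgpu_bo_encrypted() reports whether the BO lives in TMZ (protected) memory. */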
+ if (tmz_surface)
+ *tmz_surface = amdgpu_bo_encrypted(rbo);
+
amdgpu_bo_unreserve(rbo);
return r;
@@ -3340,7 +3525,8 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool force_disable_dcc)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
@@ -3352,6 +3538,9 @@ fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output));
+ if (force_disable_dcc)
+ return 0;
+
if (!offset)
return 0;
@@ -3401,7 +3590,9 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool tmz_surface,
+ bool force_disable_dcc)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
@@ -3411,6 +3602,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
memset(dcc, 0, sizeof(*dcc));
memset(address, 0, sizeof(*address));
+ address->tmz_surface = tmz_surface;
+
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
@@ -3507,7 +3700,8 @@ fill_plane_buffer_attributes(struct amdgpu_device *adev,
ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
plane_size, tiling_info,
- tiling_flags, dcc, address);
+ tiling_flags, dcc, address,
+ force_disable_dcc);
if (ret)
return ret;
}
@@ -3599,7 +3793,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_plane_state *plane_state,
const uint64_t tiling_flags,
struct dc_plane_info *plane_info,
- struct dc_plane_address *address)
+ struct dc_plane_address *address,
+ bool tmz_surface,
+ bool force_disable_dcc)
{
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
@@ -3642,6 +3838,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_P010:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
break;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
+ break;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@@ -3681,7 +3885,8 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
plane_info->rotation, tiling_flags,
&plane_info->tiling_info,
&plane_info->plane_size,
- &plane_info->dcc, address);
+ &plane_info->dcc, address, tmz_surface,
+ force_disable_dcc);
if (ret)
return ret;
@@ -3704,6 +3909,8 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
struct dc_plane_info plane_info;
uint64_t tiling_flags;
int ret;
+ bool tmz_surface = false;
+ bool force_disable_dcc = false;
ret = fill_dc_scaling_info(plane_state, &scaling_info);
if (ret)
@@ -3714,13 +3921,16 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
dc_plane_state->clip_rect = scaling_info.clip_rect;
dc_plane_state->scaling_quality = scaling_info.scaling_quality;
- ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
if (ret)
return ret;
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
&plane_info,
- &dc_plane_state->address);
+ &dc_plane_state->address,
+ tmz_surface,
+ force_disable_dcc);
if (ret)
return ret;
@@ -3807,8 +4017,7 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode,
static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
- const struct drm_connector_state *state,
- bool is_y420)
+ bool is_y420, int requested_bpc)
{
uint8_t bpc;
@@ -3828,10 +4037,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
bpc = bpc ? bpc : 8;
}
- if (!state)
- state = connector->state;
-
- if (state) {
+ if (requested_bpc > 0) {
/*
* Cap display bpc based on the user requested value.
*
@@ -3840,7 +4046,7 @@ convert_color_depth_from_display_info(const struct drm_connector *connector,
* or if this was called outside of atomic check, so it
* can't be used directly.
*/
- bpc = min(bpc, state->max_requested_bpc);
+ bpc = min_t(u8, bpc, requested_bpc);
/* Round down to the nearest even number. */
bpc = bpc - (bpc & 1);
@@ -3962,7 +4168,8 @@ static void fill_stream_properties_from_drm_display_mode(
const struct drm_display_mode *mode_in,
const struct drm_connector *connector,
const struct drm_connector_state *connector_state,
- const struct dc_stream_state *old_stream)
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
{
struct dc_crtc_timing *timing_out = &stream->timing;
const struct drm_display_info *info = &connector->display_info;
@@ -3992,8 +4199,9 @@ static void fill_stream_properties_from_drm_display_mode(
timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
timing_out->display_color_depth = convert_color_depth_from_display_info(
- connector, connector_state,
- (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420));
+ connector,
+ (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
+ requested_bpc);
timing_out->scan_type = SCANNING_TYPE_NODATA;
timing_out->hdmi_vic = 0;
@@ -4199,7 +4407,8 @@ static struct dc_stream_state *
create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
const struct drm_display_mode *drm_mode,
const struct dm_connector_state *dm_state,
- const struct dc_stream_state *old_stream)
+ const struct dc_stream_state *old_stream,
+ int requested_bpc)
{
struct drm_display_mode *preferred_mode = NULL;
struct drm_connector *drm_connector;
@@ -4284,10 +4493,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
*/
if (!scale || mode_refresh != preferred_refresh)
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, NULL);
+ &mode, &aconnector->base, con_state, NULL, requested_bpc);
else
fill_stream_properties_from_drm_display_mode(stream,
- &mode, &aconnector->base, con_state, old_stream);
+ &mode, &aconnector->base, con_state, old_stream, requested_bpc);
stream->timing.flags.DSC = 0;
@@ -4324,14 +4533,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, false, false);
- if (stream->link->psr_feature_enabled) {
+ if (stream->link->psr_settings.psr_feature_enabled) {
struct dc *core_dc = stream->link->ctx->dc;
if (dc_is_dmcu_initialized(core_dc)) {
- struct dmcu *dmcu = core_dc->res_pool->dmcu;
-
- stream->psr_version = dmcu->dmcu_version.psr_version;
-
//
// Should decide whether the stream supports VSC SDP colorimetry
// before building the VSC info packet.
@@ -4437,10 +4642,6 @@ static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
struct amdgpu_device *adev = crtc->dev->dev_private;
int rc;
- /* Do not set vupdate for DCN hardware */
- if (adev->family > AMDGPU_FAMILY_AI)
- return 0;
-
irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -4664,6 +4865,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
i2c_del_adapter(&aconnector->i2c->base);
kfree(aconnector->i2c);
}
+ kfree(aconnector->dm_dp_aux.aux.name);
kfree(connector);
}
@@ -4723,10 +4925,19 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
-#if defined(CONFIG_DEBUG_FS)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
+ int r;
+
+ if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+ (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
+ }
+#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
@@ -4804,16 +5015,54 @@ static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
create_eml_sink(aconnector);
}
+static struct dc_stream_state *
+create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
+ const struct drm_display_mode *drm_mode,
+ const struct dm_connector_state *dm_state,
+ const struct dc_stream_state *old_stream)
+{
+ struct drm_connector *connector = &aconnector->base;
+ struct amdgpu_device *adev = connector->dev->dev_private;
+ struct dc_stream_state *stream;
+ int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
+ do {
+ stream = create_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result);
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ return stream;
+}
+
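For reference, the loop above starts from the connector's max_requested_bpc (defaulting to 8 when there is no connector state) and walks the depth down in steps of two until dc_validate_stream() accepts the mode or the 6 bpc floor is hit. A minimal standalone sketch of the same fallback pattern, with a hypothetical validate() stub standing in for stream creation plus DC validation:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for "create the stream and run dc_validate_stream()"; here the
 * imaginary sink only validates depths of 8 bpc or less. */
static bool validate(int bpc)
{
	return bpc <= 8;
}

int main(void)
{
	/* e.g. connector->state->max_requested_bpc, defaulting to 8 */
	int requested_bpc = 12;
	bool ok = false;

	do {
		ok = validate(requested_bpc);
		if (!ok)
			requested_bpc -= 2; /* lower bpc to retry validation */
	} while (!ok && requested_bpc >= 6);

	if (ok)
		printf("settled on %d bpc\n", requested_bpc);
	else
		printf("no valid mode down to 6 bpc\n");
	return 0;
}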
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int result = MODE_ERROR;
struct dc_sink *dc_sink;
- struct amdgpu_device *adev = connector->dev->dev_private;
/* TODO: Unhardcode stream count */
struct dc_stream_state *stream;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- enum dc_status dc_result = DC_OK;
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN))
@@ -4834,24 +5083,11 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
goto fail;
}
- stream = create_stream_for_sink(aconnector, mode, NULL, NULL);
- if (stream == NULL) {
- DRM_ERROR("Failed to create stream for sink!\n");
- goto fail;
- }
-
- dc_result = dc_validate_stream(adev->dm.dc, stream);
-
- if (dc_result == DC_OK)
+ stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
+ if (stream) {
+ dc_stream_release(stream);
result = MODE_OK;
- else
- DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n",
- mode->hdisplay,
- mode->vdisplay,
- mode->clock,
- dc_result);
-
- dc_stream_release(stream);
+ }
fail:
/* TODO: error handling */
@@ -5174,10 +5410,12 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
return 0;
if (!state->duplicated) {
+ int max_bpc = conn_state->max_requested_bpc;
is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
aconnector->force_yuv420_output;
- color_depth = convert_color_depth_from_display_info(connector, conn_state,
- is_y420);
+ color_depth = convert_color_depth_from_display_info(connector,
+ is_y420,
+ max_bpc);
bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
clock = adjusted_mode->clock;
dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
@@ -5332,6 +5570,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
uint64_t tiling_flags;
uint32_t domain;
int r;
+ bool tmz_surface = false;
+ bool force_disable_dcc = false;
dm_plane_state_old = to_dm_plane_state(plane->state);
dm_plane_state_new = to_dm_plane_state(new_state);
@@ -5380,6 +5620,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
amdgpu_bo_get_tiling_flags(rbo, &tiling_flags);
+ tmz_surface = amdgpu_bo_encrypted(rbo);
+
ttm_eu_backoff_reservation(&ticket, &list);
afb->address = amdgpu_bo_gpu_offset(rbo);
@@ -5390,11 +5632,13 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
+ force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
fill_plane_buffer_attributes(
adev, afb, plane_state->format, plane_state->rotation,
tiling_flags, &plane_state->tiling_info,
&plane_state->plane_size, &plane_state->dcc,
- &plane_state->address);
+ &plane_state->address, tmz_surface,
+ force_disable_dcc);
}
return 0;
@@ -5540,6 +5784,12 @@ static int get_plane_formats(const struct drm_plane *plane,
formats[num_formats++] = DRM_FORMAT_NV12;
if (plane_cap && plane_cap->pixel_format_support.p010)
formats[num_formats++] = DRM_FORMAT_P010;
+ if (plane_cap && plane_cap->pixel_format_support.fp16) {
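+ /* 16 bpc floating point formats, e.g. for HDR scanout. */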
+ formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
+ formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
+ formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
+ formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
+ }
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -6092,7 +6342,7 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
- amdgpu_dm_initialize_dp_connector(dm, aconnector);
+ amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
out_free:
if (res) {
@@ -6567,6 +6817,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
unsigned long flags;
struct amdgpu_bo *abo;
uint64_t tiling_flags;
+ bool tmz_surface = false;
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
@@ -6619,6 +6870,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
}
fill_dc_scaling_info(new_plane_state,
@@ -6661,12 +6913,20 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ tmz_surface = amdgpu_bo_encrypted(abo);
+
amdgpu_bo_unreserve(abo);
fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
&bundle->plane_infos[planes_count],
- &bundle->flip_addrs[planes_count].address);
+ &bundle->flip_addrs[planes_count].address,
+ tmz_surface,
+ false);
+
+ DRM_DEBUG_DRIVER("plane: id=%d dcc_en=%d\n",
+ new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
@@ -6807,7 +7067,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
}
mutex_lock(&dm->dc_lock);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_allow_active)
+ acrtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(acrtc_state->stream);
dc_commit_updates_for_stream(dm->dc,
@@ -6818,12 +7078,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_state);
if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
- acrtc_state->stream->psr_version &&
- !acrtc_state->stream->link->psr_feature_enabled)
+ acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
+ !acrtc_state->stream->link->psr_settings.psr_feature_enabled)
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
- acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active) {
+ acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
+ !acrtc_state->stream->link->psr_settings.psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -7137,7 +7397,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
/* i.e. reset mode */
if (dm_old_crtc_state->stream) {
- if (dm_old_crtc_state->stream->link->psr_allow_active)
+ if (dm_old_crtc_state->stream->link->psr_settings.psr_allow_active)
amdgpu_dm_psr_disable(dm_old_crtc_state->stream);
remove_stream(adev, acrtc, dm_old_crtc_state->stream);
@@ -7585,10 +7845,10 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
goto skip_modeset;
- new_stream = create_stream_for_sink(aconnector,
- &new_crtc_state->mode,
- dm_new_conn_state,
- dm_old_crtc_state->stream);
+ new_stream = create_validate_stream_for_sink(aconnector,
+ &new_crtc_state->mode,
+ dm_new_conn_state,
+ dm_old_crtc_state->stream);
/*
* we can have no stream on ACTION_SET if a display
@@ -7848,6 +8108,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
+ struct amdgpu_crtc *new_acrtc;
bool needs_reset;
int ret = 0;
@@ -7857,9 +8118,23 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state = to_dm_plane_state(new_plane_state);
dm_old_plane_state = to_dm_plane_state(old_plane_state);
- /*TODO Implement atomic check for cursor plane */
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ /*TODO Implement better atomic check for cursor plane */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR) {
+ if (!enable || !new_plane_crtc ||
+ drm_atomic_plane_disabling(plane->state, new_plane_state))
+ return 0;
+
+ new_acrtc = to_amdgpu_crtc(new_plane_crtc);
+
+ if ((new_plane_state->crtc_w > new_acrtc->max_cursor_width) ||
+ (new_plane_state->crtc_h > new_acrtc->max_cursor_height)) {
+ DRM_DEBUG_ATOMIC("Bad cursor size %d x %d\n",
+ new_plane_state->crtc_w, new_plane_state->crtc_h);
+ return -EINVAL;
+ }
+
return 0;
+ }
needs_reset = should_reset_plane(state, plane, old_plane_state,
new_plane_state);
@@ -8034,6 +8309,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane];
struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane];
uint64_t tiling_flags;
+ bool tmz_surface = false;
new_plane_crtc = new_plane_state->crtc;
new_dm_plane_state = to_dm_plane_state(new_plane_state);
@@ -8063,6 +8339,8 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
new_dm_plane_state->dc_state->gamma_correction;
bundle->surface_updates[num_plane].in_transfer_func =
new_dm_plane_state->dc_state->in_transfer_func;
+ bundle->surface_updates[num_plane].gamut_remap_matrix =
+ &new_dm_plane_state->dc_state->gamut_remap_matrix;
bundle->stream_update.gamut_remap =
&new_dm_crtc_state->stream->gamut_remap_matrix;
bundle->stream_update.output_csc_transform =
@@ -8079,14 +8357,15 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
bundle->surface_updates[num_plane].scaling_info = scaling_info;
if (amdgpu_fb) {
- ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ ret = get_fb_info(amdgpu_fb, &tiling_flags, &tmz_surface);
if (ret)
goto cleanup;
ret = fill_dc_plane_info_and_addr(
dm->adev, new_plane_state, tiling_flags,
plane_info,
- &flip_addr->address);
+ &flip_addr->address, tmz_surface,
+ false);
if (ret)
goto cleanup;
@@ -8586,8 +8865,17 @@ static void amdgpu_dm_set_psr_caps(struct dc_link *link)
return;
if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
dpcd_data, sizeof(dpcd_data))) {
- link->psr_feature_enabled = dpcd_data[0] ? true:false;
- DRM_INFO("PSR support:%d\n", link->psr_feature_enabled);
+ link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+
+ if (dpcd_data[0] == 0) {
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+ link->psr_settings.psr_feature_enabled = false;
+ } else {
+ link->psr_settings.psr_version = DC_PSR_VERSION_1;
+ link->psr_settings.psr_feature_enabled = true;
+ }
+
+ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
}
}
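The DPCD byte read above comes from DP_PSR_SUPPORT and holds the sink's PSR version; the patch collapses any non-zero value to PSR1. A compact sketch of that mapping, with the enum values as assumed stand-ins rather than the exact dc_types.h definitions:

#include <stdint.h>
#include <stdio.h>

/* Assumed stand-ins; only their distinctness matters for the sketch. */
enum dc_psr_version {
	DC_PSR_VERSION_1 = 0,
	DC_PSR_VERSION_UNSUPPORTED = -1, /* 0xFFFFFFFF-style sentinel in the real header */
};

/* Mirrors the mapping in amdgpu_dm_set_psr_caps(): any non-zero DPCD PSR
 * version byte is treated as PSR1 support. */
static enum dc_psr_version psr_version_from_dpcd(uint8_t dpcd_psr_support)
{
	return dpcd_psr_support ? DC_PSR_VERSION_1
				: DC_PSR_VERSION_UNSUPPORTED;
}

int main(void)
{
	for (uint8_t v = 0; v <= 2; v++)
		printf("DP_PSR_SUPPORT=%u -> %s\n", v,
		       psr_version_from_dpcd(v) == DC_PSR_VERSION_1 ?
		       "PSR feature enabled" : "unsupported");
	return 0;
}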
@@ -8602,16 +8890,14 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
struct dc_link *link = NULL;
struct psr_config psr_config = {0};
struct psr_context psr_context = {0};
- struct dc *dc = NULL;
bool ret = false;
if (stream == NULL)
return false;
link = stream->link;
- dc = link->ctx->dc;
- psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version;
+ psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
if (psr_config.psr_version > 0) {
psr_config.psr_exit_link_training_required = 0x1;
@@ -8623,7 +8909,7 @@ static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
}
- DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled);
+ DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 5cab3e65d992..d61186ff411d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -315,6 +315,7 @@ struct amdgpu_display_manager {
#endif
struct drm_atomic_state *cached_state;
+ struct dc_state *cached_dc_state;
struct dm_comressor_info compressor;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 2233d293a707..4dfb6b55bb2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -239,7 +239,8 @@ static int __set_output_tf(struct dc_transfer_func *func,
* instead to simulate this.
*/
gamma->type = GAMMA_CUSTOM;
- res = mod_color_calculate_degamma_params(func, gamma, true);
+ res = mod_color_calculate_degamma_params(NULL, func,
+ gamma, true);
} else {
/*
* Assume sRGB. The actual mapping will depend on whether the
@@ -271,7 +272,7 @@ static int __set_input_tf(struct dc_transfer_func *func,
__drm_lut_to_dc_gamma(lut, gamma, false);
- res = mod_color_calculate_degamma_params(func, gamma, true);
+ res = mod_color_calculate_degamma_params(NULL, func, gamma, true);
dc_gamma_release(&gamma);
return res ? 0 : -ENOMEM;
@@ -419,9 +420,21 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state)
{
const struct drm_color_lut *degamma_lut;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
uint32_t degamma_size;
int r;
+ /* Get the correct base transfer function for implicit degamma. */
+ switch (dc_plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ /* DC doesn't have a transfer function for BT601 specifically. */
+ tf = TRANSFER_FUNCTION_BT709;
+ break;
+ default:
+ break;
+ }
+
if (crtc->cm_has_degamma) {
degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
&degamma_size);
@@ -455,8 +468,7 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
* map these to the atomic one instead.
*/
if (crtc->cm_is_degamma_srgb)
- dc_plane_state->in_transfer_func->tf =
- TRANSFER_FUNCTION_SRGB;
+ dc_plane_state->in_transfer_func->tf = tf;
else
dc_plane_state->in_transfer_func->tf =
TRANSFER_FUNCTION_LINEAR;
@@ -471,7 +483,12 @@ int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
* in linear space. Assume that the input is sRGB.
*/
dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ dc_plane_state->in_transfer_func->tf = tf;
+
+ if (tf != TRANSFER_FUNCTION_SRGB &&
+ !mod_color_calculate_degamma_params(NULL,
+ dc_plane_state->in_transfer_func, NULL, false))
+ return -ENOMEM;
} else {
/* ...Otherwise we can just bypass the DGM block. */
dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 0461fecd68db..076af267b488 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,7 +32,7 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
-#include "dmub/inc/dmub_srv.h"
+#include "dmub/dmub_srv.h"
struct dmub_debugfs_trace_header {
uint32_t entry_count;
@@ -838,6 +838,44 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
}
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+/*
+ * Returns the HDCP capability of the display (1.4 for now).
+ *
+ * NOTE: Not all HDMI displays report their HDCP caps even when they are capable.
+ * Since it's rare for a display not to be HDCP 1.4 capable, we treat HDMI as always capable.
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability
+ * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability
+ */
+static int hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ bool hdcp_cap, hdcp2_cap;
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id);
+
+ hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link);
+ hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link);
+
+ if (hdcp_cap)
+ seq_printf(m, "%s ", "HDCP1.4");
+ if (hdcp2_cap)
+ seq_printf(m, "%s ", "HDCP2.2");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_printf(m, "%s ", "None");
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+#endif
/* function description
 *
 * Generic SDP message access for testing.
@@ -964,6 +1002,9 @@ DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability);
+#endif
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
@@ -1019,12 +1060,23 @@ static const struct {
{"test_pattern", &dp_phy_test_pattern_fops},
{"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops},
+#endif
{"sdp_message", &sdp_message_fops},
{"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
{"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
{"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+static const struct {
+ char *name;
+ const struct file_operations *fops;
+} hdmi_debugfs_entries[] = {
+ {"hdcp_sink_capability", &hdcp_sink_capability_fops}
+};
+#endif
/*
* Force YUV420 output if available from the given mode
*/
@@ -1093,6 +1145,15 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) {
+ debugfs_create_file(hdmi_debugfs_entries[i].name,
+ 0644, dir, connector,
+ hdmi_debugfs_entries[i].fops);
+ }
+ }
+#endif
}
/*
@@ -1167,8 +1228,9 @@ static int current_backlight_read(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct dc *dc = adev->dm.dc;
- unsigned int backlight = dc_get_current_backlight_pwm(dc);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ unsigned int backlight = dc_link_get_backlight_level(dm->backlight_link);
seq_printf(m, "0x%x\n", backlight);
return 0;
@@ -1184,8 +1246,9 @@ static int target_backlight_read(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct dc *dc = adev->dm.dc;
- unsigned int backlight = dc_get_target_backlight_pwm(dc);
+ struct amdgpu_display_manager *dm = &adev->dm;
+
+ unsigned int backlight = dc_link_get_target_backlight_pwm(dm->backlight_link);
seq_printf(m, "0x%x\n", backlight);
return 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 78e1c11d4ae5..dcf84a61de37 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -398,15 +398,15 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
- memset(display, 0, sizeof(*display));
- memset(link, 0, sizeof(*link));
-
- display->index = aconnector->base.index;
-
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
return;
}
+
+ memset(display, 0, sizeof(*display));
+ memset(link, 0, sizeof(*link));
+
+ display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
if (aconnector->dc_sink != NULL)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index c20fb08c450b..b086d5c906e0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to find connector for link!");
+ DC_LOG_DC("Failed to find connector for link!\n");
return false;
}
@@ -554,6 +554,7 @@ enum dc_edid_status dm_helpers_read_local_edid(
struct dc_sink *sink)
{
struct amdgpu_dm_connector *aconnector = link->priv;
+ struct drm_connector *connector = &aconnector->base;
struct i2c_adapter *ddc;
int retry = 3;
enum dc_edid_status edid_status;
@@ -571,6 +572,15 @@ enum dc_edid_status dm_helpers_read_local_edid(
edid = drm_get_edid(&aconnector->base, ddc);
+ /* DP Compliance Test 4.2.2.6 */
+ if (link->aux_mode && connector->edid_corrupt)
+ drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum);
+
+ if (!edid && connector->edid_corrupt) {
+ connector->edid_corrupt = false;
+ return EDID_BAD_CHECKSUM;
+ }
+
if (!edid)
return EDID_NO_RESPONSE;
@@ -605,34 +615,10 @@ enum dc_edid_status dm_helpers_read_local_edid(
DRM_ERROR("EDID err: %d, on connector: %s",
edid_status,
aconnector->base.name);
- if (link->aux_mode) {
- union test_request test_request = { {0} };
- union test_response test_response = { {0} };
-
- dm_helpers_dp_read_dpcd(ctx,
- link,
- DP_TEST_REQUEST,
- &test_request.raw,
- sizeof(union test_request));
-
- if (!test_request.bits.EDID_READ)
- return edid_status;
- test_response.bits.EDID_CHECKSUM_WRITE = 1;
-
- dm_helpers_dp_write_dpcd(ctx,
- link,
- DP_TEST_EDID_CHECKSUM,
- &sink->dc_edid.raw_edid[sink->dc_edid.length-1],
- 1);
-
- dm_helpers_dp_write_dpcd(ctx,
- link,
- DP_TEST_RESPONSE,
- &test_response.raw,
- sizeof(test_response));
-
- }
+ /* DP Compliance Test 4.2.2.3 */
+ if (link->aux_mode)
+ drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, sink->dc_edid.raw_edid[sink->dc_edid.length-1]);
return edid_status;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index fabbe78d5aef..ae0a7ef1d595 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -41,53 +41,10 @@
#include "amdgpu_dm_debugfs.h"
#endif
-
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dc/dcn20/dcn20_resource.h"
#endif
-/* #define TRACE_DPCD */
-
-#ifdef TRACE_DPCD
-#define SIDE_BAND_MSG(address) (address >= DP_SIDEBAND_MSG_DOWN_REQ_BASE && address < DP_SINK_COUNT_ESI)
-
-static inline char *side_band_msg_type_to_str(uint32_t address)
-{
- static char str[10] = {0};
-
- if (address < DP_SIDEBAND_MSG_UP_REP_BASE)
- strcpy(str, "DOWN_REQ");
- else if (address < DP_SIDEBAND_MSG_DOWN_REP_BASE)
- strcpy(str, "UP_REP");
- else if (address < DP_SIDEBAND_MSG_UP_REQ_BASE)
- strcpy(str, "DOWN_REP");
- else
- strcpy(str, "UP_REQ");
-
- return str;
-}
-
-static void log_dpcd(uint8_t type,
- uint32_t address,
- uint8_t *data,
- uint32_t size,
- bool res)
-{
- DRM_DEBUG_KMS("Op: %s, addr: %04x, SideBand Msg: %s, Op res: %s\n",
- (type == DP_AUX_NATIVE_READ) ||
- (type == DP_AUX_I2C_READ) ?
- "Read" : "Write",
- address,
- SIDE_BAND_MSG(address) ?
- side_band_msg_type_to_str(address) : "Nop",
- res ? "OK" : "Fail");
-
- if (res) {
- print_hex_dump(KERN_INFO, "Body: ", DUMP_PREFIX_NONE, 16, 1, data, size, false);
- }
-}
-#endif
-
static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
@@ -136,17 +93,23 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
static void
dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
- struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
- struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(connector);
+ struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
- kfree(amdgpu_dm_connector->edid);
- amdgpu_dm_connector->edid = NULL;
+ if (aconnector->dc_sink) {
+ dc_link_remove_remote_sink(aconnector->dc_link,
+ aconnector->dc_sink);
+ dc_sink_release(aconnector->dc_sink);
+ }
+
+ kfree(aconnector->edid);
drm_encoder_cleanup(&amdgpu_encoder->base);
kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
- drm_dp_mst_put_port_malloc(amdgpu_dm_connector->port);
- kfree(amdgpu_dm_connector);
+ drm_dp_mst_put_port_malloc(aconnector->port);
+ kfree(aconnector);
}
static int
@@ -156,16 +119,16 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
to_amdgpu_dm_connector(connector);
int r;
- amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
- r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
- if (r)
+ r = drm_dp_mst_connector_late_register(connector,
+ amdgpu_dm_connector->port);
+ if (r < 0)
return r;
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
- return r;
+ return 0;
}
static void
@@ -435,46 +398,22 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
*/
amdgpu_dm_connector_funcs_reset(connector);
- DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
drm_dp_mst_get_port_malloc(port);
- DRM_DEBUG_KMS(":%d\n", connector->base.id);
-
return connector;
}
-static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-
- DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- if (aconnector->dc_sink) {
- amdgpu_dm_update_freesync_caps(connector, NULL);
- dc_link_remove_remote_sink(aconnector->dc_link,
- aconnector->dc_sink);
- dc_sink_release(aconnector->dc_sink);
- aconnector->dc_sink = NULL;
- aconnector->dc_link->cur_link_settings.lane_count = 0;
- }
-
- drm_connector_unregister(connector);
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
- .destroy_connector = dm_dp_destroy_mst_connector,
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector)
+ struct amdgpu_dm_connector *aconnector,
+ int link_index)
{
- aconnector->dm_dp_aux.aux.name = "dmdc";
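+ /* The allocated name is freed in amdgpu_dm_connector_destroy(). */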
+ aconnector->dm_dp_aux.aux.name =
+ kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
+ link_index);
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index d6813ce67bbd..d2c56579a2cc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -32,7 +32,8 @@ struct amdgpu_dm_connector;
int dm_mst_get_pbn_divider(struct dc_link *link);
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
- struct amdgpu_dm_connector *aconnector);
+ struct amdgpu_dm_connector *aconnector,
+ int link_index);
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
diff --git a/drivers/gpu/drm/amd/display/dc/basics/Makefile b/drivers/gpu/drm/amd/display/dc/basics/Makefile
index 7ad0cad0f4ef..01b99e0d788e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/basics/Makefile
@@ -24,8 +24,7 @@
# It provides the general basic services required by other DAL
# subcomponents.
-BASICS = conversion.o fixpt31_32.o \
- log_helpers.o vector.o dc_common.o
+BASICS = conversion.o fixpt31_32.o vector.o dc_common.o
AMD_DAL_BASICS = $(addprefix $(AMDDALPATH)/dc/basics/,$(BASICS))
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 8edc2506d49e..bed91572f82a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -113,13 +113,19 @@ static void encoder_control_dmcub(
struct dc_dmub_srv *dmcub,
struct dig_encoder_stream_setup_parameters_v1_5 *dig)
{
- struct dmub_rb_cmd_digx_encoder_control encoder_control = { 0 };
+ union dmub_rb_cmd cmd;
- encoder_control.header.type = DMUB_CMD__VBIOS;
- encoder_control.header.sub_type = DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
- encoder_control.encoder_control.dig.stream_param = *dig;
+ memset(&cmd, 0, sizeof(cmd));
- dc_dmub_srv_cmd_queue(dmcub, &encoder_control.header);
+ cmd.digx_encoder_control.header.type = DMUB_CMD__VBIOS;
+ cmd.digx_encoder_control.header.sub_type =
+ DMUB_CMD__VBIOS_DIGX_ENCODER_CONTROL;
+ cmd.digx_encoder_control.header.payload_bytes =
+ sizeof(cmd.digx_encoder_control) -
+ sizeof(cmd.digx_encoder_control.header);
+ cmd.digx_encoder_control.encoder_control.dig.stream_param = *dig;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
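All four *_dmcub() helpers in this file now share one shape: zero a full union dmub_rb_cmd on the stack, fill the header (type, sub_type, and payload_bytes computed as the command struct size minus its header), then queue, execute, and wait for idle. A sketch of that shared pattern follows; the types, the command id, and the fill callback are simplified stand-ins, not the real DMUB ABI:

#include <stdio.h>
#include <string.h>

/* Sketch-only stand-ins: field layout and values are illustrative. */
struct dmub_cmd_header {
	unsigned char type;
	unsigned char sub_type;
	unsigned short payload_bytes;
};

union dmub_rb_cmd {
	struct {
		struct dmub_cmd_header header;
		unsigned int payload[15];
	} generic;
};

enum { CMD_VBIOS = 0x80 }; /* stand-in for DMUB_CMD__VBIOS */

static void dc_dmub_srv_cmd_queue(union dmub_rb_cmd *cmd)
{
	printf("queued sub_type=%u payload_bytes=%u\n",
	       (unsigned int)cmd->generic.header.sub_type,
	       (unsigned int)cmd->generic.header.payload_bytes);
}

static void dc_dmub_srv_cmd_execute(void) { printf("ring doorbell\n"); }
static void dc_dmub_srv_wait_idle(void) { printf("wait for idle\n"); }

/* The shape shared by encoder_control_dmcub(), transmitter_control_dmcub(),
 * set_pixel_clock_dmcub() and enable_disp_power_gating_dmcub(): only the
 * sub_type and the payload fill differ. */
static void send_vbios_cmd(unsigned char sub_type,
			   void (*fill_payload)(union dmub_rb_cmd *cmd))
{
	union dmub_rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.generic.header.type = CMD_VBIOS;
	cmd.generic.header.sub_type = sub_type;
	cmd.generic.header.payload_bytes =
		sizeof(cmd.generic) - sizeof(cmd.generic.header);
	fill_payload(&cmd);

	dc_dmub_srv_cmd_queue(&cmd);
	dc_dmub_srv_cmd_execute();
	dc_dmub_srv_wait_idle();
}

static void fill_dummy(union dmub_rb_cmd *cmd)
{
	cmd->generic.payload[0] = 42; /* pretend per-command parameters */
}

int main(void)
{
	send_vbios_cmd(1 /* e.g. a digx encoder control sub_type */, fill_dummy);
	return 0;
}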
@@ -238,14 +244,19 @@ static void transmitter_control_dmcub(
struct dc_dmub_srv *dmcub,
struct dig_transmitter_control_parameters_v1_6 *dig)
{
- struct dmub_rb_cmd_dig1_transmitter_control transmitter_control;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
- transmitter_control.header.type = DMUB_CMD__VBIOS;
- transmitter_control.header.sub_type =
+ cmd.dig1_transmitter_control.header.type = DMUB_CMD__VBIOS;
+ cmd.dig1_transmitter_control.header.sub_type =
DMUB_CMD__VBIOS_DIG1_TRANSMITTER_CONTROL;
- transmitter_control.transmitter_control.dig = *dig;
+ cmd.dig1_transmitter_control.header.payload_bytes =
+ sizeof(cmd.dig1_transmitter_control) -
+ sizeof(cmd.dig1_transmitter_control.header);
+ cmd.dig1_transmitter_control.transmitter_control.dig = *dig;
- dc_dmub_srv_cmd_queue(dmcub, &transmitter_control.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -339,13 +350,18 @@ static void set_pixel_clock_dmcub(
struct dc_dmub_srv *dmcub,
struct set_pixel_clock_parameter_v1_7 *clk)
{
- struct dmub_rb_cmd_set_pixel_clock pixel_clock = { 0 };
+ union dmub_rb_cmd cmd;
- pixel_clock.header.type = DMUB_CMD__VBIOS;
- pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
- pixel_clock.pixel_clock.clk = *clk;
+ memset(&cmd, 0, sizeof(cmd));
- dc_dmub_srv_cmd_queue(dmcub, &pixel_clock.header);
+ cmd.set_pixel_clock.header.type = DMUB_CMD__VBIOS;
+ cmd.set_pixel_clock.header.sub_type = DMUB_CMD__VBIOS_SET_PIXEL_CLOCK;
+ cmd.set_pixel_clock.header.payload_bytes =
+ sizeof(cmd.set_pixel_clock) -
+ sizeof(cmd.set_pixel_clock.header);
+ cmd.set_pixel_clock.pixel_clock.clk = *clk;
+
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
@@ -705,13 +721,19 @@ static void enable_disp_power_gating_dmcub(
struct dc_dmub_srv *dmcub,
struct enable_disp_power_gating_parameters_v2_1 *pwr)
{
- struct dmub_rb_cmd_enable_disp_power_gating power_gating;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
- power_gating.header.type = DMUB_CMD__VBIOS;
- power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
- power_gating.power_gating.pwr = *pwr;
+ cmd.enable_disp_power_gating.header.type = DMUB_CMD__VBIOS;
+ cmd.enable_disp_power_gating.header.sub_type =
+ DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
+ cmd.enable_disp_power_gating.header.payload_bytes =
+ sizeof(cmd.enable_disp_power_gating) -
+ sizeof(cmd.enable_disp_power_gating.header);
+ cmd.enable_disp_power_gating.power_gating.pwr = *pwr;
- dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
}
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 3960a8db94cb..1e5a92b192a1 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -690,6 +690,26 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
struct dc_debug_options *dbg,
struct dc_state *context)
{
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16)) {
+ hack_disable_optional_pipe_split(v);
+ return;
+ }
+ }
+
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
hack_disable_optional_pipe_split(v);
@@ -702,7 +722,6 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz);
}
-
unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
/* for low power RV2 variants, the highest voltage level we want is 0 */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 8ec2dfe45d40..a5c2114e4292 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -90,7 +90,7 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
dc->hwss.exit_optimized_pwr_state(dc, dc->current_state);
if (edp_link) {
- clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active;
+ clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
dc_link_set_psr_allow_active(edp_link, false, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
index 26db1c5d4e4d..b210f8e9d592 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c
@@ -131,7 +131,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base)
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
int dprefclk_wdivider;
int dprefclk_src_sel;
- int dp_ref_clk_khz = 600000;
+ int dp_ref_clk_khz;
int target_div;
/* ASSERT DP Reference Clock source is from DFS */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
index 97b7f32294fd..c320b7af7d34 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr_vbios_smu.c
@@ -97,9 +97,6 @@ int rv1_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_di
VBIOSSMC_MSG_SetDispclkFreq,
requested_dispclk_khz / 1000);
- /* Actual dispclk set is returned in the parameter register */
- actual_dispclk_set_mhz = REG_READ(MP1_SMN_C2PMSG_83) * 1000;
-
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) {
if (clk_mgr->dfs_bypass_disp_clk != actual_dispclk_set_mhz)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8489f1e56892..6f93a6ca4cf0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -66,6 +66,8 @@
#include "dce/dce_i2c.h"
+#include "dmub/dmub_srv.h"
+
#define CTX \
dc->ctx
@@ -348,7 +350,7 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream == stream)
+ if (pipe->stream == stream && !pipe->top_pipe && !pipe->prev_odm_pipe)
break;
}
/* Stream not found */
@@ -365,6 +367,9 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
param.windowb_x_end = pipe->stream->timing.h_addressable;
param.windowb_y_end = pipe->stream->timing.v_addressable;
+ param.dsc_mode = pipe->stream->timing.flags.DSC ? 1 : 0;
+ param.odm_mode = pipe->next_odm_pipe ? 1 : 0;
+
/* Default to the union of both windows */
param.selection = UNION_WINDOW_A_B;
param.continuous_mode = continuous;
@@ -834,11 +839,10 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
- int count = 0;
- struct pipe_ctx *pipe;
PERF_TRACE();
for (i = 0; i < MAX_PIPES; i++) {
- pipe = &context->res_ctx.pipe_ctx[i];
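+ /* Give each pipe its own timeout budget instead of sharing one counter. */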
+ int count = 0;
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
if (!pipe->plane_state)
continue;
@@ -1012,9 +1016,17 @@ static void program_timing_sync(
}
}
- /* set first pipe with plane as master */
+ /* set first unblanked pipe as master */
for (j = 0; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
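+ /* Prefer the OPP's DPG blank status when implemented, else ask the OTG. */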
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
if (j == 0)
break;
@@ -1035,9 +1047,17 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other pipes with plane as they have already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
for (j = j + 1; j < group_size; j++) {
- if (pipe_set[j]->plane_state) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
group_size--;
pipe_set[j] = pipe_set[group_size];
j--;
@@ -2205,7 +2225,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (should_program_abm) {
if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) {
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
} else {
pipe_ctx->stream_res.abm->funcs->set_abm_level(
pipe_ctx->stream_res.abm, stream->abm_level);
@@ -2518,6 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
+
commit_planes_for_stream(
dc,
srf_updates,
@@ -2641,33 +2667,12 @@ void dc_set_power_state(
void dc_resume(struct dc *dc)
{
-
uint32_t i;
for (i = 0; i < dc->link_count; i++)
core_link_resume(dc->links[i]);
}
-unsigned int dc_get_current_backlight_pwm(struct dc *dc)
-{
- struct abm *abm = dc->res_pool->abm;
-
- if (abm)
- return abm->funcs->get_current_backlight(abm);
-
- return 0;
-}
-
-unsigned int dc_get_target_backlight_pwm(struct dc *dc)
-{
- struct abm *abm = dc->res_pool->abm;
-
- if (abm)
- return abm->funcs->get_target_backlight(abm);
-
- return 0;
-}
-
bool dc_is_dmcu_initialized(struct dc *dc)
{
struct dmcu *dmcu = dc->res_pool->dmcu;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 67cfff1586e9..48ab51533d5d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -26,7 +26,7 @@
#include <linux/slab.h>
#include "dm_services.h"
-#include "atom.h"
+#include "atomfirmware.h"
#include "dm_helpers.h"
#include "dc.h"
#include "grph_object_id.h"
@@ -46,10 +46,11 @@
#include "dmcu.h"
#include "hw/clk_mgr.h"
#include "dce/dmub_psr.h"
+#include "dmub/dmub_srv.h"
+#include "inc/hw/panel_cntl.h"
#define DC_LOGGER_INIT(logger)
-
#define LINK_INFO(...) \
DC_LOG_HW_HOTPLUG( \
__VA_ARGS__)
@@ -64,11 +65,11 @@
enum {
PEAK_FACTOR_X1000 = 1006,
/*
- * Some receivers fail to train on first try and are good
- * on subsequent tries. 2 retries should be plenty. If we
- * don't have a successful training then we don't expect to
- * ever get one.
- */
+ * Some receivers fail to train on first try and are good
+ * on subsequent tries. 2 retries should be plenty. If we
+ * don't have a successful training then we don't expect to
+ * ever get one.
+ */
LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
@@ -79,7 +80,7 @@ static void dc_link_destruct(struct dc_link *link)
{
int i;
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -87,7 +88,10 @@ static void dc_link_destruct(struct dc_link *link)
if (link->ddc)
dal_ddc_service_destroy(&link->ddc);
- if(link->link_enc)
+ if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+
+ if (link->link_enc)
link->link_enc->funcs->destroy(&link->link_enc);
if (link->local_sink)
@@ -98,8 +102,8 @@ static void dc_link_destruct(struct dc_link *link)
}
struct gpio *get_hpd_gpio(struct dc_bios *dcb,
- struct graphics_object_id link_id,
- struct gpio_service *gpio_service)
+ struct graphics_object_id link_id,
+ struct gpio_service *gpio_service)
{
enum bp_result bp_result;
struct graphics_object_hpd_info hpd_info;
@@ -116,10 +120,9 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
return NULL;
}
- return dal_gpio_service_create_irq(
- gpio_service,
- pin_info.offset,
- pin_info.mask);
+ return dal_gpio_service_create_irq(gpio_service,
+ pin_info.offset,
+ pin_info.mask);
}
/*
@@ -134,13 +137,10 @@ struct gpio *get_hpd_gpio(struct dc_bios *dcb,
* @return
* true on success, false otherwise
*/
-static bool program_hpd_filter(
- const struct dc_link *link)
+static bool program_hpd_filter(const struct dc_link *link)
{
bool result = false;
-
struct gpio *hpd;
-
int delay_on_connect_in_ms = 0;
int delay_on_disconnect_in_ms = 0;
@@ -159,10 +159,10 @@ static bool program_hpd_filter(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* Program hpd filter to allow DP signal to settle */
/* 500: not able to detect MST <-> SST switch as HPD is low for
- * only 100ms on DELL U2413
- * 0: some passive dongle still show aux mode instead of i2c
- * 20-50:not enough to hide bouncing HPD with passive dongle.
- * also see intermittent i2c read issues.
+ * only 100ms on DELL U2413
+ * 0: some passive dongle still show aux mode instead of i2c
+ * 20-50: not enough to hide bouncing HPD with passive dongle.
+ * also see intermittent i2c read issues.
*/
delay_on_connect_in_ms = 80;
delay_on_disconnect_in_ms = 0;
@@ -175,7 +175,8 @@ static bool program_hpd_filter(
}
/* Obtain HPD handle */
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
if (!hpd)
return result;
@@ -226,8 +227,9 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
}
/* todo: may need to lock gpio access */
- hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
- if (hpd_pin == NULL)
+ hpd_pin = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (!hpd_pin)
goto hpd_gpio_failure;
dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT);
@@ -248,8 +250,7 @@ hpd_gpio_failure:
return false;
}
-static enum ddc_transaction_type get_ddc_transaction_type(
- enum signal_type sink_signal)
+static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal)
{
enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE;
@@ -270,7 +271,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
case SIGNAL_TYPE_DISPLAY_PORT_MST:
/* MST does not use I2COverAux, but there is the
* SPECIAL use case for "immediate dwnstrm device
- * access" (EPR#370830). */
+ * access" (EPR#370830).
+ */
transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
break;
@@ -281,9 +283,8 @@ static enum ddc_transaction_type get_ddc_transaction_type(
return transaction_type;
}
-static enum signal_type get_basic_signal_type(
- struct graphics_object_id encoder,
- struct graphics_object_id downstream)
+static enum signal_type get_basic_signal_type(struct graphics_object_id encoder,
+ struct graphics_object_id downstream)
{
if (downstream.type == OBJECT_TYPE_CONNECTOR) {
switch (downstream.id) {
@@ -369,10 +370,11 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
/* Open GPIO and set it to I2C mode */
/* Note: this GpioMode_Input will be converted
* to GpioConfigType_I2cAuxDualMode in GPIO component,
- * which indicates we need additional delay */
+ * which indicates we need additional delay
+ */
- if (GPIO_RESULT_OK != dal_ddc_open(
- ddc, GPIO_MODE_INPUT, GPIO_DDC_CONFIG_TYPE_MODE_I2C)) {
+ if (dal_ddc_open(ddc, GPIO_MODE_INPUT,
+ GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) {
dal_ddc_close(ddc);
return present;
@@ -406,25 +408,25 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
* @brief
* Detect output sink type
*/
-static enum signal_type link_detect_sink(
- struct dc_link *link,
- enum dc_detect_reason reason)
+static enum signal_type link_detect_sink(struct dc_link *link,
+ enum dc_detect_reason reason)
{
- enum signal_type result = get_basic_signal_type(
- link->link_enc->id, link->link_id);
+ enum signal_type result = get_basic_signal_type(link->link_enc->id,
+ link->link_id);
/* Internal digital encoder will detect only dongles
- * that require digital signal */
+ * that require digital signal
+ */
/* Detection mechanism is different
* for different native connectors.
* LVDS connector supports only LVDS signal;
* PCIE is a bus slot, the actual connector needs to be detected first;
* eDP connector supports only eDP signal;
- * HDMI should check straps for audio */
+ * HDMI should check straps for audio
+ */
/* PCIE detects the actual connector on add-on board */
-
if (link->link_id.id == CONNECTOR_ID_PCIE) {
/* ZAZTODO implement PCIE add-on card detection */
}
@@ -432,8 +434,10 @@ static enum signal_type link_detect_sink(
switch (link->link_id.id) {
case CONNECTOR_ID_HDMI_TYPE_A: {
/* check audio support:
- * if native HDMI is not supported, switch to DVI */
- struct audio_support *aud_support = &link->dc->res_pool->audio_support;
+ * if native HDMI is not supported, switch to DVI
+ */
+ struct audio_support *aud_support =
+ &link->dc->res_pool->audio_support;
if (!aud_support->hdmi_audio_native)
if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A)
@@ -461,16 +465,15 @@ static enum signal_type link_detect_sink(
return result;
}
-static enum signal_type decide_signal_from_strap_and_dongle_type(
- enum display_dongle_type dongle_type,
- struct audio_support *audio_support)
+static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type,
+ struct audio_support *audio_support)
{
enum signal_type signal = SIGNAL_TYPE_NONE;
switch (dongle_type) {
case DISPLAY_DONGLE_DP_HDMI_DONGLE:
if (audio_support->hdmi_audio_on_dongle)
- signal = SIGNAL_TYPE_HDMI_TYPE_A;
+ signal = SIGNAL_TYPE_HDMI_TYPE_A;
else
signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
break;
@@ -491,16 +494,14 @@ static enum signal_type decide_signal_from_strap_and_dongle_type(
return signal;
}
-static enum signal_type dp_passive_dongle_detection(
- struct ddc_service *ddc,
- struct display_sink_capability *sink_cap,
- struct audio_support *audio_support)
+static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc,
+ struct display_sink_capability *sink_cap,
+ struct audio_support *audio_support)
{
- dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
- ddc, sink_cap);
- return decide_signal_from_strap_and_dongle_type(
- sink_cap->dongle_type,
- audio_support);
+ dal_ddc_service_i2c_query_dp_dual_mode_adaptor(ddc, sink_cap);
+
+ return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type,
+ audio_support);
}
static void link_disconnect_sink(struct dc_link *link)
@@ -519,6 +520,96 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
link->local_sink = prev_sink;
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+bool dc_link_is_hdcp14(struct dc_link *link)
+{
+ bool ret = false;
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+		/* HDMI doesn't tell us its HDCP(1.4) capability, so assume it is
+		 * always capable. We can poll for bksv, but some displays have an
+		 * issue with this. Since it's so rare for a display to not be 1.4
+		 * capable, this assumption is ok.
+		 */
+ ret = true;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+bool dc_link_is_hdcp22(struct dc_link *link)
+{
+ bool ret = false;
+
+ switch (link->connector_signal) {
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE &&
+ link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable &&
+ (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0;
+ break;
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+		ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 1 : 0;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
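+/* Read the sink's HDCP capabilities: HDCP 2.x RxCaps over DPCD for DP/eDP
+ * (or the HDCP2Version byte over I2C/DDC otherwise), plus Bcaps for HDCP 1.4
+ * on DP links.
+ */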
+static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
+{
+ struct hdcp_protection_message msg22;
+ struct hdcp_protection_message msg14;
+
+ memset(&msg22, 0, sizeof(struct hdcp_protection_message));
+ memset(&msg14, 0, sizeof(struct hdcp_protection_message));
+ memset(link->hdcp_caps.rx_caps.raw, 0,
+ sizeof(link->hdcp_caps.rx_caps.raw));
+
+ if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->ddc->transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) ||
+ link->connector_signal == SIGNAL_TYPE_EDP) {
+ msg22.data = link->hdcp_caps.rx_caps.raw;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.raw);
+ msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS;
+ } else {
+ msg22.data = &link->hdcp_caps.rx_caps.fields.version;
+ msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version);
+ msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION;
+ }
+ msg22.version = HDCP_VERSION_22;
+ msg22.link = HDCP_LINK_PRIMARY;
+ msg22.max_retries = 5;
+ dc_process_hdcp_msg(signal, link, &msg22);
+
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ msg14.data = &link->hdcp_caps.bcaps.raw;
+ msg14.length = sizeof(link->hdcp_caps.bcaps.raw);
+ msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS;
+ msg14.version = HDCP_VERSION_14;
+ msg14.link = HDCP_LINK_PRIMARY;
+ msg14.max_retries = 5;
+
+		dc_process_hdcp_msg(signal, link, &msg14);
+ }
+}
+#endif
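/*
 * Editor's illustration, not part of the patch: one way a caller could pick a
 * content protection level once query_hdcp_capability() has filled in
 * link->hdcp_caps. The enum and helper are hypothetical; only the
 * dc_link_is_hdcp*() calls come from the hunk above.
 */
enum example_cp { EXAMPLE_CP_NONE, EXAMPLE_CP_HDCP14, EXAMPLE_CP_HDCP22 };

static enum example_cp example_pick_cp(struct dc_link *link)
{
	if (dc_link_is_hdcp22(link))
		return EXAMPLE_CP_HDCP22; /* prefer the newer protocol */
	if (dc_link_is_hdcp14(link))
		return EXAMPLE_CP_HDCP14;
	return EXAMPLE_CP_NONE;
}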
static void read_current_link_settings_on_detect(struct dc_link *link)
{
@@ -532,18 +623,18 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
- status = core_link_read_dpcd(
- link,
- DP_LANE_COUNT_SET,
- &lane_count_set.raw,
- sizeof(lane_count_set));
+ status = core_link_read_dpcd(link,
+ DP_LANE_COUNT_SET,
+ &lane_count_set.raw,
+ sizeof(lane_count_set));
/* First DPCD read after VDD ON can fail if the particular board
* does not have HPD pin wired correctly. So if DPCD read fails,
* which it should never happen, retry a few times. Target worst
* case scenario of 80 ms.
*/
if (status == DC_OK) {
- link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+ link->cur_link_settings.lane_count =
+ lane_count_set.bits.LANE_COUNT_SET;
break;
}
@@ -552,7 +643,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
// Read DPCD 00100h to find if standard link rates are set
core_link_read_dpcd(link, DP_LINK_BW_SET,
- &link_bw_set, sizeof(link_bw_set));
+ &link_bw_set, sizeof(link_bw_set));
if (link_bw_set == 0) {
if (link->connector_signal == SIGNAL_TYPE_EDP) {
@@ -560,12 +651,12 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
* Read DPCD 00115h to find the edp link rate set used
*/
core_link_read_dpcd(link, DP_LINK_RATE_SET,
- &link_rate_set, sizeof(link_rate_set));
+ &link_rate_set, sizeof(link_rate_set));
// edp_supported_link_rates_count = 0 for DP
if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
link->cur_link_settings.link_rate =
- link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
link->cur_link_settings.link_rate_set = link_rate_set;
link->cur_link_settings.use_link_rate_set = true;
}
@@ -579,7 +670,7 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
}
// Read DPCD 00003h to find the max down spread.
core_link_read_dpcd(link, DP_MAX_DOWNSPREAD,
- &max_down_spread.raw, sizeof(max_down_spread));
+ &max_down_spread.raw, sizeof(max_down_spread));
link->cur_link_settings.link_spread =
max_down_spread.bits.MAX_DOWN_SPREAD ?
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
@@ -612,6 +703,12 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
sink_caps->transaction_type);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+	/* In case of fallback to SST when topology discovery below fails,
+	 * HDCP caps will be queried again later by the upper layer (the
+	 * caller of this function).
+	 */
+#endif
/*
* This call will initiate MST topology discovery. Which
* will detect MST ports and add new DRM connector DRM
@@ -683,12 +780,12 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid)
if (new_edid->length == 0)
return false;
- return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0);
+ return (memcmp(old_edid->raw_edid,
+ new_edid->raw_edid, new_edid->length) == 0);
}
-static bool wait_for_alt_mode(struct dc_link *link)
+static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
{
-
/**
* something is terribly wrong if time out is > 200ms. (5Hz)
	 * 500 microseconds * 400 tries is 200 ms
@@ -703,7 +800,7 @@ static bool wait_for_alt_mode(struct dc_link *link)
DC_LOGGER_INIT(link->ctx->logger);
- if (link->link_enc->funcs->is_in_alt_mode == NULL)
+ if (!link->link_enc->funcs->is_in_alt_mode)
return true;
is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc);
@@ -718,21 +815,21 @@ static bool wait_for_alt_mode(struct dc_link *link)
udelay(sleep_time_in_microseconds);
/* ask the link if alt mode is enabled, if so return ok */
if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) {
-
finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(
- link->ctx, finish_timestamp, enter_timestamp);
+ time_taken_in_ns =
+ dm_get_elapse_time_in_ns(link->ctx,
+ finish_timestamp,
+ enter_timestamp);
DC_LOG_WARNING("Alt mode entered finished after %llu ms\n",
div_u64(time_taken_in_ns, 1000000));
return true;
}
-
}
finish_timestamp = dm_get_timestamp(link->ctx);
time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp,
enter_timestamp);
DC_LOG_WARNING("Alt mode has timed out after %llu ms\n",
- div_u64(time_taken_in_ns, 1000000));
+ div_u64(time_taken_in_ns, 1000000));
return false;
}
@@ -768,30 +865,30 @@ static bool dc_link_detect_helper(struct dc_link *link,
return false;
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
- link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink) {
-
+ link->connector_signal == SIGNAL_TYPE_EDP) &&
+ link->local_sink) {
// need to re-write OUI and brightness in resume case
if (link->connector_signal == SIGNAL_TYPE_EDP) {
dpcd_set_source_specific_data(link);
- dc_link_set_default_brightness_aux(link); //TODO: use cached
+			/* TODO: use cached brightness */
+			dc_link_set_default_brightness_aux(link);
}
return true;
}
- if (false == dc_link_detect_sink(link, &new_connection_type)) {
+ if (!dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
return false;
}
prev_sink = link->local_sink;
- if (prev_sink != NULL) {
+ if (prev_sink) {
dc_sink_retain(prev_sink);
memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps));
}
- link_disconnect_sink(link);
+ link_disconnect_sink(link);
if (new_connection_type != dc_connection_none) {
link->type = new_connection_type;
link->link_state_valid = false;
@@ -838,35 +935,31 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_DISPLAY_PORT: {
-
/* wa HPD high coming too early*/
if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) {
-
/* if alt mode times out, return false */
- if (wait_for_alt_mode(link) == false) {
+ if (!wait_for_entering_dp_alt_mode(link))
return false;
- }
}
- if (!detect_dp(
- link,
- &sink_caps,
- &converter_disable_audio,
- aud_support, reason)) {
- if (prev_sink != NULL)
+ if (!detect_dp(link, &sink_caps,
+ &converter_disable_audio,
+ aud_support, reason)) {
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
// Check if dpcp block is the same
- if (prev_sink != NULL) {
- if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps)))
+ if (prev_sink) {
+ if (memcmp(&link->dpcd_caps, &prev_dpcd_caps,
+ sizeof(struct dpcd_caps)))
same_dpcd = false;
}
/* Active dongle downstream unplug*/
if (link->type == dc_connection_active_dongle &&
- link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
- if (prev_sink != NULL)
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
+ if (prev_sink)
/* Downstream unplug */
dc_sink_release(prev_sink);
return true;
@@ -874,7 +967,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Connected\n",
- link->link_index);
+ link->link_index);
/* Need to setup mst link_cap struct here
* otherwise dc_link_detect() will leave mst link_cap
* empty which leads to allocate_mst_payload() has "0"
@@ -882,15 +975,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
*/
dp_verify_mst_link_cap(link);
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
// For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified.
if (reason == DETECT_REASON_BOOT &&
- dc_ctx->dc->config.power_down_display_on_boot == false &&
- link->link_status.link_active == true)
+ !dc_ctx->dc->config.power_down_display_on_boot &&
+ link->link_status.link_active)
perform_dp_seamless_boot = true;
if (perform_dp_seamless_boot) {
@@ -903,24 +996,23 @@ static bool dc_link_detect_helper(struct dc_link *link,
default:
DC_ERROR("Invalid connector type! signal:%d\n",
- link->connector_signal);
- if (prev_sink != NULL)
+ link->connector_signal);
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
} /* switch() */
if (link->dpcd_caps.sink_count.bits.SINK_COUNT)
- link->dpcd_sink_count = link->dpcd_caps.sink_count.
- bits.SINK_COUNT;
+ link->dpcd_sink_count =
+ link->dpcd_caps.sink_count.bits.SINK_COUNT;
else
link->dpcd_sink_count = 1;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps.transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps.transaction_type);
- link->aux_mode = dal_ddc_service_is_in_aux_transaction_mode(
- link->ddc);
+ link->aux_mode =
+ dal_ddc_service_is_in_aux_transaction_mode(link->ddc);
sink_init_data.link = link;
sink_init_data.sink_signal = sink_caps.signal;
@@ -928,7 +1020,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DC_ERROR("Failed to create sink!\n");
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
}
@@ -939,10 +1031,8 @@ static bool dc_link_detect_helper(struct dc_link *link,
/* dc_sink_create returns a new reference */
link->local_sink = sink;
- edid_status = dm_helpers_read_local_edid(
- link->ctx,
- link,
- sink);
+ edid_status = dm_helpers_read_local_edid(link->ctx,
+ link, sink);
switch (edid_status) {
case EDID_BAD_CHECKSUM:
@@ -950,7 +1040,6 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
-
/*
* Abort detection for non-DP connectors if we have
* no EDID
@@ -961,7 +1050,7 @@ static bool dc_link_detect_helper(struct dc_link *link,
*/
if (dc_is_hdmi_signal(link->connector_signal) ||
dc_is_dvi_signal(link->connector_signal)) {
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return false;
@@ -974,45 +1063,53 @@ static bool dc_link_detect_helper(struct dc_link *link,
link->ctx->dc->debug.disable_fec = true;
// Check if edid is the same
- if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
- same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+	if (prev_sink &&
+ (edid_status == EDID_THE_SAME || edid_status == EDID_OK))
+ same_edid = is_same_edid(&prev_sink->dc_edid,
+ &sink->dc_edid);
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
+ sink_caps.transaction_type ==
+ DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
* TODO debug why Dell 2413 doesn't like
* two link trainings
*/
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
// verify link cap for SST non-seamless boot
if (!perform_dp_seamless_boot)
dp_verify_link_cap_with_retries(link,
- &link->reported_link_cap,
- LINK_TRAINING_MAX_VERIFY_RETRY);
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
} else {
// If edid is the same, then discard new sink and revert back to original sink
if (same_edid) {
link_disconnect_remap(prev_sink, link);
sink = prev_sink;
prev_sink = NULL;
-
}
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ query_hdcp_capability(sink->sink_signal, link);
+#endif
}
/* HDMI-DVI Dongle */
if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
+ !sink->edid_caps.edid_hdmi)
sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
- &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
- DC_EDID_BLOCK_SIZE,
- "%s: [Block %d] ", sink->edid_caps.display_name, i);
+ &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE],
+ DC_EDID_BLOCK_SIZE,
+ "%s: [Block %d] ", sink->edid_caps.display_name, i);
}
DC_LOG_DETECTION_EDID_PARSER("%s: "
@@ -1047,17 +1144,18 @@ static bool dc_link_detect_helper(struct dc_link *link,
sink->edid_caps.audio_modes[i].sample_rate,
sink->edid_caps.audio_modes[i].sample_size);
}
-
} else {
/* From Connected-to-Disconnected. */
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Disconnected\n",
- link->link_index);
+ link->link_index);
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
link->mst_stream_alloc_table.stream_count = 0;
- memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations));
+ memset(link->mst_stream_alloc_table.stream_allocations,
+ 0,
+ sizeof(link->mst_stream_alloc_table.stream_allocations));
}
link->type = dc_connection_none;
@@ -1071,16 +1169,15 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p dpcd same=%d edid same=%d\n",
- link->link_index, sink,
- (sink_caps.signal == SIGNAL_TYPE_NONE ?
- "Disconnected":"Connected"), prev_sink,
- same_dpcd, same_edid);
+ link->link_index, sink,
+ (sink_caps.signal ==
+ SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"),
+ prev_sink, same_dpcd, same_edid);
- if (prev_sink != NULL)
+ if (prev_sink)
dc_sink_release(prev_sink);
return true;
-
}
bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
@@ -1110,13 +1207,13 @@ bool dc_link_get_hpd_state(struct dc_link *dc_link)
return state;
}
-static enum hpd_source_id get_hpd_line(
- struct dc_link *link)
+static enum hpd_source_id get_hpd_line(struct dc_link *link)
{
struct gpio *hpd;
enum hpd_source_id hpd_id = HPD_SOURCEID_UNKNOWN;
- hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
+ hpd = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
if (hpd) {
switch (dal_irq_get_source(hpd)) {
@@ -1191,8 +1288,7 @@ static enum channel_id get_ddc_line(struct dc_link *link)
return channel;
}
-static enum transmitter translate_encoder_to_transmitter(
- struct graphics_object_id encoder)
+static enum transmitter translate_encoder_to_transmitter(struct graphics_object_id encoder)
{
switch (encoder.id) {
case ENCODER_ID_INTERNAL_UNIPHY:
@@ -1256,17 +1352,18 @@ static enum transmitter translate_encoder_to_transmitter(
}
}
-static bool dc_link_construct(
- struct dc_link *link,
- const struct link_init_data *init_params)
+static bool dc_link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
{
uint8_t i;
struct ddc_service_init_data ddc_service_init_data = { { 0 } };
struct dc_context *dc_ctx = init_params->ctx;
struct encoder_init_data enc_init_data = { 0 };
+ struct panel_cntl_init_data panel_cntl_init_data = { 0 };
struct integrated_info info = {{{ 0 }}};
struct dc_bios *bios = init_params->dc->ctx->dc_bios;
const struct dc_vbios_funcs *bp_funcs = bios->funcs;
+
DC_LOGGER_INIT(dc_ctx->logger);
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
@@ -1278,23 +1375,27 @@ static bool dc_link_construct(
link->ctx = dc_ctx;
link->link_index = init_params->link_index;
- memset(&link->preferred_training_settings, 0, sizeof(struct dc_link_training_overrides));
- memset(&link->preferred_link_setting, 0, sizeof(struct dc_link_settings));
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
- link->link_id = bios->funcs->get_connector_id(bios, init_params->connector_index);
+ link->link_id =
+ bios->funcs->get_connector_id(bios, init_params->connector_index);
if (link->link_id.type != OBJECT_TYPE_CONNECTOR) {
dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n",
- __func__, init_params->connector_index,
- link->link_id.type, OBJECT_TYPE_CONNECTOR);
+ __func__, init_params->connector_index,
+ link->link_id.type, OBJECT_TYPE_CONNECTOR);
goto create_fail;
}
if (link->dc->res_pool->funcs->link_init)
link->dc->res_pool->funcs->link_init(link);
- link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
- if (link->hpd_gpio != NULL) {
+ link->hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id,
+ link->ctx->gpio_service);
+ if (link->hpd_gpio) {
dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT);
dal_gpio_unlock_pin(link->hpd_gpio);
link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio);
@@ -1314,9 +1415,9 @@ static bool dc_link_construct(
link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
case CONNECTOR_ID_DISPLAY_PORT:
- link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
- if (link->hpd_gpio != NULL)
+ if (link->hpd_gpio)
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
@@ -1324,42 +1425,60 @@ static bool dc_link_construct(
case CONNECTOR_ID_EDP:
link->connector_signal = SIGNAL_TYPE_EDP;
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
link->irq_source_hpd_rx =
dal_irq_get_rx_source(link->hpd_gpio);
}
+
break;
case CONNECTOR_ID_LVDS:
link->connector_signal = SIGNAL_TYPE_LVDS;
break;
default:
- DC_LOG_WARNING("Unsupported Connector type:%d!\n", link->link_id.id);
+ DC_LOG_WARNING("Unsupported Connector type:%d!\n",
+ link->link_id.id);
goto create_fail;
}
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
- "signal %d\n",
- init_params->connector_index,
- link->connector_signal);
+ "signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
ddc_service_init_data.ctx = link->ctx;
ddc_service_init_data.id = link->link_id;
ddc_service_init_data.link = link;
link->ddc = dal_ddc_service_create(&ddc_service_init_data);
- if (link->ddc == NULL) {
+ if (!link->ddc) {
DC_ERROR("Failed to create ddc_service!\n");
goto ddc_create_fail;
}
link->ddc_hw_inst =
- dal_ddc_get_line(
- dal_ddc_service_get_ddc_pin(link->ddc));
+ dal_ddc_get_line(dal_ddc_service_get_ddc_pin(link->ddc));
+
+ if (link->dc->res_pool->funcs->panel_cntl_create &&
+ (link->link_id.id == CONNECTOR_ID_EDP ||
+ link->link_id.id == CONNECTOR_ID_LVDS)) {
+ panel_cntl_init_data.ctx = dc_ctx;
+ panel_cntl_init_data.inst = 0;
+ link->panel_cntl =
+ link->dc->res_pool->funcs->panel_cntl_create(
+ &panel_cntl_init_data);
+
+		if (!link->panel_cntl) {
+ DC_ERROR("Failed to create link panel_cntl!\n");
+ goto panel_cntl_create_fail;
+ }
+ }
enc_init_data.ctx = dc_ctx;
- bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, &enc_init_data.encoder);
+ bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0,
+ &enc_init_data.encoder);
enc_init_data.connector = link->link_id;
enc_init_data.channel = get_ddc_line(link);
enc_init_data.hpd_source = get_hpd_line(link);
@@ -1367,11 +1486,11 @@ static bool dc_link_construct(
link->hpd_src = enc_init_data.hpd_source;
enc_init_data.transmitter =
- translate_encoder_to_transmitter(enc_init_data.encoder);
- link->link_enc = link->dc->res_pool->funcs->link_enc_create(
- &enc_init_data);
+ translate_encoder_to_transmitter(enc_init_data.encoder);
+ link->link_enc =
+ link->dc->res_pool->funcs->link_enc_create(&enc_init_data);
- if (link->link_enc == NULL) {
+ if (!link->link_enc) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1379,8 +1498,9 @@ static bool dc_link_construct(
link->link_enc_hw_inst = link->link_enc->transmitter;
for (i = 0; i < 4; i++) {
- if (BP_RESULT_OK !=
- bp_funcs->get_device_tag(dc_ctx->dc_bios, link->link_id, i, &link->device_tag)) {
+ if (bp_funcs->get_device_tag(dc_ctx->dc_bios,
+ link->link_id, i,
+ &link->device_tag) != BP_RESULT_OK) {
DC_ERROR("Failed to find device tag!\n");
goto device_tag_fail;
}
@@ -1388,13 +1508,14 @@ static bool dc_link_construct(
/* Look for device tag that matches connector signal,
* CRT for rgb, LCD for other supported signal tyes
*/
- if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, link->device_tag.dev_id))
+ if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios,
+ link->device_tag.dev_id))
continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT
- && link->connector_signal != SIGNAL_TYPE_RGB)
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT &&
+ link->connector_signal != SIGNAL_TYPE_RGB)
continue;
- if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD
- && link->connector_signal == SIGNAL_TYPE_RGB)
+ if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD &&
+ link->connector_signal == SIGNAL_TYPE_RGB)
continue;
break;
}
@@ -1406,16 +1527,16 @@ static bool dc_link_construct(
for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) {
struct external_display_path *path =
&info.ext_disp_conn_info.path[i];
- if (path->device_connector_id.enum_id == link->link_id.enum_id
- && path->device_connector_id.id == link->link_id.id
- && path->device_connector_id.type == link->link_id.type) {
- if (link->device_tag.acpi_device != 0
- && path->device_acpi_enum == link->device_tag.acpi_device) {
+ if (path->device_connector_id.enum_id == link->link_id.enum_id &&
+ path->device_connector_id.id == link->link_id.id &&
+ path->device_connector_id.type == link->link_id.type) {
+ if (link->device_tag.acpi_device != 0 &&
+ path->device_acpi_enum == link->device_tag.acpi_device) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
} else if (path->device_tag ==
- link->device_tag.dev_id.raw_device_tag) {
+ link->device_tag.dev_id.raw_device_tag) {
link->ddi_channel_mapping = path->channel_mapping;
link->chip_caps = path->caps;
}
@@ -1431,15 +1552,20 @@ static bool dc_link_construct(
*/
program_hpd_filter(link);
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
return true;
device_tag_fail:
link->link_enc->funcs->destroy(&link->link_enc);
link_enc_create_fail:
+	if (link->panel_cntl)
+ link->panel_cntl->funcs->destroy(&link->panel_cntl);
+panel_cntl_create_fail:
dal_ddc_service_destroy(&link->ddc);
ddc_create_fail:
create_fail:
- if (link->hpd_gpio != NULL) {
+ if (link->hpd_gpio) {
dal_gpio_destroy_irq(&link->hpd_gpio);
link->hpd_gpio = NULL;
}
@@ -2339,9 +2465,28 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
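+/*
+ * ABM is now tracked per stream (stream_res.abm) rather than as a single
+ * res_pool-wide instance, so look it up from the pipe driving this link.
+ */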
+static struct abm *get_abm_from_stream_res(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = NULL;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+		struct dc_stream_state *stream = pipe_ctx->stream;
+
+		if (stream && stream->link == link) {
+			abm = pipe_ctx->stream_res.abm;
+ break;
+ }
+ }
+ return abm;
+}
+
int dc_link_get_backlight_level(const struct dc_link *link)
{
- struct abm *abm = link->ctx->dc->res_pool->abm;
+	struct abm *abm = get_abm_from_stream_res(link);
if (abm == NULL || abm->funcs->get_current_backlight == NULL)
return DC_ERROR_UNEXPECTED;
@@ -2349,71 +2494,63 @@ int dc_link_get_backlight_level(const struct dc_link *link)
return (int) abm->funcs->get_current_backlight(abm);
}
-bool dc_link_set_backlight_level(const struct dc_link *link,
- uint32_t backlight_pwm_u16_16,
- uint32_t frame_ramp)
+int dc_link_get_target_backlight_pwm(const struct dc_link *link)
{
- struct dc *dc = link->ctx->dc;
- struct abm *abm = dc->res_pool->abm;
- struct dmcu *dmcu = dc->res_pool->dmcu;
- unsigned int controller_id = 0;
- bool use_smooth_brightness = true;
- int i;
- DC_LOGGER_INIT(link->ctx->logger);
+ struct abm *abm = get_abm_from_stream_res(link);
- if ((dmcu == NULL) ||
- (abm == NULL) ||
- (abm->funcs->set_backlight_level_pwm == NULL))
- return false;
+ if (abm == NULL || abm->funcs->get_target_backlight == NULL)
+ return DC_ERROR_UNEXPECTED;
- use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+ return (int) abm->funcs->get_target_backlight(abm);
+}
- DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
- backlight_pwm_u16_16, backlight_pwm_u16_16);
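+/* Find the first pipe whose stream is driven by this link. */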
+static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link)
+{
+ int i;
+ struct dc *dc = link->ctx->dc;
+ struct pipe_ctx *pipe_ctx = NULL;
- if (dc_is_embedded_signal(link->connector_signal)) {
- for (i = 0; i < MAX_PIPES; i++) {
- if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
- if (dc->current_state->res_ctx.
- pipe_ctx[i].stream->link
- == link) {
- /* DMCU -1 for all controller id values,
- * therefore +1 here
- */
- controller_id =
- dc->current_state->
- res_ctx.pipe_ctx[i].stream_res.tg->inst +
- 1;
-
- /* Disable brightness ramping when the display is blanked
- * as it can hang the DMCU
- */
- if (dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
- frame_ramp = 0;
- }
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) {
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+ break;
}
}
- abm->funcs->set_backlight_level_pwm(
- abm,
- backlight_pwm_u16_16,
- frame_ramp,
- controller_id,
- use_smooth_brightness);
}
- return true;
+ return pipe_ctx;
}
-bool dc_link_set_abm_disable(const struct dc_link *link)
+bool dc_link_set_backlight_level(const struct dc_link *link,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
{
struct dc *dc = link->ctx->dc;
- struct abm *abm = dc->res_pool->abm;
- if ((abm == NULL) || (abm->funcs->set_backlight_level_pwm == NULL))
- return false;
+ DC_LOGGER_INIT(link->ctx->logger);
+ DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
+ backlight_pwm_u16_16, backlight_pwm_u16_16);
+
+ if (dc_is_embedded_signal(link->connector_signal)) {
+ struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
- abm->funcs->set_abm_immediate_disable(abm);
+ if (pipe_ctx) {
+ /* Disable brightness ramping when the display is blanked
+ * as it can hang the DMCU
+ */
+ if (pipe_ctx->plane_state == NULL)
+ frame_ramp = 0;
+ } else {
+ ASSERT(false);
+ return false;
+ }
+ dc->hwss.set_backlight_level(
+ pipe_ctx,
+ backlight_pwm_u16_16,
+ frame_ramp);
+ }
return true;
}
@@ -2423,12 +2560,12 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if (psr != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_enable(psr, allow_active);
- else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
+ else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
- link->psr_allow_active = allow_active;
+ link->psr_settings.psr_allow_active = allow_active;
return true;
}
@@ -2439,9 +2576,9 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if (psr != NULL && link->psr_feature_enabled)
+ if (psr != NULL && link->psr_settings.psr_feature_enabled)
psr->funcs->psr_get_state(psr, psr_state);
- else if (dmcu != NULL && link->psr_feature_enabled)
+ else if (dmcu != NULL && link->psr_settings.psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
return true;
@@ -2612,14 +2749,14 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
+ link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
- link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
+ link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
/* psr_enabled == 0 indicates setup_psr did not succeed, but this
* should not happen since firmware should be running at this point
*/
- if (link->psr_feature_enabled == 0)
+ if (link->psr_settings.psr_feature_enabled == 0)
ASSERT(0);
return true;
@@ -2966,7 +3103,7 @@ void core_link_enable_stream(
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
@@ -3040,6 +3177,18 @@ void core_link_enable_stream(
if (pipe_ctx->stream->dpms_off)
return;
+	/* Have to set up DSC before DIG FE and BE are connected (which happens before the
+ * link training). This is to make sure the bandwidth sent to DIG BE won't be
+ * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag
+ * will be automatically set at a later time when the video is enabled
+ * (DP_VID_STREAM_EN = 1).
+ */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
+ dc_is_virtual_signal(pipe_ctx->stream->signal))
+ dp_set_dsc_enable(pipe_ctx, true);
+ }
+
status = enable_link(state, pipe_ctx);
if (status != DC_OK) {
@@ -3067,11 +3216,6 @@ void core_link_enable_stream(
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
COLOR_DEPTH_UNDEFINED);
- if (pipe_ctx->stream->timing.flags.DSC) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
- dc_is_virtual_signal(pipe_ctx->stream->signal))
- dp_set_dsc_enable(pipe_ctx, true);
- }
dc->hwss.enable_stream(pipe_ctx);
/* Set DPS PPS SDP (AKA "info frames") */
@@ -3101,6 +3245,10 @@ void core_link_enable_stream(
dp_set_dsc_enable(pipe_ctx, true);
}
+
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		core_link_set_avmute(pipe_ctx, false);
}
void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -3109,10 +3257,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
+	if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+		core_link_set_avmute(pipe_ctx, true);
+
#if defined(CONFIG_DRM_AMD_DC_HDCP)
update_psp_stream_config(pipe_ctx, true);
#endif
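/*
 * Editor's illustration, not part of the patch: with the DMCU checks dropped
 * from dc_link_set_backlight_level(), a caller just supplies the 16.16
 * fixed-point PWM level and a ramp time. The wrapper and the values below are
 * hypothetical.
 */
static void example_set_half_brightness(const struct dc_link *link)
{
	uint32_t level = 0x8000;  /* 0.5 in unsigned 16.16 fixed point */
	uint32_t frame_ramp = 0;  /* no smooth ramping */

	dc_link_set_backlight_level(link, level, frame_ramp);
}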
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 256889eed93e..aefd29a440b5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -599,7 +599,7 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >
- payload->length ? true : false;
+ payload->length;
current_payload.address = payload->address;
current_payload.data = &payload->data[retrieved];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index aa3c45a69b5e..91cd884d6f25 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -13,7 +13,6 @@
#include "core_status.h"
#include "dpcd_defs.h"
-#include "resource.h"
#define DC_LOGGER \
link->ctx->logger
@@ -220,6 +219,30 @@ static enum dpcd_training_patterns
return dpcd_tr_pattern;
}
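+/* Per the DP spec, TPS1..TPS3 are transmitted with scrambling disabled and
+ * TPS4 is scrambled; derive the SCRAMBLING_DISABLE bit written to DPCD
+ * TRAINING_PATTERN_SET accordingly.
+ */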
+static uint8_t dc_dp_initialize_scrambling_data_symbols(
+ struct dc_link *link,
+ enum dc_dp_training_pattern pattern)
+{
+	uint8_t disable_scrambled_data_symbols = 0;
+
+ switch (pattern) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+		disable_scrambled_data_symbols = 1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+		disable_scrambled_data_symbols = 0;
+ break;
+ default:
+ ASSERT(0);
+ DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n",
+ __func__, pattern);
+ break;
+ }
+	return disable_scrambled_data_symbols;
+}
+
static inline bool is_repeater(struct dc_link *link, uint32_t offset)
{
return (!link->is_lttpr_mode_transparent && offset != 0);
@@ -252,6 +275,9 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET]
= dpcd_pattern.raw;
@@ -1710,19 +1736,10 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
- /* Set Default link settings */
- struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
-
- /* Higher link settings based on feature supported */
- if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH2;
-
- if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE)
- max_link_cap.link_rate = LINK_RATE_HIGH3;
+ struct dc_link_settings max_link_cap = {0};
- if (link->link_enc->funcs->get_max_link_cap)
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ /* get max link encoder capability */
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2426,7 +2443,7 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
{
union dpcd_psr_configuration psr_configuration;
- if (!link->psr_feature_enabled)
+ if (!link->psr_settings.psr_feature_enabled)
return false;
dm_helpers_dp_read_dpcd(
@@ -2911,6 +2928,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
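+	/* Blank every stream on this link while it retrains so intermittent
+	 * corruption isn't shown; the streams are unblanked again below.
+	 */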
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.blank_stream(pipe_ctx);
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
break;
}
@@ -2927,6 +2950,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_reallocate_mst_payload(link);
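+
+	/* Retraining is done; unblank the streams blanked above. */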
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
+ }
+
status = false;
if (out_link_loss)
*out_link_loss = true;
@@ -4227,6 +4256,21 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
void dpcd_set_source_specific_data(struct dc_link *link)
{
const uint32_t post_oui_delay = 30; // 30ms
+ uint8_t dspc = 0;
+ enum dc_status ret;
+
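+	/* OUI support is advertised in DOWN_STREAM_PORT_COUNT (DPCD 0x007,
+	 * bit 7); skip writing source-specific data when the sink doesn't
+	 * set it.
+	 */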
+ ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc,
+ sizeof(dspc));
+
+ if (ret != DC_OK) {
+		DC_LOG_ERROR("Error in DP aux read transaction, not writing source specific data\n");
+ return;
+ }
+
+ /* Return if OUI unsupported */
+ if (!(dspc & DP_OUI_SUPPORT))
+ return;
if (!link->dc->vendor_signature.is_valid) {
struct dpcd_amd_signature amd_signature;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 51e0ee6e7695..6590f51caefa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -400,7 +400,7 @@ static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable)
struct dc_stream_state *stream = pipe_ctx->stream;
bool result = false;
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ if (dc_is_virtual_signal(stream->signal) || IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
result = true;
else
result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index f4bcc71b2920..0c5619364e7d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -532,6 +532,24 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
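+/* Count the other pipes blending the same plane (MPC combine) by walking
+ * the pipe chain in both directions while plane_state matches.
+ */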
+int get_num_mpc_splits(struct pipe_ctx *pipe)
+{
+ int mpc_split_count = 0;
+ struct pipe_ctx *other_pipe = pipe->bottom_pipe;
+
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->bottom_pipe;
+ }
+ other_pipe = pipe->top_pipe;
+ while (other_pipe && other_pipe->plane_state == pipe->plane_state) {
+ mpc_split_count++;
+ other_pipe = other_pipe->top_pipe;
+ }
+
+ return mpc_split_count;
+}
+
int get_num_odm_splits(struct pipe_ctx *pipe)
{
int odm_split_count = 0;
@@ -556,16 +574,11 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
/*Check for mpc split*/
struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+ *split_count = get_num_mpc_splits(pipe_ctx);
while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
(*split_idx)++;
- (*split_count)++;
split_pipe = split_pipe->top_pipe;
}
- split_pipe = pipe_ctx->bottom_pipe;
- while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
- (*split_count)++;
- split_pipe = split_pipe->bottom_pipe;
- }
} else {
/*Get odm split index*/
struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
@@ -692,6 +705,9 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
/* Round up, assume original video size always even dimensions */
data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
+
+ data->viewport_unadjusted = data->viewport;
+ data->viewport_c_unadjusted = data->viewport_c;
}
static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -1061,8 +1077,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
calculate_viewport(pipe_ctx);
- if (pipe_ctx->plane_res.scl_data.viewport.height < 16 ||
- pipe_ctx->plane_res.scl_data.viewport.width < 16) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < 12 ||
+ pipe_ctx->plane_res.scl_data.viewport.width < 12) {
if (store_h_border_left) {
restore_border_left_from_dst(pipe_ctx,
store_h_border_left);
@@ -1358,9 +1374,6 @@ bool dc_add_plane_to_context(
dc_plane_state_retain(plane_state);
while (head_pipe) {
- tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
- ASSERT(tail_pipe);
-
free_pipe = acquire_free_pipe_for_head(context, pool, head_pipe);
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -1378,6 +1391,8 @@ bool dc_add_plane_to_context(
free_pipe->plane_state = plane_state;
if (head_pipe != free_pipe) {
+ tail_pipe = resource_get_tail_pipe(&context->res_ctx, head_pipe);
+ ASSERT(tail_pipe);
free_pipe->stream_res.tg = tail_pipe->stream_res.tg;
free_pipe->stream_res.abm = tail_pipe->stream_res.abm;
free_pipe->stream_res.opp = tail_pipe->stream_res.opp;
@@ -1545,35 +1560,6 @@ bool dc_add_all_planes_for_stream(
return add_all_planes_for_stream(dc, stream, &set, 1, context);
}
-
-static bool is_hdr_static_meta_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream)
-{
- if (cur_stream == NULL)
- return true;
-
- if (memcmp(&cur_stream->hdr_static_metadata,
- &new_stream->hdr_static_metadata,
- sizeof(struct dc_info_packet)) != 0)
- return true;
-
- return false;
-}
-
-static bool is_vsc_info_packet_changed(struct dc_stream_state *cur_stream,
- struct dc_stream_state *new_stream)
-{
- if (cur_stream == NULL)
- return true;
-
- if (memcmp(&cur_stream->vsc_infopacket,
- &new_stream->vsc_infopacket,
- sizeof(struct dc_info_packet)) != 0)
- return true;
-
- return false;
-}
-
static bool is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@@ -1608,15 +1594,9 @@ static bool are_stream_backends_same(
if (is_timing_changed(stream_a, stream_b))
return false;
- if (is_hdr_static_meta_changed(stream_a, stream_b))
- return false;
-
if (stream_a->dpms_off != stream_b->dpms_off)
return false;
- if (is_vsc_info_packet_changed(stream_a, stream_b))
- return false;
-
return true;
}
@@ -1756,21 +1736,6 @@ static struct audio *find_first_free_audio(
return 0;
}
-bool resource_is_stream_unchanged(
- struct dc_state *old_context, struct dc_stream_state *stream)
-{
- int i;
-
- for (i = 0; i < old_context->stream_count; i++) {
- struct dc_stream_state *old_stream = old_context->streams[i];
-
- if (are_stream_backends_same(old_stream, stream))
- return true;
- }
-
- return false;
-}
-
/**
* dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
*/
@@ -2025,17 +1990,6 @@ enum dc_status resource_map_pool_resources(
int pipe_idx = -1;
struct dc_bios *dcb = dc->ctx->dc_bios;
- /* TODO Check if this is needed */
- /*if (!resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- stream->bit_depth_params =
- old_context->streams[i]->bit_depth_params;
- stream->clamping = old_context->streams[i]->clamping;
- continue;
- }
- }
- */
-
calculate_phy_pix_clks(stream);
/* TODO: Check Linux */
@@ -2718,19 +2672,16 @@ bool pipe_need_reprogram(
if (is_timing_changed(pipe_ctx_old->stream, pipe_ctx->stream))
return true;
- if (is_hdr_static_meta_changed(pipe_ctx_old->stream, pipe_ctx->stream))
- return true;
-
if (pipe_ctx_old->stream->dpms_off != pipe_ctx->stream->dpms_off)
return true;
- if (is_vsc_info_packet_changed(pipe_ctx_old->stream, pipe_ctx->stream))
- return true;
-
if (false == pipe_ctx_old->stream->link->link_state_valid &&
false == pipe_ctx_old->stream->dpms_off)
return true;
+ if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
+ return true;
+
return false;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
index a249a0e5edd0..9e16af22e4aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_sink.c
@@ -54,6 +54,7 @@ static bool dc_sink_construct(struct dc_sink *sink, const struct dc_sink_init_da
sink->ctx = link->ctx;
sink->dongle_max_pix_clk = init_params->dongle_max_pix_clk;
sink->converter_disable_audio = init_params->converter_disable_audio;
+ sink->is_mst_legacy = init_params->sink_is_legacy;
sink->dc_container_id = NULL;
sink->sink_id = init_params->link->ctx->dc_sink_id_count;
// increment dc_sink_id_count because we don't want two sinks with same ID
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 6ddbb00ed37a..4f0e7203dba4 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -231,34 +231,6 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
-static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
-{
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- unsigned int vupdate_line;
- unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
- struct dc_stream_state *stream = pipe_ctx->stream;
- unsigned int us_per_line;
-
- if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
- ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
-
- vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
- if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
- return;
-
- if (vpos >= vupdate_line)
- return;
-
- us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
- lines_to_vupdate = vupdate_line - vpos;
- us_to_vupdate = lines_to_vupdate * us_per_line;
-
- /* 70 us is a conservative estimate of cursor update time*/
- if (us_to_vupdate < 70)
- udelay(us_to_vupdate);
- }
-#endif
-}
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
@@ -298,9 +270,7 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
-
- delay_cursor_until_vupdate(pipe_ctx, dc);
- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_attribute(pipe_ctx);
@@ -309,7 +279,7 @@ bool dc_stream_set_cursor_attributes(
}
if (pipe_to_program)
- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true;
}
@@ -349,16 +319,14 @@ bool dc_stream_set_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
-
- delay_cursor_until_vupdate(pipe_ctx, dc);
- dc->hwss.pipe_control_lock(dc, pipe_to_program, true);
+ dc->hwss.cursor_lock(dc, pipe_to_program, true);
}
dc->hwss.set_cursor_position(pipe_ctx);
}
if (pipe_to_program)
- dc->hwss.pipe_control_lock(dc, pipe_to_program, false);
+ dc->hwss.cursor_lock(dc, pipe_to_program, false);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 1935cf6601eb..85908561c741 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -29,6 +29,9 @@
#include "dc_types.h"
#include "grph_object_defs.h"
#include "logger_types.h"
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+#include "hdcp_types.h"
+#endif
#include "gpio_types.h"
#include "link_service_types.h"
#include "grph_object_ctrl_defs.h"
@@ -39,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.76"
+#define DC_VER "3.2.84"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -95,6 +98,49 @@ struct dc_plane_cap {
} max_downscale_factor;
};
+// Color management caps (DPP and MPC)
+struct rom_curve_caps {
+ uint16_t srgb : 1;
+ uint16_t bt2020 : 1;
+ uint16_t gamma2_2 : 1;
+ uint16_t pq : 1;
+ uint16_t hlg : 1;
+};
+
+struct dpp_color_caps {
+ uint16_t dcn_arch : 1; // all DCE generations treated the same
+	// input LUT is different from most LUTs: just a plain 256-entry lookup
+ uint16_t input_lut_shared : 1; // shared with DGAM
+ uint16_t icsc : 1;
+ uint16_t dgam_ram : 1;
+ uint16_t post_csc : 1; // before gamut remap
+ uint16_t gamma_corr : 1;
+
+ // hdr_mult and gamut remap always available in DPP (in that order)
+ // 3d lut implies shaper LUT,
+ // it may be shared with MPC - check MPC:shared_3d_lut flag
+ uint16_t hw_3d_lut : 1;
+ uint16_t ogam_ram : 1; // blnd gam
+ uint16_t ocsc : 1;
+ struct rom_curve_caps dgam_rom_caps;
+ struct rom_curve_caps ogam_rom_caps;
+};
+
+struct mpc_color_caps {
+ uint16_t gamut_remap : 1;
+ uint16_t ogam_ram : 1;
+ uint16_t ocsc : 1;
+	uint16_t num_3dluts : 3; // 3D LUT always assumes a preceding shaper LUT
+	uint16_t shared_3d_lut : 1; // can be in either DPP or MPC, but single instance
+
+ struct rom_curve_caps ogam_rom_caps;
+};
+
+struct dc_color_caps {
+ struct dpp_color_caps dpp;
+ struct mpc_color_caps mpc;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -117,9 +163,9 @@ struct dc_caps {
bool psp_setup_panel_mode;
bool extended_aux_timeout_support;
bool dmcub_support;
- bool hw_3d_lut;
enum dp_protocol_version max_dp_protocol_version;
struct dc_plane_cap planes[MAX_PLANES];
+ struct dc_color_caps color;
};
struct dc_bug_wa {
@@ -230,7 +276,8 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
- bool psr_on_dmub;
+ bool disable_dmcu;
+ bool enable_4to1MPC;
};
enum visual_confirm {
@@ -238,6 +285,7 @@ enum visual_confirm {
VISUAL_CONFIRM_SURFACE = 1,
VISUAL_CONFIRM_HDR = 2,
VISUAL_CONFIRM_MPCTREE = 4,
+ VISUAL_CONFIRM_PSR = 5,
};
enum dcc_option {
@@ -429,6 +477,7 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool disable_dsc;
+ bool enable_dram_clock_change_one_display_vactive;
};
struct dc_debug_data {
@@ -474,6 +523,7 @@ struct dc_bounding_box_overrides {
int urgent_latency_ns;
int percent_of_ideal_drambw;
int dram_clock_change_latency_ns;
+ int dummy_clock_change_latency_ns;
/* This forces a hard min on the DCFCLK we use
* for DML. Unlike the debug option for forcing
* DCFCLK, this override affects watermark calculations
@@ -987,6 +1037,7 @@ struct dpcd_caps {
union dpcd_fec_capability fec_cap;
struct dpcd_dsc_capabilities dsc_caps;
struct dc_lttpr_caps lttpr_caps;
+ struct psr_caps psr_caps;
};
@@ -1004,6 +1055,35 @@ union dpcd_sink_ext_caps {
uint8_t raw;
};
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+union hdcp_rx_caps {
+ struct {
+ uint8_t version;
+ uint8_t reserved;
+ struct {
+ uint8_t repeater : 1;
+ uint8_t hdcp_capable : 1;
+ uint8_t reserved : 6;
+ } byte0;
+ } fields;
+ uint8_t raw[3];
+};
+
+union hdcp_bcaps {
+ struct {
+ uint8_t HDCP_CAPABLE:1;
+ uint8_t REPEATER:1;
+ uint8_t RESERVED:6;
+ } bits;
+ uint8_t raw;
+};
+
+struct hdcp_caps {
+ union hdcp_rx_caps rx_caps;
+ union hdcp_bcaps bcaps;
+};
+#endif
+
#include "dc_link.h"
/*******************************************************************************
@@ -1046,7 +1126,7 @@ struct dc_sink {
void *priv;
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
-
+ bool is_mst_legacy;
struct dc_sink_dsc_caps dsc_caps;
struct dc_sink_fec_caps fec_caps;
@@ -1073,6 +1153,7 @@ struct dc_sink_init_data {
struct dc_link *link;
uint32_t dongle_max_pix_clk;
bool converter_disable_audio;
+ bool sink_is_legacy;
};
struct dc_sink *dc_sink_create(const struct dc_sink_init_data *init_params);
@@ -1104,9 +1185,16 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state);
void dc_resume(struct dc *dc);
-unsigned int dc_get_current_backlight_pwm(struct dc *dc);
-unsigned int dc_get_target_backlight_pwm(struct dc *dc);
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+/*
+ * HDCP Interfaces
+ */
+enum hdcp_message_status dc_process_hdcp_msg(
+ enum signal_type signal,
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info);
+#endif
bool dc_is_dmcu_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 59c298a6484f..eea2429ac67d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -25,7 +25,7 @@
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "../dmub/inc/dmub_srv.h"
+#include "../dmub/dmub_srv.h"
static void dc_dmub_srv_construct(struct dc_dmub_srv *dc_srv, struct dc *dc,
struct dmub_srv *dmub)
@@ -58,7 +58,7 @@ void dc_dmub_srv_destroy(struct dc_dmub_srv **dmub_srv)
}
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- struct dmub_cmd_header *cmd)
+ union dmub_rb_cmd *cmd)
{
struct dmub_srv *dmub = dc_dmub_srv->dmub;
struct dc_context *dc_ctx = dc_dmub_srv->ctx;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 754b6077539c..a3a09ccb6d26 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -27,10 +27,9 @@
#define _DMUB_DC_SRV_H_
#include "os_types.h"
-#include "../dmub/inc/dmub_cmd.h"
+#include "dmub/dmub_srv.h"
struct dmub_srv;
-struct dmub_cmd_header;
struct dc_reg_helper_state {
bool gather_in_progress;
@@ -49,7 +48,7 @@ struct dc_dmub_srv {
};
void dc_dmub_srv_cmd_queue(struct dc_dmub_srv *dc_dmub_srv,
- struct dmub_cmd_header *cmd);
+ union dmub_rb_cmd *cmd);
void dc_dmub_srv_cmd_execute(struct dc_dmub_srv *dc_dmub_srv);
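Queueing now takes the whole union dmub_rb_cmd rather than just its header, so the ring-buffer copy captures the full payload in one step. Every caller in this patch follows the same fill/queue/execute/wait shape; a representative sketch, using the header-only PSR-enable command that appears later in this diff:

	union dmub_rb_cmd cmd;

	cmd.psr_enable.header.type = DMUB_CMD__PSR;
	cmd.psr_enable.header.sub_type = DMUB_CMD__PSR_ENABLE;
	cmd.psr_enable.header.payload_bytes = 0;	/* header-only command */

	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);	/* copy into the ring buffer */
	dc_dmub_srv_cmd_execute(dc->dmub_srv);		/* kick the firmware */
	dc_dmub_srv_wait_idle(dc->dmub_srv);		/* block until it is consumed */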
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index bb2730e9521e..af177c087d3b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -740,5 +740,11 @@ struct dpcd_dsc_capabilities {
union dpcd_dsc_ext_capabilities dsc_ext_caps;
};
+/* These parameters come from the PSR capabilities the sink reports via DPCD */
+struct psr_caps {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
+};
#endif /* DC_DP_TYPES_H */
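For orientation, a hedged sketch of how these fields could be derived from the sink's PSR DPCD registers (offsets 0x070/0x071 per the DP spec; the helper and the 55 us step decoding are assumptions, not code from this patch):

static void fill_psr_caps_from_dpcd(struct psr_caps *caps, const uint8_t psr_dpcd[2])
{
	caps->psr_version = psr_dpcd[0];		/* DPCD 0x070 */
	caps->psr_exit_link_training_required =
		!(psr_dpcd[1] & 0x01);			/* DPCD 0x071, bit 0 */
	/* bits 3:1 encode the RFB setup time, 330 us down to 0 in 55 us steps */
	caps->psr_rfb_setup_time = 55 * (6 - ((psr_dpcd[1] >> 1) & 0x7));
}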
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 737048d8a96c..85a0170be544 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -50,7 +50,7 @@ static inline void submit_dmub_read_modify_write(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
@@ -73,7 +73,7 @@ static inline void submit_dmub_burst_write(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
ctx->dmub_srv->reg_helper_offload.gather_in_progress = gather;
@@ -92,7 +92,7 @@ static inline void submit_dmub_reg_wait(
gather = ctx->dmub_srv->reg_helper_offload.gather_in_progress;
ctx->dmub_srv->reg_helper_offload.gather_in_progress = false;
- dc_dmub_srv_cmd_queue(ctx->dmub_srv, &cmd_buf->header);
+ dc_dmub_srv_cmd_queue(ctx->dmub_srv, &offload->cmd_data);
memset(cmd_buf, 0, sizeof(*cmd_buf));
offload->reg_seq_count = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 00ff5e98278c..f63fc25aa6c5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -66,6 +66,22 @@ struct time_stamp {
struct link_trace {
struct time_stamp time_stamp;
};
+
+/* PSR feature flags */
+struct psr_settings {
+ bool psr_feature_enabled; // PSR is supported by sink
+ bool psr_allow_active; // PSR is currently active
+ enum dc_psr_version psr_version; // Internal PSR version, determined based on DPCD
+
+ /* These parameters are calculated by the driver,
+ * based on the display timing and the sink's capabilities.
+ * If the VBLANK region is too small and the sink takes a long time
+ * to set up its remote frame buffer (RFB), entering the PSR state
+ * may take an extra frame.
+ */
+ bool psr_frame_capture_indication_req;
+ unsigned int psr_sdp_transmit_line_num_deadline;
+};
+
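A minimal usage sketch (the caller is hypothetical; dc_link_set_psr_allow_active() is declared further down this header): the two flags gate whether and when PSR entry is requested.

static void try_enter_psr(struct dc_link *link)
{
	if (!link->psr_settings.psr_feature_enabled)
		return;	/* sink never reported PSR support */

	if (!link->psr_settings.psr_allow_active)
		dc_link_set_psr_allow_active(link, true, false /* don't wait */);
}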
/*
* A link contains one or more sinks and their connected status.
* The currently active signal type (HDMI, DP-SST, DP-MST) is also reported.
@@ -118,6 +134,7 @@ struct dc_link {
struct dc_context *ctx;
+ struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
struct graphics_object_id link_id;
union ddi_channel_mapping ddi_channel_mapping;
@@ -126,11 +143,14 @@ struct dc_link {
uint32_t dongle_max_pix_clk;
unsigned short chip_caps;
unsigned int dpcd_sink_count;
+#if defined(CONFIG_DRM_AMD_DC_HDCP)
+ struct hdcp_caps hdcp_caps;
+#endif
enum edp_revision edp_revision;
- bool psr_feature_enabled;
- bool psr_allow_active;
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
+ struct psr_settings psr_settings;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
@@ -197,7 +217,7 @@ bool dc_link_set_default_brightness_aux(struct dc_link *link);
int dc_link_get_backlight_level(const struct dc_link *dc_link);
-bool dc_link_set_abm_disable(const struct dc_link *dc_link);
+int dc_link_get_target_backlight_pwm(const struct dc_link *link);
bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait);
@@ -290,6 +310,10 @@ bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
* DPCD access interfaces
*/
+#ifdef CONFIG_DRM_AMD_DC_HDCP
+bool dc_link_is_hdcp14(struct dc_link *link);
+bool dc_link_is_hdcp22(struct dc_link *link);
+#endif
void dc_link_set_drive_settings(struct dc *dc,
struct link_training_settings *lt_settings,
const struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index a5c7ef47b8d3..49aad691e687 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -167,8 +167,6 @@ struct dc_stream_state {
/* TODO: custom INFO packets */
/* TODO: ABM info (DMCU) */
- /* PSR info */
- unsigned char psr_version;
/* TODO: CEA VIC */
/* DMCU info */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 0d210104ba0a..f236da1c1859 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -862,4 +862,9 @@ struct dsc_dec_dpcd_caps {
uint32_t branch_max_line_width;
};
+enum dc_psr_version {
+ DC_PSR_VERSION_1 = 0,
+ DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
+};
+
#endif /* DC_TYPES_H_ */
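The 0xFFFFFFFF sentinel makes "no PSR" explicit instead of overloading version 0. A sketch of the reduction from the DPCD-reported version, mirroring this patch's removal of the PSR2 path in dmub_psr.c (the helper itself is hypothetical):

static enum dc_psr_version dc_psr_version_from_dpcd(unsigned char dpcd_psr_version)
{
	/* any non-zero DPCD version is treated as PSR1; PSR2 is not wired up */
	return dpcd_psr_version ? DC_PSR_VERSION_1 : DC_PSR_VERSION_UNSUPPORTED;
}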
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fbfcff700971..f704a8fd52e8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o dmub_abm.o dce_panel_cntl.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index b8a3fc505c9b..4e87e70237e3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -55,7 +55,7 @@
#define MCP_DISABLE_ABM_IMMEDIATELY 255
-static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
+static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id, uint32_t panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
uint32_t rampingBoundary = 0xFFFF;
@@ -83,125 +83,12 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id)
return true;
}
-static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_abm *abm_dce)
-{
- uint64_t current_backlight;
- uint32_t round_result;
- uint32_t pwm_period_cntl, bl_period, bl_int_count;
- uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
- uint32_t bl_period_mask, bl_pwm_mask;
-
- pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
- REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
- REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
-
- bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
- REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
- REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
-
- if (bl_int_count == 0)
- bl_int_count = 16;
-
- bl_period_mask = (1 << bl_int_count) - 1;
- bl_period &= bl_period_mask;
-
- bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
-
- if (fractional_duty_cycle_en == 0)
- bl_pwm &= bl_pwm_mask;
- else
- bl_pwm &= 0xFFFF;
-
- current_backlight = bl_pwm << (1 + bl_int_count);
-
- if (bl_period == 0)
- bl_period = 0xFFFF;
-
- current_backlight = div_u64(current_backlight, bl_period);
- current_backlight = (current_backlight + 1) >> 1;
-
- current_backlight = (uint64_t)(current_backlight) * bl_period;
-
- round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
-
- round_result = (round_result >> (bl_int_count-1)) & 1;
-
- current_backlight >>= bl_int_count;
- current_backlight += round_result;
-
- return (uint32_t)(current_backlight);
-}
-
-static void driver_set_backlight_level(struct dce_abm *abm_dce,
- uint32_t backlight_pwm_u16_16)
-{
- uint32_t backlight_16bit;
- uint32_t masked_pwm_period;
- uint8_t bit_count;
- uint64_t active_duty_cycle;
- uint32_t pwm_period_bitcnt;
-
- /*
- * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
- * active duty cycle <= backlight period
- */
-
- /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
- */
- REG_GET_2(BL_PWM_PERIOD_CNTL,
- BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
- BL_PWM_PERIOD, &masked_pwm_period);
-
- if (pwm_period_bitcnt == 0)
- bit_count = 16;
- else
- bit_count = pwm_period_bitcnt;
-
- /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
- masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
-
- /* 1.2 Calculate integer active duty cycle required upper 16 bits
- * contain integer component, lower 16 bits contain fractional component
- * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
- */
- active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
-
- /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
- * components shift by bitCount then mask 16 bits and add rounding bit
- * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFF) + 0
- */
- backlight_16bit = active_duty_cycle >> bit_count;
- backlight_16bit &= 0xFFFF;
- backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
-
- /*
- * 2. Program register with updated value
- */
-
- /* 2.1 Lock group 2 backlight registers */
-
- REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
- BL_PWM_GRP1_REG_LOCK, 1);
-
- // 2.2 Write new active duty cycle
- REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
-
- /* 2.3 Unlock group 2 backlight registers */
- REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_LOCK, 0);
-
- /* 3 Wait for pending bit to be cleared */
- REG_WAIT(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
- 1, 10000);
-}
-
static void dmcu_set_backlight_level(
struct dce_abm *abm_dce,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp,
- uint32_t controller_id)
+ uint32_t controller_id,
+ uint32_t panel_id)
{
unsigned int backlight_8_bit = 0;
uint32_t s2;
@@ -213,7 +100,7 @@ static void dmcu_set_backlight_level(
// Take MSB of fractional part since backlight is not max
backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
- dce_abm_set_pipe(&abm_dce->base, controller_id);
+ dce_abm_set_pipe(&abm_dce->base, controller_id, panel_id);
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
@@ -248,10 +135,9 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
-static void dce_abm_init(struct abm *abm)
+static void dce_abm_init(struct abm *abm, uint32_t backlight)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- unsigned int backlight = calculate_16_bit_backlight_from_pwm(abm_dce);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
@@ -331,86 +217,12 @@ static bool dce_abm_set_level(struct abm *abm, uint32_t level)
return true;
}
-static bool dce_abm_immediate_disable(struct abm *abm)
+static bool dce_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
-
if (abm->dmcu_is_running == false)
return true;
- dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY);
-
- abm->stored_backlight_registers.BL_PWM_CNTL =
- REG_READ(BL_PWM_CNTL);
- abm->stored_backlight_registers.BL_PWM_CNTL2 =
- REG_READ(BL_PWM_CNTL2);
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
- REG_READ(BL_PWM_PERIOD_CNTL);
-
- REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &abm->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- return true;
-}
-
-static bool dce_abm_init_backlight(struct abm *abm)
-{
- struct dce_abm *abm_dce = TO_DCE_ABM(abm);
- uint32_t value;
-
- /* It must not be 0, so we have to restore them
- * Bios bug w/a - period resets to zero,
- * restoring to cache values which is always correct
- */
- REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
- if (value == 0 || value == 1) {
- if (abm->stored_backlight_registers.BL_PWM_CNTL != 0) {
- REG_WRITE(BL_PWM_CNTL,
- abm->stored_backlight_registers.BL_PWM_CNTL);
- REG_WRITE(BL_PWM_CNTL2,
- abm->stored_backlight_registers.BL_PWM_CNTL2);
- REG_WRITE(BL_PWM_PERIOD_CNTL,
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
- REG_UPDATE(LVTMA_PWRSEQ_REF_DIV,
- BL_PWM_REF_DIV,
- abm->stored_backlight_registers.
- LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- } else {
- /* TODO: Note: This should not really happen since VBIOS
- * should have initialized PWM registers on boot.
- */
- REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
- REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
- }
- } else {
- abm->stored_backlight_registers.BL_PWM_CNTL =
- REG_READ(BL_PWM_CNTL);
- abm->stored_backlight_registers.BL_PWM_CNTL2 =
- REG_READ(BL_PWM_CNTL2);
- abm->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
- REG_READ(BL_PWM_PERIOD_CNTL);
-
- REG_GET(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
- &abm->stored_backlight_registers.
- LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
- }
-
- /* Have driver take backlight control
- * TakeBacklightControl(true)
- */
- value = REG_READ(BIOS_SCRATCH_2);
- value |= ATOM_S2_VRI_BRIGHT_ENABLE;
- REG_WRITE(BIOS_SCRATCH_2, value);
-
- /* Enable the backlight output */
- REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
-
- /* Disable fractional pwm if configured */
- REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN,
- abm->ctx->dc->config.disable_fractional_pwm ? 0 : 1);
-
- /* Unlock group 2 backlight registers */
- REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
- BL_PWM_GRP1_REG_LOCK, 0);
+ dce_abm_set_pipe(abm, MCP_DISABLE_ABM_IMMEDIATELY, panel_inst);
return true;
}
@@ -420,21 +232,18 @@ static bool dce_abm_set_backlight_level_pwm(
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
- bool use_smooth_brightness)
+ unsigned int panel_inst)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
backlight_pwm_u16_16, backlight_pwm_u16_16);
- /* If DMCU is in reset state, DMCU is uninitialized */
- if (use_smooth_brightness)
- dmcu_set_backlight_level(abm_dce,
- backlight_pwm_u16_16,
- frame_ramp,
- controller_id);
- else
- driver_set_backlight_level(abm_dce, backlight_pwm_u16_16);
+ dmcu_set_backlight_level(abm_dce,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ controller_id,
+ panel_inst);
return true;
}
@@ -442,12 +251,12 @@ static bool dce_abm_set_backlight_level_pwm(
static const struct abm_funcs dce_funcs = {
.abm_init = dce_abm_init,
.set_abm_level = dce_abm_set_level,
- .init_backlight = dce_abm_init_backlight,
.set_pipe = dce_abm_set_pipe,
.set_backlight_level_pwm = dce_abm_set_backlight_level_pwm,
.get_current_backlight = dce_abm_get_current_backlight,
.get_target_backlight = dce_abm_get_target_backlight,
- .set_abm_immediate_disable = dce_abm_immediate_disable
+ .init_abm_config = NULL,
+ .set_abm_immediate_disable = dce_abm_immediate_disable,
};
static void dce_abm_construct(
@@ -461,10 +270,6 @@ static void dce_abm_construct(
base->ctx = ctx;
base->funcs = &dce_funcs;
- base->stored_backlight_registers.BL_PWM_CNTL = 0;
- base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
- base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
- base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
base->dmcu_is_running = false;
abm_dce->regs = regs;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index ba0caaffa24b..9718a4823372 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -30,11 +30,6 @@
#include "abm.h"
#define ABM_COMMON_REG_LIST_DCE_BASE() \
- SR(BL_PWM_PERIOD_CNTL), \
- SR(BL_PWM_CNTL), \
- SR(BL_PWM_CNTL2), \
- SR(BL_PWM_GRP1_REG_LOCK), \
- SR(LVTMA_PWRSEQ_REF_DIV), \
SR(MASTER_COMM_CNTL_REG), \
SR(MASTER_COMM_CMD_REG), \
SR(MASTER_COMM_DATA_REG1)
@@ -85,15 +80,6 @@
.field_name = reg_name ## __ ## field_name ## post_fix
#define ABM_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh) \
- ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
- ABM_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
- ABM_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
- ABM_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh), \
- ABM_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
ABM_SF(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0, mask_sh), \
ABM_SF(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE1, mask_sh), \
@@ -178,19 +164,10 @@
type ABM1_HG_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_LS_REG_READ_MISSED_FRAME_CLEAR; \
type ABM1_BL_REG_READ_MISSED_FRAME_CLEAR; \
- type BL_PWM_PERIOD; \
- type BL_PWM_PERIOD_BITCNT; \
- type BL_ACTIVE_INT_FRAC_CNT; \
- type BL_PWM_FRACTIONAL_EN; \
type MASTER_COMM_INTERRUPT; \
type MASTER_COMM_CMD_REG_BYTE0; \
type MASTER_COMM_CMD_REG_BYTE1; \
- type MASTER_COMM_CMD_REG_BYTE2; \
- type BL_PWM_REF_DIV; \
- type BL_PWM_EN; \
- type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
- type BL_PWM_GRP1_REG_LOCK; \
- type BL_PWM_GRP1_REG_UPDATE_PENDING
+ type MASTER_COMM_CMD_REG_BYTE2
struct dce_abm_shift {
ABM_REG_FIELD_LIST(uint8_t);
@@ -201,10 +178,6 @@ struct dce_abm_mask {
};
struct dce_abm_registers {
- uint32_t BL_PWM_PERIOD_CNTL;
- uint32_t BL_PWM_CNTL;
- uint32_t BL_PWM_CNTL2;
- uint32_t LVTMA_PWRSEQ_REF_DIV;
uint32_t DC_ABM1_HG_SAMPLE_RATE;
uint32_t DC_ABM1_LS_SAMPLE_RATE;
uint32_t BL1_PWM_BL_UPDATE_SAMPLE_RATE;
@@ -219,7 +192,6 @@ struct dce_abm_registers {
uint32_t MASTER_COMM_CMD_REG;
uint32_t MASTER_COMM_DATA_REG1;
uint32_t BIOS_SCRATCH_2;
- uint32_t BL_PWM_GRP1_REG_LOCK;
};
struct dce_abm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 2e992fbc0d71..d2ad0504b0de 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -1014,39 +1014,6 @@ struct pixel_rate_range_table_entry {
unsigned short div_factor;
};
-static const struct pixel_rate_range_table_entry video_optimized_pixel_rates[] = {
- // /1.001 rates
- {25170, 25180, 25200, 1000, 1001}, //25.2MHz -> 25.17
- {59340, 59350, 59400, 1000, 1001}, //59.4Mhz -> 59.340
- {74170, 74180, 74250, 1000, 1001}, //74.25Mhz -> 74.1758
- {125870, 125880, 126000, 1000, 1001}, //126Mhz -> 125.87
- {148350, 148360, 148500, 1000, 1001}, //148.5Mhz -> 148.3516
- {167830, 167840, 168000, 1000, 1001}, //168Mhz -> 167.83
- {222520, 222530, 222750, 1000, 1001}, //222.75Mhz -> 222.527
- {257140, 257150, 257400, 1000, 1001}, //257.4Mhz -> 257.1429
- {296700, 296710, 297000, 1000, 1001}, //297Mhz -> 296.7033
- {342850, 342860, 343200, 1000, 1001}, //343.2Mhz -> 342.857
- {395600, 395610, 396000, 1000, 1001}, //396Mhz -> 395.6
- {409090, 409100, 409500, 1000, 1001}, //409.5Mhz -> 409.091
- {445050, 445060, 445500, 1000, 1001}, //445.5Mhz -> 445.055
- {467530, 467540, 468000, 1000, 1001}, //468Mhz -> 467.5325
- {519230, 519240, 519750, 1000, 1001}, //519.75Mhz -> 519.231
- {525970, 525980, 526500, 1000, 1001}, //526.5Mhz -> 525.974
- {545450, 545460, 546000, 1000, 1001}, //546Mhz -> 545.455
- {593400, 593410, 594000, 1000, 1001}, //594Mhz -> 593.4066
- {623370, 623380, 624000, 1000, 1001}, //624Mhz -> 623.377
- {692300, 692310, 693000, 1000, 1001}, //693Mhz -> 692.308
- {701290, 701300, 702000, 1000, 1001}, //702Mhz -> 701.2987
- {791200, 791210, 792000, 1000, 1001}, //792Mhz -> 791.209
- {890100, 890110, 891000, 1000, 1001}, //891Mhz -> 890.1099
- {1186810, 1186820, 1188000, 1000, 1001},//1188Mhz -> 1186.8131
-
- // *1.001 rates
- {27020, 27030, 27000, 1001, 1000}, //27Mhz
- {54050, 54060, 54000, 1001, 1000}, //54Mhz
- {108100, 108110, 108000, 1001, 1000},//108Mhz
-};
-
static bool dcn20_program_pix_clk(
struct clock_source *clock_source,
struct pixel_clk_params *pix_clk_params,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index c5aa1f48593a..5479d959ec62 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -27,10 +27,6 @@
#include "dc_types.h"
-#define BL_REG_LIST()\
- SR(LVTMA_PWRSEQ_CNTL), \
- SR(LVTMA_PWRSEQ_STATE)
-
#define HWSEQ_DCEF_REG_LIST_DCE8() \
.DCFE_CLOCK_CONTROL[0] = mmCRTC0_CRTC_DCFE_CLOCK_CONTROL, \
.DCFE_CLOCK_CONTROL[1] = mmCRTC1_CRTC_DCFE_CLOCK_CONTROL, \
@@ -94,20 +90,17 @@
SRII(BLND_CONTROL, BLND, 0),\
SRII(BLND_CONTROL, BLND, 1),\
SR(BLNDV_CONTROL),\
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_DCE8_REG_LIST() \
HWSEQ_DCEF_REG_LIST_DCE8(), \
HWSEQ_BLND_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC),\
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_DCE10_REG_LIST() \
HWSEQ_DCEF_REG_LIST(), \
HWSEQ_BLND_REG_LIST(), \
- HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
- BL_REG_LIST()
+ HWSEQ_PIXEL_RATE_REG_LIST(CRTC)
#define HWSEQ_ST_REG_LIST() \
HWSEQ_DCE11_REG_LIST_BASE(), \
@@ -134,8 +127,7 @@
SR(DCHUB_FB_LOCATION),\
SR(DCHUB_AGP_BASE),\
SR(DCHUB_AGP_BOT),\
- SR(DCHUB_AGP_TOP), \
- BL_REG_LIST()
+ SR(DCHUB_AGP_TOP)
#define HWSEQ_VG20_REG_LIST() \
HWSEQ_DCE120_REG_LIST(),\
@@ -144,8 +136,7 @@
#define HWSEQ_DCE112_REG_LIST() \
HWSEQ_DCE10_REG_LIST(), \
HWSEQ_PIXEL_RATE_REG_LIST(CRTC), \
- HWSEQ_PHYPLL_REG_LIST(CRTC), \
- BL_REG_LIST()
+ HWSEQ_PHYPLL_REG_LIST(CRTC)
#define HWSEQ_DCN_REG_LIST()\
SR(REFCLK_CNTL), \
@@ -207,8 +198,7 @@
SR(D3VGA_CONTROL), \
SR(D4VGA_CONTROL), \
SR(VGA_TEST_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
#define HWSEQ_DCN2_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
@@ -273,8 +263,7 @@
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
#define HWSEQ_DCN21_REG_LIST()\
HWSEQ_DCN_REG_LIST(), \
@@ -324,15 +313,9 @@
SR(D4VGA_CONTROL), \
SR(D5VGA_CONTROL), \
SR(D6VGA_CONTROL), \
- SR(DC_IP_REQUEST_CNTL), \
- BL_REG_LIST()
+ SR(DC_IP_REQUEST_CNTL)
struct dce_hwseq_registers {
-
- /* Backlight registers */
- uint32_t LVTMA_PWRSEQ_CNTL;
- uint32_t LVTMA_PWRSEQ_STATE;
-
uint32_t DCFE_CLOCK_CONTROL[6];
uint32_t DCFEV_CLOCK_CONTROL;
uint32_t DC_MEM_GLOBAL_PWR_REQ_CNTL;
@@ -465,26 +448,18 @@ struct dce_hwseq_registers {
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PHYPLL_PIXEL_RATE_SOURCE, mask_sh),\
HWS_SF1(blk, PHYPLL_PIXEL_RATE_CNTL, PIXEL_RATE_PLL_SOURCE, mask_sh)
-#define HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
-
#define HWSEQ_DCE8_MASK_SH_LIST(mask_sh)\
.DCFE_CLOCK_ENABLE = CRTC_DCFE_CLOCK_CONTROL__CRTC_DCFE_CLOCK_ENABLE ## mask_sh, \
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_SCL_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, V_UPDATE_LOCK, BLND_DCP_GRPH_SURF_V_UPDATE_LOCK, mask_sh),\
HWS_SF(BLND_, CONTROL, BLND_MODE, mask_sh),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE10_MASK_SH_LIST(mask_sh)\
HWSEQ_DCEF_MASK_SH_LIST(mask_sh, DCFE_),\
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND_),\
- HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_)
#define HWSEQ_DCE11_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE10_MASK_SH_LIST(mask_sh),\
@@ -507,8 +482,7 @@ struct dce_hwseq_registers {
HWSEQ_BLND_MASK_SH_LIST(mask_sh, BLND0_BLND_),\
HWSEQ_PIXEL_RATE_MASK_SH_LIST(mask_sh, CRTC0_),\
HWSEQ_PHYPLL_MASK_SH_LIST(mask_sh, CRTC0_),\
- HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWSEQ_GFX9_DCHUB_MASK_SH_LIST(mask_sh)
#define HWSEQ_VG20_MASK_SH_LIST(mask_sh)\
HWSEQ_DCE12_MASK_SH_LIST(mask_sh),\
@@ -570,8 +544,7 @@ struct dce_hwseq_registers {
HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
- HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh)
#define HWSEQ_DCN2_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -630,8 +603,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN19_PG_STATUS, DOMAIN19_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN20_PG_STATUS, DOMAIN20_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN21_PG_STATUS, DOMAIN21_PGFSM_PWR_STATUS, mask_sh), \
- HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh)
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
#define HWSEQ_DCN21_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
@@ -671,10 +643,7 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN16_PG_STATUS, DOMAIN16_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
- HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
- HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
- HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh)
+ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
#define HWSEQ_REG_FIELD_LIST(type) \
type DCFE_CLOCK_ENABLE; \
@@ -706,11 +675,7 @@ struct dce_hwseq_registers {
type PF_LFB_REGION;\
type PF_MAX_REGION;\
type ENABLE_L1_TLB;\
- type SYSTEM_ACCESS_MODE;\
- type LVTMA_BLON;\
- type LVTMA_DIGON;\
- type LVTMA_DIGON_OVRD;\
- type LVTMA_PWRSEQ_TARGET_STATE_R;
+ type SYSTEM_ACCESS_MODE;
#define HWSEQ_DCN_REG_FIELD_LIST(type) \
type HUBP_VTG_SEL; \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 8527cce81c6f..8d8c84c81b34 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -118,7 +118,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.enable_hpd = dce110_link_encoder_enable_hpd,
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
- .destroy = dce110_link_encoder_destroy
+ .destroy = dce110_link_encoder_destroy,
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap
};
static enum bp_result link_transmitter_control(
@@ -1389,3 +1390,20 @@ void dce110_link_encoder_disable_hpd(struct link_encoder *enc)
set_reg_field_value(value, 0, DC_HPD_CONTROL, DC_HPD_EN);
}
+
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+ /* Raise the link rate based on the features the encoder supports */
+ if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ *link_settings = max_link_cap;
+}
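The helper starts from a safe four-lane HBR default and only raises the rate monotonically as capability flags allow. Callers reach it through the new function-table entry; a usage sketch with a hypothetical caller:

static void query_encoder_caps(struct link_encoder *enc)
{
	struct dc_link_settings caps;

	enc->funcs->get_max_link_cap(enc, &caps);
	/* caps.link_rate is now HBR, HBR2 or HBR3; lane count stays at four */
}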
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index 3c9368df4093..384389f0e2c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -271,4 +271,7 @@ void dce110_psr_program_secondary_packet(struct link_encoder *enc,
bool dce110_is_dig_enabled(struct link_encoder *enc);
+void dce110_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
new file mode 100644
index 000000000000..ebff9b1e312e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dc_dmub_srv.h"
+#include "panel_cntl.h"
+#include "dce_panel_cntl.h"
+#include "atom.h"
+
+#define TO_DCE_PANEL_CNTL(panel_cntl)\
+ container_of(panel_cntl, struct dce_panel_cntl, base)
+
+#define CTX \
+ dce_panel_cntl->base.ctx
+
+#define DC_LOGGER \
+ dce_panel_cntl->base.ctx->logger
+
+#define REG(reg)\
+ dce_panel_cntl->regs->reg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_panel_cntl->shift->field_name, dce_panel_cntl->mask->field_name
+
+static unsigned int calculate_16_bit_backlight_from_pwm(struct dce_panel_cntl *dce_panel_cntl)
+{
+ uint64_t current_backlight;
+ uint32_t round_result;
+ uint32_t pwm_period_cntl, bl_period, bl_int_count;
+ uint32_t bl_pwm_cntl, bl_pwm, fractional_duty_cycle_en;
+ uint32_t bl_period_mask, bl_pwm_mask;
+
+ pwm_period_cntl = REG_READ(BL_PWM_PERIOD_CNTL);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, &bl_period);
+ REG_GET(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, &bl_int_count);
+
+ bl_pwm_cntl = REG_READ(BL_PWM_CNTL);
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, (uint32_t *)(&bl_pwm));
+ REG_GET(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, &fractional_duty_cycle_en);
+
+ if (bl_int_count == 0)
+ bl_int_count = 16;
+
+ bl_period_mask = (1 << bl_int_count) - 1;
+ bl_period &= bl_period_mask;
+
+ bl_pwm_mask = bl_period_mask << (16 - bl_int_count);
+
+ if (fractional_duty_cycle_en == 0)
+ bl_pwm &= bl_pwm_mask;
+ else
+ bl_pwm &= 0xFFFF;
+
+ current_backlight = bl_pwm << (1 + bl_int_count);
+
+ if (bl_period == 0)
+ bl_period = 0xFFFF;
+
+ current_backlight = div_u64(current_backlight, bl_period);
+ current_backlight = (current_backlight + 1) >> 1;
+
+ current_backlight = (uint64_t)(current_backlight) * bl_period;
+
+ round_result = (uint32_t)(current_backlight & 0xFFFFFFFF);
+
+ round_result = (round_result >> (bl_int_count-1)) & 1;
+
+ current_backlight >>= bl_int_count;
+ current_backlight += round_result;
+
+ return (uint32_t)(current_backlight);
+}
+
+uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t value;
+ uint32_t current_backlight;
+
+ /* The PWM period must not be 0, so restore it when needed.
+ * BIOS bug workaround - the period resets to zero, and the
+ * cached values we restore from are always correct.
+ */
+ REG_GET(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, &value);
+
+ if (value == 0 || value == 1) {
+ if (panel_cntl->stored_backlight_registers.BL_PWM_CNTL != 0) {
+ REG_WRITE(BL_PWM_CNTL,
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL);
+ REG_WRITE(BL_PWM_CNTL2,
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2);
+ REG_WRITE(BL_PWM_PERIOD_CNTL,
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL);
+ REG_UPDATE(PWRSEQ_REF_DIV,
+ BL_PWM_REF_DIV,
+ panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ } else {
+ /* TODO: Note: This should not really happen since VBIOS
+ * should have initialized PWM registers on boot.
+ */
+ REG_WRITE(BL_PWM_CNTL, 0xC000FA00);
+ REG_WRITE(BL_PWM_PERIOD_CNTL, 0x000C0FA0);
+ }
+ } else {
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+ }
+
+ // Have driver take backlight control
+ // TakeBacklightControl(true)
+ value = REG_READ(BIOS_SCRATCH_2);
+ value |= ATOM_S2_VRI_BRIGHT_ENABLE;
+ REG_WRITE(BIOS_SCRATCH_2, value);
+
+ // Enable the backlight output
+ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1);
+
+ // Unlock group 2 backlight registers
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ current_backlight = calculate_16_bit_backlight_from_pwm(dce_panel_cntl);
+
+ return current_backlight;
+}
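With the PWM bookkeeping moved out of the ABM block, bring-up becomes a two-step handoff: panel_cntl reads back the current level, and ABM init consumes it. A sketch of the intended flow (the real call site lives in the hwseq code, outside this excerpt):

static void init_backlight_path(struct panel_cntl *panel_cntl, struct abm *abm)
{
	/* panel_cntl owns the PWM registers and reports the current level... */
	uint32_t backlight = panel_cntl->funcs->hw_init(panel_cntl);

	/* ...which ABM now receives instead of decoding the PWM itself */
	abm->funcs->abm_init(abm, backlight);
}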
+
+bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t value;
+
+ REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+
+ return value;
+}
+
+bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+ uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
+
+ REG_GET(PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
+
+ REG_GET_2(PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
+
+ return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
+}
+
+void dce_store_backlight_level(struct panel_cntl *panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL =
+ REG_READ(BL_PWM_CNTL);
+ panel_cntl->stored_backlight_registers.BL_PWM_CNTL2 =
+ REG_READ(BL_PWM_CNTL2);
+ panel_cntl->stored_backlight_registers.BL_PWM_PERIOD_CNTL =
+ REG_READ(BL_PWM_PERIOD_CNTL);
+
+ REG_GET(PWRSEQ_REF_DIV, BL_PWM_REF_DIV,
+ &panel_cntl->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV);
+}
+
+void dce_driver_set_backlight(struct panel_cntl *panel_cntl,
+ uint32_t backlight_pwm_u16_16)
+{
+ uint32_t backlight_16bit;
+ uint32_t masked_pwm_period;
+ uint8_t bit_count;
+ uint64_t active_duty_cycle;
+ uint32_t pwm_period_bitcnt;
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
+
+ /*
+ * 1. Find 16 bit backlight active duty cycle, where 0 <= backlight
+ * active duty cycle <= backlight period
+ */
+
+ /* 1.1 Apply bitmask for backlight period value based on value of BITCNT
+ */
+ REG_GET_2(BL_PWM_PERIOD_CNTL,
+ BL_PWM_PERIOD_BITCNT, &pwm_period_bitcnt,
+ BL_PWM_PERIOD, &masked_pwm_period);
+
+ if (pwm_period_bitcnt == 0)
+ bit_count = 16;
+ else
+ bit_count = pwm_period_bitcnt;
+
+ /* e.g. maskedPwmPeriod = 0x24 when bitCount is 6 */
+ masked_pwm_period = masked_pwm_period & ((1 << bit_count) - 1);
+
+ /* 1.2 Calculate integer active duty cycle required upper 16 bits
+ * contain integer component, lower 16 bits contain fractional component
+ * of active duty cycle e.g. 0x21BDC0 = 0xEFF0 * 0x24
+ */
+ active_duty_cycle = backlight_pwm_u16_16 * masked_pwm_period;
+
+ /* 1.3 Calculate 16 bit active duty cycle from integer and fractional
+ * components shift by bitCount then mask 16 bits and add rounding bit
+ * from MSB of fraction e.g. 0x86F7 = ((0x21BDC0 >> 6) & 0xFFFF) + 0
+ */
+ backlight_16bit = active_duty_cycle >> bit_count;
+ backlight_16bit &= 0xFFFF;
+ backlight_16bit += (active_duty_cycle >> (bit_count - 1)) & 0x1;
+
+ /*
+ * 2. Program register with updated value
+ */
+
+ /* 2.1 Lock group 2 backlight registers */
+
+ REG_UPDATE_2(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, 1,
+ BL_PWM_GRP1_REG_LOCK, 1);
+
+ // 2.2 Write new active duty cycle
+ REG_UPDATE(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, backlight_16bit);
+
+ /* 2.3 Unlock group 2 backlight registers */
+ REG_UPDATE(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_LOCK, 0);
+
+ /* 3 Wait for pending bit to be cleared */
+ REG_WAIT(BL_PWM_GRP1_REG_LOCK,
+ BL_PWM_GRP1_REG_UPDATE_PENDING, 0,
+ 1, 10000);
+}
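The hex examples in the comments check out; a standalone snippet reproducing the bit_count == 6 case (illustrative only, not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t active_duty_cycle = (uint64_t)0xEFF0 * 0x24;	/* 0x21BDC0 */
	uint32_t backlight_16bit = (active_duty_cycle >> 6) & 0xFFFF;

	backlight_16bit += (active_duty_cycle >> 5) & 0x1;	/* rounding bit is 0 here */

	assert(active_duty_cycle == 0x21BDC0);
	assert(backlight_16bit == 0x86F7);
	return 0;
}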
+
+static void dce_panel_cntl_destroy(struct panel_cntl **panel_cntl)
+{
+ struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(*panel_cntl);
+
+ kfree(dce_panel_cntl);
+ *panel_cntl = NULL;
+}
+
+static const struct panel_cntl_funcs dce_link_panel_cntl_funcs = {
+ .destroy = dce_panel_cntl_destroy,
+ .hw_init = dce_panel_cntl_hw_init,
+ .is_panel_backlight_on = dce_is_panel_backlight_on,
+ .is_panel_powered_on = dce_is_panel_powered_on,
+ .store_backlight_level = dce_store_backlight_level,
+ .driver_set_backlight = dce_driver_set_backlight,
+};
+
+void dce_panel_cntl_construct(
+ struct dce_panel_cntl *dce_panel_cntl,
+ const struct panel_cntl_init_data *init_data,
+ const struct dce_panel_cntl_registers *regs,
+ const struct dce_panel_cntl_shift *shift,
+ const struct dce_panel_cntl_mask *mask)
+{
+ struct panel_cntl *base = &dce_panel_cntl->base;
+
+ base->stored_backlight_registers.BL_PWM_CNTL = 0;
+ base->stored_backlight_registers.BL_PWM_CNTL2 = 0;
+ base->stored_backlight_registers.BL_PWM_PERIOD_CNTL = 0;
+ base->stored_backlight_registers.LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV = 0;
+
+ dce_panel_cntl->regs = regs;
+ dce_panel_cntl->shift = shift;
+ dce_panel_cntl->mask = mask;
+
+ dce_panel_cntl->base.funcs = &dce_link_panel_cntl_funcs;
+ dce_panel_cntl->base.ctx = init_data->ctx;
+ dce_panel_cntl->base.inst = init_data->inst;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
new file mode 100644
index 000000000000..70ec691e14d2
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_PANEL_CNTL__DCE_H__
+#define __DC_PANEL_CNTL__DCE_H__
+
+#include "panel_cntl.h"
+
+/* set register offset with instance */
+#define DCE_PANEL_CNTL_SR(reg_name, block)\
+ .reg_name = mm ## block ## _ ## reg_name
+
+#define DCE_PANEL_CNTL_REG_LIST()\
+ DCE_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(BIOS_SCRATCH_2)
+
+#define DCN_PANEL_CNTL_SR(reg_name, block)\
+ .reg_name = BASE(mm ## block ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## _ ## reg_name
+
+#define DCN_PANEL_CNTL_REG_LIST()\
+ DCN_PANEL_CNTL_SR(PWRSEQ_CNTL, LVTMA), \
+ DCN_PANEL_CNTL_SR(PWRSEQ_STATE, LVTMA), \
+ DCE_PANEL_CNTL_SR(PWRSEQ_REF_DIV, LVTMA), \
+ SR(BL_PWM_CNTL), \
+ SR(BL_PWM_CNTL2), \
+ SR(BL_PWM_PERIOD_CNTL), \
+ SR(BL_PWM_GRP1_REG_LOCK), \
+ SR(BIOS_SCRATCH_2)
+
+#define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
+ DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD_BITCNT, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_ACTIVE_INT_FRAC_CNT, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_CNTL, BL_PWM_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, mask_sh), \
+ DCE_PANEL_CNTL_SF(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_UPDATE_PENDING, mask_sh)
+
+#define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
+ type LVTMA_BLON;\
+ type LVTMA_DIGON;\
+ type LVTMA_DIGON_OVRD;\
+ type LVTMA_PWRSEQ_TARGET_STATE_R; \
+ type BL_PWM_REF_DIV; \
+ type BL_PWM_EN; \
+ type BL_ACTIVE_INT_FRAC_CNT; \
+ type BL_PWM_FRACTIONAL_EN; \
+ type BL_PWM_PERIOD; \
+ type BL_PWM_PERIOD_BITCNT; \
+ type BL_PWM_GRP1_IGNORE_MASTER_LOCK_EN; \
+ type BL_PWM_GRP1_REG_LOCK; \
+ type BL_PWM_GRP1_REG_UPDATE_PENDING
+
+struct dce_panel_cntl_shift {
+ DCE_PANEL_CNTL_REG_FIELD_LIST(uint8_t);
+};
+
+struct dce_panel_cntl_mask {
+ DCE_PANEL_CNTL_REG_FIELD_LIST(uint32_t);
+};
+
+struct dce_panel_cntl_registers {
+ uint32_t PWRSEQ_CNTL;
+ uint32_t PWRSEQ_STATE;
+ uint32_t BL_PWM_CNTL;
+ uint32_t BL_PWM_CNTL2;
+ uint32_t BL_PWM_PERIOD_CNTL;
+ uint32_t BL_PWM_GRP1_REG_LOCK;
+ uint32_t PWRSEQ_REF_DIV;
+ uint32_t BIOS_SCRATCH_2;
+};
+
+struct dce_panel_cntl {
+ struct panel_cntl base;
+ const struct dce_panel_cntl_registers *regs;
+ const struct dce_panel_cntl_shift *shift;
+ const struct dce_panel_cntl_mask *mask;
+};
+
+void dce_panel_cntl_construct(
+ struct dce_panel_cntl *panel_cntl,
+ const struct panel_cntl_init_data *init_data,
+ const struct dce_panel_cntl_registers *regs,
+ const struct dce_panel_cntl_shift *shift,
+ const struct dce_panel_cntl_mask *mask);
+
+#endif /* __DC_PANEL_CNTL__DCE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 451574971b96..4cdaaf4d881c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -1336,7 +1336,6 @@ static void dce110_se_audio_setup(
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -1344,7 +1343,6 @@ static void dce110_se_audio_setup(
/* This should not happen, but it does; return early so we don't get a BSOD */
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
new file mode 100644
index 000000000000..da0b29abfbda
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub_abm.h"
+#include "dce_abm.h"
+#include "dc.h"
+#include "dc_dmub_srv.h"
+#include "dmub/dmub_srv.h"
+#include "core_types.h"
+#include "dm_services.h"
+#include "reg_helper.h"
+#include "fixed31_32.h"
+
+#include "atom.h"
+
+#define TO_DMUB_ABM(abm)\
+ container_of(abm, struct dce_abm, base)
+
+#define REG(reg) \
+ (dce_abm->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dce_abm->abm_shift->field_name, dce_abm->abm_mask->field_name
+
+#define CTX \
+ dce_abm->base.ctx
+
+#define DISABLE_ABM_IMMEDIATELY 255
+
+static bool dmub_abm_set_pipe(struct abm *abm, uint32_t otg_inst, uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint32_t ramping_boundary = 0xFFFF;
+
+ cmd.abm_set_pipe.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pipe.header.sub_type = DMUB_CMD__ABM_SET_PIPE;
+ cmd.abm_set_pipe.abm_set_pipe_data.otg_inst = otg_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.panel_inst = panel_inst;
+ cmd.abm_set_pipe.abm_set_pipe_data.ramping_boundary = ramping_boundary;
+ cmd.abm_set_pipe.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pipe_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static void dmcub_set_backlight_level(
+ struct dce_abm *dce_abm,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp,
+ uint32_t otg_inst,
+ uint32_t panel_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dce_abm->base.ctx;
+ unsigned int backlight_8_bit = 0;
+ uint32_t s2;
+
+ if (backlight_pwm_u16_16 & 0x10000)
+ // Check for max backlight condition
+ backlight_8_bit = 0xFF;
+ else
+ // Take MSB of fractional part since backlight is not max
+ backlight_8_bit = (backlight_pwm_u16_16 >> 8) & 0xFF;
+
+ dmub_abm_set_pipe(&dce_abm->base, otg_inst, panel_inst);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL, BL1_PWM_USER_LEVEL, backlight_pwm_u16_16);
+
+ if (otg_inst == 0)
+ frame_ramp = 0;
+
+ cmd.abm_set_backlight.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_backlight.header.sub_type = DMUB_CMD__ABM_SET_BACKLIGHT;
+ cmd.abm_set_backlight.abm_set_backlight_data.frame_ramp = frame_ramp;
+ cmd.abm_set_backlight.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_backlight_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ // Update requested backlight level
+ s2 = REG_READ(BIOS_SCRATCH_2);
+
+ s2 &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
+ backlight_8_bit &= (ATOM_S2_CURRENT_BL_LEVEL_MASK >>
+ ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
+}
+
+static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
+{
+ union dmub_rb_cmd cmd;
+ uint32_t fractional_pwm = (dc->dc->config.disable_fractional_pwm == false) ? 1 : 0;
+
+ cmd.abm_set_pwm_frac.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_pwm_frac.header.sub_type = DMUB_CMD__ABM_SET_PWM_FRAC;
+ cmd.abm_set_pwm_frac.abm_set_pwm_frac_data.fractional_pwm = fractional_pwm;
+ cmd.abm_set_pwm_frac.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_pwm_frac_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
+static void dmub_abm_init(struct abm *abm, uint32_t backlight)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_HG_SAMPLE_RATE, 0x101);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x103);
+ REG_WRITE(DC_ABM1_LS_SAMPLE_RATE, 0x101);
+ REG_WRITE(BL1_PWM_BL_UPDATE_SAMPLE_RATE, 0x101);
+
+ REG_SET_3(DC_ABM1_HG_MISC_CTRL, 0,
+ ABM1_HG_NUM_OF_BINS_SEL, 0,
+ ABM1_HG_VMAX_SEL, 1,
+ ABM1_HG_BIN_BITWIDTH_SIZE_SEL, 0);
+
+ REG_SET_3(DC_ABM1_IPCSC_COEFF_SEL, 0,
+ ABM1_IPCSC_COEFF_SEL_R, 2,
+ ABM1_IPCSC_COEFF_SEL_G, 4,
+ ABM1_IPCSC_COEFF_SEL_B, 2);
+
+ REG_UPDATE(BL1_PWM_CURRENT_ABM_LEVEL,
+ BL1_PWM_CURRENT_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_TARGET_ABM_LEVEL,
+ BL1_PWM_TARGET_ABM_LEVEL, backlight);
+
+ REG_UPDATE(BL1_PWM_USER_LEVEL,
+ BL1_PWM_USER_LEVEL, backlight);
+
+ REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
+ ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
+ ABM1_LS_MAX_PIXEL_VALUE_THRES, 1000);
+
+ REG_SET_3(DC_ABM1_HGLS_REG_READ_PROGRESS, 0,
+ ABM1_HG_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_LS_REG_READ_MISSED_FRAME_CLEAR, 1,
+ ABM1_BL_REG_READ_MISSED_FRAME_CLEAR, 1);
+
+ dmub_abm_enable_fractional_pwm(abm->ctx);
+}
+
+static unsigned int dmub_abm_get_current_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_CURRENT_ABM_LEVEL);
+
+ /* Return the backlight in hardware format: an unsigned 17-bit value
+ * with 1 integer bit and 16 fractional bits.
+ */
+ return backlight;
+}
+
+static unsigned int dmub_abm_get_target_backlight(struct abm *abm)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+ unsigned int backlight = REG_READ(BL1_PWM_TARGET_ABM_LEVEL);
+
+ /* Return the backlight in hardware format: an unsigned 17-bit value
+ * with 1 integer bit and 16 fractional bits.
+ */
+ return backlight;
+}
+
+static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ cmd.abm_set_level.header.type = DMUB_CMD__ABM;
+ cmd.abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL;
+ cmd.abm_set_level.abm_set_level_data.level = level;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_set_level_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static bool dmub_abm_immediate_disable(struct abm *abm, uint32_t panel_inst)
+{
+ dmub_abm_set_pipe(abm, DISABLE_ABM_IMMEDIATELY, panel_inst);
+
+ return true;
+}
+
+static bool dmub_abm_set_backlight_level_pwm(
+ struct abm *abm,
+ unsigned int backlight_pwm_u16_16,
+ unsigned int frame_ramp,
+ unsigned int otg_inst,
+ uint32_t panel_inst)
+{
+ struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
+
+ dmcub_set_backlight_level(dce_abm,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ otg_inst,
+ panel_inst);
+
+ return true;
+}
+
+static bool dmub_abm_init_config(struct abm *abm,
+ const char *src,
+ unsigned int bytes)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+
+ // TODO: Optimize by only reading back final 4 bytes
+ dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
+
+ // Copy iramtable into cw7
+ memcpy(dc->dmub_srv->dmub->scratch_mem_fb.cpu_addr, (void *)src, bytes);
+
+ // Fw will copy from cw7 to fw_state
+ cmd.abm_init_config.header.type = DMUB_CMD__ABM;
+ cmd.abm_init_config.header.sub_type = DMUB_CMD__ABM_INIT_CONFIG;
+ cmd.abm_init_config.abm_init_config_data.src.quad_part = dc->dmub_srv->dmub->scratch_mem_fb.gpu_addr;
+ cmd.abm_init_config.abm_init_config_data.bytes = bytes;
+ cmd.abm_init_config.header.payload_bytes = sizeof(struct dmub_cmd_abm_init_config_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
+static const struct abm_funcs abm_funcs = {
+ .abm_init = dmub_abm_init,
+ .set_abm_level = dmub_abm_set_level,
+ .set_pipe = dmub_abm_set_pipe,
+ .set_backlight_level_pwm = dmub_abm_set_backlight_level_pwm,
+ .get_current_backlight = dmub_abm_get_current_backlight,
+ .get_target_backlight = dmub_abm_get_target_backlight,
+ .set_abm_immediate_disable = dmub_abm_immediate_disable,
+ .init_abm_config = dmub_abm_init_config,
+};
+
+static void dmub_abm_construct(
+ struct dce_abm *abm_dce,
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct abm *base = &abm_dce->base;
+
+ base->ctx = ctx;
+ base->funcs = &abm_funcs;
+ base->dmcu_is_running = false;
+
+ abm_dce->regs = regs;
+ abm_dce->abm_shift = abm_shift;
+ abm_dce->abm_mask = abm_mask;
+}
+
+struct abm *dmub_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask)
+{
+ struct dce_abm *abm_dce = kzalloc(sizeof(*abm_dce), GFP_KERNEL);
+
+ if (abm_dce == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dmub_abm_construct(abm_dce, ctx, regs, abm_shift, abm_mask);
+
+ return &abm_dce->base;
+}
+
+void dmub_abm_destroy(struct abm **abm)
+{
+ struct dce_abm *abm_dce = TO_DMUB_ABM(*abm);
+
+ kfree(abm_dce);
+ *abm = NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
index 26583f346c39..3a5d5ac7a86e 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
+ * Copyright 2019 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -23,17 +23,18 @@
*
*/
-#include "core_types.h"
-#include "logger.h"
-#include "include/logger_interface.h"
-#include "dm_helpers.h"
+#ifndef __DMUB_ABM_H__
+#define __DMUB_ABM_H__
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count)
-{
- int i;
+#include "abm.h"
+#include "dce_abm.h"
- if (hex_data)
- for (i = 0; i < hex_data_count; i++)
- DC_LOG_DEBUG("%2.2X ", hex_data[i]);
-}
+struct abm *dmub_abm_create(
+ struct dc_context *ctx,
+ const struct dce_abm_registers *regs,
+ const struct dce_abm_shift *abm_shift,
+ const struct dce_abm_mask *abm_mask);
+void dmub_abm_destroy(struct abm **abm);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index bc109d4fc6e6..044a0133ebb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -26,13 +26,51 @@
#include "dmub_psr.h"
#include "dc.h"
#include "dc_dmub_srv.h"
-#include "../../dmub/inc/dmub_srv.h"
-#include "../../dmub/inc/dmub_gpint_cmd.h"
+#include "dmub/dmub_srv.h"
#include "core_types.h"
#define MAX_PIPES 6
/**
+ * Convert the DMCUB PSR state to the DMCU PSR state encoding.
+ */
+static void convert_psr_state(uint32_t *psr_state)
+{
+ if (*psr_state == 0)
+ *psr_state = 0;
+ else if (*psr_state == 0x10)
+ *psr_state = 1;
+ else if (*psr_state == 0x11)
+ *psr_state = 2;
+ else if (*psr_state == 0x20)
+ *psr_state = 3;
+ else if (*psr_state == 0x21)
+ *psr_state = 4;
+ else if (*psr_state == 0x30)
+ *psr_state = 5;
+ else if (*psr_state == 0x31)
+ *psr_state = 6;
+ else if (*psr_state == 0x40)
+ *psr_state = 7;
+ else if (*psr_state == 0x41)
+ *psr_state = 8;
+ else if (*psr_state == 0x42)
+ *psr_state = 9;
+ else if (*psr_state == 0x43)
+ *psr_state = 10;
+ else if (*psr_state == 0x44)
+ *psr_state = 11;
+ else if (*psr_state == 0x50)
+ *psr_state = 12;
+ else if (*psr_state == 0x51)
+ *psr_state = 13;
+ else if (*psr_state == 0x52)
+ *psr_state = 14;
+ else if (*psr_state == 0x53)
+ *psr_state = 15;
+}
+
+/**
* Get PSR state from firmware.
*/
static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
@@ -43,6 +81,8 @@ static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
dmub_srv_get_gpint_response(srv, psr_state);
+
+ convert_psr_state(psr_state);
}
/**
@@ -53,19 +93,23 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
+ if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED)
+ return false;
+
cmd.psr_set_version.header.type = DMUB_CMD__PSR;
cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
-
- if (stream->psr_version == 0x0) // Unsupported
- return false;
- else if (stream->psr_version == 0x1)
+ switch (stream->link->psr_settings.psr_version) {
+ case DC_PSR_VERSION_1:
cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
- else if (stream->psr_version == 0x2)
- cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
-
- cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+ break;
+ case DC_PSR_VERSION_UNSUPPORTED:
+ default:
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED;
+ break;
+ }
+ cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
@@ -89,7 +133,7 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
cmd.psr_enable.header.payload_bytes = 0; // Send header only
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
@@ -113,7 +157,7 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
cmd.psr_set_level.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_level_data);
cmd.psr_set_level.psr_set_level_data.psr_level = psr_level;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_set_level.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
@@ -162,7 +206,7 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
cmd.psr_copy_settings.header.payload_bytes = sizeof(struct dmub_cmd_psr_copy_settings_data);
// Hw insts
- copy_settings_data->dpphy_inst = psr_context->phyType;
+ copy_settings_data->dpphy_inst = psr_context->transmitterId;
copy_settings_data->aux_inst = psr_context->channel;
copy_settings_data->digfe_inst = psr_context->engineId;
copy_settings_data->digbe_inst = psr_context->transmitterId;
@@ -187,8 +231,10 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
+ copy_settings_data->debug.visual_confirm =
+ dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR;
- dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
dc_dmub_srv_wait_idle(dc->dmub_srv);
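
Note: convert_psr_state() above encodes a flat DMCUB-to-DMCU state mapping (0x10 -> 1, 0x11 -> 2, ..., 0x53 -> 15), leaving unrecognized values untouched. The if/else ladder could equally be written as a lookup table; a behavior-equivalent sketch, not part of this patch:

	static void convert_psr_state_tabular(uint32_t *psr_state)
	{
		static const struct { uint32_t dmcub, dmcu; } map[] = {
			{ 0x00, 0 },  { 0x10, 1 },  { 0x11, 2 },  { 0x20, 3 },
			{ 0x21, 4 },  { 0x30, 5 },  { 0x31, 6 },  { 0x40, 7 },
			{ 0x41, 8 },  { 0x42, 9 },  { 0x43, 10 }, { 0x44, 11 },
			{ 0x50, 12 }, { 0x51, 13 }, { 0x52, 14 }, { 0x53, 15 },
		};
		int i;

		for (i = 0; i < ARRAY_SIZE(map); i++) {
			if (*psr_state == map[i].dmcub) {
				*psr_state = map[i].dmcu;
				return;
			}
		}
		/* unknown firmware states are left as-is, matching the ladder */
	}
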
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8f78bf9abbca..a28c4ae0f259 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -46,6 +46,7 @@
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "dce100/dce100_hw_sequencer.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -249,6 +250,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_100_REG_LIST(id),\
@@ -627,6 +640,23 @@ struct link_encoder *dce100_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce100_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct output_pixel_processor *dce100_opp_create(
struct dc_context *ctx,
uint32_t inst)
@@ -943,6 +973,7 @@ struct stream_encoder *dce100_find_first_free_match_stream_enc_for_link(
static const struct resource_funcs dce100_res_pool_funcs = {
.destroy = dce100_destroy_resource_pool,
.link_enc_create = dce100_link_encoder_create,
+ .panel_cntl_create = dce100_panel_cntl_create,
.validate_bandwidth = dce100_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
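
Note: the same panel_cntl_create() factory is repeated for each resource pool touched below (dce110/dce112/dce120/dce80/dcn10); only the register list macro differs (DCE_ vs DCN_PANEL_CNTL_REG_LIST). A hedged caller-side sketch of how a link might obtain its panel controller through the new pool hook; the panel_cntl_init_data fields shown are assumptions for illustration:

	/* Sketch: link construction asking the pool for a panel controller. */
	struct panel_cntl_init_data init_data = {
		.ctx = dc->ctx,
		.inst = 0, /* assumed: panel instance indexing panel_cntl_regs[] */
	};

	link->panel_cntl = dc->res_pool->funcs->panel_cntl_create(&init_data);
	/* hwseq paths in this patch tolerate a NULL panel_cntl, so no BUG here */
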
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index c279982947e1..b77e9dc16086 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -53,6 +53,7 @@
#include "abm.h"
#include "audio.h"
#include "reg_helper.h"
+#include "panel_cntl.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -697,31 +698,6 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
-/*todo: cloned in stream enc, fix*/
-bool dce110_is_panel_backlight_on(struct dc_link *link)
-{
- struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
- uint32_t value;
-
- REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
-
- return value;
-}
-
-bool dce110_is_panel_powered_on(struct dc_link *link)
-{
- struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
- uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
- REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
-
- REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
-
- return (pwr_seq_state == 1) || (dig_on == 1 && dig_on_ovrd == 1);
-}
-
static enum bp_result link_transmitter_control(
struct dc_bios *bios,
struct bp_transmitter_control *cntl)
@@ -810,7 +786,6 @@ void dce110_edp_power_control(
bool power_up)
{
struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hwseq = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
enum bp_result bp_result;
@@ -821,7 +796,11 @@ void dce110_edp_power_control(
return;
}
- if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
+ if (!link->panel_cntl)
+ return;
+
+ if (power_up !=
+ link->panel_cntl->funcs->is_panel_powered_on(link->panel_cntl)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -892,7 +871,6 @@ void dce110_edp_backlight_control(
bool enable)
{
struct dc_context *ctx = link->ctx;
- struct dce_hwseq *hws = ctx->dc->hwseq;
struct bp_transmitter_control cntl = { 0 };
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
@@ -901,7 +879,8 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && hws->funcs.is_panel_backlight_on(link)) {
+ if (enable && link->panel_cntl &&
+ link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -1087,7 +1066,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, false);
- dc_link_set_abm_disable(link);
+ link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1432,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
- pipe_ctx->stream->link->psr_feature_enabled = false;
+ pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
return DC_OK;
}
@@ -1838,7 +1817,7 @@ static bool should_enable_fbc(struct dc *dc,
return false;
/* PSR should not be enabled */
- if (pipe_ctx->stream->link->psr_feature_enabled)
+ if (pipe_ctx->stream->link->psr_settings.psr_feature_enabled)
return false;
/* Nothing to compress */
@@ -2376,6 +2355,7 @@ static void init_hw(struct dc *dc)
struct abm *abm;
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2422,12 +2402,17 @@ static void init_hw(struct dc *dc)
audio->funcs->hw_init(audio);
}
- abm = dc->res_pool->abm;
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
+ abm = dc->res_pool->abm;
+ if (abm != NULL)
+ abm->funcs->abm_init(abm, backlight);
+
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
@@ -2735,6 +2720,53 @@ void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.xfm, attributes);
}
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc *dc = link->ctx->dc;
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = link->panel_cntl;
+ struct dmcu *dmcu = dc->res_pool->dmcu;
+ bool fw_set_brightness = true;
+ /* The DMCU firmware offsets controller id values by -1,
+ * therefore +1 here
+ */
+ uint32_t controller_id = pipe_ctx->stream_res.tg->inst + 1;
+
+ if (abm == NULL || panel_cntl == NULL || (abm->funcs->set_backlight_level_pwm == NULL))
+ return false;
+
+ if (dmcu)
+ fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);
+
+ if (!fw_set_brightness && panel_cntl->funcs->driver_set_backlight)
+ panel_cntl->funcs->driver_set_backlight(panel_cntl, backlight_pwm_u16_16);
+ else
+ abm->funcs->set_backlight_level_pwm(
+ abm,
+ backlight_pwm_u16_16,
+ frame_ramp,
+ controller_id,
+ link->panel_cntl->inst);
+
+ return true;
+}
+
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx)
+{
+ struct abm *abm = pipe_ctx->stream_res.abm;
+ struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
+
+ if (abm)
+ abm->funcs->set_abm_immediate_disable(abm,
+ pipe_ctx->stream->link->panel_cntl->inst);
+
+ if (panel_cntl)
+ panel_cntl->funcs->store_backlight_level(panel_cntl);
+}
+
static const struct hw_sequencer_funcs dce110_funcs = {
.program_gamut_remap = program_gamut_remap,
.program_output_csc = program_output_csc,
@@ -2757,6 +2789,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
.interdependent_update_lock = NULL,
+ .cursor_lock = dce_pipe_control_lock,
.prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
@@ -2768,7 +2801,9 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.edp_power_control = dce110_edp_power_control,
.edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
.set_cursor_position = dce110_set_cursor_position,
- .set_cursor_attribute = dce110_set_cursor_attribute
+ .set_cursor_attribute = dce110_set_cursor_attribute,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
@@ -2784,8 +2819,6 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
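
Note: dce110_set_backlight_level() above routes the request down one of two paths — when the DMCU firmware is initialized, the ABM programs the PWM via set_backlight_level_pwm(); otherwise the new panel_cntl driver_set_backlight() hook writes it directly. Callers now go through the hwseq table rather than poking the ABM; a hedged sketch with an assumed mid-scale level:

	/* Sketch: 50% in the 16.16 fixed-point format used here (full scale
	 * assumed), with no frame ramp. */
	uint32_t backlight_pwm_u16_16 = 0x8000;

	if (dc->hwss.set_backlight_level)
		dc->hwss.set_backlight_level(pipe_ctx, backlight_pwm_u16_16, 0);
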
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 34be166e8ff0..fe5326df00f7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,9 +85,10 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
-bool dce110_is_panel_backlight_on(struct dc_link *link);
-
-bool dce110_is_panel_powered_on(struct dc_link *link);
+bool dce110_set_backlight_level(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+void dce110_set_abm_immediate_disable(struct pipe_ctx *pipe_ctx);
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
index 4245e1f818a3..e096d2b95ef9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_opp_csc_v.c
@@ -679,8 +679,7 @@ void dce110_opp_v_set_csc_default(
if (default_adjust->force_hw_default == false) {
const struct out_csc_color_matrix *elm;
 /* parameter currently not in use */
- enum grph_color_adjust_option option =
- GRPH_COLOR_MATRIX_HW_DEFAULT;
+ enum grph_color_adjust_option option;
uint32_t i;
/*
* HW default false we program locally defined matrix
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index bf14e9ab040c..9597fc79d7fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
#include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -275,6 +276,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE110(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCE_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -673,6 +686,23 @@ static struct link_encoder *dce110_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce110_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct output_pixel_processor *dce110_opp_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1203,6 +1233,7 @@ struct stream_encoder *dce110_find_first_free_match_stream_enc_for_link(
static const struct resource_funcs dce110_res_pool_funcs = {
.destroy = dce110_destroy_resource_pool,
.link_enc_create = dce110_link_encoder_create,
+ .panel_cntl_create = dce110_panel_cntl_create,
.validate_bandwidth = dce110_validate_bandwidth,
.validate_plane = dce110_validate_plane,
.acquire_idle_pipe_for_layer = dce110_acquire_underlay,
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 700ad8b3e54b..51b3fe502670 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -51,6 +51,7 @@
#include "dce/dce_dmcu.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -238,6 +239,18 @@ static const struct dce110_link_enc_aux_registers link_enc_aux_regs[] = {
aux_regs(5)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define hpd_regs(id)\
[id] = {\
HPD_REG_LIST(id)\
@@ -398,7 +411,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -631,6 +644,23 @@ struct link_encoder *dce112_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce112_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct input_pixel_processor *dce112_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -1021,6 +1051,7 @@ static void dce112_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce112_res_pool_funcs = {
.destroy = dce112_destroy_resource_pool,
.link_enc_create = dce112_link_encoder_create,
+ .panel_cntl_create = dce112_panel_cntl_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 53ab88ef71f5..8f362e8c1787 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -44,6 +44,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_ipp.h"
#include "dce/dce_mem_input.h"
+#include "dce/dce_panel_cntl.h"
#include "dce110/dce110_hw_sequencer.h"
#include "dce120/dce120_hw_sequencer.h"
@@ -293,6 +294,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE120(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCE12_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -503,7 +516,7 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = false,
- .fp16 = false
+ .fp16 = true
},
.max_upscale_factor = {
@@ -715,6 +728,23 @@ static struct link_encoder *dce120_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce120_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
static struct input_pixel_processor *dce120_ipp_create(
struct dc_context *ctx, uint32_t inst)
{
@@ -880,6 +910,7 @@ static void dce120_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce120_res_pool_funcs = {
.destroy = dce120_destroy_resource_pool,
.link_enc_create = dce120_link_encoder_create,
+ .panel_cntl_create = dce120_panel_cntl_create,
.validate_bandwidth = dce112_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce112_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
index 893261c81854..d2ceebdbdf51 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c
@@ -36,34 +36,6 @@
#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"
-struct dce80_hw_seq_reg_offsets {
- uint32_t crtc;
-};
-
-static const struct dce80_hw_seq_reg_offsets reg_offsets[] = {
-{
- .crtc = (mmCRTC0_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC1_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC2_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC3_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC4_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-},
-{
- .crtc = (mmCRTC5_CRTC_GSL_CONTROL - mmCRTC_GSL_CONTROL),
-}
-};
-
-#define HW_REG_CRTC(reg, id)\
- (reg + reg_offsets[id].crtc)
-
/*******************************************************************************
* Private definitions
******************************************************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2ad5c28c6e66..a19be9de2df7 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -50,6 +50,7 @@
#include "dce/dce_hwseq.h"
#include "dce80/dce80_hw_sequencer.h"
#include "dce100/dce100_resource.h"
+#include "dce/dce_panel_cntl.h"
#include "reg_helper.h"
@@ -266,6 +267,18 @@ static const struct dce_stream_encoder_mask se_mask = {
SE_COMMON_MASK_SH_LIST_DCE80_100(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCE_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define opp_regs(id)\
[id] = {\
OPP_DCE_80_REG_LIST(id),\
@@ -728,6 +741,23 @@ struct link_encoder *dce80_link_encoder_create(
return &enc110->base;
}
+static struct panel_cntl *dce80_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dce80_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -909,6 +939,7 @@ static void dce80_destroy_resource_pool(struct resource_pool **pool)
static const struct resource_funcs dce80_res_pool_funcs = {
.destroy = dce80_destroy_resource_pool,
.link_enc_create = dce80_link_encoder_create,
+ .panel_cntl_create = dce80_panel_cntl_create,
.validate_bandwidth = dce80_validate_bandwidth,
.validate_plane = dce100_validate_plane,
.add_stream_to_ctx = dce100_add_stream_to_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index 0e682b5aa3eb..7f8456b9988b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -134,13 +134,6 @@ bool dpp1_get_optimal_number_of_taps(
struct scaler_data *scl_data,
const struct scaling_taps *in_taps)
{
- uint32_t pixel_width;
-
- if (scl_data->viewport.width > scl_data->recout.width)
- pixel_width = scl_data->recout.width;
- else
- pixel_width = scl_data->viewport.width;
-
 /* Some ASICs do not support FP16 scaling, so we reject modes that require it */
if (scl_data->format == PIXEL_FORMAT_FP16 &&
dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index deccab0228d2..75637c291e75 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -93,7 +93,6 @@ void hubbub1_wm_read_state(struct hubbub *hubbub,
void hubbub1_allow_self_refresh_control(struct hubbub *hubbub, bool allow)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
-
/*
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 1 means do not allow stutter
* DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE = 0 means allow stutter
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 31b64733d693..319366ebb44f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1139,6 +1139,8 @@ void hubp1_cursor_set_position(
int src_y_offset = pos->y - pos->y_hotspot - param->viewport.y;
int x_hotspot = pos->x_hotspot;
int y_hotspot = pos->y_hotspot;
+ int cursor_height = (int)hubp->curs_attr.height;
+ int cursor_width = (int)hubp->curs_attr.width;
uint32_t dst_x_offset;
uint32_t cur_en = pos->enable ? 1 : 0;
@@ -1152,10 +1154,16 @@ void hubp1_cursor_set_position(
if (hubp->curs_attr.address.quad_part == 0)
return;
+ // Rotated cursor width/height and hotspot tweaks for the offset calculation
if (param->rotation == ROTATION_ANGLE_90 || param->rotation == ROTATION_ANGLE_270) {
- src_x_offset = pos->y - pos->y_hotspot - param->viewport.x;
- y_hotspot = pos->x_hotspot;
- x_hotspot = pos->y_hotspot;
+ swap(cursor_height, cursor_width);
+ if (param->rotation == ROTATION_ANGLE_90) {
+ src_x_offset = pos->x - pos->y_hotspot - param->viewport.x;
+ src_y_offset = pos->y - pos->x_hotspot - param->viewport.y;
+ }
+ } else if (param->rotation == ROTATION_ANGLE_180) {
+ src_x_offset = pos->x - param->viewport.x;
+ src_y_offset = pos->y - param->viewport.y;
}
if (param->mirror) {
@@ -1177,13 +1185,13 @@ void hubp1_cursor_set_position(
if (src_x_offset >= (int)param->viewport.width)
cur_en = 0; /* not visible beyond right edge*/
- if (src_x_offset + (int)hubp->curs_attr.width <= 0)
+ if (src_x_offset + cursor_width <= 0)
cur_en = 0; /* not visible beyond left edge*/
if (src_y_offset >= (int)param->viewport.height)
cur_en = 0; /* not visible beyond bottom edge*/
- if (src_y_offset + (int)hubp->curs_attr.height <= 0)
+ if (src_y_offset + cursor_height <= 0)
cur_en = 0; /* not visible beyond top edge*/
if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
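
Note: the rotation handling above swaps the cursor extents and hotspots so the edge-visibility culling that follows operates in scan-out space. A worked example with assumed values — a 64x32 cursor, hotspot (10, 20), position (100, 200), viewport at the origin, rotated 90 degrees:

	/* Illustrative numbers only. */
	int cursor_width = 64, cursor_height = 32;
	int src_x_offset, src_y_offset;

	swap(cursor_height, cursor_width);	/* scan-out extents become 32x64 */
	src_x_offset = 100 - 20 - 0;	/* pos.x - y_hotspot - viewport.x = 80 */
	src_y_offset = 200 - 10 - 0;	/* pos.y - x_hotspot - viewport.y = 190 */
	/* left-edge cull then tests src_x_offset + cursor_width <= 0, i.e. 80 + 32 */
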
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index b0357546471b..77f16921e7f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -737,7 +737,8 @@ void dcn10_bios_golden_init(struct dc *dc)
if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
if (allow_self_fresh_force_enable == false &&
dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
- dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
@@ -826,6 +827,14 @@ enum dc_status dcn10_enable_stream_timing(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+ /*
+ * The way 420 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
pipe_ctx->stream_res.tg->funcs->set_blank_color(
pipe_ctx->stream_res.tg,
@@ -903,7 +912,7 @@ static void dcn10_reset_back_end_for_pipe(
if (pipe_ctx->top_pipe == NULL) {
if (pipe_ctx->stream_res.abm)
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@@ -1238,12 +1247,13 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
void dcn10_init_hw(struct dc *dc)
{
- int i;
+ int i, j;
struct abm *abm = dc->res_pool->abm;
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dce_hwseq *hws = dc->hwseq;
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
+ uint32_t backlight = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -1333,17 +1343,28 @@ void dcn10_init_hw(struct dc *dc)
continue;
/*
- * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
- * which needs to read dpcd info with the help of aconnector.
- * If aconnector (dc->links[i]->prev) is NULL, then dpcd status
- * cannot be read.
+ * If any of the displays are lit up, turn them off.
+ * Some MST hubs cannot be turned off completely
+ * until we tell them to do so; if they are left on,
+ * displays connected to the MST hub won't light up.
*/
- if (dc->links[i]->priv) {
- /* if any of the displays are lit up turn them off */
- status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
- &dpcd_power_state, sizeof(dpcd_power_state));
- if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
- dp_receiver_power_ctrl(dc->links[i], false);
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
+ /* blank the DP stream before powering off the receiver */
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+ dp_receiver_power_ctrl(dc->links[i], false);
}
}
}
@@ -1361,17 +1382,54 @@ void dcn10_init_hw(struct dc *dc)
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
+ /* In headless boot cases, DIG may be turned on, which
+ * causes HW/SW discrepancies. To avoid this, power down
+ * the hardware on boot if DIG is on and seamless boot
+ * is not enabled.
+ */
+ if (dc->config.power_down_display_on_boot) {
+ struct dc_link *edp_link = get_edp_link(dc);
+
+ if (edp_link &&
+ edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwss.edp_backlight_control &&
+ dc->hwss.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwss.edp_backlight_control(edp_link, false);
+ dc->hwss.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwss.power_down) {
+ dc->hwss.power_down(dc);
+ break;
+ }
+
+ }
+ }
+ }
+
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
audio->funcs->hw_init(audio);
}
- if (abm != NULL) {
- abm->funcs->init_backlight(abm);
- abm->funcs->abm_init(abm);
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->panel_cntl)
+ backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
}
+ if (abm != NULL)
+ abm->funcs->abm_init(abm, backlight);
+
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
@@ -1625,6 +1683,85 @@ void dcn10_pipe_control_lock(
hws->funcs.verify_allow_pstate_change_high(dc);
}
+/**
+ * delay_cursor_until_vupdate() - Delay cursor update if too close to VUPDATE.
+ *
+ * Software keepout workaround to prevent cursor update locking from stalling
+ * out cursor updates indefinitely, or old values from being retained in
+ * the case where the viewport changes in the same frame as the cursor.
+ *
+ * The idea is to calculate the remaining time from VPOS to VUPDATE. If it's
+ * too close to VUPDATE, then stall out until VUPDATE finishes.
+ *
+ * TODO: Optimize cursor programming to be once per frame before VUPDATE
+ * to avoid the need for this workaround.
+ */
+static void delay_cursor_until_vupdate(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct crtc_position position;
+ uint32_t vupdate_start, vupdate_end;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos;
+ unsigned int us_per_line, us_vupdate;
+
+ if (!dc->hwss.calc_vupdate_position || !dc->hwss.get_position)
+ return;
+
+ if (!pipe_ctx->stream_res.stream_enc || !pipe_ctx->stream_res.tg)
+ return;
+
+ dc->hwss.calc_vupdate_position(dc, pipe_ctx, &vupdate_start,
+ &vupdate_end);
+
+ dc->hwss.get_position(&pipe_ctx, 1, &position);
+ vpos = position.vertical_count;
+
+ /* Avoid wraparound calculation issues */
+ vupdate_start += stream->timing.v_total;
+ vupdate_end += stream->timing.v_total;
+ vpos += stream->timing.v_total;
+
+ if (vpos <= vupdate_start) {
+ /* VPOS is in VACTIVE or back porch. */
+ lines_to_vupdate = vupdate_start - vpos;
+ } else if (vpos > vupdate_end) {
+ /* VPOS is in the front porch. */
+ return;
+ } else {
+ /* VPOS is in VUPDATE. */
+ lines_to_vupdate = 0;
+ }
+
+ /* Calculate time until VUPDATE in microseconds. */
+ us_per_line =
+ stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ /* 70 us is a conservative estimate of cursor update time */
+ if (us_to_vupdate > 70)
+ return;
+
+ /* Stall out until the cursor update completes. */
+ if (vupdate_end < vupdate_start)
+ vupdate_end += stream->timing.v_total;
+ us_vupdate = (vupdate_end - vupdate_start + 1) * us_per_line;
+ udelay(us_to_vupdate + us_vupdate);
+}
+
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock)
+{
+ /* cursor lock is per MPCC tree, so we only need to lock one pipe per stream */
+ if (!pipe || pipe->top_pipe)
+ return;
+
+ /* Prevent cursor lock from stalling out cursor updates. */
+ if (lock)
+ delay_cursor_until_vupdate(dc, pipe);
+
+ dc->res_pool->mpc->funcs->cursor_lock(dc->res_pool->mpc,
+ pipe->stream_res.opp->inst, lock);
+}
+
static bool wait_for_reset_trigger_to_occur(
struct dc_context *dc_ctx,
struct timing_generator *tg)
@@ -2085,25 +2222,25 @@ void dcn10_get_surface_visual_confirm_color(
switch (pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB8888:
- /* set boarder color to red */
+ /* set border color to red */
color->color_r_cr = color_value;
break;
case PIXEL_FORMAT_ARGB2101010:
- /* set boarder color to blue */
+ /* set border color to blue */
color->color_b_cb = color_value;
break;
case PIXEL_FORMAT_420BPP8:
- /* set boarder color to green */
+ /* set border color to green */
color->color_g_y = color_value;
break;
case PIXEL_FORMAT_420BPP10:
- /* set boarder color to yellow */
+ /* set border color to yellow */
color->color_g_y = color_value;
color->color_r_cr = color_value;
break;
case PIXEL_FORMAT_FP16:
- /* set boarder color to white */
+ /* set border color to white */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
color->color_g_y = color_value;
@@ -2128,25 +2265,25 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
- /* HDR10, ARGB2101010 - set boarder color to red */
+ /* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
- /* FreeSync 2 ARGB2101010 - set boarder color to pink */
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
color->color_r_cr = color_value;
color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
- /* HDR10, FP16 - set boarder color to blue */
+ /* HDR10, FP16 - set border color to blue */
color->color_b_cb = color_value;
} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
- /* FreeSync 2 HDR - set boarder color to green */
+ /* FreeSync 2 HDR - set border color to green */
color->color_g_y = color_value;
}
break;
default:
- /* SDR - set boarder color to Gray */
+ /* SDR - set border color to Gray */
color->color_r_cr = color_value/2;
color->color_b_cb = color_value/2;
color->color_g_y = color_value/2;
@@ -2195,6 +2332,14 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
&blnd_cfg.black_color);
}
+ /*
+ * The way 420 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ blnd_cfg.black_color.color_r_cr = blnd_cfg.black_color.color_g_y;
+
if (per_pixel_alpha)
blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
else
@@ -2431,12 +2576,12 @@ void dcn10_blank_pixel_data(
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+ stream->link->panel_cntl->inst);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
} else if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (stream_res->tg->funcs->set_blank)
stream_res->tg->funcs->set_blank(stream_res->tg, blank);
}
@@ -3226,7 +3371,7 @@ int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
-static void dcn10_calc_vupdate_position(
+void dcn10_calc_vupdate_position(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
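
Note: the stall decision in delay_cursor_until_vupdate() is line-based, with us_per_line = h_total * 10000 / pix_clk_100hz. A quick numeric check using assumed 1080p60 timings (h_total = 2200, pixel clock 148.5 MHz):

	/* Assumed timings for illustration only. */
	unsigned int h_total = 2200;
	unsigned int pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */
	unsigned int us_per_line = h_total * 10000u / pix_clk_100hz;	/* = 14 us */
	/* The 70 us threshold therefore stalls only within ~5 lines of VUPDATE. */
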
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 16a50e05ffbf..42b6e016d71e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -34,6 +34,11 @@ struct dc;
void dcn10_hw_sequencer_construct(struct dc *dc);
int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void dcn10_calc_vupdate_position(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line);
void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
enum dc_status dcn10_enable_stream_timing(
struct pipe_ctx *pipe_ctx,
@@ -49,6 +54,7 @@ void dcn10_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+void dcn10_cursor_lock(struct dc *dc, struct pipe_ctx *pipe, bool lock);
void dcn10_blank_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index dd02d3983695..7cb8c3fb2665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -50,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
+ .cursor_lock = dcn10_cursor_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
@@ -71,6 +72,9 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn10_private_funcs = {
@@ -87,8 +91,6 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index d3617d6785a7..7fd385be3f3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -90,7 +90,8 @@ static const struct link_encoder_funcs dcn10_lnk_enc_funcs = {
.is_dig_enabled = dcn10_is_dig_enabled,
.get_dig_frontend = dcn10_get_dig_frontend,
.get_dig_mode = dcn10_get_dig_mode,
- .destroy = dcn10_link_encoder_destroy
+ .destroy = dcn10_link_encoder_destroy,
+ .get_max_link_cap = dcn10_link_encoder_get_max_link_cap,
};
static enum bp_result link_transmitter_control(
@@ -1370,7 +1371,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
DC_HPD_EN, 0);
}
-
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
@@ -1425,3 +1425,19 @@ enum signal_type dcn10_get_dig_mode(
return SIGNAL_TYPE_NONE;
}
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+
+ /* Raise the link rate based on supported features */
+ if (enc->features.flags.bits.IS_HBR2_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH2;
+
+ if (enc->features.flags.bits.IS_HBR3_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+
+ *link_settings = max_link_cap;
+}
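
Note: dcn10_link_encoder_get_max_link_cap() starts from four lanes at HBR with 0.5% downspread and raises only the link rate for HBR2/HBR3-capable encoders. A hedged sketch of how a caller could clamp requested settings against that ceiling (the surrounding validation context is assumed):

	/* Sketch: clamp a requested configuration to the encoder's ceiling. */
	struct dc_link_settings requested = link->cur_link_settings; /* assumed source */
	struct dc_link_settings max_cap;

	link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_cap);
	if (requested.link_rate > max_cap.link_rate)
		requested.link_rate = max_cap.link_rate;
	if (requested.lane_count > max_cap.lane_count)
		requested.lane_count = max_cap.lane_count;
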
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index 762109174fb8..68395bcc24fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -575,4 +575,7 @@ void dcn10_aux_initialize(struct dcn10_link_encoder *enc10);
enum signal_type dcn10_get_dig_mode(
struct link_encoder *enc);
+
+void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
#endif /* __DC_LINK_ENCODER__DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 04f863499cfb..3fcd408e9103 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -223,6 +223,9 @@ struct mpcc *mpc1_insert_plane(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, dpp_id);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, tree->opp_id);
+ /* Configure VUPDATE lock set for this MPCC to map to the OPP */
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, tree->opp_id);
+
/* update mpc tree mux setting */
if (tree->opp_list == insert_above_mpcc) {
 /* insert the topmost mpcc */
@@ -318,6 +321,7 @@ void mpc1_remove_mpcc(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
/* mark this mpcc as not in use */
mpc10->mpcc_in_use_mask &= ~(1 << mpcc_id);
@@ -328,6 +332,7 @@ void mpc1_remove_mpcc(
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
}
}
@@ -361,6 +366,7 @@ void mpc1_mpc_init(struct mpc *mpc)
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
}
@@ -381,6 +387,7 @@ void mpc1_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
REG_SET(MPCC_TOP_SEL[mpcc_id], 0, MPCC_TOP_SEL, 0xf);
REG_SET(MPCC_BOT_SEL[mpcc_id], 0, MPCC_BOT_SEL, 0xf);
REG_SET(MPCC_OPP_ID[mpcc_id], 0, MPCC_OPP_ID, 0xf);
+ REG_SET(MPCC_UPDATE_LOCK_SEL[mpcc_id], 0, MPCC_UPDATE_LOCK_SEL, 0xf);
mpc1_init_mpcc(&(mpc->mpcc_array[mpcc_id]), mpcc_id);
@@ -453,6 +460,13 @@ void mpc1_read_mpcc_state(
MPCC_BUSY, &s->busy);
}
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+
+ REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
+}
+
static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -464,6 +478,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.assert_mpcc_idle_before_connect = mpc1_assert_mpcc_idle_before_connect,
.init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
.update_blending = mpc1_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
.set_denorm = NULL,
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
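
Note: the new lock plumbing has two halves — every MPCC in a tree programs MPCC_UPDATE_LOCK_SEL to its owning OPP (0xf when idle), and mpc1_cursor_lock() then asserts CUR_VUPDATE_LOCK_SET on that OPP, holding cursor updates for the whole tree at once. A sketch of the relationship, with register semantics as inferred from this patch:

	/*
	 * MPCC_UPDATE_LOCK_SEL = opp_id -> the MPCC's update lock follows that OPP
	 * MPCC_UPDATE_LOCK_SEL = 0xf    -> MPCC idle / unassigned
	 * CUR[opp_id].CUR_VUPDATE_LOCK_SET = 1 -> hold cursor state for the tree
	 * CUR[opp_id].CUR_VUPDATE_LOCK_SET = 0 -> release at the next VUPDATE
	 */
	mpc->funcs->cursor_lock(mpc, opp_id, true);
	/* ... program cursor position/attributes here ... */
	mpc->funcs->cursor_lock(mpc, opp_id, false);
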
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 962a68e322ee..66a4719c22a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -39,11 +39,12 @@
SRII(MPCC_BG_G_Y, MPCC, inst),\
SRII(MPCC_BG_R_CR, MPCC, inst),\
SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_BG_B_CB, MPCC, inst),\
- SRII(MPCC_SM_CONTROL, MPCC, inst)
+ SRII(MPCC_SM_CONTROL, MPCC, inst),\
+ SRII(MPCC_UPDATE_LOCK_SEL, MPCC, inst)
#define MPC_OUT_MUX_COMMON_REG_LIST_DCN1_0(inst) \
- SRII(MUX, MPC_OUT, inst)
+ SRII(MUX, MPC_OUT, inst),\
+ VUPDATE_SRII(CUR, VUPDATE_LOCK_SET, inst)
#define MPC_COMMON_REG_VARIABLE_LIST \
uint32_t MPCC_TOP_SEL[MAX_MPCC]; \
@@ -55,7 +56,9 @@
uint32_t MPCC_BG_R_CR[MAX_MPCC]; \
uint32_t MPCC_BG_B_CB[MAX_MPCC]; \
uint32_t MPCC_SM_CONTROL[MAX_MPCC]; \
- uint32_t MUX[MAX_OPP];
+ uint32_t MUX[MAX_OPP]; \
+ uint32_t MPCC_UPDATE_LOCK_SEL[MAX_MPCC]; \
+ uint32_t CUR[MAX_OPP];
#define MPC_COMMON_MASK_SH_LIST_DCN1_0(mask_sh)\
SF(MPCC0_MPCC_TOP_SEL, MPCC_TOP_SEL, mask_sh),\
@@ -78,7 +81,8 @@
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FIELD_ALT, mask_sh),\
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_FRAME_POL, mask_sh),\
SF(MPCC0_MPCC_SM_CONTROL, MPCC_SM_FORCE_NEXT_TOP_POL, mask_sh),\
- SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh)
+ SF(MPC_OUT0_MUX, MPC_OUT_MUX, mask_sh),\
+ SF(MPCC0_MPCC_UPDATE_LOCK_SEL, MPCC_UPDATE_LOCK_SEL, mask_sh)
#define MPC_REG_FIELD_LIST(type) \
type MPCC_TOP_SEL;\
@@ -101,7 +105,9 @@
type MPCC_SM_FIELD_ALT;\
type MPCC_SM_FORCE_NEXT_FRAME_POL;\
type MPCC_SM_FORCE_NEXT_TOP_POL;\
- type MPC_OUT_MUX;
+ type MPC_OUT_MUX;\
+ type MPCC_UPDATE_LOCK_SEL;\
+ type CUR_VUPDATE_LOCK_SET;
struct dcn_mpc_registers {
MPC_COMMON_REG_VARIABLE_LIST
@@ -192,4 +198,6 @@ void mpc1_read_mpcc_state(
int mpcc_inst,
struct mpcc_state *s);
+void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 17d96ec6acd8..ec0ab42becba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,6 +299,7 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
+ int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -315,8 +316,9 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- if (optc1->vstartup_start > asic_blank_end)
- v_fp2 = optc1->vstartup_start - asic_blank_end;
+ vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
+ if (vertical_line_start < 0)
+ v_fp2 = -vertical_line_start;
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
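
Note: when it applies, the rewritten expression yields a v_fp2 one line smaller than the old vstartup_start - asic_blank_end form, consistent with the +1 in vertical_line_start (the same convention the vupdate position code uses). A numeric check with assumed values:

	/* Assumed: asic_blank_end = 30, vstartup_start = 35. */
	int32_t vertical_line_start = 30 - 35 + 1;	/* = -4 */
	uint32_t v_fp2 = vertical_line_start < 0 ? -vertical_line_start : 0;	/* = 4 */
	/* The old form would have produced 35 - 30 = 5. */
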
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 9a459a8fe8a0..8d1e52fb0393 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -158,6 +158,7 @@ struct dcn_optc_registers {
uint32_t OTG_GSL_WINDOW_Y;
uint32_t OTG_VUPDATE_KEEPOUT;
uint32_t OTG_CRC_CNTL;
+ uint32_t OTG_CRC_CNTL2;
uint32_t OTG_CRC0_DATA_RG;
uint32_t OTG_CRC0_DATA_B;
uint32_t OTG_CRC0_WINDOWA_X_CONTROL;
@@ -475,7 +476,11 @@ struct dcn_optc_registers {
type OPTC_DSC_SLICE_WIDTH;\
type OPTC_SEGMENT_WIDTH;\
type OPTC_DWB0_SOURCE_SELECT;\
- type OPTC_DWB1_SOURCE_SELECT;
+ type OPTC_DWB1_SOURCE_SELECT;\
+ type OTG_CRC_DSC_MODE;\
+ type OTG_CRC_DATA_STREAM_COMBINE_MODE;\
+ type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
+ type OTG_CRC_DATA_FORMAT;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 07265ca7d28c..17d5cb422025 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -51,6 +51,7 @@
#include "dce112/dce112_resource.h"
#include "dcn10_hubp.h"
#include "dcn10_hubbub.h"
+#include "dce/dce_panel_cntl.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
@@ -181,6 +182,14 @@ enum dcn10_clk_src_array_id {
.reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## 0 ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## 0 ## _ ## block ## id
+
+/* set field/register/bitfield name */
+#define SFRB(field_name, reg_name, bitfield, post_fix)\
+ .field_name = reg_name ## __ ## bitfield ## post_fix
+
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIF_BASE__INST0_SEG ## seg
@@ -321,6 +330,18 @@ static const struct dcn10_link_enc_mask le_mask = {
LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
static const struct dce110_aux_registers_shift aux_shift = {
DCN10_AUX_MASK_SH_LIST(__SHIFT)
};
@@ -419,11 +440,13 @@ static const struct dcn_mpc_registers mpc_regs = {
};
static const struct dcn_mpc_shift mpc_shift = {
- MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT)
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(__SHIFT),\
+ SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, __SHIFT)
};
static const struct dcn_mpc_mask mpc_mask = {
- MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),
+ MPC_COMMON_MASK_SH_LIST_DCN1_0(_MASK),\
+ SFRB(CUR_VUPDATE_LOCK_SET, CUR0_VUPDATE_LOCK_SET0, CUR0_VUPDATE_LOCK_SET, _MASK)
};
#define tg_regs(id)\
@@ -807,6 +830,23 @@ struct link_encoder *dcn10_link_encoder_create(
return &enc10->base;
}
+static struct panel_cntl *dcn10_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dcn10_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -1081,24 +1121,6 @@ static enum dc_status build_mapped_resource(
{
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1291,6 +1313,7 @@ static const struct dc_cap_funcs cap_funcs = {
static const struct resource_funcs dcn10_res_pool_funcs = {
.destroy = dcn10_destroy_resource_pool,
.link_enc_create = dcn10_link_encoder_create,
+ .panel_cntl_create = dcn10_panel_cntl_create,
.validate_bandwidth = dcn_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn10_acquire_idle_pipe_for_layer,
.validate_plane = dcn10_validate_plane,
@@ -1353,6 +1376,40 @@ static bool dcn10_resource_construct(
/* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
dc->caps.force_dp_tps4_for_cp2520 = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 1;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 0;
+ dc->caps.color.dpp.ogam_ram = 1; // RGAM on DCN1
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 1;
+
+ /* no post-blend color operations */
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 0;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 0;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else
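
Note: the capability block above advertises DCN1's color pipeline to the DM layer — pre-blend (DPP) degamma RAM plus sRGB/BT.2020 ROM curves, input/output CSC, the RGAM serving as the ogam stage, and no post-blend (MPC) color operations at all. A hedged sketch of the kind of feature gating a DM could do against these caps:

	/* Sketch: capability check before programming pre-blend color. */
	if (dc->caps.color.dpp.hw_3d_lut)
		; /* safe to program a pre-blend 3D LUT (not the case on DCN1) */
	else if (dc->caps.color.dpp.dgam_ram)
		; /* fall back to a 1D degamma programmed in RAM */
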
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 7eba9333c328..07b2f9399671 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1274,7 +1274,6 @@ static void enc1_se_audio_setup(
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
- uint32_t speakers = 0;
uint32_t channels = 0;
ASSERT(audio_info);
@@ -1282,7 +1281,6 @@ static void enc1_se_audio_setup(
 /* This should not happen, but guard against it so we don't get a BSOD */
return;
- speakers = audio_info->flags.info.ALLSPEAKERS;
channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
/* setup the audio stream source select (audio -> dig mapping) */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
index 501532dd523a..c478213ba7ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h
@@ -80,6 +80,7 @@ struct dcn20_hubbub {
const struct dcn_hubbub_mask *masks;
unsigned int debug_test_index_pstate;
struct dcn_watermark_set watermarks;
+ int num_vmid;
struct dcn20_vmid vmid[16];
unsigned int detile_buf_size;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 22f421e82733..da5333d165ac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -961,8 +961,7 @@ void dcn20_blank_pixel_data(
width = width / odm_cnt;
if (blank) {
- if (stream_res->abm)
- stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
if (dc->debug.visual_confirm != VISUAL_CONFIRM_DISABLE) {
test_pattern = CONTROLLER_DP_TEST_PATTERN_COLORSQUARES;
@@ -997,7 +996,8 @@ void dcn20_blank_pixel_data(
if (!blank)
if (stream_res->abm) {
- stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
+ stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1,
+ stream->link->panel_cntl->inst);
stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
}
}
@@ -1478,8 +1478,11 @@ static void dcn20_program_pipe(
if (pipe_ctx->update_flags.bits.odm)
hws->funcs.update_odm(dc, context, pipe_ctx);
- if (pipe_ctx->update_flags.bits.enable)
+ if (pipe_ctx->update_flags.bits.enable) {
dcn20_enable_plane(dc, pipe_ctx, context);
+ if (dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes)
+ dc->res_pool->hubbub->funcs->force_wm_propagate_to_pipes(dc->res_pool->hubbub);
+ }
if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
@@ -2037,8 +2040,7 @@ static void dcn20_reset_back_end_for_pipe(
*/
if (pipe_ctx->top_pipe == NULL) {
- if (pipe_ctx->stream_res.abm)
- pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+ dc->hwss.set_abm_immediate_disable(pipe_ctx);
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
@@ -2171,6 +2173,13 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
*/
mpcc_id = hubp->inst;
+ /* If there is no full update, we don't need to touch the MPC tree */
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
+ !pipe_ctx->update_flags.bits.mpcc) {
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ return;
+ }
+
/* check if this MPCC is already being used */
new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
/* remove MPCC if being used */
@@ -2294,7 +2303,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
- REG_WRITE(REFCLK_CNTL, 0);
+ if (REG(REFCLK_CNTL))
+ REG_WRITE(REFCLK_CNTL, 0);
//
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index 1e73357eda34..2fbde4241559 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -52,6 +52,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -82,6 +83,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.init_vm_ctx = dcn20_init_vm_ctx,
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn20_private_funcs = {
@@ -97,8 +101,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
index e4ac73035c84..8d209dae66e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c
@@ -49,6 +49,12 @@
#define IND_REG(index) \
(enc10->link_regs->index)
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
static struct mpll_cfg dcn2_mpll_cfg[] = {
// RBR
@@ -260,6 +266,38 @@ void dcn20_link_encoder_enable_dp_output(
}
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t is_in_usb_c_dp4_mode = 0;
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+
+ /* in 2-lane USB-C DP alt mode (no DP4 pin assignment), max lane count is 2 */
+ if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+ }
+
+}
+
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+ bool is_usb_c_alt_mode = false;
+
+ if (enc->features.flags.bits.DP_IS_USB_C) {
+ /* if value == 1, alt mode is disabled; otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
+ }
+
+ return is_usb_c_alt_mode;
+}
+
#define AUX_REG(reg)\
(enc10->aux_regs->reg)
@@ -338,6 +376,8 @@ static const struct link_encoder_funcs dcn20_link_enc_funcs = {
.fec_is_active = enc2_fec_is_active,
.get_dig_mode = dcn10_get_dig_mode,
.get_dig_frontend = dcn10_get_dig_frontend,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn20_link_encoder_construct(
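The two helpers added above encode one rule: in USB-C DP alt mode without the DP4 pin assignment, only two lanes reach the DisplayPort sink, so the advertised link capability must be clamped. A minimal standalone model of that clamp (illustrative names, no register access — a sketch, not the driver's API):

#include <stdio.h>

enum lane_count { LANE_COUNT_ONE = 1, LANE_COUNT_TWO = 2, LANE_COUNT_FOUR = 4 };

/* In 2-lane alt mode the other two SuperSpeed pairs stay with USB3,
 * so anything above two lanes is clamped. */
static enum lane_count clamp_usb_c_lanes(int in_alt_mode, int dp4,
					 enum lane_count requested)
{
	if (in_alt_mode && !dp4 && requested > LANE_COUNT_TWO)
		return LANE_COUNT_TWO;
	return requested;
}

int main(void)
{
	printf("%d\n", clamp_usb_c_lanes(1, 0, LANE_COUNT_FOUR)); /* -> 2 */
	printf("%d\n", clamp_usb_c_lanes(0, 0, LANE_COUNT_FOUR)); /* -> 4 */
	return 0;
}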
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
index 8cab8107fd94..284a1ee4d249 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h
@@ -343,6 +343,10 @@ void dcn20_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+void dcn20_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
void dcn20_link_encoder_construct(
struct dcn20_link_encoder *enc20,
const struct encoder_init_data *init_data,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index de9c857ab3e9..99cc095dc33c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -452,7 +452,7 @@ void mpc2_set_output_gamma(
next_mode = LUT_RAM_A;
mpc20_power_on_ogam_lut(mpc, mpcc_id, true);
- mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A ? true:false);
+ mpc20_configure_ogam_lut(mpc, mpcc_id, next_mode == LUT_RAM_A);
if (next_mode == LUT_RAM_A)
mpc2_program_luta(mpc, mpcc_id, params);
@@ -545,6 +545,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.mpc_init = mpc1_mpc_init,
.mpc_init_single_inst = mpc1_mpc_init_single_inst,
.update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
.get_mpcc_for_dpp = mpc2_get_mpcc_for_dpp,
.wait_for_idle = mpc2_assert_idle_mpcc,
.assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
index c78fd5123497..496658f420db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.h
@@ -179,7 +179,8 @@
SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MAX_G_Y, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_G_Y, MPC_OUT_DENORM_CLAMP_MIN_G_Y, mask_sh),\
SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MAX_B_CB, mask_sh),\
- SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh)
+ SF(MPC_OUT0_DENORM_CLAMP_B_CB, MPC_OUT_DENORM_CLAMP_MIN_B_CB, mask_sh),\
+ SF(CUR_VUPDATE_LOCK_SET0, CUR_VUPDATE_LOCK_SET, mask_sh)
/*
* DCN2 MPC_OCSC debug status register:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index d875b0c38fde..8c16967fe018 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -409,6 +409,18 @@ void optc2_program_manual_trigger(struct timing_generator *optc)
OTG_TRIGA_MANUAL_TRIG, 1);
}
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET_2(OTG_CRC_CNTL2, 0,
+ OTG_CRC_DSC_MODE, params->dsc_mode,
+ OTG_CRC_DATA_STREAM_COMBINE_MODE, params->odm_mode);
+
+ return optc1_configure_crc(optc, params);
+}
+
static struct timing_generator_funcs dcn20_tg_funcs = {
.validate_timing = optc1_validate_timing,
.program_timing = optc1_program_timing,
@@ -452,7 +464,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = {
.clear_optc_underflow = optc1_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc1_get_crc,
- .configure_crc = optc1_configure_crc,
+ .configure_crc = optc2_configure_crc,
.set_dsc_config = optc2_set_dsc_config,
.set_dwb_source = optc2_set_dwb_source,
.set_odm_bypass = optc2_set_odm_bypass,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
index 239cc40ae474..e0a0a8a8e2c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
@@ -36,6 +36,7 @@
SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OTG_CRC_CNTL2, OTG, inst),\
SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
@@ -62,6 +63,10 @@
SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DSC_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_COMBINE_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_STREAM_SPLIT_MODE, mask_sh),\
+ SF(OTG0_OTG_CRC_CNTL2, OTG_CRC_DATA_FORMAT, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG1_SRC_SEL, mask_sh),\
SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_NUM_OF_INPUT_SEGMENT, mask_sh),\
@@ -109,4 +114,6 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc);
void optc2_setup_manual_trigger(struct timing_generator *optc);
void optc2_program_manual_trigger(struct timing_generator *optc);
bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+bool optc2_configure_crc(struct timing_generator *optc,
+ const struct crc_params *params);
#endif /* __DC_OPTC_DCN20_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 5cdbba0cd873..cef1aa938ab5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -61,6 +61,7 @@
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dc_link_ddc.h"
+#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -508,6 +509,10 @@ enum dcn20_clk_src_array_id {
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIO_BASE__INST0_SEG ## seg
@@ -687,6 +692,18 @@ static const struct dcn10_link_enc_mask le_mask = {
DPCS_DCN2_MASK_SH_LIST(_MASK)
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define ipp_regs(id)\
[id] = {\
IPP_REG_LIST_DCN20(id),\
@@ -1289,6 +1306,23 @@ struct link_encoder *dcn20_link_encoder_create(
return &enc20->enc10.base;
}
+static struct panel_cntl *dcn20_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
struct clock_source *dcn20_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -1619,24 +1653,6 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status status = DC_OK;
struct pipe_ctx *pipe_ctx = resource_get_head_pipe_for_stream(&context->res_ctx, stream);
- /*TODO Seems unneeded anymore */
- /* if (old_context && resource_is_stream_unchanged(old_context, stream)) {
- if (stream != NULL && old_context->streams[i] != NULL) {
- todo: shouldn't have to copy missing parameter here
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- stream->clamping.pixel_encoding =
- stream->timing.pixel_encoding;
-
- resource_build_bit_depth_reduction_params(stream,
- &stream->bit_depth_params);
- build_clamping_params(stream);
-
- continue;
- }
- }
- */
-
if (!pipe_ctx)
return DC_ERROR_UNEXPECTED;
@@ -1647,22 +1663,32 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
}
-static void acquire_dsc(struct resource_context *res_ctx,
- const struct resource_pool *pool,
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
struct display_stream_compressor **dsc,
int pipe_idx)
{
int i;
+ const struct resource_pool *pool = dc->res_pool;
+ struct display_stream_compressor *dsc_old = dc->current_state->res_ctx.pipe_ctx[pipe_idx].stream_res.dsc;
- ASSERT(*dsc == NULL);
+ ASSERT(*dsc == NULL); /* If this ASSERT fails, dsc was not released properly */
*dsc = NULL;
+ /* Always do 1-to-1 mapping when the number of DSCs equals the number of pipes */
if (pool->res_cap->num_dsc == pool->res_cap->num_opp) {
*dsc = pool->dscs[pipe_idx];
res_ctx->is_dsc_acquired[pipe_idx] = true;
return;
}
+ /* Return old DSC to avoid the need for re-programming */
+ if (dsc_old && !res_ctx->is_dsc_acquired[dsc_old->inst]) {
+ *dsc = dsc_old;
+ res_ctx->is_dsc_acquired[dsc_old->inst] = true;
+ return;
+ }
+
/* Find first free DSC */
for (i = 0; i < pool->res_cap->num_dsc; i++)
if (!res_ctx->is_dsc_acquired[i]) {
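The acquisition order in dcn20_acquire_dsc is worth spelling out: 1-to-1 mapping when every pipe has its own DSC, otherwise reuse of the instance the pipe already holds in the current state (sparing a re-program), otherwise the first free instance. A standalone sketch of that policy under illustrative types:

#include <stdbool.h>

#define MAX_DSC 8

/* Returns the acquired DSC instance, or -1 if none is available. */
static int acquire_dsc_policy(bool acquired[MAX_DSC], int num_dsc,
			      int num_opp, int pipe_idx, int old_inst)
{
	int i;

	if (num_dsc == num_opp) {		/* 1:1 pipe mapping */
		acquired[pipe_idx] = true;
		return pipe_idx;
	}
	if (old_inst >= 0 && !acquired[old_inst]) {	/* reuse, no re-program */
		acquired[old_inst] = true;
		return old_inst;
	}
	for (i = 0; i < num_dsc; i++)		/* first free DSC */
		if (!acquired[i]) {
			acquired[i] = true;
			return i;
		}
	return -1;
}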
@@ -1694,7 +1720,6 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
{
enum dc_status result = DC_OK;
int i;
- const struct resource_pool *pool = dc->res_pool;
/* Get a DSC if required and available */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1706,7 +1731,7 @@ enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc,
if (pipe_ctx->stream_res.dsc)
continue;
- acquire_dsc(&dc_ctx->res_ctx, pool, &pipe_ctx->stream_res.dsc, i);
+ dcn20_acquire_dsc(dc, &dc_ctx->res_ctx, &pipe_ctx->stream_res.dsc, i);
/* The number of DSCs can be less than the number of pipes */
if (!pipe_ctx->stream_res.dsc) {
@@ -1834,12 +1859,13 @@ static void swizzle_to_dml_params(
}
bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
struct resource_context *res_ctx,
- const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe)
{
int pipe_idx = next_odm_pipe->pipe_idx;
+ const struct resource_pool *pool = dc->res_pool;
*next_odm_pipe = *prev_odm_pipe;
@@ -1897,7 +1923,7 @@ bool dcn20_split_stream_for_odm(
}
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
if (next_odm_pipe->stream->timing.flags.DSC == 1) {
- acquire_dsc(res_ctx, pool, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
+ dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
return false;
@@ -1935,8 +1961,6 @@ void dcn20_split_stream_for_mpc(
secondary_pipe->top_pipe = primary_pipe;
ASSERT(primary_pipe->plane_state);
- resource_build_scaling_params(primary_pipe);
- resource_build_scaling_params(secondary_pipe);
}
void dcn20_populate_dml_writeback_from_context(
@@ -2212,12 +2236,12 @@ int dcn20_populate_dml_pipes_from_context(
|| pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
- pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
- pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
- pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
- pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
- pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
- pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+ pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
+ pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
+ pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
+ pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
+ pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
+ pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2566,13 +2590,15 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split,
+ int *split,
bool *merge)
{
int i, pipe_idx, vlevel_split;
int plane_count = 0;
bool force_split = false;
bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
+ struct vba_vars_st *v = &context->bw_ctx.dml.vba;
+ int max_mpc_comb = v->maxMpcComb;
if (context->stream_count > 1) {
if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
@@ -2580,10 +2606,22 @@ int dcn20_validate_apply_pipe_split_flags(
} else if (dc->debug.force_single_disp_pipe_split)
force_split = true;
- /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ /**
+ * Workaround for avoiding pipe-split in cases where we'd split
+ * planes that are too small, resulting in splits that aren't
+ * valid for the scaler.
+ */
+ if (pipe->plane_state &&
+ (pipe->plane_state->dst_rect.width <= 16 ||
+ pipe->plane_state->dst_rect.height <= 16 ||
+ pipe->plane_state->src_rect.width <= 16 ||
+ pipe->plane_state->src_rect.height <= 16))
+ avoid_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
if (pipe->stream && !pipe->prev_odm_pipe &&
(!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
++plane_count;
@@ -2598,26 +2636,35 @@ int dcn20_validate_apply_pipe_split_flags(
continue;
for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++)
- if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1)
+ if (v->NoOfDPP[vlevel][0][pipe_idx] == 1 &&
+ v->ModeSupport[vlevel][0])
break;
/* Impossible to not split this pipe */
if (vlevel > context->bw_ctx.dml.soc.num_states)
vlevel = vlevel_split;
+ else
+ max_mpc_comb = 0;
pipe_idx++;
}
- context->bw_ctx.dml.vba.maxMpcComb = 0;
+ v->maxMpcComb = max_mpc_comb;
}
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
+ int pipe_plane = v->pipe_plane[pipe_idx];
+ bool split4mpc = context->stream_count == 1 && plane_count == 1
+ && dc->config.enable_4to1MPC && dc->res_pool->pipe_count >= 4;
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
- split[i] = true;
+ if (force_split || v->NoOfDPP[vlevel][max_mpc_comb][pipe_plane] > 1) {
+ if (split4mpc)
+ split[i] = 4;
+ else
+ split[i] = 2;
+ }
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
pipe->stream->view_format ==
@@ -2626,50 +2673,75 @@ int dcn20_validate_apply_pipe_split_flags(
TIMING_3D_FORMAT_TOP_AND_BOTTOM ||
pipe->stream->timing.timing_3d_format ==
TIMING_3D_FORMAT_SIDE_BY_SIDE))
- split[i] = true;
+ split[i] = 2;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
- split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ split[i] = 2;
+ v->ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
-
- if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
- /*Already split odm pipe tree, don't try to split again*/
- split[i] = false;
- split[pipe->prev_odm_pipe->pipe_idx] = false;
- } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
- && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
- /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
- split[i] = false;
- split[pipe->top_pipe->pipe_idx] = false;
- } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
- if (split[i] == false) {
- /*Exiting mpc/odm combine*/
- merge[i] = true;
- if (pipe->prev_odm_pipe) {
- ASSERT(0); /*should not actually happen yet*/
- merge[pipe->prev_odm_pipe->pipe_idx] = true;
- } else
- merge[pipe->top_pipe->pipe_idx] = true;
- } else {
- /*Transition from mpc combine to odm combine or vice versa*/
- ASSERT(0); /*should not actually happen yet*/
- split[i] = true;
- merge[i] = true;
+ v->ODMCombineEnabled[pipe_plane] =
+ v->ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (v->ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ if (get_num_mpc_splits(pipe) == 1) {
+ /* If an MPC split is needed but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 MPC */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 MPC */
+ else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 2 -> 1 MPC */
+ } else if (get_num_mpc_splits(pipe) == 3) {
+ /* If an MPC split is needed but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->top_pipe && !pipe->top_pipe->top_pipe)
+ || !pipe->bottom_pipe)) {
+ merge[i] = true; /* 4 -> 2 MPC */
+ } else if (split[i] == 0 && pipe->top_pipe &&
+ pipe->top_pipe->plane_state == pipe->plane_state)
+ merge[i] = true; /* 4 -> 1 MPC */
+ split[i] = 0;
+ } else if (get_num_odm_splits(pipe)) {
+ /* ODM -> MPC transition */
+ ASSERT(0); /* NOT expected yet */
if (pipe->prev_odm_pipe) {
- split[pipe->prev_odm_pipe->pipe_idx] = true;
- merge[pipe->prev_odm_pipe->pipe_idx] = true;
- } else {
- split[pipe->top_pipe->pipe_idx] = true;
- merge[pipe->top_pipe->pipe_idx] = true;
+ split[i] = 0;
+ merge[i] = true;
+ }
+ }
+ } else {
+ if (get_num_odm_splits(pipe) == 1) {
+ /* If an ODM split is needed but the pipe is already 2-way split */
+ if (split[i] == 4)
+ split[i] = 2; /* 2 -> 4 ODM */
+ else if (split[i] == 2)
+ split[i] = 0; /* 2 -> 2 ODM */
+ else if (pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ } else if (get_num_odm_splits(pipe) == 3) {
+ /* If an ODM split is needed but the pipe is already 4-way split */
+ if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
+ || !pipe->next_odm_pipe)) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* 4 -> 2 ODM */
+ } else if (split[i] == 0 && pipe->prev_odm_pipe) {
+ ASSERT(0); /* NOT expected yet */
+ merge[i] = true; /* exit ODM */
+ }
+ split[i] = 0;
+ } else if (get_num_mpc_splits(pipe)) {
+ /* MPC -> ODM transition */
+ ASSERT(0); /* NOT expected yet */
+ if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
+ split[i] = 0;
+ merge[i] = true;
}
}
}
/* Adjust dppclk when split is forced, do not bother with dispclk */
- if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
- context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
+ if (split[i] != 0 && v->NoOfDPP[vlevel][max_mpc_comb][pipe_idx] == 1)
+ v->RequiredDPPCLK[vlevel][max_mpc_comb][pipe_idx] /= 2;
pipe_idx++;
}
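With this hunk, split[] stops being a boolean: 0 leaves the pipe alone, 2 requests a 2-way split, and 4 requests a 4-way split (reachable only via enable_4to1MPC with a single stream and plane). A compressed model of how the request is derived, before the code above reconciles it against any existing MPC/ODM tree:

#include <stdbool.h>

/* 0 = no split, 2 = 2-way, 4 = 4-way (sketch of the request step only). */
static int requested_split(bool force_split, int no_of_dpp, bool split4mpc)
{
	if (!force_split && no_of_dpp <= 1)
		return 0;
	return split4mpc ? 4 : 2;
}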
@@ -2685,7 +2757,7 @@ bool dcn20_fast_validate_bw(
int *vlevel_out)
{
bool out = false;
- bool split[MAX_PIPES] = { false };
+ int split[MAX_PIPES] = { 0 };
int pipe_cnt, i, pipe_idx, vlevel;
ASSERT(pipes);
@@ -2727,7 +2799,7 @@ bool dcn20_fast_validate_bw(
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
ASSERT(hsplit_pipe);
if (!dcn20_split_stream_for_odm(
- &context->res_ctx, dc->res_pool,
+ dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
@@ -2745,7 +2817,7 @@ bool dcn20_fast_validate_bw(
&& context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx])
goto validate_fail;
- if (split[i]) {
+ if (split[i] == 2) {
if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) {
/* pipe not split previously needs split */
hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe);
@@ -2756,14 +2828,17 @@ bool dcn20_fast_validate_bw(
}
if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) {
if (!dcn20_split_stream_for_odm(
- &context->res_ctx, dc->res_pool,
+ dc, &context->res_ctx,
pipe, hsplit_pipe))
goto validate_fail;
dcn20_build_mapped_resource(dc, context, pipe->stream);
- } else
+ } else {
dcn20_split_stream_for_mpc(
- &context->res_ctx, dc->res_pool,
- pipe, hsplit_pipe);
+ &context->res_ctx, dc->res_pool,
+ pipe, hsplit_pipe);
+ if (!resource_build_scaling_params(pipe) || !resource_build_scaling_params(hsplit_pipe))
+ goto validate_fail;
+ }
pipe_split_from[hsplit_pipe->pipe_idx] = pipe_idx;
}
} else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) {
@@ -3003,7 +3078,7 @@ void dcn20_calculate_dlg_params(
pipe_idx,
cstate_en,
context->bw_ctx.bw.dcn.clk.p_state_change_support,
- false, false, false);
+ false, false, true);
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
@@ -3064,25 +3139,34 @@ validate_out:
return out;
}
-
-bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
- bool fast_validate)
+/*
+ * This must be noinline to ensure that anything touching FP registers
+ * stays contained within this call; previously, compiling with hard-float
+ * could result in FP instructions being emitted outside the boundaries
+ * of the DC_FP_START/END macros, which makes sense given that the
+ * compiler has no idea what is wrapped and what is not.
+ *
+ * This is largely a workaround to avoid the breakage introduced with 5.6;
+ * ideally, all FP-using code should be moved into its own file, only that
+ * file should be compiled with hard-float, and all code exported from
+ * there should be strictly wrapped with DC_FP_START/END.
+ */
+static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
+ struct dc_state *context, bool fast_validate)
{
bool voltage_supported = false;
bool full_pstate_supported = false;
bool dummy_pstate_supported = false;
double p_state_latency_us;
- DC_FP_START();
p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us;
context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support =
dc->debug.disable_dram_clock_change_vactive_support;
+ context->bw_ctx.dml.soc.allow_dram_clock_one_display_vactive =
+ dc->debug.enable_dram_clock_change_one_display_vactive;
if (fast_validate) {
- voltage_supported = dcn20_validate_bandwidth_internal(dc, context, true);
-
- DC_FP_END();
- return voltage_supported;
+ return dcn20_validate_bandwidth_internal(dc, context, true);
}
// Best case, we support full UCLK switch latency
@@ -3111,7 +3195,15 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
restore_dml_state:
context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us;
+ return voltage_supported;
+}
+bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context,
+ bool fast_validate)
+{
+ bool voltage_supported = false;
+ DC_FP_START();
+ voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate);
DC_FP_END();
return voltage_supported;
}
@@ -3170,8 +3262,6 @@ static struct dc_cap_funcs cap_funcs = {
enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
- enum dc_status result = DC_OK;
-
enum surface_pixel_format surf_pix_format = plane_state->format;
unsigned int bpp = resource_pixel_format_to_bpp(surf_pix_format);
@@ -3183,12 +3273,13 @@ enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_stat
swizzle = DC_SW_64KB_S;
plane_state->tiling_info.gfx9.swizzle = swizzle;
- return result;
+ return DC_OK;
}
static struct resource_funcs dcn20_res_pool_funcs = {
.destroy = dcn20_destroy_resource_pool,
.link_enc_create = dcn20_link_encoder_create,
+ .panel_cntl_create = dcn20_panel_cntl_create,
.validate_bandwidth = dcn20_validate_bandwidth,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -3427,6 +3518,13 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
bb->dram_clock_change_latency_us =
dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
}
+
+ if ((int)(bb->dummy_pstate_latency_us * 1000)
+ != dc->bb_overrides.dummy_clock_change_latency_ns
+ && dc->bb_overrides.dummy_clock_change_latency_ns) {
+ bb->dummy_pstate_latency_us =
+ dc->bb_overrides.dummy_clock_change_latency_ns / 1000.0;
+ }
}
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
@@ -3662,9 +3760,42 @@ static bool dcn20_resource_construct(
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
- dc->caps.hw_3d_lut = true;
dc->caps.extended_aux_timeout_support = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2, only MPC ROM
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) {
dc->debug = debug_defaults_drv;
} else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
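The DC_FP_START/END discipline described in the comment above generalizes: keep every FP instruction inside a noinline function and wrap only the call site. A userspace model of the pattern — the begin/end stubs merely stand in for the real macros:

#include <stdbool.h>
#include <stdio.h>

static void fp_begin(void) { /* stands in for DC_FP_START() */ }
static void fp_end(void)   { /* stands in for DC_FP_END() */ }

/* noinline prevents the optimizer from hoisting the FP math out of
 * the guarded region. */
static __attribute__((noinline)) bool validate_fp(double latency_us)
{
	return latency_us < 100.0;	/* stand-in for the DML math */
}

static bool validate(double latency_us)
{
	bool supported;

	fp_begin();
	supported = validate_fp(latency_us);
	fp_end();

	return supported;
}

int main(void)
{
	printf("%d\n", validate(35.0));	/* -> 1 */
	return 0;
}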
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index 9d5bff9455fd..2c1959845c29 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -123,7 +123,7 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split,
+ int *split,
bool *merge);
void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -135,10 +135,14 @@ void dcn20_split_stream_for_mpc(
struct pipe_ctx *primary_pipe,
struct pipe_ctx *secondary_pipe);
bool dcn20_split_stream_for_odm(
+ const struct dc *dc,
struct resource_context *res_ctx,
- const struct resource_pool *pool,
struct pipe_ctx *prev_odm_pipe,
struct pipe_ctx *next_odm_pipe);
+void dcn20_acquire_dsc(const struct dc *dc,
+ struct resource_context *res_ctx,
+ struct display_stream_compressor **dsc,
+ int pipe_idx);
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
struct resource_context *res_ctx,
const struct resource_pool *pool,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index 5e2d14b897af..129f0b62f751 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -49,11 +49,6 @@
#define FN(reg_name, field_name) \
hubbub1->shifts->field_name, hubbub1->masks->field_name
-#ifdef NUM_VMID
-#undef NUM_VMID
-#endif
-#define NUM_VMID 16
-
static uint32_t convert_and_clamp(
uint32_t wm_ns,
uint32_t refclk_mhz,
@@ -138,7 +133,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
dcn21_dchvm_init(hubbub);
- return NUM_VMID;
+ return hubbub1->num_vmid;
}
bool hubbub21_program_urgent_watermarks(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index d285ba622d61..960a0716dde5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -778,21 +778,28 @@ void dmcub_PLAT_54186_wa(struct hubp *hubp, struct surface_flip_registers *flip_
{
struct dc_dmub_srv *dmcub = hubp->ctx->dmub_srv;
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa = { 0 };
-
- PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
- PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C = flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
- PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
- PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
- PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
- PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
- PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
+ union dmub_rb_cmd cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.PLAT_54186_wa.header.type = DMUB_CMD__PLAT_54186_WA;
+ cmd.PLAT_54186_wa.header.payload_bytes = sizeof(cmd.PLAT_54186_wa.flip);
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_C =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_C;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH;
+ cmd.PLAT_54186_wa.flip.DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C =
+ flip_regs->DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C;
+ cmd.PLAT_54186_wa.flip.flip_params.grph_stereo = flip_regs->grph_stereo;
+ cmd.PLAT_54186_wa.flip.flip_params.hubp_inst = hubp->inst;
+ cmd.PLAT_54186_wa.flip.flip_params.immediate = flip_regs->immediate;
+ cmd.PLAT_54186_wa.flip.flip_params.tmz_surface = flip_regs->tmz_surface;
+ cmd.PLAT_54186_wa.flip.flip_params.vmid = flip_regs->vmid;
PERF_TRACE(); // TODO: remove after performance is stable.
- dc_dmub_srv_cmd_queue(dmcub, &PLAT_54186_wa.header);
+ dc_dmub_srv_cmd_queue(dmcub, &cmd);
PERF_TRACE(); // TODO: remove after performance is stable.
dc_dmub_srv_cmd_execute(dmcub);
PERF_TRACE(); // TODO: remove after performance is stable.
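The dmcub_PLAT_54186_wa rewrite above follows a general DMUB pattern: build the command in a zero-initialized union sized for any command, then fill in the header type and payload size before queueing. Reduced to its shape, with made-up command fields:

#include <stdint.h>
#include <string.h>

struct cmd_header  { uint8_t type; uint8_t payload_bytes; };
struct flip_params { uint32_t surface_address; };
struct flip_cmd    { struct cmd_header header; struct flip_params flip; };

union rb_cmd {				/* sized for the largest command */
	struct cmd_header header;
	struct flip_cmd flip_wa;
	uint8_t raw[64];
};

static union rb_cmd build_flip_wa(uint32_t addr)
{
	union rb_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));	/* unused payload bytes stay zero */
	cmd.flip_wa.header.type = 1;	/* illustrative command id */
	cmd.flip_wa.header.payload_bytes = sizeof(cmd.flip_wa.flip);
	cmd.flip_wa.flip.surface_address = addr;
	return cmd;
}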
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index b9ff9767e08f..a5baef7e7a7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -53,6 +53,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
.interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -85,11 +86,10 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.optimize_pwr_state = dcn21_optimize_pwr_state,
.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
- .set_cursor_position = dcn10_set_cursor_position,
- .set_cursor_attribute = dcn10_set_cursor_attribute,
- .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
- .optimize_pwr_state = dcn21_optimize_pwr_state,
- .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
};
static const struct hwseq_private_funcs dcn21_private_funcs = {
@@ -105,8 +105,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
- .is_panel_backlight_on = dce110_is_panel_backlight_on,
- .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
index e45683ac871a..aa46c35b05a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c
@@ -203,29 +203,6 @@ static bool update_cfg_data(
return true;
}
-void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc,
- struct dc_link_settings *link_settings)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value);
-
- if (!value && link_settings->lane_count > LANE_COUNT_TWO)
- link_settings->lane_count = LANE_COUNT_TWO;
-}
-
-bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc)
-{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t value;
-
- REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value);
-
- // if value == 1 alt mode is disabled, otherwise it is enabled
- return !value;
-}
-
bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
@@ -348,8 +325,8 @@ static const struct link_encoder_funcs dcn21_link_enc_funcs = {
.fec_set_ready = enc2_fec_set_ready,
.fec_is_active = enc2_fec_is_active,
.get_dig_frontend = dcn10_get_dig_frontend,
- .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode,
- .get_max_link_cap = dcn21_link_encoder_get_max_link_cap,
+ .is_in_alt_mode = dcn20_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn20_link_encoder_get_max_link_cap,
};
void dcn21_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index b25484aa8222..f00a56835084 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -61,6 +61,7 @@
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce110/dce110_resource.h"
+#include "dce/dce_panel_cntl.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@@ -85,6 +86,7 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_abm.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -284,7 +286,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.dram_channel_width_bytes = 4,
.fabric_datapath_to_dcn_data_return_bytes = 32,
.dcn_downspread_percent = 0.5,
- .downspread_percent = 0.5,
+ .downspread_percent = 0.38,
.dram_page_open_time_ns = 50.0,
.dram_rw_turnaround_time_ns = 17.5,
.dram_return_buffer_per_channel_bytes = 8192,
@@ -340,6 +342,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
mm ## block ## id ## _ ## reg_name
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
/* NBIO */
#define NBIO_BASE_INNER(seg) \
NBIF0_BASE__INST0_SEG ## seg
@@ -799,7 +805,7 @@ static const struct resource_caps res_cap_rn = {
.num_pll = 5, // maybe 3, because the last two are used for USB-C
.num_dwb = 1,
.num_ddc = 5,
- .num_vmid = 1,
+ .num_vmid = 16,
.num_dsc = 3,
};
@@ -991,9 +997,12 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
pool->base.dp_clock_source = NULL;
}
-
- if (pool->base.abm != NULL)
- dce_abm_destroy(&pool->base.abm);
+ if (pool->base.abm != NULL) {
+ if (pool->base.abm->ctx->dc->config.disable_dmcu)
+ dmub_abm_destroy(&pool->base.abm);
+ else
+ dce_abm_destroy(&pool->base.abm);
+ }
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
@@ -1286,6 +1295,7 @@ static struct hubbub *dcn21_hubbub_create(struct dc_context *ctx)
vmid->shifts = &vmid_shifts;
vmid->masks = &vmid_masks;
}
+ hubbub->num_vmid = res_cap_rn.num_vmid;
return &hubbub->base;
}
@@ -1374,64 +1384,50 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
{
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
- unsigned int i, j, k;
- int closest_clk_lvl;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
// Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
- /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
- dcn2_1_soc.clock_limits[0].state = 0;
- dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
- dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
- dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
- dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
-
- /*
- * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
- * as indicator
- */
-
- closest_clk_lvl = -1;
- /* index currently being filled */
- k = 1;
- for (i = 1; i < clk_table->num_entries; i++) {
- /* loop backwards, skip duplicate state*/
- for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
+ ASSERT(clk_table->num_entries);
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards */
+ for (closest_clk_lvl = 0, j = dcn2_1_soc.num_states - 1; j >= 0; j--) {
if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
}
}
- /* if found a lvl that fits, use the DCN clks from it, if not, go to next clk limit*/
- if (closest_clk_lvl != -1) {
- dcn2_1_soc.clock_limits[k].state = i;
- dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- k++;
- }
+ clock_limits[i].state = i;
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ clock_limits[i].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ clock_limits[i].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ clock_limits[i].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn2_1_soc.clock_limits[i] = clock_limits[i];
+ if (clk_table->num_entries) {
+ dcn2_1_soc.num_states = clk_table->num_entries;
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
}
- dcn2_1_soc.num_states = k;
}
- /* duplicate last level */
- dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
- dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
-
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
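The rewritten loop above keys every fused clock entry to an existing SoC state by dcfclk: walk the SoC table from the top and take the highest state whose dcfclk does not exceed the entry's. The lookup in isolation:

/* Highest SoC state whose dcfclk fits under the fused entry's dcfclk;
 * falls back to state 0 when nothing fits. */
static unsigned int closest_clk_lvl(const unsigned int soc_dcfclk_mhz[],
				    int num_states, unsigned int dcfclk_mhz)
{
	int j;

	for (j = num_states - 1; j >= 0; j--)
		if (soc_dcfclk_mhz[j] <= dcfclk_mhz)
			return (unsigned int)j;
	return 0;
}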
@@ -1602,6 +1598,18 @@ static const struct dcn10_link_enc_registers link_enc_regs[] = {
link_regs(4, E),
};
+static const struct dce_panel_cntl_registers panel_cntl_regs[] = {
+ { DCN_PANEL_CNTL_REG_LIST() }
+};
+
+static const struct dce_panel_cntl_shift panel_cntl_shift = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_panel_cntl_mask panel_cntl_mask = {
+ DCE_PANEL_CNTL_MASK_SH_LIST(_MASK)
+};
+
#define aux_regs(id)\
[id] = {\
DCN2_AUX_REG_LIST(id)\
@@ -1687,6 +1695,24 @@ static struct link_encoder *dcn21_link_encoder_create(
return &enc21->enc10.base;
}
+
+static struct panel_cntl *dcn21_panel_cntl_create(const struct panel_cntl_init_data *init_data)
+{
+ struct dce_panel_cntl *panel_cntl =
+ kzalloc(sizeof(struct dce_panel_cntl), GFP_KERNEL);
+
+ if (!panel_cntl)
+ return NULL;
+
+ dce_panel_cntl_construct(panel_cntl,
+ init_data,
+ &panel_cntl_regs[init_data->inst],
+ &panel_cntl_shift,
+ &panel_cntl_mask);
+
+ return &panel_cntl->base;
+}
+
#define CTX ctx
#define REG(reg_name) \
@@ -1705,12 +1731,8 @@ static int dcn21_populate_dml_pipes_from_context(
{
uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes);
int i;
- struct resource_context *res_ctx = &context->res_ctx;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (!res_ctx->pipe_ctx[i].stream)
- continue;
+ for (i = 0; i < pipe_cnt; i++) {
pipes[i].pipe.src.hostvm = 1;
pipes[i].pipe.src.gpuvm = 1;
@@ -1735,6 +1757,7 @@ enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_stat
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
+ .panel_cntl_create = dcn21_panel_cntl_create,
.validate_bandwidth = dcn21_validate_bandwidth,
.populate_dml_pipes = dcn21_populate_dml_pipes_from_context,
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
@@ -1781,7 +1804,6 @@ static bool dcn21_resource_construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 256;
dc->caps.dmdata_alloc_size = 2048;
- dc->caps.hw_3d_lut = true;
dc->caps.max_slave_planes = 1;
dc->caps.post_blend_color_processing = true;
@@ -1790,6 +1812,40 @@ static bool dcn21_resource_construct(
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
@@ -1842,17 +1898,19 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- pool->base.dmcu = dcn21_dmcu_create(ctx,
- &dmcu_regs,
- &dmcu_shift,
- &dmcu_mask);
- if (pool->base.dmcu == NULL) {
- dm_error("DC: failed to create dmcu!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
+ if (!dc->config.disable_dmcu) {
+ pool->base.dmcu = dcn21_dmcu_create(ctx,
+ &dmcu_regs,
+ &dmcu_shift,
+ &dmcu_mask);
+ if (pool->base.dmcu == NULL) {
+ dm_error("DC: failed to create dmcu!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
}
- if (dc->debug.disable_dmcu) {
+ if (dc->config.disable_dmcu) {
pool->base.psr = dmub_psr_create(ctx);
if (pool->base.psr == NULL) {
@@ -1862,15 +1920,16 @@ static bool dcn21_resource_construct(
}
}
- pool->base.abm = dce_abm_create(ctx,
+ if (dc->config.disable_dmcu)
+ pool->base.abm = dmub_abm_create(ctx,
+ &abm_regs,
+ &abm_shift,
+ &abm_mask);
+ else
+ pool->base.abm = dce_abm_create(ctx,
&abm_regs,
&abm_shift,
&abm_mask);
- if (pool->base.abm == NULL) {
- dm_error("DC: failed to create abm!\n");
- BREAK_TO_DEBUGGER();
- goto create_fail;
- }
pool->base.pp_smu = dcn21_pp_smu_create(ctx);
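The create and destruct changes above have to agree: with dc->config.disable_dmcu set, ABM is backed by DMUB firmware (and PSR by dmub_psr) rather than by the DMCU, and teardown re-checks the same flag so it frees with the matching destructor. The selection, reduced to a sketch:

#include <stdbool.h>

enum abm_backend { ABM_BY_DMCU, ABM_BY_DMUB };

static enum abm_backend pick_abm_backend(bool disable_dmcu)
{
	return disable_dmcu ? ABM_BY_DMUB : ABM_BY_DMCU;
}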
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 7ee8b8460a9b..e34c3376efc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -63,10 +63,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dml_common_defs.o := $(dml_ccflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
- dml_common_defs.o
ifdef CONFIG_DRM_AMD_DC_DCN
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 5bbbafacc720..80170f9721ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2599,21 +2599,44 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
}
}
+ {
+ float SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank = 999999;
+ int PlaneWithMinActiveDRAMClockChangeMargin = -1;
+
mode_lib->vba.MinActiveDRAMClockChangeMargin = 999999;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
< mode_lib->vba.MinActiveDRAMClockChangeMargin) {
mode_lib->vba.MinActiveDRAMClockChangeMargin =
mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ if (mode_lib->vba.BlendingAndTiming[k] == k) {
+ PlaneWithMinActiveDRAMClockChangeMargin = k;
+ } else {
+ for (j = 0; j < mode_lib->vba.NumberOfActivePlanes; ++j) {
+ if (mode_lib->vba.BlendingAndTiming[k] == j) {
+ PlaneWithMinActiveDRAMClockChangeMargin = j;
+ }
+ }
+ }
}
}
mode_lib->vba.MinActiveDRAMClockChangeLatencySupported =
mode_lib->vba.MinActiveDRAMClockChangeMargin
+ mode_lib->vba.DRAMClockChangeLatency;
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (!((k == PlaneWithMinActiveDRAMClockChangeMargin) && (mode_lib->vba.BlendingAndTiming[k] == k))
+ && !(mode_lib->vba.BlendingAndTiming[k] == PlaneWithMinActiveDRAMClockChangeMargin)
+ && mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]
+ < SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank) {
+ SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank =
+ mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k];
+ }
+ }
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+ mode_lib->vba.DRAMClockChangeWatermark += 25;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
@@ -2622,13 +2645,17 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.MinTTUVBlank[k] += 25;
}
}
- mode_lib->vba.DRAMClockChangeWatermark += 25;
+
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else {
- if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
+ if ((mode_lib->vba.SynchronizedVBlank
+ || mode_lib->vba.NumberOfActivePlanes == 1
+ || (SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0 &&
+ mode_lib->vba.AllowDramClockChangeOneDisplayVactive))
+ && mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vblank;
for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
if (!mode_lib->vba.AllowDRAMClockChangeDuringVBlank[k]) {
@@ -2640,6 +2667,7 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_unsupported;
}
}
+ }
for (k = 0; k <= mode_lib->vba.soc.num_states; k++)
for (j = 0; j < 2; j++)
mode_lib->vba.DRAMClockChangeSupport[k][j] = mode_lib->vba.DRAMClockChangeSupport[0][0];
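The dml20v2 change above tracks a second-minimum margin so that one display sitting in vactive can still permit a DRAM clock change while the others switch in vblank. The decision tree, simplified (the real code additionally requires PrefetchMode == 0 on the vblank path and gates vactive on DRAMClockChangeSupportsVActive):

enum dram_clk_change { SUPPORT_VACTIVE, SUPPORT_VBLANK, SUPPORT_NONE };

static enum dram_clk_change classify(double min_margin_us,
				     double second_min_margin_us,
				     int sync_vblank, int active_planes,
				     int allow_one_display_vactive,
				     int dummy_pstate_ok)
{
	if (min_margin_us > 60.0)
		return SUPPORT_VACTIVE;		/* large vactive margin */
	if (dummy_pstate_ok && min_margin_us > 0.0)
		return SUPPORT_VACTIVE;		/* dummy p-state check passed */
	if (sync_vblank || active_planes == 1 ||
	    (second_min_margin_us > 0.0 && allow_one_display_vactive))
		return SUPPORT_VBLANK;
	return SUPPORT_NONE;
}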
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
index 8c86b63ddf07..1e557ddcb638 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
@@ -26,7 +26,6 @@
#ifndef __DML20_DISPLAY_RQ_DLG_CALC_H__
#define __DML20_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 0378406bf7e7..0d53e871a9d1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -26,7 +26,6 @@
#ifndef __DML20V2_DISPLAY_RQ_DLG_CALC_H__
#define __DML20V2_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index e6617c958bb8..a576eed94d9b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3190,6 +3190,7 @@ static void CalculateFlipSchedule(
double TimeForFetchingRowInVBlankImmediateFlip;
double ImmediateFlipBW;
double HostVMInefficiencyFactor;
+ double VRatioClamped;
if (GPUVMEnable == true && HostVMEnable == true) {
HostVMInefficiencyFactor =
@@ -3222,31 +3223,32 @@ static void CalculateFlipSchedule(
*DestinationLinesToRequestRowInImmediateFlip = dml_ceil(4.0 * (TimeForFetchingRowInVBlankImmediateFlip / LineTime), 1) / 4.0;
*final_flip_bw = dml_max(PDEAndMetaPTEBytesPerFrame * HostVMInefficiencyFactor / (*DestinationLinesToRequestVMInImmediateFlip * LineTime), (MetaRowBytes + DPTEBytesPerRow) * HostVMInefficiencyFactor / (*DestinationLinesToRequestRowInImmediateFlip * LineTime));
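+	/* Clamp VRatio to a minimum of 1.0 so the row fetch times computed
+	 * below are not overestimated when the vertical scaling ratio is
+	 * below one.
+	 */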
+ VRatioClamped = (VRatio < 1.0) ? 1.0 : VRatio;
if (SourcePixelFormat == dm_420_8 || SourcePixelFormat == dm_420_10) {
if (GPUVMEnable == true && DCCEnable != true) {
min_row_time = dml_min(
- dpte_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / (VRatio / 2));
+ dpte_row_height * LineTime / VRatioClamped,
+ dpte_row_height_chroma * LineTime / (VRatioClamped / 2));
} else if (GPUVMEnable != true && DCCEnable == true) {
min_row_time = dml_min(
- meta_row_height * LineTime / VRatio,
- meta_row_height_chroma * LineTime / (VRatio / 2));
+ meta_row_height * LineTime / VRatioClamped,
+ meta_row_height_chroma * LineTime / (VRatioClamped / 2));
} else {
min_row_time = dml_min4(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio,
- dpte_row_height_chroma * LineTime / (VRatio / 2),
- meta_row_height_chroma * LineTime / (VRatio / 2));
+ dpte_row_height * LineTime / VRatioClamped,
+ meta_row_height * LineTime / VRatioClamped,
+ dpte_row_height_chroma * LineTime / (VRatioClamped / 2),
+ meta_row_height_chroma * LineTime / (VRatioClamped / 2));
}
} else {
if (GPUVMEnable == true && DCCEnable != true) {
- min_row_time = dpte_row_height * LineTime / VRatio;
+ min_row_time = dpte_row_height * LineTime / VRatioClamped;
} else if (GPUVMEnable != true && DCCEnable == true) {
- min_row_time = meta_row_height * LineTime / VRatio;
+ min_row_time = meta_row_height * LineTime / VRatioClamped;
} else {
min_row_time = dml_min(
- dpte_row_height * LineTime / VRatio,
- meta_row_height * LineTime / VRatio);
+ dpte_row_height * LineTime / VRatioClamped,
+ meta_row_height * LineTime / VRatioClamped);
}
}
@@ -5944,7 +5946,7 @@ static void CalculateMetaAndPTETimes(
* PixelPTEReqHeightY[k];
}
dpte_groups_per_row_luma_ub = dml_ceil(
- dpte_row_width_luma_ub[k] / dpte_group_width_luma,
+ (float) dpte_row_width_luma_ub[k] / dpte_group_width_luma,
1);
time_per_pte_group_nom_luma[k] = DST_Y_PER_PTE_ROW_NOM_L[k] * HTotal[k]
/ PixelClock[k] / dpte_groups_per_row_luma_ub;
@@ -5968,7 +5970,7 @@ static void CalculateMetaAndPTETimes(
* PixelPTEReqHeightC[k];
}
dpte_groups_per_row_chroma_ub = dml_ceil(
- dpte_row_width_chroma_ub[k]
+ (float) dpte_row_width_chroma_ub[k]
/ dpte_group_width_chroma,
1);
time_per_pte_group_nom_chroma[k] = DST_Y_PER_PTE_ROW_NOM_C[k]
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index a38baa73d484..90a5fefef05b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1200,7 +1200,7 @@ static void dml_rq_dlg_get_dlg_params(
min_hratio_fact_l = 1.0;
min_hratio_fact_c = 1.0;
- if (htaps_l <= 1)
+ if (hratio_l <= 1)
min_hratio_fact_l = 2.0;
else if (htaps_l <= 6) {
if ((hratio_l * 2.0) > 4.0)
@@ -1216,7 +1216,7 @@ static void dml_rq_dlg_get_dlg_params(
hscale_pixel_rate_l = min_hratio_fact_l * dppclk_freq_in_mhz;
- if (htaps_c <= 1)
+ if (hratio_c <= 1)
min_hratio_fact_c = 2.0;
else if (htaps_c <= 6) {
if ((hratio_c * 2.0) > 4.0)
@@ -1490,19 +1490,30 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_pte_group_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) dpte_groups_per_row_ub_l);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
+ if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l >= (unsigned int)dml_pow(2, 13))
+ disp_dlg_regs->refcyc_per_pte_group_vblank_l = (1 << 13) - 1;
+ else
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_l < (unsigned int)dml_pow(2, 13));
if (dual_plane) {
disp_dlg_regs->refcyc_per_pte_group_vblank_c = (unsigned int) (dst_y_per_row_vblank
* (double) htotal * ref_freq_to_pix_freq
/ (double) dpte_groups_per_row_ub_c);
- ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
+ if ((refclk_freq_in_mhz / ref_freq_to_pix_freq < 28) &&
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c >= (unsigned int)dml_pow(2, 13))
+ disp_dlg_regs->refcyc_per_pte_group_vblank_c = (1 << 13) - 1;
+ else
+ ASSERT(disp_dlg_regs->refcyc_per_pte_group_vblank_c
< (unsigned int)dml_pow(2, 13));
}
- disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
+ if (src->dcc)
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l =
(unsigned int) (dst_y_per_row_vblank * (double) htotal
* ref_freq_to_pix_freq / (double) meta_chunks_per_row_ub_l);
+ else
+ disp_dlg_regs->refcyc_per_meta_chunk_vblank_l = 0;
ASSERT(disp_dlg_regs->refcyc_per_meta_chunk_vblank_l < (unsigned int)dml_pow(2, 13));
disp_dlg_regs->refcyc_per_meta_chunk_vblank_c =
@@ -1522,8 +1533,8 @@ static void dml_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_vm_group_vblank = get_refcyc_per_vm_group_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
disp_dlg_regs->refcyc_per_vm_group_flip = get_refcyc_per_vm_group_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
- disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
- disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz;
+ disp_dlg_regs->refcyc_per_vm_req_vblank = get_refcyc_per_vm_req_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
+ disp_dlg_regs->refcyc_per_vm_req_flip = get_refcyc_per_vm_req_flip(mode_lib, e2e_pipe_param, num_pipes, pipe_idx) * refclk_freq_in_mhz * dml_pow(2, 10);
// Clamp to max for now
if (disp_dlg_regs->refcyc_per_vm_group_vblank >= (unsigned int)dml_pow(2, 23))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
index 83e95f8cbff2..e8f7785e3fc6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
@@ -26,7 +26,7 @@
#ifndef __DML21_DISPLAY_RQ_DLG_CALC_H__
#define __DML21_DISPLAY_RQ_DLG_CALC_H__
-#include "../dml_common_defs.h"
+#include "dm_services.h"
#include "../display_rq_dlg_helpers.h"
struct display_mode_lib;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index cf2758ca5b02..c77c3d827e4a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -25,8 +25,10 @@
#ifndef __DISPLAY_MODE_LIB_H__
#define __DISPLAY_MODE_LIB_H__
-
-#include "dml_common_defs.h"
+#include "dm_services.h"
+#include "dc_features.h"
+#include "display_mode_structs.h"
+#include "display_mode_enums.h"
#include "display_mode_vba.h"
enum dml_project {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 687010c17324..439ffd04be34 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -118,9 +118,11 @@ struct _vcs_dpi_soc_bounding_box_st {
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
bool disable_dram_clock_change_vactive_support;
+ bool allow_dram_clock_one_display_vactive;
};
struct _vcs_dpi_ip_params_st {
+ bool use_min_dcfclk;
bool gpuvm_enable;
bool hostvm_enable;
unsigned int gpuvm_max_page_table_levels;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index 6b525c52124c..b19988f54721 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -224,6 +224,7 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.DummyPStateCheck = soc->dram_clock_change_latency_us == soc->dummy_pstate_latency_us;
mode_lib->vba.DRAMClockChangeSupportsVActive = !soc->disable_dram_clock_change_vactive_support ||
mode_lib->vba.DummyPStateCheck;
+ mode_lib->vba.AllowDramClockChangeOneDisplayVactive = soc->allow_dram_clock_one_display_vactive;
mode_lib->vba.Downspreading = soc->downspread_percent;
mode_lib->vba.DRAMChannelWidth = soc->dram_channel_width_bytes; // new!
@@ -280,6 +281,7 @@ static void fetch_ip_params(struct display_mode_lib *mode_lib)
ip_params_st *ip = &mode_lib->vba.ip;
// IP Parameters
+ mode_lib->vba.UseMinimumRequiredDCFCLK = ip->use_min_dcfclk;
mode_lib->vba.MaxNumDPP = ip->max_num_dpp;
mode_lib->vba.MaxNumOTG = ip->max_num_otg;
mode_lib->vba.MaxNumHDMIFRLOutputs = ip->max_num_hdmi_frl_outputs;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 5d82fc5a7ed7..3f559e725ab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -27,8 +27,6 @@
#ifndef __DML2_DISPLAY_MODE_VBA_H__
#define __DML2_DISPLAY_MODE_VBA_H__
-#include "dml_common_defs.h"
-
struct display_mode_lib;
void ModeSupportAndSystemConfiguration(struct display_mode_lib *mode_lib);
@@ -898,6 +896,8 @@ struct vba_vars_st {
bool dummystring[DC__NUM_DPP__MAX];
double BPP;
enum odm_combine_policy ODMCombinePolicy;
+ bool UseMinimumRequiredDCFCLK;
+ bool AllowDramClockChangeOneDisplayVactive;
};
bool CalculateMinAndMaxPrefetchMode(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index 1f24db830737..2555ef0358c2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -26,7 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_HELPERS_H__
#define __DISPLAY_RQ_DLG_HELPERS_H__
-#include "dml_common_defs.h"
#include "display_mode_lib.h"
/* Function: Printer functions
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 304164986bd8..9c06913ad767 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -26,8 +26,6 @@
#ifndef __DISPLAY_RQ_DLG_CALC_H__
#define __DISPLAY_RQ_DLG_CALC_H__
-#include "dml_common_defs.h"
-
struct display_mode_lib;
#include "display_rq_dlg_helpers.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
index ded71ea82413..02e06c9b3230 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml_inline_defs.h
@@ -26,7 +26,6 @@
#ifndef __DML_INLINE_DEFS_H__
#define __DML_INLINE_DEFS_H__
-#include "dml_common_defs.h"
#include "dcn_calc_math.h"
#include "dml_logger.h"
@@ -75,6 +74,18 @@ static inline double dml_floor(double a, double granularity)
return (double) dcn_bw_floor2(a, granularity);
}
+static inline double dml_round(double a)
+{
+ double round_pt = 0.5;
+ double ceil = dml_ceil(a, 1);
+ double floor = dml_floor(a, 1);
+
+ if (a - floor >= round_pt)
+ return ceil;
+ else
+ return floor;
+}
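+/* Round-half-up, e.g. dml_round(2.4) == 2.0 and dml_round(2.5) == 3.0 */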
+
static inline int dml_log2(double x)
{
return dml_round((double)dcn_bw_log(x, 2));
@@ -112,7 +123,7 @@ static inline double dml_log(double x, double base)
static inline unsigned int dml_round_to_multiple(unsigned int num,
unsigned int multiple,
- bool up)
+ unsigned char up)
{
unsigned int remainder;
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 3f66868df171..ea29cf95d470 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -28,8 +28,6 @@ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dsc/dc_dsc.o := $(dsc_ccflags)
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 87d682d25278..0c7f247bb7de 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -22,10 +22,12 @@
* Author: AMD
*/
+#include <drm/drm_dsc.h>
#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
#include "dc.h"
+#include "rc_calc.h"
/* This module's internal functions */
@@ -129,7 +131,7 @@ static bool dsc_line_buff_depth_from_dpcd(int dpcd_line_buff_bit_depth, int *lin
static bool dsc_throughput_from_dpcd(int dpcd_throughput, int *throughput)
{
switch (dpcd_throughput) {
- case DP_DSC_THROUGHPUT_MODE_0_UPSUPPORTED:
+ case DP_DSC_THROUGHPUT_MODE_0_UNSUPPORTED:
*throughput = 0;
break;
case DP_DSC_THROUGHPUT_MODE_0_170:
@@ -304,22 +306,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
return (value + 9) / 10;
}
-static inline uint32_t calc_dsc_bpp_x16(uint32_t stream_bandwidth_kbps, uint32_t pix_clk_100hz, uint32_t bpp_increment_div)
-{
- uint32_t dsc_target_bpp_x16;
- float f_dsc_target_bpp;
- float f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f;
- uint32_t precision = bpp_increment_div; // bpp_increment_div is actually precision
-
- f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz;
-
- // Round down to the nearest precision stop to bring it into DSC spec range
- dsc_target_bpp_x16 = (uint32_t)(f_dsc_target_bpp * precision);
- dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision;
-
- return dsc_target_bpp_x16;
-}
-
/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock
* and uncompressed bandwidth.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 03ae15946c6d..667afbc260f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+#include <drm/drm_dsc.h>
#include "os_types.h"
#include "rc_calc.h"
@@ -40,7 +41,8 @@
break
-void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum max_min max_min, float bpp)
+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
+ enum max_min max_min, float bpp)
{
int mode = MODE_SELECT(444, 422, 420);
int sel = table_hash(mode, bpc, max_min);
@@ -85,7 +87,7 @@ void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum ma
memcpy(qps, table[index].qps, sizeof(qp_set));
}
-double dsc_roundf(double num)
+static double dsc_roundf(double num)
{
if (num < 0.0)
num = num - 0.5;
@@ -95,7 +97,7 @@ double dsc_roundf(double num)
return (int)(num);
}
-double dsc_ceil(double num)
+static double dsc_ceil(double num)
{
double retval = (int)num;
@@ -105,7 +107,7 @@ double dsc_ceil(double num)
return (int)retval;
}
-void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
{
int *p = ofs;
@@ -160,7 +162,7 @@ void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
}
}
-int median3(int a, int b, int c)
+static int median3(int a, int b, int c)
{
if (a > b)
swap(a, b);
@@ -172,13 +174,25 @@ int median3(int a, int b, int c)
return b;
}
-void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version)
+static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
+ enum bits_per_comp bpc, u8 drm_bpp,
+			       bool is_native_422_or_420,
+ int slice_width, int slice_height,
+ int minor_version)
{
+ float bpp;
float bpp_group;
float initial_xmit_delay_factor;
int padding_pixels;
int i;
+ bpp = ((float)drm_bpp / 16.0);
+ /* in native_422 or native_420 modes, the bits_per_pixel is double the
+ * target bpp (the latter is what calc_rc_params expects)
+ */
+	if (is_native_422_or_420)
+ bpp /= 2.0;
+
rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
@@ -251,3 +265,128 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->rc_buf_thresh[13] = 8064;
}
+static u32 _do_bytes_per_pixel_calc(int slice_width, u8 drm_bpp,
+				    bool is_native_422_or_420)
+{
+ float bpp;
+ u32 bytes_per_pixel;
+ double d_bytes_per_pixel;
+
+ bpp = ((float)drm_bpp / 16.0);
+ d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
+ // TODO: Make sure the formula for calculating this is precise (ceiling
+ // vs. floor, and at what point they should be applied)
+	if (is_native_422_or_420)
+ d_bytes_per_pixel /= 2;
+
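+	// Multiplying by 0x10000000 (2^28) expresses the result as a
+	// fixed-point value with 28 fractional bits.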
+ bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
+
+ return bytes_per_pixel;
+}
+
+static u32 _do_calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+ u32 bpp_increment_div)
+{
+ u32 dsc_target_bpp_x16;
+ float f_dsc_target_bpp;
+ float f_stream_bandwidth_100bps;
+ // bpp_increment_div is actually precision
+ u32 precision = bpp_increment_div;
+
+ f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f;
+ f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz;
+
+ // Round down to the nearest precision stop to bring it into DSC spec
+ // range
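+	// (e.g., hypothetically, 2,000,000 kbps over a 150 MHz pixel clock is
+	// 13.33 bpp; with bpp_increment_div == 16 this truncates to 213/16
+	// = 13.3125 bpp)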
+ dsc_target_bpp_x16 = (u32)(f_dsc_target_bpp * precision);
+ dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision;
+
+ return dsc_target_bpp_x16;
+}
+
+/**
+ * calc_rc_params - calculate the DSC rate control parameters
+ * @rc: DC internal DSC parameters
+ * @pps: DRM struct with all required DSC values
+ *
+ * This function expects a drm_dsc_config struct with all the required DSC
+ * values already filled out by our driver; based on this information it
+ * computes the rate control parameters.
+ *
+ * @note This calculation requires floating-point operations; most of it
+ * executes under kernel_fpu_{begin,end}.
+ */
+void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
+{
+ enum colour_mode mode;
+ enum bits_per_comp bpc;
+	bool is_native_422_or_420;
+ u8 drm_bpp = pps->bits_per_pixel;
+ int slice_width = pps->slice_width;
+ int slice_height = pps->slice_height;
+
+ mode = pps->convert_rgb ? CM_RGB : (pps->simple_422 ? CM_444 :
+ (pps->native_422 ? CM_422 :
+ pps->native_420 ? CM_420 : CM_444));
+ bpc = (pps->bits_per_component == 8) ? BPC_8 : (pps->bits_per_component == 10)
+ ? BPC_10 : BPC_12;
+
+	is_native_422_or_420 = pps->native_422 || pps->native_420;
+
+ DC_FP_START();
+	_do_calc_rc_params(rc, mode, bpc, drm_bpp, is_native_422_or_420,
+ slice_width, slice_height,
+ pps->dsc_version_minor);
+ DC_FP_END();
+}
+
+/**
+ * calc_dsc_bytes_per_pixel - calculate bytes per pixel
+ * @pps: DRM struct with all required DSC values
+ *
+ * Based on the information in drm_dsc_config, this function calculates the
+ * number of bytes per pixel.
+ *
+ * @note This calculation requires floating-point operations; most of it
+ * executes under kernel_fpu_{begin,end}.
+ *
+ * Return: the number of bytes per pixel.
+ */
+u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
+{
+ u32 ret;
+ u8 drm_bpp = pps->bits_per_pixel;
+ int slice_width = pps->slice_width;
+	bool is_native_422_or_420 = pps->native_422 || pps->native_420;
+
+ DC_FP_START();
+ ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp,
+				       is_native_422_or_420);
+ DC_FP_END();
+ return ret;
+}
+
+/**
+ * calc_dsc_bpp_x16 - calculate the DSC target bits per pixel
+ * @stream_bandwidth_kbps: compressed stream bandwidth, in kbps
+ * @pix_clk_100hz: pixel clock, in units of 100 Hz
+ * @bpp_increment_div: bpp precision divisor (number of bpp steps per bit)
+ *
+ * Calculate the target bits per pixel for the DSC configuration, in units
+ * of 1/16 of a bit per pixel.
+ *
+ * @note This calculation requires floating-point operations; most of it
+ * executes under kernel_fpu_{begin,end}.
+ */
+u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+ u32 bpp_increment_div)
+{
+ u32 dsc_bpp;
+
+ DC_FP_START();
+ dsc_bpp = _do_calc_dsc_bpp_x16(stream_bandwidth_kbps, pix_clk_100hz,
+ bpp_increment_div);
+ DC_FP_END();
+ return dsc_bpp;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index b6b1f09c2009..21723fa6561e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -77,7 +77,10 @@ struct qp_entry {
typedef struct qp_entry qp_table[];
-void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version);
+void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
+u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
+u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+ u32 bpp_increment_div);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 1f6e63b71456..ef830aded5b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -27,8 +27,6 @@
#include "dscc_types.h"
#include "rc_calc.h"
-double dsc_ceil(double num);
-
static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from)
{
to->line_buf_depth = from->line_buf_depth;
@@ -100,34 +98,13 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params)
{
- enum colour_mode mode = pps->convert_rgb ? CM_RGB :
- (pps->simple_422 ? CM_444 :
- (pps->native_422 ? CM_422 :
- pps->native_420 ? CM_420 : CM_444));
- enum bits_per_comp bpc = (pps->bits_per_component == 8) ? BPC_8 :
- (pps->bits_per_component == 10) ? BPC_10 : BPC_12;
- float bpp = ((float) pps->bits_per_pixel / 16.0);
- int slice_width = pps->slice_width;
- int slice_height = pps->slice_height;
int ret;
struct rc_params rc;
struct drm_dsc_config dsc_cfg;
- double d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
-
- // TODO: Make sure the formula for calculating this is precise (ceiling vs. floor, and at what point they should be applied)
- if (pps->native_422 || pps->native_420)
- d_bytes_per_pixel /= 2;
-
- dsc_params->bytes_per_pixel = (uint32_t)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- /* in native_422 or native_420 modes, the bits_per_pixel is double the target bpp
- * (the latter is what calc_rc_params expects)
- */
- if (pps->native_422 || pps->native_420)
- bpp /= 2.0;
+ dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps);
- calc_rc_params(&rc, mode, bpc, bpp, slice_width, slice_height, pps->dsc_version_minor);
+ calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset);
diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
index 6f730b5bfe42..5e384a8a83dc 100644
--- a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
+++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
@@ -322,3 +322,92 @@ static const struct protection_properties dp_11_protection = {
.process_transaction = dp_11_process_transaction
};
+static const struct protection_properties *get_protection_properties_by_signal(
+ struct dc_link *link,
+ enum signal_type st,
+ enum hdcp_version version)
+{
+ switch (version) {
+ case HDCP_VERSION_14:
+ switch (st) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return &hdmi_14_protection;
+ case SIGNAL_TYPE_DISPLAY_PORT:
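+			/* HDCP is not carried over DP-to-VGA converter dongles. */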
+ if (link &&
+ (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
+ link->dpcd_caps.dongle_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER)) {
+ return &non_supported_protection;
+ }
+ return &dp_11_protection;
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ return &dp_11_protection;
+ default:
+ return &non_supported_protection;
+ }
+ break;
+ case HDCP_VERSION_22:
+ switch (st) {
+ case SIGNAL_TYPE_DVI_SINGLE_LINK:
+ case SIGNAL_TYPE_DVI_DUAL_LINK:
+ case SIGNAL_TYPE_HDMI_TYPE_A:
+ return &hdmi_14_protection; //todo version2.2
+ case SIGNAL_TYPE_DISPLAY_PORT:
+ case SIGNAL_TYPE_DISPLAY_PORT_MST:
+ case SIGNAL_TYPE_EDP:
+ return &dp_11_protection; //todo version2.2
+ default:
+ return &non_supported_protection;
+ }
+ break;
+ default:
+ return &non_supported_protection;
+ }
+}
+
+enum hdcp_message_status dc_process_hdcp_msg(
+ enum signal_type signal,
+ struct dc_link *link,
+ struct hdcp_protection_message *message_info)
+{
+ enum hdcp_message_status status = HDCP_MESSAGE_FAILURE;
+ uint32_t i = 0;
+
+ const struct protection_properties *protection_props;
+
+ if (!message_info)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ if (message_info->msg_id < HDCP_MESSAGE_ID_READ_BKSV ||
+ message_info->msg_id >= HDCP_MESSAGE_ID_MAX)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ protection_props =
+ get_protection_properties_by_signal(
+ link,
+ signal,
+ message_info->version);
+
+ if (!protection_props->supported)
+ return HDCP_MESSAGE_UNSUPPORTED;
+
+ if (protection_props->process_transaction(
+ link,
+ message_info)) {
+ status = HDCP_MESSAGE_SUCCESS;
+ } else {
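+		/* Retry the transaction up to the caller-provided limit. */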
+ for (i = 0; i < message_info->max_retries; i++) {
+ if (protection_props->process_transaction(
+ link,
+ message_info)) {
+ status = HDCP_MESSAGE_SUCCESS;
+ break;
+ }
+ }
+ }
+
+ return status;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index d523fc9547e7..c7fd702a4a87 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -38,6 +38,7 @@
#endif
#include "dwb.h"
#include "mcif_wb.h"
+#include "panel_cntl.h"
#define MAX_CLOCK_SOURCES 7
@@ -92,6 +93,8 @@ struct clk_bw_params;
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
void (*link_init)(struct dc_link *link);
+ struct panel_cntl*(*panel_cntl_create)(
+ const struct panel_cntl_init_data *panel_cntl_init_data);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
bool (*validate_bandwidth)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index d607b3191954..e8ce8c85adf1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -27,27 +27,17 @@
#include "dm_services_types.h"
-struct abm_backlight_registers {
- unsigned int BL_PWM_CNTL;
- unsigned int BL_PWM_CNTL2;
- unsigned int BL_PWM_PERIOD_CNTL;
- unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
-};
-
struct abm {
struct dc_context *ctx;
const struct abm_funcs *funcs;
bool dmcu_is_running;
- /* registers setting needs to be saved and restored at InitBacklight */
- struct abm_backlight_registers stored_backlight_registers;
};
struct abm_funcs {
- void (*abm_init)(struct abm *abm);
+ void (*abm_init)(struct abm *abm, uint32_t back_light);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
- bool (*set_abm_immediate_disable)(struct abm *abm);
- bool (*set_pipe)(struct abm *abm, unsigned int controller_id);
- bool (*init_backlight)(struct abm *abm);
+ bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
+ bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
/* backlight_pwm_u16_16 is unsigned 32 bit,
* 16 bit integer + 16 fractional, where 1.0 is max backlight value.
@@ -56,10 +46,13 @@ struct abm_funcs {
unsigned int backlight_pwm_u16_16,
unsigned int frame_ramp,
unsigned int controller_id,
- bool use_smooth_brightness);
+ unsigned int panel_inst);
unsigned int (*get_current_backlight)(struct abm *abm);
unsigned int (*get_target_backlight)(struct abm *abm);
+ bool (*init_abm_config)(struct abm *abm,
+ const char *src,
+ unsigned int bytes);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index f5dd0cc73c63..47a566d82d6e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -144,6 +144,8 @@ struct hubbub_funcs {
void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow);
void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub);
+
+ void (*force_wm_propagate_to_pipes)(struct hubbub *hubbub);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 094afc4c8173..50ee8aa7ec3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -210,6 +210,22 @@ struct mpc_funcs {
struct mpcc_blnd_cfg *blnd_cfg,
int mpcc_id);
+ /*
+ * Lock cursor updates for the specified OPP.
+ * OPP defines the set of MPCC that are locked together for cursor.
+ *
+ * Parameters:
+ * [in] mpc - MPC context.
+ * [in] opp_id - The OPP to lock cursor updates on
+ * [in] lock - lock/unlock the OPP
+ *
+ * Return: void
+ */
+ void (*cursor_lock)(
+ struct mpc *mpc,
+ int opp_id,
+ bool lock);
+
struct mpcc* (*get_mpcc_for_dpp)(
struct mpc_tree *tree,
int dpp_id);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
new file mode 100644
index 000000000000..f9ab5abb6462
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+/*
+ * panel_cntl.h
+ *
+ * Created on: Oct 6, 2015
+ * Author: yonsun
+ */
+
+#ifndef DC_PANEL_CNTL_H_
+#define DC_PANEL_CNTL_H_
+
+#include "dc_types.h"
+
+#define MAX_BACKLIGHT_LEVEL 0xFFFF
+
+struct panel_cntl_backlight_registers {
+ unsigned int BL_PWM_CNTL;
+ unsigned int BL_PWM_CNTL2;
+ unsigned int BL_PWM_PERIOD_CNTL;
+ unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
+};
+
+struct panel_cntl_funcs {
+ void (*destroy)(struct panel_cntl **panel_cntl);
+ uint32_t (*hw_init)(struct panel_cntl *panel_cntl);
+ bool (*is_panel_backlight_on)(struct panel_cntl *panel_cntl);
+ bool (*is_panel_powered_on)(struct panel_cntl *panel_cntl);
+ void (*store_backlight_level)(struct panel_cntl *panel_cntl);
+ void (*driver_set_backlight)(struct panel_cntl *panel_cntl,
+ uint32_t backlight_pwm_u16_16);
+};
+
+struct panel_cntl_init_data {
+ struct dc_context *ctx;
+ uint32_t inst;
+};
+
+struct panel_cntl {
+ const struct panel_cntl_funcs *funcs;
+ struct dc_context *ctx;
+ uint32_t inst;
+ /* registers setting needs to be saved and restored at InitBacklight */
+ struct panel_cntl_backlight_registers stored_backlight_registers;
+};
+
+#endif /* DC_PANEL_CNTL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index e5e7d94026fc..f803191e3134 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -117,6 +117,9 @@ struct crc_params {
enum crc_selection selection;
+ uint8_t dsc_mode;
+ uint8_t odm_mode;
+
bool continuous_mode;
bool enable;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
index fecc80c47c26..2947d1b15512 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
@@ -173,6 +173,8 @@ struct scaler_data {
struct scaling_taps taps;
struct rect viewport;
struct rect viewport_c;
+ struct rect viewport_unadjusted;
+ struct rect viewport_c_unadjusted;
struct rect recout;
struct scaling_ratios ratios;
struct scl_inits inits;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index d4c1fb242c63..8e72f077e552 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -75,9 +75,13 @@ struct hw_sequencer_funcs {
void (*wait_for_mpcc_disconnect)(struct dc *dc,
struct resource_pool *res_pool,
struct pipe_ctx *pipe_ctx);
+ void (*edp_backlight_control)(
+ struct dc_link *link,
+ bool enable);
void (*program_triplebuffer)(const struct dc *dc,
struct pipe_ctx *pipe_ctx, bool enableTripleBuffer);
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
+ void (*power_down)(struct dc *dc);
/* Pipe Lock Related */
void (*pipe_control_lock)(struct dc *dc,
@@ -86,11 +90,17 @@ struct hw_sequencer_funcs {
struct dc_state *context, bool lock);
void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
bool flip_immediate);
+ void (*cursor_lock)(struct dc *dc, struct pipe_ctx *pipe, bool lock);
/* Timing Related */
void (*get_position)(struct pipe_ctx **pipe_ctx, int num_pipes,
struct crtc_position *position);
int (*get_vupdate_offset_from_vsync)(struct pipe_ctx *pipe_ctx);
+ void (*calc_vupdate_position)(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ uint32_t *start_line,
+ uint32_t *end_line);
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,
@@ -187,6 +197,12 @@ struct hw_sequencer_funcs {
unsigned int bufSize, unsigned int mask);
void (*clear_status_bits)(struct dc *dc, unsigned int mask);
+ bool (*set_backlight_level)(struct pipe_ctx *pipe_ctx,
+ uint32_t backlight_pwm_u16_16,
+ uint32_t frame_ramp);
+
+ void (*set_abm_immediate_disable)(struct pipe_ctx *pipe_ctx);
+
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index 52a26e6be066..36e906bb6bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -100,8 +100,6 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
- bool (*is_panel_backlight_on)(struct dc_link *link);
- bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index ca4c36c0c9bc..a9be495af922 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -138,9 +138,6 @@ struct pipe_ctx *find_idle_secondary_pipe(
const struct resource_pool *pool,
const struct pipe_ctx *primary_pipe);
-bool resource_is_stream_unchanged(
- struct dc_state *old_context, struct dc_stream_state *stream);
-
bool resource_validate_attach_surfaces(
const struct dc_validation_set set[],
int set_count,
@@ -180,6 +177,8 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+int get_num_mpc_splits(struct pipe_ctx *pipe);
+
int get_num_odm_splits(struct pipe_ctx *pipe);
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index c34eba19860a..6d7bca562eec 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -108,7 +108,7 @@
#define ASSERT(expr) ASSERT_CRITICAL(expr)
#else
-#define ASSERT(expr) WARN_ON(!(expr))
+#define ASSERT(expr) WARN_ON_ONCE(!(expr))
#endif
#define BREAK_TO_DEBUGGER() ASSERT(0)
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
index 3464b2d5b89a..348e9a600a72 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_link_encoder.c
@@ -84,6 +84,14 @@ static void virtual_link_encoder_destroy(struct link_encoder **enc)
*enc = NULL;
}
+static void virtual_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ /* Set Default link settings */
+ struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
+ *link_settings = max_link_cap;
+}
static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
.validate_output_with_stream =
@@ -94,6 +102,7 @@ static const struct link_encoder_funcs virtual_lnk_enc_funcs = {
.enable_dp_output = virtual_link_encoder_enable_dp_output,
.enable_dp_mst_output = virtual_link_encoder_enable_dp_mst_output,
.disable_output = virtual_link_encoder_disable_output,
+ .get_max_link_cap = virtual_link_encoder_get_max_link_cap,
.dp_set_lane_settings = virtual_link_encoder_dp_set_lane_settings,
.dp_set_phy_pattern = virtual_link_encoder_dp_set_phy_pattern,
.update_mst_stream_allocation_table =
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index c2671f2616c8..26d94eb5ab58 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -64,10 +64,11 @@
* other component within DAL.
*/
-#include "dmub_types.h"
-#include "dmub_cmd.h"
-#include "dmub_gpint_cmd.h"
-#include "dmub_rb.h"
+#include "inc/dmub_types.h"
+#include "inc/dmub_cmd.h"
+#include "inc/dmub_gpint_cmd.h"
+#include "inc/dmub_cmd_dal.h"
+#include "inc/dmub_rb.h"
#if defined(__cplusplus)
extern "C" {
@@ -75,7 +76,6 @@ extern "C" {
/* Forward declarations */
struct dmub_srv;
-struct dmub_cmd_header;
struct dmub_srv_common_regs;
/* enum dmub_status - return code for dmcub functions */
@@ -151,6 +151,7 @@ struct dmub_srv_region_params {
uint32_t inst_const_size;
uint32_t bss_data_size;
uint32_t vbios_size;
+ const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
};
@@ -457,7 +458,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub);
* DMUB_STATUS_INVALID - unspecified error
*/
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd);
+ const union dmub_rb_cmd *cmd);
/**
* dmub_srv_cmd_execute() - Executes a queued sequence to the dmub
@@ -565,6 +566,16 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
uint32_t *response);
+/**
+ * dmub_flush_buffer_mem() - Read back entire frame buffer region.
+ * This ensures that the write from x86 has been flushed and will not
+ * hang the DMCUB.
+ * @fb: frame buffer to flush
+ *
+ * Can be called after software initialization.
+ */
+void dmub_flush_buffer_mem(const struct dmub_fb *fb);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 10b5fa9d2588..599bf2055bcb 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -228,6 +228,7 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t smu_optimizations_en;
uint8_t frame_delay;
uint8_t frame_cap_ind;
+ struct dmub_psr_debug_flags debug;
};
struct dmub_rb_cmd_psr_copy_settings {
@@ -260,6 +261,8 @@ struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_abm_set_pipe_data {
uint32_t ramping_boundary;
uint32_t otg_inst;
+ uint32_t panel_inst;
+ uint32_t set_pipe_option;
};
struct dmub_rb_cmd_abm_set_pipe {
@@ -303,6 +306,16 @@ struct dmub_rb_cmd_abm_set_pwm_frac {
struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
+struct dmub_cmd_abm_init_config_data {
+ union dmub_addr src;
+ uint16_t bytes;
+};
+
+struct dmub_rb_cmd_abm_init_config {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_init_config_data abm_init_config_data;
+};
+
union dmub_rb_cmd {
struct dmub_rb_cmd_read_modify_write read_modify_write;
struct dmub_rb_cmd_reg_field_update_sequence reg_field_update_seq;
@@ -324,6 +337,7 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_set_level abm_set_level;
struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
+ struct dmub_rb_cmd_abm_init_config abm_init_config;
};
#pragma pack(pop)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index d37535d21928..e42de9ded275 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,17 +32,16 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SET_VERSION = 0,
- DMUB_CMD__PSR_COPY_SETTINGS = 1,
- DMUB_CMD__PSR_ENABLE = 2,
- DMUB_CMD__PSR_DISABLE = 3,
- DMUB_CMD__PSR_SET_LEVEL = 4,
+ DMUB_CMD__PSR_SET_VERSION = 0,
+ DMUB_CMD__PSR_COPY_SETTINGS = 1,
+ DMUB_CMD__PSR_ENABLE = 2,
+ DMUB_CMD__PSR_DISABLE = 3,
+ DMUB_CMD__PSR_SET_LEVEL = 4,
};
enum psr_version {
- PSR_VERSION_1 = 0x10, // PSR Version 1
- PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_1 = 0,
+ PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
};
enum dmub_cmd_abm_type {
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
index df875fdd2ab0..2ae48c18bb5b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_rb.h
@@ -33,8 +33,6 @@
extern "C" {
#endif
-struct dmub_cmd_header;
-
struct dmub_rb_init_params {
void *ctx;
void *base_address;
@@ -71,7 +69,7 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
}
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
- const struct dmub_cmd_header *cmd)
+ const union dmub_rb_cmd *cmd)
{
uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
const uint64_t *src = (const uint64_t *)cmd;
@@ -93,7 +91,7 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
}
static inline bool dmub_rb_front(struct dmub_rb *rb,
- struct dmub_cmd_header *cmd)
+ union dmub_rb_cmd *cmd)
{
uint8_t *rd_ptr = (uint8_t *)rb->base_address + rb->rptr;
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
index 41d524b0db2f..bed5b023a396 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_types.h
@@ -49,6 +49,12 @@ extern "C" {
#define dmub_udelay(microseconds) udelay(microseconds)
#endif
+/* Maximum number of streams on any ASIC. */
+#define DMUB_MAX_STREAMS 6
+
+/* Maximum number of planes on any ASIC. */
+#define DMUB_MAX_PLANES 6
+
union dmub_addr {
struct {
uint32_t low_part;
@@ -57,6 +63,11 @@ union dmub_addr {
uint64_t quad_part;
};
+struct dmub_psr_debug_flags {
+ uint8_t visual_confirm : 1;
+ uint8_t reserved : 7;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 63bb9e2c81de..edc73d6d7ba2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn20.h"
@@ -186,14 +186,22 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset);
- dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset);
-
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
- REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
- REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
- REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
- DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
- DMCUB_REGION3_CW2_ENABLE, 1);
+ if (cw2->region.base != cw2->region.top) {
+ dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset,
+ &offset);
+
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base);
+ REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0,
+ DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top,
+ DMCUB_REGION3_CW2_ENABLE, 1);
+ } else {
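+		/* Empty region: clear the window and leave cache window 2
+		 * disabled.
+		 */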
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, 0);
+ REG_WRITE(DMCUB_REGION3_CW2_TOP_ADDRESS, 0);
+ }
dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
index 5bed9fcd6b5c..e8f488232e34 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_reg.h"
#include "dmub_dcn21.h"
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
index 4094eca212f0..ca0c8a54b635 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_reg.c
@@ -24,7 +24,7 @@
*/
#include "dmub_reg.h"
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
struct dmub_reg_value_masks {
uint32_t value;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index ce32cc7933c4..0e3751d94cb0 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -23,7 +23,7 @@
*
*/
-#include "../inc/dmub_srv.h"
+#include "../dmub_srv.h"
#include "dmub_dcn20.h"
#include "dmub_dcn21.h"
#include "dmub_fw_meta.h"
@@ -70,7 +70,7 @@ static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
return (val + factor - 1) / factor * factor;
}
-static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
+void dmub_flush_buffer_mem(const struct dmub_fb *fb)
{
const uint8_t *base = (const uint8_t *)fb->cpu_addr;
uint8_t buf[64];
@@ -91,18 +91,32 @@ static void dmub_flush_buffer_mem(const struct dmub_fb *fb)
}
static const struct dmub_fw_meta_info *
-dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size)
+dmub_get_fw_meta_info(const struct dmub_srv_region_params *params)
{
const union dmub_fw_meta *meta;
+ const uint8_t *blob = NULL;
+ uint32_t blob_size = 0;
+ uint32_t meta_offset = 0;
+
+ if (params->fw_bss_data) {
+ /* Legacy metadata region. */
+ blob = params->fw_bss_data;
+ blob_size = params->bss_data_size;
+ meta_offset = DMUB_FW_META_OFFSET;
+ } else if (params->fw_inst_const) {
+ /* Combined metadata region. */
+ blob = params->fw_inst_const;
+ blob_size = params->inst_const_size;
+ meta_offset = 0;
+ }
- if (fw_bss_data == NULL)
+ if (!blob || !blob_size)
return NULL;
- if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET)
+ if (blob_size < sizeof(union dmub_fw_meta) + meta_offset)
return NULL;
- meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size -
- DMUB_FW_META_OFFSET -
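+	/* The metadata record sits meta_offset bytes before the end of the
+	 * selected blob.
+	 */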
+ meta = (const union dmub_fw_meta *)(blob + blob_size - meta_offset -
sizeof(union dmub_fw_meta));
if (meta->info.magic_value != DMUB_FW_META_MAGIC)
@@ -247,8 +261,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
mail->base = dmub_align(bios->top, 256);
mail->top = mail->base + DMUB_MAILBOX_SIZE;
- fw_info = dmub_get_fw_meta_info(params->fw_bss_data,
- params->bss_data_size);
+ fw_info = dmub_get_fw_meta_info(params);
if (fw_info) {
fw_state_size = fw_info->fw_region_size;
@@ -449,7 +462,7 @@ enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub)
}
enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
- const struct dmub_cmd_header *cmd)
+ const union dmub_rb_cmd *cmd)
{
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h
index f31e6befc8d6..42229b4effdc 100644
--- a/drivers/gpu/drm/amd/display/include/hdcp_types.h
+++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h
@@ -83,6 +83,12 @@ enum hdcp_link {
HDCP_LINK_SECONDARY
};
+enum hdcp_message_status {
+ HDCP_MESSAGE_SUCCESS,
+ HDCP_MESSAGE_FAILURE,
+ HDCP_MESSAGE_UNSUPPORTED
+};
+
struct hdcp_protection_message {
enum hdcp_version version;
/* relevant only for DVI */
@@ -91,6 +97,7 @@ struct hdcp_protection_message {
uint32_t length;
uint8_t max_retries;
uint8_t *data;
+ enum hdcp_message_status status;
};
#endif
diff --git a/drivers/gpu/drm/amd/display/include/logger_interface.h b/drivers/gpu/drm/amd/display/include/logger_interface.h
index 6e008de25629..02c23b04d34b 100644
--- a/drivers/gpu/drm/amd/display/include/logger_interface.h
+++ b/drivers/gpu/drm/amd/display/include/logger_interface.h
@@ -40,8 +40,6 @@ struct dc_state;
*
*/
-void dc_conn_log_hex_linux(const uint8_t *hex_data, int hex_data_count);
-
void pre_surface_trace(
struct dc *dc,
const struct dc_plane_state *const *plane_states,
@@ -102,14 +100,12 @@ void context_clock_trace(
#define CONN_DATA_DETECT(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
- dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_DETECTION(__VA_ARGS__); \
} while (0)
#define CONN_DATA_LINK_LOSS(link, hex_data, hex_len, ...) \
do { \
(void)(link); \
- dc_conn_log_hex_linux(hex_data, hex_len); \
DC_LOG_EVENT_LINK_LOSS(__VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index cac09d500fda..bcfe34ef8c28 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -843,7 +843,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
pow_buffer_ptr = -1; // reset back to no optimize
ret = true;
release:
- kfree(coeff);
+ kvfree(coeff);
return ret;
}
@@ -1777,12 +1777,13 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
kfree(rgb_regamma);
rgb_regamma_alloc_fail:
- kvfree(rgb_user);
+ kfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+ struct dc_transfer_func *input_tf,
const struct dc_gamma *ramp, bool mapUserRamp)
{
struct dc_transfer_func_distributed_points *tf_pts = &input_tf->tf_pts;
@@ -1801,11 +1802,29 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
/* we can use hardcoded curve for plain SRGB TF
* If linear, it's bypass if on user ramp
*/
- if (input_tf->type == TF_TYPE_PREDEFINED &&
- (input_tf->tf == TRANSFER_FUNCTION_SRGB ||
- input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
- !mapUserRamp)
- return true;
+ if (input_tf->type == TF_TYPE_PREDEFINED) {
+ if ((input_tf->tf == TRANSFER_FUNCTION_SRGB ||
+ input_tf->tf == TRANSFER_FUNCTION_LINEAR) &&
+ !mapUserRamp)
+ return true;
+
+ if (dc_caps != NULL &&
+ dc_caps->dpp.dcn_arch == 1) {
+
+ if (input_tf->tf == TRANSFER_FUNCTION_PQ &&
+ dc_caps->dpp.dgam_rom_caps.pq == 1)
+ return true;
+
+ if (input_tf->tf == TRANSFER_FUNCTION_GAMMA22 &&
+ dc_caps->dpp.dgam_rom_caps.gamma2_2 == 1)
+ return true;
+
+ // HLG OOTF not accounted for
+ if (input_tf->tf == TRANSFER_FUNCTION_HLG &&
+ dc_caps->dpp.dgam_rom_caps.hlg == 1)
+ return true;
+ }
+ }
input_tf->type = TF_TYPE_DISTRIBUTED_POINTS;
@@ -1902,7 +1921,7 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
- if (ramp->type == GAMMA_CUSTOM)
+ if (ramp && ramp->type == GAMMA_CUSTOM)
apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
index 9994817a9a03..7f56226ba77a 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.h
@@ -30,6 +30,7 @@ struct dc_transfer_func;
struct dc_gamma;
struct dc_transfer_func_distributed_points;
struct dc_rgb_fixed;
+struct dc_color_caps;
enum dc_transfer_func_predefined;
/* For SetRegamma ADL interface support
@@ -100,7 +101,8 @@ bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed,
const struct freesync_hdr_tf_params *fs_params);
-bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf,
+bool mod_color_calculate_degamma_params(struct dc_color_caps *dc_caps,
+ struct dc_transfer_func *output_tf,
const struct dc_gamma *ramp, bool mapUserRamp);
bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans,
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index c33454a9e0b4..eb7421e83b86 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -443,7 +443,7 @@ static bool vrr_settings_require_update(struct core_freesync *core_freesync,
return true;
} else if (in_vrr->state == VRR_STATE_ACTIVE_FIXED &&
in_vrr->fixed.target_refresh_in_uhz !=
- in_config->min_refresh_in_uhz) {
+ in_config->fixed_refresh_in_uhz) {
return true;
} else if (in_vrr->min_refresh_in_uhz != min_refresh_in_uhz) {
return true;
@@ -491,7 +491,7 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
return false;
}
-static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
+static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket)
{
/* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
@@ -523,14 +523,74 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
vrr->state == VRR_STATE_ACTIVE_FIXED)
infopacket->sb[6] |= 0x04;
+	// For v1 and v2 infoframes, program the nominal (max) refresh rate
+	// when not in a FreeSync mode; otherwise program the full range.
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ } else {
+ infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ }
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ //FreeSync HDR
+ infopacket->sb[9] = 0;
+ infopacket->sb[10] = 0;
+}
+
+static void build_vrr_infopacket_data_v3(const struct mod_vrr_params *vrr,
+ struct dc_info_packet *infopacket)
+{
+ /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
+ infopacket->sb[1] = 0x1A;
+
+ /* PB2 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 1) */
+ infopacket->sb[2] = 0x00;
+
+ /* PB3 = 0x00 (24bit AMD IEEE OUI (0x00001A) - Byte 2) */
+ infopacket->sb[3] = 0x00;
+
+ /* PB4 = Reserved */
+
+ /* PB5 = Reserved */
+
+ /* PB6 = [Bits 7:3 = Reserved] */
+
+ /* PB6 = [Bit 0 = FreeSync Supported] */
+ if (vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x01;
+
+ /* PB6 = [Bit 1 = FreeSync Enabled] */
+ if (vrr->state != VRR_STATE_DISABLED &&
+ vrr->state != VRR_STATE_UNSUPPORTED)
+ infopacket->sb[6] |= 0x02;
+
+ /* PB6 = [Bit 2 = FreeSync Active] */
+ if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+ vrr->state == VRR_STATE_ACTIVE_FIXED)
+ infopacket->sb[6] |= 0x04;
+
+ if (vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->fixed_refresh_in_uhz + 500000) / 1000000);
+ } else if (vrr->state == VRR_STATE_ACTIVE_VARIABLE) {
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ } else {
+		// Non-FS case: program the nominal range
+ /* PB7 = FreeSync Minimum refresh rate (Hz) */
+ infopacket->sb[7] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ /* PB8 = FreeSync Maximum refresh rate (Hz) */
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
+ }
//FreeSync HDR
infopacket->sb[9] = 0;
@@ -678,7 +738,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
unsigned int payload_size = 0;
build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
- build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket);
build_vrr_infopacket_checksum(&payload_size, infopacket);
infopacket->valid = true;
@@ -692,7 +752,24 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
unsigned int payload_size = 0;
build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
- build_vrr_infopacket_data(vrr, infopacket);
+ build_vrr_infopacket_data_v1(vrr, infopacket);
+
+ build_vrr_infopacket_fs2_data(app_tf, infopacket);
+
+ build_vrr_infopacket_checksum(&payload_size, infopacket);
+
+ infopacket->valid = true;
+}
+
+static void build_vrr_infopacket_v3(enum signal_type signal,
+ const struct mod_vrr_params *vrr,
+ enum color_transfer_func app_tf,
+ struct dc_info_packet *infopacket)
+{
+ unsigned int payload_size = 0;
+
+ build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
+ build_vrr_infopacket_data_v3(vrr, infopacket);
build_vrr_infopacket_fs2_data(app_tf, infopacket);
@@ -717,11 +794,14 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
return;
switch (packet_type) {
- case PACKET_TYPE_FS2:
+ case PACKET_TYPE_FS_V3:
+ build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket);
+ break;
+ case PACKET_TYPE_FS_V2:
build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
break;
case PACKET_TYPE_VRR:
- case PACKET_TYPE_FS1:
+ case PACKET_TYPE_FS_V1:
default:
build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
}
@@ -793,6 +873,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
calc_duration_in_us_from_refresh_in_uhz(
(unsigned int)max_refresh_in_uhz);
+ if (in_config->state == VRR_STATE_ACTIVE_FIXED)
+ in_out_vrr->fixed_refresh_in_uhz = in_config->fixed_refresh_in_uhz;
+ else
+ in_out_vrr->fixed_refresh_in_uhz = 0;
+
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
@@ -843,7 +928,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->min_refresh_in_uhz);
} else if (in_out_vrr->state == VRR_STATE_ACTIVE_FIXED) {
in_out_vrr->fixed.target_refresh_in_uhz =
- in_out_vrr->min_refresh_in_uhz;
+ in_out_vrr->fixed_refresh_in_uhz;
if (in_out_vrr->fixed.ramping_active &&
in_out_vrr->fixed.fixed_active) {
/* Do not update vtotals if ramping is already active
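
PB7/PB8 of the FreeSync infopacket carry refresh rates in whole hertz, while mod_vrr_params tracks them in micro-hertz; the recurring (x + 500000) / 1000000 idiom above rounds to the nearest hertz. A self-contained illustration of just that conversion (the helper name is invented for the example):

    #include <assert.h>

    /* Round a refresh rate in micro-hertz to the nearest whole hertz,
     * matching the (uhz + 500000) / 1000000 idiom used for PB7/PB8. */
    static unsigned char uhz_to_hz(unsigned int uhz)
    {
        return (unsigned char)((uhz + 500000) / 1000000);
    }

    int main(void)
    {
        assert(uhz_to_hz(48000000) == 48);  /* exact */
        assert(uhz_to_hz(59940000) == 60);  /* 59.94 Hz rounds up */
        assert(uhz_to_hz(59499999) == 59);  /* below .5 Hz rounds down */
        return 0;
    }
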
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index cc1d3f470b99..e9fbd94f8635 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -328,7 +328,8 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
- status = mod_hdcp_add_display_to_topology(hdcp, display->index);
+ status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
@@ -374,7 +375,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ memset(display, 0, sizeof(struct mod_hdcp_display));
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 5cb4546be0ef..b0cefed2eb02 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* psp functions */
enum mod_hdcp_status mod_hdcp_add_display_to_topology(
- struct mod_hdcp *hdcp, uint8_t index);
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -357,8 +357,6 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(
struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(
struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status);
/* ddc functions */
enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp);
@@ -503,11 +501,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
- return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,34 +508,23 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t added_count = 0;
+ uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_active(&hdcp->displays[i]))
- added_count++;
- return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
- uint8_t added_count = 0;
- uint8_t i;
-
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->displays[i]))
- added_count++;
- return added_count;
+ active_count++;
+ return active_count;
}
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->displays[i])) {
+ if (is_display_active(&hdcp->displays[i])) {
display = &hdcp->displays[i];
break;
}
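
With MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED removed (see the mod_hdcp.h hunk further down), the per-display lifecycle collapses to three ordered states, and the surviving predicates rely purely on that ordering. A minimal sketch of the simplified state machine, with names shortened from the mod_hdcp ones:

    #include <stdbool.h>

    /* The enum values must stay ordered for the >= predicates to work. */
    enum display_state {
        DISPLAY_INACTIVE = 0,
        DISPLAY_ACTIVE,            /* formerly ACTIVE or ACTIVE_AND_ADDED */
        DISPLAY_ENCRYPTION_ENABLED
    };

    static bool is_active(enum display_state s)
    {
        return s >= DISPLAY_ACTIVE;
    }

    static bool is_encryption_enabled(enum display_state s)
    {
        return s >= DISPLAY_ENCRYPTION_ENABLED;
    }
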
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37c8c05497d6..f244b72e74e0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 491c00f48026..549c113abcf7 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
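
Both the HDCP 1.x and 2.x execution paths now compare the repeater-reported downstream device count against the active (rather than added) display count. The invariant itself, as a tiny sketch:

    /* A repeater must report at least as many downstream devices as the
     * driver still tracks as active, else authentication fails. */
    enum count_status { COUNT_OK, COUNT_MISMATCH };

    static enum count_status check_count(unsigned int reported_devices,
                                         unsigned int active_displays)
    {
        return reported_devices < active_displays ? COUNT_MISMATCH : COUNT_OK;
    }
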
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 44956f9ba178..fb6a19d020f9 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -98,8 +98,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
- case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
- return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE:
@@ -158,8 +158,8 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY";
- case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION:
- return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION";
+ case MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE:
+ return "MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE";
case MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING:
return "MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING";
case MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index c2929815c3ee..fb1161dd7ea8 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -51,12 +51,15 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct ta_dtm_shared_memory *dtm_cmd;
struct mod_hdcp_display *display =
get_active_display_at_index(hdcp, index);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- if (!display || !is_display_added(display))
+ if (!display || !is_display_active(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->dtm_context.mutex);
+
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -66,34 +69,33 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ } else {
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+ }
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
-
- return MOD_HDCP_STATUS_SUCCESS;
-
+ mutex_unlock(&psp->dtm_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
- uint8_t index)
+ struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display =
- get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
- if (!display || is_display_added(display))
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
+ mutex_lock(&psp->dtm_context.mutex);
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
@@ -113,21 +115,24 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ status = MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ } else {
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->dtm_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized.");
@@ -135,6 +140,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
}
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+
+ mutex_lock(&psp->hdcp_context.mutex);
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index;
@@ -144,16 +151,18 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle;
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
-
- hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
- memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
- sizeof(hdcp->auth.msg.hdcp1.aksv));
- memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
- sizeof(hdcp->auth.msg.hdcp1.an));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE;
+ } else {
+ hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary;
+ memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary,
+ sizeof(hdcp->auth.msg.hdcp1.aksv));
+ memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary,
+ sizeof(hdcp->auth.msg.hdcp1.an));
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
@@ -162,7 +171,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -171,27 +182,30 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
-
- HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_encryption_enabled(
- &hdcp->displays[i])) {
- hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_HDCP1_DISABLED_TRACE(hdcp,
- hdcp->displays[i].index);
- }
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
+ } else {
+ HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(&hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP1_DISABLED_TRACE(
+ hdcp, hdcp->displays[i].index);
+ }
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -206,10 +220,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
- if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) {
/* needs second part of authentication */
hdcp->connection.is_repeater = 1;
@@ -219,20 +232,22 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
hdcp->connection.is_hdcp1_revoked = 1;
- return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
+ status = MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
-
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -241,14 +256,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION;
-
- if (!is_dp_mst_hdcp(hdcp)) {
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE;
+ } else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
@@ -257,6 +273,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -287,6 +304,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
}
+ mutex_unlock(&psp->hdcp_context.mutex);
return status;
}
@@ -296,14 +314,15 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
int i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->displays[i].adjust.disable)
- continue;
+ if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+ continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -313,21 +332,26 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
+ break;
+ }
hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -339,12 +363,12 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level != 1)
+ status = MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
- return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1)
- ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
@@ -364,19 +388,23 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
return MOD_HDCP_STATUS_FAILURE;
}
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->hdcp_context.mutex);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
@@ -393,12 +421,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
- hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE;
+ else
+ hdcp->auth.id = hdcp_cmd->out_msg.hdcp2_create_session_v2.session_handle;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
@@ -406,7 +436,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
uint8_t i = 0;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -415,20 +447,21 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
-
- HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_encryption_enabled(
- &hdcp->displays[i])) {
- hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_HDCP2_DISABLED_TRACE(hdcp,
- hdcp->displays[i].index);
- }
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
+ } else {
+ HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(&hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP2_DISABLED_TRACE(
+ hdcp, hdcp->displays[i].index);
+ }
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
@@ -437,7 +470,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -452,12 +487,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_ake_init(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
-
- memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ake_init));
+ status = MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE;
+ else
+ memcpy(&hdcp->auth.msg.hdcp2.ake_init[0], &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_init));
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
@@ -466,7 +502,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -488,26 +526,32 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
-
- memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
- &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
- sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
-
- if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
- hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
- hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
- return MOD_HDCP_STATUS_SUCCESS;
- } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
- hdcp->connection.is_hdcp2_revoked = 1;
- return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.ake_no_stored_km,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km));
+
+ memcpy(hdcp->auth.msg.hdcp2.ake_stored_km,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ake_no_stored_km)],
+ sizeof(hdcp->auth.msg.hdcp2.ake_stored_km));
+
+ if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored =
+ msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater =
+ msg_out->process.is_repeater ? 1 : 0;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
+ }
}
-
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -516,7 +560,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -543,16 +589,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
-
- if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
+ else if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE;
else if (!hdcp->connection.is_km_stored &&
- msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
-
+ msg_out->process.msg2_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
@@ -561,7 +606,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -577,12 +624,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_lc_init(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.lc_init));
+ status = MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE;
+ else
+ memcpy(hdcp->auth.msg.hdcp2.lc_init, &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.lc_init));
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
@@ -591,7 +639,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -610,13 +660,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_l_prime(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
-
- if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
+ msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
@@ -625,7 +674,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -642,48 +693,55 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_eks(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.ske_eks, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.ske_eks));
- msg_out->prepare.msg1_desc.msg_size = sizeof(hdcp->auth.msg.hdcp2.ske_eks);
-
- if (is_dp_hdcp(hdcp)) {
- memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
- &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
- sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.ske_eks,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks));
+ msg_out->prepare.msg1_desc.msg_size =
+ sizeof(hdcp->auth.msg.hdcp2.ske_eks);
+
+ if (is_dp_hdcp(hdcp)) {
+ memcpy(hdcp->auth.msg.hdcp2.content_stream_type_dp,
+ &msg_out->prepare.transmitter_message[sizeof(hdcp->auth.msg.hdcp2.ske_eks)],
+ sizeof(hdcp->auth.msg.hdcp2.content_stream_type_dp));
+ }
}
+ mutex_unlock(&psp->hdcp_context.mutex);
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
-
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+ mutex_lock(&psp->hdcp_context.mutex);
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
-
- if (!is_dp_mst_hdcp(hdcp)) {
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE;
+ } else if (!is_dp_mst_hdcp(hdcp)) {
display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
HDCP_HDCP2_ENABLED_TRACE(hdcp, display->index);
}
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
@@ -692,6 +750,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -712,23 +773,26 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
-
- memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
-
- if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
- hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
- hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
- return MOD_HDCP_STATUS_SUCCESS;
- } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
- hdcp->connection.is_hdcp2_revoked = 1;
- return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ } else {
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_ack,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_ack));
+
+ if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) {
+ hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
+ hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status ==
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
+ }
}
-
-
- return MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -737,7 +801,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
uint8_t i;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -747,9 +813,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->displays[i].adjust.disable)
- continue;
+ if (hdcp->displays[i].adjust.disable || hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE)
+ continue;
+
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
@@ -763,8 +829,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
- return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE;
+
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *hdcp)
@@ -774,7 +845,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -789,15 +862,17 @@ enum mod_hdcp_status mod_hdcp_hdcp2_prepare_stream_management(struct mod_hdcp *h
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
-
- hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
-
- memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage, &msg_out->prepare.transmitter_message[0],
- sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) {
+ status = MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE;
+ } else {
+ hdcp->auth.msg.hdcp2.stream_manage_size = msg_out->prepare.msg1_desc.msg_size;
- return MOD_HDCP_STATUS_SUCCESS;
+ memcpy(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage,
+ &msg_out->prepare.transmitter_message[0],
+ sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_manage));
+ }
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
@@ -806,7 +881,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
struct ta_hdcp_shared_memory *hdcp_cmd;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2 *msg_out;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
+ mutex_lock(&psp->hdcp_context.mutex);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -825,38 +902,13 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_stream_ready(struct mod_hdcp *hdcp)
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) &&
- (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
- ? MOD_HDCP_STATUS_SUCCESS
- : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
-}
-
-enum mod_hdcp_status mod_hdcp_hdcp2_get_link_encryption_status(struct mod_hdcp *hdcp,
- enum mod_hdcp_encryption_status *encryption_status)
-{
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_hdcp_shared_memory *hdcp_cmd;
-
- hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
-
- memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
-
- hdcp_cmd->in_msg.hdcp2_get_encryption_status.session_handle = hdcp->auth.id;
- hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level = 0;
- hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_GET_ENCRYPTION_STATUS;
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
- psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
-
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_FAILURE;
-
- if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.protection_level == 1) {
- if (hdcp_cmd->out_msg.hdcp2_get_encryption_status.hdcp2_type == TA_HDCP2_CONTENT_TYPE__TYPE1)
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON;
- else
- *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON;
- }
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS)
+ status = MOD_HDCP_STATUS_SUCCESS;
+ else
+ status = MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
- return MOD_HDCP_STATUS_SUCCESS;
+ mutex_unlock(&psp->hdcp_context.mutex);
+ return status;
}
+
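
Every mod_hdcp_hdcp1_*/hdcp2_* helper in hdcp_psp.c above is rewritten to the same shape: take the context mutex before touching the TA shared buffer, funnel every outcome into a single status variable, and release the lock at one exit point. A stand-alone sketch of that pattern, with a pthread mutex standing in for the kernel mutex and a stub standing in for psp_hdcp_invoke():

    #include <pthread.h>

    enum ta_status { TA_OK, TA_FAIL };

    struct psp_ctx {
        pthread_mutex_t lock;   /* stands in for psp->hdcp_context.mutex */
        int shared_buf;         /* stands in for the TA shared buffer    */
    };

    static int ta_invoke(struct psp_ctx *c)   /* psp_hdcp_invoke() stub */
    {
        (void)c;
        return 0;
    }

    static enum ta_status guarded_invoke(struct psp_ctx *c)
    {
        enum ta_status status = TA_OK;

        pthread_mutex_lock(&c->lock);
        c->shared_buf = 0;                 /* memset(hdcp_cmd, 0, ...) */
        if (ta_invoke(c) != 0)             /* TA reported failure      */
            status = TA_FAIL;
        /* else: copy results out of the shared buffer while locked */
        pthread_mutex_unlock(&c->lock);    /* one unlock on every path */
        return status;
    }
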
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index dbe7835aabcf..0ba3cf7f336a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -83,6 +83,8 @@ struct mod_freesync_config {
bool btr;
unsigned int min_refresh_in_uhz;
unsigned int max_refresh_in_uhz;
+ unsigned int fixed_refresh_in_uhz;
+
};
struct mod_vrr_params_btr {
@@ -112,6 +114,7 @@ struct mod_vrr_params {
uint32_t max_duration_in_us;
uint32_t max_refresh_in_uhz;
uint32_t min_duration_in_us;
+ uint32_t fixed_refresh_in_uhz;
struct dc_crtc_timing_adjust adjust;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index c088602bc1a0..eed560eecbab 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -60,7 +60,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE,
@@ -90,7 +90,7 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE,
MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED,
- MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION,
+ MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING,
MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE,
MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE,
@@ -117,7 +117,6 @@ enum mod_hdcp_operation_mode {
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
index fe2117904329..198c0e64d13a 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_shared.h
@@ -40,8 +40,9 @@ enum color_transfer_func {
enum vrr_packet_type {
PACKET_TYPE_VRR,
- PACKET_TYPE_FS1,
- PACKET_TYPE_FS2,
+ PACKET_TYPE_FS_V1,
+ PACKET_TYPE_FS_V2,
+ PACKET_TYPE_FS_V3,
PACKET_TYPE_VTEM
};
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index cff3ab15fc0c..7cd8a43d1889 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -144,7 +144,7 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
}
/*VSC packet set to 2 when DP revision >= 1.2*/
- if (stream->psr_version != 0)
+ if (stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED)
vsc_packet_revision = vsc_packet_rev2;
/* Update to revision 5 for extended colorimetry support */
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e75a4bb94488..8c37bcc27132 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -24,6 +24,9 @@
#include "power_helpers.h"
#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
+#include "dc.h"
+#include "core_types.h"
#define DIV_ROUNDUP(a, b) (((a)+((b)/2))/(b))
@@ -237,7 +240,7 @@ static void fill_backlight_transform_table(struct dmcu_iram_parameters params,
}
static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters params,
- struct iram_table_v_2_2 *table)
+ struct iram_table_v_2_2 *table, bool big_endian)
{
unsigned int i;
unsigned int num_entries = NUM_BL_CURVE_SEGS;
@@ -261,10 +264,12 @@ static void fill_backlight_transform_table_v_2_2(struct dmcu_iram_parameters par
lut_index = (params.backlight_lut_array_size - 1) * i / (num_entries - 1);
ASSERT(lut_index < params.backlight_lut_array_size);
- table->backlight_thresholds[i] =
- cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries));
- table->backlight_offsets[i] =
- cpu_to_be16(params.backlight_lut_array[lut_index]);
+ table->backlight_thresholds[i] = (big_endian) ?
+ cpu_to_be16(DIV_ROUNDUP((i * 65536), num_entries)) :
+ cpu_to_le16(DIV_ROUNDUP((i * 65536), num_entries));
+ table->backlight_offsets[i] = (big_endian) ?
+ cpu_to_be16(params.backlight_lut_array[lut_index]) :
+ cpu_to_le16(params.backlight_lut_array[lut_index]);
}
}
@@ -584,18 +589,18 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
fill_backlight_transform_table_v_2_2(
- params, ram_table);
+ params, ram_table, true);
}
-void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params)
+void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parameters params, bool big_endian)
{
unsigned int i, j;
unsigned int set = params.set;
ram_table->flags = 0x0;
-
- ram_table->min_abm_backlight =
- cpu_to_be16(params.min_abm_backlight);
+ ram_table->min_abm_backlight = (big_endian) ?
+ cpu_to_be16(params.min_abm_backlight) :
+ cpu_to_le16(params.min_abm_backlight);
for (i = 0; i < NUM_AGGR_LEVEL; i++) {
ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain;
@@ -619,33 +624,51 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->iir_curve[4] = 0x65;
//Gamma 2.2
- ram_table->crgb_thresh[0] = cpu_to_be16(0x127c);
- ram_table->crgb_thresh[1] = cpu_to_be16(0x151b);
- ram_table->crgb_thresh[2] = cpu_to_be16(0x17d5);
- ram_table->crgb_thresh[3] = cpu_to_be16(0x1a56);
- ram_table->crgb_thresh[4] = cpu_to_be16(0x1c83);
- ram_table->crgb_thresh[5] = cpu_to_be16(0x1e72);
- ram_table->crgb_thresh[6] = cpu_to_be16(0x20f0);
- ram_table->crgb_thresh[7] = cpu_to_be16(0x232b);
- ram_table->crgb_offset[0] = cpu_to_be16(0x2999);
- ram_table->crgb_offset[1] = cpu_to_be16(0x3999);
- ram_table->crgb_offset[2] = cpu_to_be16(0x4666);
- ram_table->crgb_offset[3] = cpu_to_be16(0x5999);
- ram_table->crgb_offset[4] = cpu_to_be16(0x6333);
- ram_table->crgb_offset[5] = cpu_to_be16(0x7800);
- ram_table->crgb_offset[6] = cpu_to_be16(0x8c00);
- ram_table->crgb_offset[7] = cpu_to_be16(0xa000);
- ram_table->crgb_slope[0] = cpu_to_be16(0x3609);
- ram_table->crgb_slope[1] = cpu_to_be16(0x2dfa);
- ram_table->crgb_slope[2] = cpu_to_be16(0x27ea);
- ram_table->crgb_slope[3] = cpu_to_be16(0x235d);
- ram_table->crgb_slope[4] = cpu_to_be16(0x2042);
- ram_table->crgb_slope[5] = cpu_to_be16(0x1dc3);
- ram_table->crgb_slope[6] = cpu_to_be16(0x1b1a);
- ram_table->crgb_slope[7] = cpu_to_be16(0x1910);
+ ram_table->crgb_thresh[0] = (big_endian) ? cpu_to_be16(0x127c) : cpu_to_le16(0x127c);
+ ram_table->crgb_thresh[1] = (big_endian) ? cpu_to_be16(0x151b) : cpu_to_le16(0x151b);
+ ram_table->crgb_thresh[2] = (big_endian) ? cpu_to_be16(0x17d5) : cpu_to_le16(0x17d5);
+ ram_table->crgb_thresh[3] = (big_endian) ? cpu_to_be16(0x1a56) : cpu_to_le16(0x1a56);
+ ram_table->crgb_thresh[4] = (big_endian) ? cpu_to_be16(0x1c83) : cpu_to_le16(0x1c83);
+ ram_table->crgb_thresh[5] = (big_endian) ? cpu_to_be16(0x1e72) : cpu_to_le16(0x1e72);
+ ram_table->crgb_thresh[6] = (big_endian) ? cpu_to_be16(0x20f0) : cpu_to_le16(0x20f0);
+ ram_table->crgb_thresh[7] = (big_endian) ? cpu_to_be16(0x232b) : cpu_to_le16(0x232b);
+ ram_table->crgb_offset[0] = (big_endian) ? cpu_to_be16(0x2999) : cpu_to_le16(0x2999);
+ ram_table->crgb_offset[1] = (big_endian) ? cpu_to_be16(0x3999) : cpu_to_le16(0x3999);
+ ram_table->crgb_offset[2] = (big_endian) ? cpu_to_be16(0x4666) : cpu_to_le16(0x4666);
+ ram_table->crgb_offset[3] = (big_endian) ? cpu_to_be16(0x5999) : cpu_to_le16(0x5999);
+ ram_table->crgb_offset[4] = (big_endian) ? cpu_to_be16(0x6333) : cpu_to_le16(0x6333);
+ ram_table->crgb_offset[5] = (big_endian) ? cpu_to_be16(0x7800) : cpu_to_le16(0x7800);
+ ram_table->crgb_offset[6] = (big_endian) ? cpu_to_be16(0x8c00) : cpu_to_le16(0x8c00);
+ ram_table->crgb_offset[7] = (big_endian) ? cpu_to_be16(0xa000) : cpu_to_le16(0xa000);
+ ram_table->crgb_slope[0] = (big_endian) ? cpu_to_be16(0x3609) : cpu_to_le16(0x3609);
+ ram_table->crgb_slope[1] = (big_endian) ? cpu_to_be16(0x2dfa) : cpu_to_le16(0x2dfa);
+ ram_table->crgb_slope[2] = (big_endian) ? cpu_to_be16(0x27ea) : cpu_to_le16(0x27ea);
+ ram_table->crgb_slope[3] = (big_endian) ? cpu_to_be16(0x235d) : cpu_to_le16(0x235d);
+ ram_table->crgb_slope[4] = (big_endian) ? cpu_to_be16(0x2042) : cpu_to_le16(0x2042);
+ ram_table->crgb_slope[5] = (big_endian) ? cpu_to_be16(0x1dc3) : cpu_to_le16(0x1dc3);
+ ram_table->crgb_slope[6] = (big_endian) ? cpu_to_be16(0x1b1a) : cpu_to_le16(0x1b1a);
+ ram_table->crgb_slope[7] = (big_endian) ? cpu_to_be16(0x1910) : cpu_to_le16(0x1910);
fill_backlight_transform_table_v_2_2(
- params, ram_table);
+ params, ram_table, big_endian);
+}
+
+bool dmub_init_abm_config(struct abm *abm,
+ struct dmcu_iram_parameters params)
+{
+ unsigned char ram_table[IRAM_SIZE];
+ bool result = false;
+
+ if (abm == NULL)
+ return false;
+
+ memset(&ram_table, 0, sizeof(ram_table));
+
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, false);
+ result = abm->funcs->init_abm_config(
+ abm, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+
+ return result;
}
bool dmcu_load_iram(struct dmcu *dmcu,
@@ -657,17 +680,17 @@ bool dmcu_load_iram(struct dmcu *dmcu,
if (dmcu == NULL)
return false;
- if (!dmcu->funcs->is_dmcu_initialized(dmcu))
+ if (dmcu && !dmcu->funcs->is_dmcu_initialized(dmcu))
return true;
memset(&ram_table, 0, sizeof(ram_table));
if (dmcu->dmcu_version.abm_version == 0x24) {
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
- result = dmcu->funcs->load_iram(
- dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
+ result = dmcu->funcs->load_iram(
+ dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
} else if (dmcu->dmcu_version.abm_version == 0x23) {
- fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params);
+ fill_iram_v_2_3((struct iram_table_v_2_2 *)ram_table, params, true);
result = dmcu->funcs->load_iram(
dmcu, 0, (char *)(&ram_table), IRAM_RESERVE_AREA_START_V2_2);
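
The DMCU path packs the IRAM table big-endian while the new DMUB path packs it little-endian, which is why fill_iram_v_2_3() now takes a big_endian flag. A minimal sketch of a helper that could fold the repeated per-field ternaries above into one place; pack16() is a hypothetical name, not part of this patch (sparse __force casts elided for brevity):

    /* Hypothetical helper; the patch open-codes the ternary per field. */
    static inline uint16_t pack16(uint16_t val, bool big_endian)
    {
    	return big_endian ? cpu_to_be16(val) : cpu_to_le16(val);
    }

    /* e.g.: ram_table->crgb_slope[7] = pack16(0x1910, big_endian); */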
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index e54157026330..46fbca2e2cd1 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -26,6 +26,7 @@
#define MODULES_POWER_POWER_HELPERS_H_
#include "dc/inc/hw/dmcu.h"
+#include "dc/inc/hw/abm.h"
enum abm_defines {
@@ -44,5 +45,7 @@ struct dmcu_iram_parameters {
bool dmcu_load_iram(struct dmcu *dmcu,
struct dmcu_iram_parameters params);
+bool dmub_init_abm_config(struct abm *abm,
+ struct dmcu_iram_parameters params);
#endif /* MODULES_POWER_POWER_HELPERS_H_ */
diff --git a/drivers/gpu/drm/amd/display/modules/stats/stats.c b/drivers/gpu/drm/amd/display/modules/stats/stats.c
deleted file mode 100644
index 03121ca64fe4..000000000000
--- a/drivers/gpu/drm/amd/display/modules/stats/stats.c
+++ /dev/null
@@ -1,448 +0,0 @@
-/*
- * Copyright 2016 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
-#include "mod_stats.h"
-#include "dm_services.h"
-#include "dc.h"
-#include "core_types.h"
-
-#define DAL_STATS_ENABLE_REGKEY "DalStatsEnable"
-#define DAL_STATS_ENABLE_REGKEY_DEFAULT 0x00000000
-#define DAL_STATS_ENABLE_REGKEY_ENABLED 0x00000001
-
-#define DAL_STATS_ENTRIES_REGKEY "DalStatsEntries"
-#define DAL_STATS_ENTRIES_REGKEY_DEFAULT 0x00350000
-#define DAL_STATS_ENTRIES_REGKEY_MAX 0x01000000
-
-#define DAL_STATS_EVENT_ENTRIES_DEFAULT 0x00000100
-
-#define MOD_STATS_NUM_VSYNCS 5
-#define MOD_STATS_EVENT_STRING_MAX 512
-
-struct stats_time_cache {
- unsigned int entry_id;
-
- unsigned long flip_timestamp_in_ns;
- unsigned long vupdate_timestamp_in_ns;
-
- unsigned int render_time_in_us;
- unsigned int avg_render_time_in_us_last_ten;
- unsigned int v_sync_time_in_us[MOD_STATS_NUM_VSYNCS];
- unsigned int num_vsync_between_flips;
-
- unsigned int flip_to_vsync_time_in_us;
- unsigned int vsync_to_flip_time_in_us;
-
- unsigned int min_window;
- unsigned int max_window;
- unsigned int v_total_min;
- unsigned int v_total_max;
- unsigned int event_triggers;
-
- unsigned int lfc_mid_point_in_us;
- unsigned int num_frames_inserted;
- unsigned int inserted_duration_in_us;
-
- unsigned int flags;
-};
-
-struct stats_event_cache {
- unsigned int entry_id;
- char event_string[MOD_STATS_EVENT_STRING_MAX];
-};
-
-struct core_stats {
- struct mod_stats public;
- struct dc *dc;
-
- bool enabled;
- unsigned int entries;
- unsigned int event_entries;
- unsigned int entry_id;
-
- struct stats_time_cache *time;
- unsigned int index;
-
- struct stats_event_cache *events;
- unsigned int event_index;
-
-};
-
-#define MOD_STATS_TO_CORE(mod_stats)\
- container_of(mod_stats, struct core_stats, public)
-
-bool mod_stats_init(struct mod_stats *mod_stats)
-{
- bool result = false;
- struct core_stats *core_stats = NULL;
- struct dc *dc = NULL;
-
- if (mod_stats == NULL)
- return false;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
- dc = core_stats->dc;
-
- return result;
-}
-
-struct mod_stats *mod_stats_create(struct dc *dc)
-{
- struct core_stats *core_stats = NULL;
- struct persistent_data_flag flag;
- unsigned int reg_data;
- int i = 0;
-
- if (dc == NULL)
- goto fail_construct;
-
- core_stats = kzalloc(sizeof(struct core_stats), GFP_KERNEL);
-
- if (core_stats == NULL)
- goto fail_construct;
-
- core_stats->dc = dc;
-
- core_stats->enabled = DAL_STATS_ENABLE_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENABLE_REGKEY,
- &reg_data, sizeof(unsigned int), &flag))
- core_stats->enabled = reg_data;
-
- if (core_stats->enabled) {
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_DEFAULT;
- if (dm_read_persistent_data(dc->ctx, NULL, NULL,
- DAL_STATS_ENTRIES_REGKEY,
- &reg_data, sizeof(unsigned int), &flag)) {
- if (reg_data > DAL_STATS_ENTRIES_REGKEY_MAX)
- core_stats->entries = DAL_STATS_ENTRIES_REGKEY_MAX;
- else
- core_stats->entries = reg_data;
- }
- core_stats->time = kcalloc(core_stats->entries,
- sizeof(struct stats_time_cache),
- GFP_KERNEL);
-
- if (core_stats->time == NULL)
- goto fail_construct_time;
-
- core_stats->event_entries = DAL_STATS_EVENT_ENTRIES_DEFAULT;
- core_stats->events = kcalloc(core_stats->event_entries,
- sizeof(struct stats_event_cache),
- GFP_KERNEL);
-
- if (core_stats->events == NULL)
- goto fail_construct_events;
-
- } else {
- core_stats->entries = 0;
- }
-
- /* Purposely leave index 0 unused so we don't need special logic to
- * handle calculation cases that depend on previous flip data.
- */
- core_stats->index = 1;
- core_stats->event_index = 0;
-
- // Keeps track of ordering within the different stats structures
- core_stats->entry_id = 0;
-
- return &core_stats->public;
-
-fail_construct_events:
- kfree(core_stats->time);
-
-fail_construct_time:
- kfree(core_stats);
-
-fail_construct:
- return NULL;
-}
-
-void mod_stats_destroy(struct mod_stats *mod_stats)
-{
- if (mod_stats != NULL) {
- struct core_stats *core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- kfree(core_stats->time);
- kfree(core_stats->events);
- kfree(core_stats);
- }
-}
-
-void mod_stats_dump(struct mod_stats *mod_stats)
-{
- struct dc *dc = NULL;
- struct dal_logger *logger = NULL;
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- struct stats_event_cache *events = NULL;
- unsigned int time_index = 1;
- unsigned int event_index = 0;
- unsigned int index = 0;
- struct log_entry log_entry;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
- dc = core_stats->dc;
- logger = dc->ctx->logger;
- time = core_stats->time;
- events = core_stats->events;
-
- DISPLAY_STATS_BEGIN(log_entry);
-
- DISPLAY_STATS("==Display Caps==\n");
-
- DISPLAY_STATS("==Display Stats==\n");
-
- DISPLAY_STATS("%10s %10s %10s %10s %10s"
- " %11s %11s %17s %10s %14s"
- " %10s %10s %10s %10s %10s"
- " %10s %10s %10s %10s\n",
- "render", "avgRender",
- "minWindow", "midPoint", "maxWindow",
- "vsyncToFlip", "flipToVsync", "vsyncsBetweenFlip",
- "numFrame", "insertDuration",
- "vTotalMin", "vTotalMax", "eventTrigs",
- "vSyncTime1", "vSyncTime2", "vSyncTime3",
- "vSyncTime4", "vSyncTime5", "flags");
-
- for (int i = 0; i < core_stats->entry_id; i++) {
- if (event_index < core_stats->event_index &&
- i == events[event_index].entry_id) {
- DISPLAY_STATS("==Event==%s\n", events[event_index].event_string);
- event_index++;
- } else if (time_index < core_stats->index &&
- i == time[time_index].entry_id) {
- DISPLAY_STATS("%10u %10u %10u %10u %10u"
- " %11u %11u %17u %10u %14u"
- " %10u %10u %10u %10u %10u"
- " %10u %10u %10u %10u\n",
- time[time_index].render_time_in_us,
- time[time_index].avg_render_time_in_us_last_ten,
- time[time_index].min_window,
- time[time_index].lfc_mid_point_in_us,
- time[time_index].max_window,
- time[time_index].vsync_to_flip_time_in_us,
- time[time_index].flip_to_vsync_time_in_us,
- time[time_index].num_vsync_between_flips,
- time[time_index].num_frames_inserted,
- time[time_index].inserted_duration_in_us,
- time[time_index].v_total_min,
- time[time_index].v_total_max,
- time[time_index].event_triggers,
- time[time_index].v_sync_time_in_us[0],
- time[time_index].v_sync_time_in_us[1],
- time[time_index].v_sync_time_in_us[2],
- time[time_index].v_sync_time_in_us[3],
- time[time_index].v_sync_time_in_us[4],
- time[time_index].flags);
-
- time_index++;
- }
- }
-
- DISPLAY_STATS_END(log_entry);
-}
-
-void mod_stats_reset_data(struct mod_stats *mod_stats)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- memset(core_stats->time, 0,
- sizeof(struct stats_time_cache) * core_stats->entries);
-
- memset(core_stats->events, 0,
- sizeof(struct stats_event_cache) * core_stats->event_entries);
-
- core_stats->index = 1;
- core_stats->event_index = 0;
-
- // Keeps track of ordering within the different stats structures
- core_stats->entry_id = 0;
-}
-
-void mod_stats_update_event(struct mod_stats *mod_stats,
- char *event_string,
- unsigned int length)
-{
- struct core_stats *core_stats = NULL;
- struct stats_event_cache *events = NULL;
- unsigned int index = 0;
- unsigned int copy_length = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->event_index >= core_stats->event_entries)
- return;
-
- events = core_stats->events;
- index = core_stats->event_index;
-
- copy_length = length;
- if (length > MOD_STATS_EVENT_STRING_MAX)
- copy_length = MOD_STATS_EVENT_STRING_MAX;
-
- memcpy(&events[index].event_string, event_string, copy_length);
- events[index].event_string[copy_length - 1] = '\0';
-
- events[index].entry_id = core_stats->entry_id;
- core_stats->event_index++;
- core_stats->entry_id++;
-}
-
-void mod_stats_update_flip(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
-
- time[index].flip_timestamp_in_ns = timestamp_in_ns;
- time[index].render_time_in_us =
- (timestamp_in_ns - time[index - 1].flip_timestamp_in_ns) / 1000;
-
- if (index >= 10) {
- for (unsigned int i = 0; i < 10; i++)
- time[index].avg_render_time_in_us_last_ten +=
- time[index - i].render_time_in_us;
- time[index].avg_render_time_in_us_last_ten /= 10;
- }
-
- if (time[index].num_vsync_between_flips > 0)
- time[index].vsync_to_flip_time_in_us =
- (timestamp_in_ns -
- time[index].vupdate_timestamp_in_ns) / 1000;
- else
- time[index].vsync_to_flip_time_in_us =
- (timestamp_in_ns -
- time[index - 1].vupdate_timestamp_in_ns) / 1000;
-
- time[index].entry_id = core_stats->entry_id;
- core_stats->index++;
- core_stats->entry_id++;
-}
-
-void mod_stats_update_vupdate(struct mod_stats *mod_stats,
- unsigned long timestamp_in_ns)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
- unsigned int num_vsyncs = 0;
- unsigned int prev_vsync_in_ns = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
- num_vsyncs = time[index].num_vsync_between_flips;
-
- if (num_vsyncs < MOD_STATS_NUM_VSYNCS) {
- if (num_vsyncs == 0) {
- prev_vsync_in_ns =
- time[index - 1].vupdate_timestamp_in_ns;
-
- time[index].flip_to_vsync_time_in_us =
- (timestamp_in_ns -
- time[index - 1].flip_timestamp_in_ns) /
- 1000;
- } else {
- prev_vsync_in_ns =
- time[index].vupdate_timestamp_in_ns;
- }
-
- time[index].v_sync_time_in_us[num_vsyncs] =
- (timestamp_in_ns - prev_vsync_in_ns) / 1000;
- }
-
- time[index].vupdate_timestamp_in_ns = timestamp_in_ns;
- time[index].num_vsync_between_flips++;
-}
-
-void mod_stats_update_freesync(struct mod_stats *mod_stats,
- unsigned int v_total_min,
- unsigned int v_total_max,
- unsigned int event_triggers,
- unsigned int window_min,
- unsigned int window_max,
- unsigned int lfc_mid_point_in_us,
- unsigned int inserted_frames,
- unsigned int inserted_duration_in_us)
-{
- struct core_stats *core_stats = NULL;
- struct stats_time_cache *time = NULL;
- unsigned int index = 0;
-
- if (mod_stats == NULL)
- return;
-
- core_stats = MOD_STATS_TO_CORE(mod_stats);
-
- if (core_stats->index >= core_stats->entries)
- return;
-
- time = core_stats->time;
- index = core_stats->index;
-
- time[index].v_total_min = v_total_min;
- time[index].v_total_max = v_total_max;
- time[index].event_triggers = event_triggers;
- time[index].min_window = window_min;
- time[index].max_window = window_max;
- time[index].lfc_mid_point_in_us = lfc_mid_point_in_us;
- time[index].num_frames_inserted = inserted_frames;
- time[index].inserted_duration_in_us = inserted_duration_in_us;
-}
-
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index 00f132f8ad55..61ee4be35d27 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -112,9 +112,12 @@ uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
evict_vmids(core_vmid);
vmid = get_next_available_vmid(core_vmid);
-		add_ptb_to_table(core_vmid, vmid, ptb);
-		dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+		if (vmid != -1) {
+			add_ptb_to_table(core_vmid, vmid, ptb);
+			dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
+		} else
+			ASSERT(0);
}
return vmid;
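
For clarity, the post-patch flow only programs the hardware when a VMID was actually obtained; condensed (assuming, as the hunk implies, that get_next_available_vmid() returns -1 on failure):

    int vmid = get_next_available_vmid(core_vmid);

    if (vmid != -1) {
    	add_ptb_to_table(core_vmid, vmid, ptb);
    	dc_setup_vm_context(core_vmid->dc, &va_config, vmid);
    } else {
    	ASSERT(0);	/* no VMID free even after eviction */
    }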
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index d655a76bedc6..e98c84ef206f 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -40,6 +40,13 @@ enum amd_chip_flags {
AMD_EXP_HW_SUPPORT = 0x00080000UL,
};
+enum amd_apu_flags {
+ AMD_APU_IS_RAVEN = 0x00000001UL,
+ AMD_APU_IS_RAVEN2 = 0x00000002UL,
+ AMD_APU_IS_PICASSO = 0x00000004UL,
+ AMD_APU_IS_RENOIR = 0x00000008UL,
+};
+
enum amd_ip_block_type {
AMD_IP_BLOCK_TYPE_COMMON,
AMD_IP_BLOCK_TYPE_GMC,
@@ -150,6 +157,13 @@ enum DC_FEATURE_MASK {
DC_PSR_MASK = 0x8,
};
+enum DC_DEBUG_MASK {
+ DC_DISABLE_PIPE_SPLIT = 0x1,
+ DC_DISABLE_STUTTER = 0x2,
+ DC_DISABLE_DSC = 0x4,
+ DC_DISABLE_CLOCK_GATING = 0x8
+};
+
enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
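
Both new enums are bit masks rather than sequential values, so consumers are expected to test them with bitwise AND. An illustrative sketch (adev->apu_flags and the amdgpu_dc_debug_mask module parameter are the assumed consumers; neither appears in this hunk):

    /* Illustrative only. */
    if (adev->apu_flags & AMD_APU_IS_PICASSO)
    	; /* apply a Picasso-specific quirk */

    if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
    	; /* DC honors the debug override and keeps DSC off */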
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
index e7db6f9f9c86..8b0b9a2a8fed 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_1_0_sh_mask.h
@@ -5599,6 +5599,7 @@
#define GRBM_PWR_CNTL__ALL_REQ_EN_MASK 0x00008000L
//GRBM_STATUS
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL__SHIFT 0x0
+#define GRBM_STATUS__RSMU_RQ_PENDING__SHIFT 0x5
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING__SHIFT 0x7
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING__SHIFT 0x8
#define GRBM_STATUS__GDS_DMA_RQ_PENDING__SHIFT 0x9
@@ -5619,6 +5620,7 @@
#define GRBM_STATUS__CB_BUSY__SHIFT 0x1e
#define GRBM_STATUS__GUI_ACTIVE__SHIFT 0x1f
#define GRBM_STATUS__ME0PIPE0_CMDFIFO_AVAIL_MASK 0x0000000FL
+#define GRBM_STATUS__RSMU_RQ_PENDING_MASK 0x00000020L
#define GRBM_STATUS__ME0PIPE0_CF_RQ_PENDING_MASK 0x00000080L
#define GRBM_STATUS__ME0PIPE0_PF_RQ_PENDING_MASK 0x00000100L
#define GRBM_STATUS__GDS_DMA_RQ_PENDING_MASK 0x00000200L
@@ -5832,6 +5834,7 @@
#define GRBM_READ_ERROR__READ_ERROR_MASK 0x80000000L
//GRBM_READ_ERROR2
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF__SHIFT 0x10
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU__SHIFT 0x11
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC__SHIFT 0x12
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA__SHIFT 0x13
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF__SHIFT 0x14
@@ -5847,6 +5850,7 @@
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE2__SHIFT 0x1e
#define GRBM_READ_ERROR2__READ_REQUESTER_ME2PIPE3__SHIFT 0x1f
#define GRBM_READ_ERROR2__READ_REQUESTER_CPF_MASK 0x00010000L
+#define GRBM_READ_ERROR2__READ_REQUESTER_RSMU_MASK 0x00020000L
#define GRBM_READ_ERROR2__READ_REQUESTER_RLC_MASK 0x00040000L
#define GRBM_READ_ERROR2__READ_REQUESTER_GDS_DMA_MASK 0x00080000L
#define GRBM_READ_ERROR2__READ_REQUESTER_ME0PIPE0_CF_MASK 0x00100000L
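
As with the rest of these generated headers, each field is described by a MASK/SHIFT pair; decoding the new RSMU bit from a raw register value looks like the following sketch (the RREG32() read is illustrative, not part of the hunk):

    u32 status = RREG32(mmGRBM_STATUS);	/* illustrative MMIO read */
    bool rsmu_rq_pending =
    	(status & GRBM_STATUS__RSMU_RQ_PENDING_MASK) >>
    	GRBM_STATUS__RSMU_RQ_PENDING__SHIFT;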
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
index 68d0ffad28c7..92fd27c26a77 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbif/nbif_6_1_offset.h
@@ -1162,8 +1162,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 0
#define mmRCC_CONFIG_RESERVED 0x0de4 // duplicate
#define mmRCC_CONFIG_RESERVED_BASE_IDX 0
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x0de5 // duplicate
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 0
+#endif
// addressBlock: syshub_mmreg_ind_syshubdec
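
The #ifndef guard prevents a macro-redefinition warning when two ASIC headers that both define mmRCC_IOV_FUNC_IDENTIFIER land in the same translation unit; the identical guard is added to the nbio 7.0 and 7.4 headers below. A sketch of the failure mode being avoided (include paths shown are illustrative):

    /* Without the guards, the second include would redefine the
     * macro (with a different value here) and trigger a warning:
     */
    #include "asic_reg/nbif/nbif_6_1_offset.h"  /* defines it as 0x0de5 */
    #include "asic_reg/nbio/nbio_7_4_offset.h"  /* guarded: now a no-op */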
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
index 435462294fbc..a7cd760ebf8f 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_0_offset.h
@@ -4251,8 +4251,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
index ce5830ebe095..0c5a08bc034a 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h
@@ -2687,8 +2687,10 @@
#define mmRCC_CONFIG_MEMSIZE_BASE_IDX 2
#define mmRCC_CONFIG_RESERVED 0x00c4
#define mmRCC_CONFIG_RESERVED_BASE_IDX 2
+#ifndef mmRCC_IOV_FUNC_IDENTIFIER
#define mmRCC_IOV_FUNC_IDENTIFIER 0x00c5
#define mmRCC_IOV_FUNC_IDENTIFIER_BASE_IDX 2
+#endif
// addressBlock: nbio_nbif0_rcc_dev0_BIFDEC1
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
new file mode 100644
index 000000000000..e87c359ea1fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_offset.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_OFFSET_HEADER
+#define _pwr_10_0_OFFSET_HEADER
+
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
new file mode 100644
index 000000000000..8a000c21651c
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/pwr/pwr_10_0_sh_mask.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _pwr_10_0_SH_MASK_HEADER
+#define _pwr_10_0_SH_MASK_HEADER
+
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
+#endif
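
PWR_GFXOFF_STATUS is a two-bit field (mask 0x6, shift 1), so it decodes to a value in 0..3 rather than a single flag; a hedged extraction sketch (reg stands for a read of PWR_MISC_CNTL_STATUS, and the meaning of the encoded values is firmware-defined):

    u32 gfxoff_status =
    	(reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) >>
    	PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT;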
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
new file mode 100644
index 000000000000..9bf73284ad73
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_offset.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_OFFSET_HEADER
+#define _smuio_12_0_0_OFFSET_HEADER
+
+#define mmSMUIO_GFX_MISC_CNTL 0x00c8
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+
+#define mmPWR_MISC_CNTL_STATUS 0x0183
+#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 1
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
new file mode 100644
index 000000000000..26556fa3d054
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_12_0_0_sh_mask.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _smuio_12_0_0_SH_MASK_HEADER
+#define _smuio_12_0_0_SH_MASK_HEADER
+
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+//PWR_MISC_CNTL_STATUS
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
+#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 70146518174c..b36ea8340afa 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -972,11 +972,13 @@ struct atom_ext_display_path
};
//usCaps
-enum ext_display_path_cap_def
-{
- EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE =0x0001,
- EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN =0x0002,
- EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK =0x007C,
+enum ext_display_path_cap_def {
+ EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE = 0x0001,
+ EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN = 0x0002,
+ EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK = 0x007C,
+ EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204 = (0x01 << 2), //PI redriver chip
+ EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT = (0x02 << 2), //TI retimer chip
+	EXT_DISPLAY_PATH_CAPS__HDMI20_PARADE_PS175 = (0x03 << 2)  //Parade DP->HDMI reconverter chip
};
struct atom_external_display_connection_info
@@ -1876,6 +1878,108 @@ struct atom_smc_dpm_info_v4_6
uint32_t boardreserved[10];
};
+struct atom_smc_dpm_info_v4_7
+{
+ struct atom_common_table_header table_header;
+ // SECTION: BOARD PARAMETERS
+ // I2C Control
+ struct smudpm_i2c_controller_config_v2 I2cControllers[8];
+
+ // SVI2 Board Parameters
+ uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+ uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value.
+
+ uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields
+ uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields
+
+ uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode
+ uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN)
+ uint8_t Padding8_V;
+
+ // Telemetry Settings
+ uint16_t GfxMaxCurrent; // in Amps
+ uint8_t GfxOffset; // in Amps
+ uint8_t Padding_TelemetryGfx;
+ uint16_t SocMaxCurrent; // in Amps
+ uint8_t SocOffset; // in Amps
+ uint8_t Padding_TelemetrySoc;
+
+ uint16_t Mem0MaxCurrent; // in Amps
+ uint8_t Mem0Offset; // in Amps
+ uint8_t Padding_TelemetryMem0;
+
+ uint16_t Mem1MaxCurrent; // in Amps
+ uint8_t Mem1Offset; // in Amps
+ uint8_t Padding_TelemetryMem1;
+
+ // GPIO Settings
+ uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching
+ uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching
+ uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event
+ uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event
+
+ uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event
+ uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event
+ uint8_t GthrGpio; // GPIO pin configured for GTHR Event
+ uint8_t GthrPolarity; // replace GPIO polarity for GTHR
+
+ // LED Display Settings
+ uint8_t LedPin0; // GPIO number for LedPin[0]
+ uint8_t LedPin1; // GPIO number for LedPin[1]
+ uint8_t LedPin2; // GPIO number for LedPin[2]
+ uint8_t padding8_4;
+
+ // GFXCLK PLL Spread Spectrum
+ uint8_t PllGfxclkSpreadEnabled; // on or off
+ uint8_t PllGfxclkSpreadPercent; // Q4.4
+ uint16_t PllGfxclkSpreadFreq; // kHz
+
+ // GFXCLK DFLL Spread Spectrum
+ uint8_t DfllGfxclkSpreadEnabled; // on or off
+ uint8_t DfllGfxclkSpreadPercent; // Q4.4
+ uint16_t DfllGfxclkSpreadFreq; // kHz
+
+ // UCLK Spread Spectrum
+ uint8_t UclkSpreadEnabled; // on or off
+ uint8_t UclkSpreadPercent; // Q4.4
+ uint16_t UclkSpreadFreq; // kHz
+
+ // SOCCLK Spread Spectrum
+ uint8_t SoclkSpreadEnabled; // on or off
+ uint8_t SocclkSpreadPercent; // Q4.4
+ uint16_t SocclkSpreadFreq; // kHz
+
+ // Total board power
+ uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
+ uint16_t BoardPadding;
+
+ // Mvdd Svi2 Div Ratio Setting
+ uint32_t MvddRatio; // This is used for MVDD Vid workaround. It has 16 fractional bits (Q16.16)
+
+ // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence
+ uint8_t GpioI2cScl; // Serial Clock
+ uint8_t GpioI2cSda; // Serial Data
+ uint16_t GpioPadding;
+
+ // Additional LED Display Settings
+ uint8_t LedPin3; // GPIO number for LedPin[3] - PCIE GEN Speed
+ uint8_t LedPin4; // GPIO number for LedPin[4] - PMFW Error Status
+ uint16_t LedEnableMask;
+
+ // Power Limit Scalars
+ uint8_t PowerLimitScalar[4]; //[PPT_THROTTLER_COUNT]
+
+ uint8_t MvddUlvPhaseSheddingMask;
+ uint8_t VddciUlvPhaseSheddingMask;
+ uint8_t Padding8_Psi1;
+ uint8_t Padding8_Psi2;
+
+ uint32_t BoardReserved[5];
+};
+
/*
***************************************************************************
Data Table asic_profiling_info structure
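
Several v4_7 fields are fixed-point: the spread-spectrum percentages are Q4.4 (4 fractional bits) and MvddRatio is Q16.16. Illustrative conversions, not part of the patch (info is a hypothetical pointer to struct atom_smc_dpm_info_v4_7):

    /* Q4.4 percent -> hundredths of a percent: (v / 16) * 100 */
    u32 spread_pct_x100 = ((u32)info->PllGfxclkSpreadPercent * 100) >> 4;

    /* Q16.16 ratio -> integer and fractional parts */
    u32 mvdd_int  = info->MvddRatio >> 16;
    u32 mvdd_frac = info->MvddRatio & 0xffff;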
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index a69deb3a2ac0..60a6536ff656 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -32,7 +32,6 @@ struct cgs_device;
* enum cgs_ind_reg - Indirect register spaces
*/
enum cgs_ind_reg {
- CGS_IND_REG__MMIO,
CGS_IND_REG__PCIE,
CGS_IND_REG__SMC,
CGS_IND_REG__UVD_CTX,
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 2a12614a12c2..7e6dcdf7df73 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -50,6 +50,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
mutex_init(&hwmgr->smu_lock);
+ mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
hwmgr->feature_mask = adev->pm.pp_feature;
@@ -64,6 +65,8 @@ static void amd_powerplay_destroy(struct amdgpu_device *adev)
{
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ mutex_destroy(&hwmgr->msg_lock);
+
kfree(hwmgr->hardcode_pp_table);
hwmgr->hardcode_pp_table = NULL;
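
msg_lock gets the usual paired lifecycle, created in amd_powerplay_create() and destroyed in amd_powerplay_destroy(); its purpose is to serialize individual SMU message transactions. A hedged usage sketch (smum_send_msg_to_smc() is an existing hwmgr entry point, shown only as an assumed caller):

    mutex_lock(&hwmgr->msg_lock);
    ret = smum_send_msg_to_smc(hwmgr, msg);	/* one SMU transaction */
    mutex_unlock(&hwmgr->msg_lock);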
@@ -319,12 +322,12 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1435,7 +1438,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
mutex_lock(&hwmgr->smu_lock);
@@ -1452,8 +1456,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr)
return -EINVAL;
- if (!(hwmgr->not_vf && amdgpu_dpm) ||
- !hwmgr->hwmgr_func->get_asic_baco_state)
+ if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);
@@ -1470,7 +1473,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index e8b27fab6aa1..8c684a6e0156 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -62,6 +62,7 @@ const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
size_t size = 0;
int ret = 0, i = 0;
uint32_t feature_mask[2] = { 0 };
@@ -70,6 +71,9 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
uint32_t sort_feature[SMU_FEATURE_COUNT];
uint64_t hw_feature_count = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
@@ -110,9 +114,6 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
uint32_t feature_low = 0, feature_high = 0;
int ret = 0;
- if (!smu->pm_enabled)
- return ret;
-
feature_low = (feature_mask >> 0 ) & 0xffffffff;
feature_high = (feature_mask >> 32) & 0xffffffff;
@@ -155,6 +156,10 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
uint64_t feature_enables = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -191,16 +196,31 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
if (!if_version && !smu_version)
return -EINVAL;
+ if (smu->smc_fw_if_version && smu->smc_fw_version)
+ {
+ if (if_version)
+ *if_version = smu->smc_fw_if_version;
+
+ if (smu_version)
+ *smu_version = smu->smc_fw_version;
+
+ return 0;
+ }
+
if (if_version) {
ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
+
+ smu->smc_fw_if_version = *if_version;
}
if (smu_version) {
ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
+
+ smu->smc_fw_version = *smu_version;
}
return ret;
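
smu_get_smc_version() now memoizes both values after the first successful query, so repeat callers are served from smu->smc_fw_if_version / smu->smc_fw_version without another SMC round trip; this is safe on the assumption that the firmware version cannot change while the driver is loaded. The fast path, sketched:

    uint32_t smu_version;

    /* Only the first call issues SMU_MSG_GetSmuVersion. */
    ret = smu_get_smc_version(smu, NULL, &smu_version);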
@@ -327,13 +347,13 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
- param, &param);
+ param, value);
if (ret)
return ret;
 	/* BIT31: 0 - Fine grained DPM, 1 - Discrete DPM
 	 * which is not supported for now */
- *value = param & 0x7fffffff;
+ *value = *value & 0x7fffffff;
return ret;
}
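
The same hunk makes smu_get_dpm_freq_by_index() write through the caller's pointer directly and then clear BIT31 in place; only the low 31 bits carry the frequency, with BIT31 acting as the fine-grained/discrete DPM indicator. Sketched decomposition (illustrative only):

    bool discrete_dpm = !!(*value & 0x80000000);	/* BIT31, unused for now */
    *value &= 0x7fffffff;				/* frequency payload */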
@@ -417,8 +437,12 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
bool gate)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
ret = smu_dpm_set_uvd_enable(smu, !gate);
@@ -511,7 +535,6 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
int table_id = smu_table_get_index(smu, table_index);
uint32_t table_size;
int ret = 0;
-
if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
return -EINVAL;
@@ -547,12 +570,10 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
if (adev->asic_type == CHIP_VEGA20)
return (amdgpu_dpm == 2) ? true : false;
else if (adev->asic_type >= CHIP_ARCTURUS) {
- if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
- return false;
- else
+ if (amdgpu_sriov_is_pp_one_vf(adev) || !amdgpu_sriov_vf(adev))
return true;
- } else
- return false;
+ }
+ return false;
}
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
@@ -569,8 +590,12 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
uint32_t powerplay_table_size;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
@@ -591,11 +616,13 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table)
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
struct smu_table_context *smu_table = &smu->smu_table;
+ struct amdgpu_device *adev = smu->adev;
ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
int ret = 0;
- if (!smu->pm_enabled)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
+
if (header->usStructureSize != size) {
pr_err("pp table size not matched !\n");
return -EIO;
@@ -636,8 +663,6 @@ int smu_feature_init_dpm(struct smu_context *smu)
int ret = 0;
uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];
- if (!smu->pm_enabled)
- return ret;
mutex_lock(&feature->mutex);
bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
mutex_unlock(&feature->mutex);
@@ -665,7 +690,6 @@ int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
if (smu->is_apu)
return 1;
-
feature_id = smu_feature_get_index(smu, mask);
if (feature_id < 0)
return 0;
@@ -932,13 +956,6 @@ static int smu_sw_init(void *handle)
return ret;
}
- if (adev->smu.ppt_funcs->i2c_eeprom_init) {
- ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
-
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -948,9 +965,6 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
- if (adev->smu.ppt_funcs->i2c_eeprom_fini)
- smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
-
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1323,6 +1337,9 @@ static int smu_hw_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return 0;
+
ret = smu_start_smc_engine(smu);
if (ret) {
pr_err("SMU is not ready yet!\n");
@@ -1336,9 +1353,6 @@ static int smu_hw_init(void *handle)
smu_set_gfx_cgpg(&adev->smu, true);
}
- if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
- return 0;
-
if (!smu->pm_enabled)
return 0;
@@ -1366,10 +1380,11 @@ static int smu_hw_init(void *handle)
if (ret)
goto failed;
- if (!smu->pm_enabled)
- adev->pm.dpm_enabled = false;
- else
- adev->pm.dpm_enabled = true; /* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ if (ret)
+ goto failed;
+
+ adev->pm.dpm_enabled = true;
pr_info("SMU is initialized successfully!\n");
@@ -1381,6 +1396,9 @@ failed:
static int smu_stop_dpms(struct smu_context *smu)
{
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
return smu_system_features_control(smu, false);
}
@@ -1403,6 +1421,10 @@ static int smu_hw_fini(void *handle)
if (!smu->pm_enabled)
return 0;
+ adev->pm.dpm_enabled = false;
+
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
if (!amdgpu_sriov_vf(adev)){
ret = smu_stop_thermal_control(smu);
if (ret) {
@@ -1476,7 +1498,7 @@ static int smu_disable_dpm(struct smu_context *smu)
bool use_baco = !smu->is_apu &&
((adev->in_gpu_reset &&
(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
- (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+ ((adev->in_runpm || adev->in_hibernate) && amdgpu_asic_supports_baco(adev)));
ret = smu_get_smc_version(smu, NULL, &smu_version);
if (ret) {
@@ -1542,6 +1564,10 @@ static int smu_suspend(void *handle)
if (!smu->pm_enabled)
return 0;
+ adev->pm.dpm_enabled = false;
+
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
if(!amdgpu_sriov_vf(adev)) {
ret = smu_disable_dpm(smu);
if (ret)
@@ -1587,11 +1613,17 @@ static int smu_resume(void *handle)
if (ret)
goto failed;
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+ if (ret)
+ goto failed;
+
if (smu->is_apu)
smu_set_gfx_cgpg(&adev->smu, true);
smu->disable_uclk_switch = 0;
+ adev->pm.dpm_enabled = true;
+
pr_info("SMU is resumed successfully!\n");
return 0;
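
Taken together, these hunks move the EEPROM I2C adapter setup out of sw_init/sw_fini and into the power-state callbacks, with dpm_enabled bracketing the window in which SMU services are legal. The resulting ordering, as a comment-style outline:

    /*
     * hw_init/resume : smu_i2c_eeprom_init(); then dpm_enabled = true;
     * suspend/hw_fini: dpm_enabled = false; then smu_i2c_eeprom_fini();
     *
     * i.e. dpm_enabled strictly brackets the lifetime of the adapter
     * and of every guarded smu_* service below.
     */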
@@ -1603,10 +1635,14 @@ failed:
int smu_display_configuration_change(struct smu_context *smu,
const struct amd_pp_display_configuration *display_config)
{
+ struct amdgpu_device *adev = smu->adev;
int index = 0;
int num_of_active_display = 0;
- if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(smu->adev))
return -EINVAL;
if (!display_config)
@@ -1668,12 +1704,16 @@ int smu_get_current_clocks(struct smu_context *smu,
struct amd_pp_clock_info *clocks)
{
struct amd_pp_simple_clock_info simple_clocks = {0};
+ struct amdgpu_device *adev = smu->adev;
struct smu_clock_info hw_clocks;
int ret = 0;
if (!is_support_sw_smu(smu->adev))
return -EINVAL;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
smu_get_dal_power_level(smu, &simple_clocks);
@@ -1736,7 +1776,7 @@ static int smu_enable_umd_pstate(void *handle,
struct smu_context *smu = (struct smu_context*)(handle);
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
+ if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
@@ -1744,12 +1784,12 @@ static int smu_enable_umd_pstate(void *handle,
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
smu_dpm_ctx->enable_umd_pstate = true;
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
amdgpu_device_ip_set_powergating_state(smu->adev,
AMD_IP_BLOCK_TYPE_GFX,
AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -1778,9 +1818,6 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu,
long workload;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (!skip_display_settings) {
ret = smu_display_config_changed(smu);
if (ret) {
@@ -1831,8 +1868,12 @@ int smu_handle_task(struct smu_context *smu,
enum amd_pp_task task_id,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -1866,10 +1907,11 @@ int smu_switch_power_profile(struct smu_context *smu,
bool en)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
long workload;
uint32_t index;
- if (!smu->pm_enabled)
+ if (!adev->pm.dpm_enabled)
return -EINVAL;
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
@@ -1900,8 +1942,12 @@ int smu_switch_power_profile(struct smu_context *smu,
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
enum amd_dpm_forced_level level;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1915,8 +1961,12 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
@@ -1939,8 +1989,12 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
ret = smu_init_display_count(smu, count);
mutex_unlock(&smu->mutex);
@@ -1954,8 +2008,12 @@ int smu_force_clk_levels(struct smu_context *smu,
bool lock_needed)
{
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("force clock level is for dpm manual mode only.\n");
return -EINVAL;
@@ -1973,20 +2031,19 @@ int smu_force_clk_levels(struct smu_context *smu,
return ret;
}
+/*
+ * On system suspend or reset, the dpm_enabled flag is
+ * cleared so that SMU services which are no longer
+ * supported are gated.
+ * However, setting the mp1 state should still be allowed
+ * even when dpm_enabled is cleared.
+ */
int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state)
{
uint16_t msg;
int ret;
- /*
- * The SMC is not fully ready. That may be
- * expected as the IP may be masked.
- * So, just return without error.
- */
- if (!smu->pm_enabled)
- return 0;
-
mutex_lock(&smu->mutex);
switch (mp1_state) {
@@ -2023,15 +2080,11 @@ int smu_set_mp1_state(struct smu_context *smu,
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /*
- * The SMC is not fully ready. That may be
- * expected as the IP may be masked.
- * So, just return without error.
- */
- if (!smu->pm_enabled)
- return 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
@@ -2047,6 +2100,28 @@ int smu_set_df_cstate(struct smu_context *smu,
return ret;
}
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
+ if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
+ return 0;
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
+ if (ret)
+ pr_err("[AllowXgmiPowerDown] failed!\n");
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
int smu_write_watermarks_table(struct smu_context *smu)
{
void *watermarks_table = smu->smu_table.watermarks_table;
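
From here on the series applies the same dpm_enabled guard to nearly every public smu_* entry point. The patch open-codes the check each time; an equivalent helper would look like this hypothetical sketch:

    /* Hypothetical; the patch repeats the open-coded form instead. */
    static inline int smu_check_dpm_enabled(struct smu_context *smu)
    {
    	return smu->adev->pm.dpm_enabled ? 0 : -EINVAL;
    }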
@@ -2065,6 +2140,10 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
void *table = smu->smu_table.watermarks_table;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
if (!table)
return -EINVAL;
@@ -2089,8 +2168,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
int smu_set_ac_dc(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
/* controlled by firmware */
if (smu->dc_controlled_by_gpio)
return 0;
@@ -2149,8 +2232,12 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block =
int smu_load_microcode(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->load_microcode)
@@ -2163,8 +2250,12 @@ int smu_load_microcode(struct smu_context *smu)
int smu_check_fw_status(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->check_fw_status)
@@ -2191,8 +2282,12 @@ int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_rpm)
@@ -2208,10 +2303,15 @@ int smu_get_power_limit(struct smu_context *smu,
bool def,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
- if (lock_needed)
+ if (lock_needed) {
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
+ }
if (smu->ppt_funcs->get_power_limit)
ret = smu->ppt_funcs->get_power_limit(smu, limit, def);
@@ -2224,8 +2324,12 @@ int smu_get_power_limit(struct smu_context *smu,
int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_power_limit)
@@ -2238,8 +2342,12 @@ int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->print_clk_levels)
@@ -2252,8 +2360,12 @@ int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, ch
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_od_percentage)
@@ -2266,8 +2378,12 @@ int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_od_percentage)
@@ -2282,8 +2398,12 @@ int smu_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->od_edit_dpm_table)
@@ -2298,8 +2418,12 @@ int smu_read_sensor(struct smu_context *smu,
enum amd_pp_sensors sensor,
void *data, uint32_t *size)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->read_sensor)
@@ -2312,8 +2436,12 @@ int smu_read_sensor(struct smu_context *smu,
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_power_profile_mode)
@@ -2329,8 +2457,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
uint32_t param_size,
bool lock_needed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (lock_needed)
mutex_lock(&smu->mutex);
@@ -2346,8 +2478,12 @@ int smu_set_power_profile_mode(struct smu_context *smu,
int smu_get_fan_control_mode(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_control_mode)
@@ -2360,8 +2496,12 @@ int smu_get_fan_control_mode(struct smu_context *smu)
int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_control_mode)
@@ -2374,8 +2514,12 @@ int smu_set_fan_control_mode(struct smu_context *smu, int value)
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_speed_percent)
@@ -2388,8 +2532,12 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_fan_speed_percent)
@@ -2402,8 +2550,12 @@ int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_fan_speed_rpm)
@@ -2416,8 +2568,12 @@ int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_deep_sleep_dcefclk)
@@ -2430,8 +2586,12 @@ int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
if (smu->ppt_funcs->set_active_display_count)
ret = smu->ppt_funcs->set_active_display_count(smu, count);
@@ -2442,8 +2602,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
enum amd_pp_clock_type type,
struct amd_pp_clocks *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type)
@@ -2457,8 +2621,12 @@ int smu_get_clock_by_type(struct smu_context *smu,
int smu_get_max_high_clocks(struct smu_context *smu,
struct amd_pp_simple_clock_info *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_max_high_clocks)
@@ -2473,8 +2641,12 @@ int smu_get_clock_by_type_with_latency(struct smu_context *smu,
enum smu_clk_type clk_type,
struct pp_clock_levels_with_latency *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type_with_latency)
@@ -2489,8 +2661,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
enum amd_pp_clock_type type,
struct pp_clock_levels_with_voltage *clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_clock_by_type_with_voltage)
@@ -2505,8 +2681,12 @@ int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
int smu_display_clock_voltage_request(struct smu_context *smu,
struct pp_display_clock_request *clock_req)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->display_clock_voltage_request)
@@ -2520,8 +2700,12 @@ int smu_display_clock_voltage_request(struct smu_context *smu,
int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = -EINVAL;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->display_disable_memory_clock_switch)
@@ -2534,8 +2718,12 @@ int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disabl
int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->notify_smu_enable_pwe)
@@ -2549,8 +2737,12 @@ int smu_notify_smu_enable_pwe(struct smu_context *smu)
int smu_set_xgmi_pstate(struct smu_context *smu,
uint32_t pstate)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_xgmi_pstate)
@@ -2563,8 +2755,12 @@ int smu_set_xgmi_pstate(struct smu_context *smu,
int smu_set_azalia_d3_pme(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->set_azalia_d3_pme)
@@ -2575,6 +2771,14 @@ int smu_set_azalia_d3_pme(struct smu_context *smu)
return ret;
}
+/*
+ * On system suspend or reset, the dpm_enabled flag is
+ * cleared, so the SMU services that are no longer
+ * available get gated.
+ *
+ * BACO and mode1 reset, however, should still be granted,
+ * as they remain supported and are needed for the reset path.
+ */
bool smu_baco_is_support(struct smu_context *smu)
{
bool ret = false;
@@ -2646,8 +2850,12 @@ int smu_mode2_reset(struct smu_context *smu)
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
@@ -2662,8 +2870,12 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
unsigned int *clock_values_in_khz,
unsigned int *num_states)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_uclk_dpm_states)
@@ -2677,6 +2889,10 @@ int smu_get_uclk_dpm_states(struct smu_context *smu,
enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
mutex_lock(&smu->mutex);
@@ -2691,8 +2907,12 @@ enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
int smu_get_dpm_clock_table(struct smu_context *smu,
struct dpm_clocks *clock_table)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
+ if (!adev->pm.dpm_enabled)
+ return -EINVAL;
+
mutex_lock(&smu->mutex);
if (smu->ppt_funcs->get_dpm_clock_table)
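/*
 * Illustrative sketch (not part of this patch): the guard pattern the
 * hunks above repeat across the smu_* entry points.  Each service that
 * is unavailable while DPM is disabled now bails out with -EINVAL
 * before taking smu->mutex.  "smu_do_something" and its ppt_funcs hook
 * are hypothetical names, used only to show the shape of the change.
 */
int smu_do_something(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	/* gated while suspending/resetting, when dpm_enabled is cleared */
	if (!adev->pm.dpm_enabled)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->do_something)
		ret = smu->ppt_funcs->do_something(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}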
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 1ef0923f7190..27c5fc9572b2 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -128,6 +128,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
+ MSG_MAP(GmiPwrDnControl, PPSMC_MSG_GmiPwrDnControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -622,6 +623,9 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct arcturus_dpm_table *dpm_table = NULL;
+ if (amdgpu_ras_intr_triggered())
+ return snprintf(buf, PAGE_SIZE, "unavailable\n");
+
dpm_table = smu_dpm->dpm_context;
switch (type) {
@@ -997,6 +1001,9 @@ static int arcturus_read_sensor(struct smu_context *smu,
PPTable_t *pptable = table_context->driver_pptable;
int ret = 0;
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
if (!data || !size)
return -EINVAL;
@@ -2226,12 +2233,8 @@ static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = {
static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
{
struct amdgpu_device *adev = to_amdgpu_device(control);
- struct smu_context *smu = &adev->smu;
int res;
- if (!smu->pm_enabled)
- return -EOPNOTSUPP;
-
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
@@ -2247,12 +2250,6 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- struct smu_context *smu = &adev->smu;
-
- if (!smu->pm_enabled)
- return;
-
i2c_del_adapter(control);
}
@@ -2261,7 +2258,7 @@ static bool arcturus_is_baco_supported(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t val;
- if (!smu_v11_0_baco_is_support(smu))
+ if (!smu_v11_0_baco_is_support(smu) || amdgpu_sriov_vf(adev))
return false;
val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
@@ -2296,6 +2293,35 @@ static int arcturus_set_df_cstate(struct smu_context *smu,
return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
+static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_GmiPwrDnControl is supported by 54.23.0 and onwards */
+ if (smu_version < 0x00361700) {
+ pr_err("XGMI power down control is only supported by PMFW 54.23.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ if (en)
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 1,
+ NULL);
+
+ return smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ 0,
+ NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
@@ -2389,6 +2415,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
.set_df_cstate = arcturus_set_df_cstate,
+ .allow_xgmi_power_down = arcturus_allow_xgmi_power_down,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
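/*
 * Illustrative sketch (not part of this patch): how the packed PMFW
 * version checked in arcturus_allow_xgmi_power_down() decodes, assuming
 * the usual byte layout of smu_version.  0x00361700 -> major 0x36 = 54,
 * minor 0x17 = 23, patch 0x00 = 0, i.e. PMFW 54.23.0.
 */
static inline void arcturus_show_pmfw_version(uint32_t smu_version)
{
	uint8_t major = (smu_version >> 16) & 0xff;
	uint8_t minor = (smu_version >> 8) & 0xff;
	uint8_t patch = smu_version & 0xff;

	pr_info("PMFW %u.%u.%u\n", major, minor, patch);
}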
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 77c14671866c..719597c5d27d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -984,6 +984,32 @@ static int init_thermal_controller(
struct pp_hwmgr *hwmgr,
const ATOM_PPLIB_POWERPLAYTABLE *powerplay_table)
{
+ hwmgr->thermal_controller.ucType =
+ powerplay_table->sThermalController.ucType;
+ hwmgr->thermal_controller.ucI2cLine =
+ powerplay_table->sThermalController.ucI2cLine;
+ hwmgr->thermal_controller.ucI2cAddress =
+ powerplay_table->sThermalController.ucI2cAddress;
+
+ hwmgr->thermal_controller.fanInfo.bNoFan =
+ (0 != (powerplay_table->sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN));
+
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+ powerplay_table->sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+
+ hwmgr->thermal_controller.fanInfo.ulMinRPM
+ = powerplay_table->sThermalController.ucFanMinRPM * 100UL;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM
+ = powerplay_table->sThermalController.ucFanMaxRPM * 100UL;
+
+ set_hw_cap(hwmgr,
+ ATOM_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
+ PHM_PlatformCaps_ThermalController);
+
+ hwmgr->thermal_controller.use_hw_fan_control = 1;
+
return 0;
}
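/*
 * Illustrative sketch (not part of this patch): how the fan fields set
 * above decode, assuming the conventional ATOM fan-parameter masks
 * (NOFAN bit 0x80, tachometer-pulses mask 0x0f).  For example, with
 * ucFanParameters = 0x84 and ucFanMinRPM = 30:
 */
static void example_fan_info_decode(uint8_t fan_params, uint8_t min_rpm_raw)
{
	bool no_fan = fan_params & 0x80;	/* 0x84 & 0x80 -> true */
	uint8_t pulses = fan_params & 0x0f;	/* 0x84 & 0x0f -> 4 pulses/rev */

	/* the table stores RPM in units of 100, hence the 100UL scaling */
	pr_info("no_fan=%d pulses=%u min_rpm=%lu\n",
		no_fan, pulses, min_rpm_raw * 100UL);
}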
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 689072a312a7..c9cfe90a2947 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -36,6 +36,8 @@
#include "power_state.h"
#include "soc15_common.h"
#include "smu10.h"
+#include "asic_reg/pwr/pwr_10_0_offset.h"
+#include "asic_reg/pwr/pwr_10_0_sh_mask.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8MHz, the low boundary of engine clock allowed on this chip */
@@ -43,13 +45,6 @@
#define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100MHz */
#define SMC_RAM_END 0x40000
-#define mmPWR_MISC_CNTL_STATUS 0x0183
-#define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
-#define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
-#define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
-
static const unsigned long SMU10_Magic = (unsigned long) PHM_Rv_Magic;
@@ -81,7 +76,7 @@ static int smu10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
return -EINVAL;
}
- smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq);
+ smum_send_msg_to_smc_with_parameter(hwmgr, msg, clk_freq, NULL);
return 0;
}
@@ -214,7 +209,8 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- smu10_data->deep_sleep_dcefclk);
+ smu10_data->deep_sleep_dcefclk,
+ NULL);
}
return 0;
}
@@ -228,7 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
- smu10_data->dcf_actual_hard_min_freq);
+ smu10_data->dcf_actual_hard_min_freq,
+ NULL);
}
return 0;
}
@@ -242,7 +239,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- smu10_data->f_actual_hard_min_freq);
+ smu10_data->f_actual_hard_min_freq,
+ NULL);
}
return 0;
}
@@ -255,7 +253,8 @@ static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count
smu10_data->num_active_display = count;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplayCount,
- smu10_data->num_active_display);
+ smu10_data->num_active_display,
+ NULL);
}
return 0;
@@ -278,7 +277,8 @@ static int smu10_init_power_gate_state(struct pp_hwmgr *hwmgr)
if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetGfxCGPG,
- true);
+ true,
+ NULL);
else
return 0;
}
@@ -324,7 +324,7 @@ static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff, NULL);
/* confirm gfx is back to "on" state */
while (!smu10_is_gfx_on(hwmgr))
@@ -344,7 +344,7 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
struct amdgpu_device *adev = hwmgr->adev;
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff, NULL);
return 0;
}
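/*
 * Illustrative sketch (not part of this patch): with the local register
 * defines replaced by the shared asic_reg/pwr headers above, a GFXOFF
 * status poll reads the same field through the common macros.  The
 * "status value 2 means GFX is on" interpretation here is an assumption
 * for illustration.
 */
static bool example_gfx_is_on(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t reg = RREG32_SOC15(PWR, 0, mmPWR_MISC_CNTL_STATUS);

	return (reg & PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK) ==
	       (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT);
}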
@@ -410,12 +410,10 @@ static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table **pptable,
uint32_t num_entry, const DpmClock_t *pclk_dependency_table)
{
- uint32_t table_size, i;
+ uint32_t i;
struct smu10_voltage_dependency_table *ptable;
- table_size = sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table) * num_entry;
- ptable = kzalloc(table_size, GFP_KERNEL);
-
+ ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
if (NULL == ptable)
return -ENOMEM;
@@ -479,12 +477,10 @@ static int smu10_populate_clock_table(struct pp_hwmgr *hwmgr)
smu10_get_clock_voltage_dependency_table(hwmgr, &pinfo->vdd_dep_on_phyclk,
ARRAY_SIZE(VddPhyClk), &VddPhyClk[0]);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency);
- result = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &result);
smu10_data->gfx_min_freq_limit = result / 10 * 1000;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency);
- result = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &result);
smu10_data->gfx_max_freq_limit = result / 10 * 1000;
return 0;
@@ -588,116 +584,148 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- SMU10_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- SMU10_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- SMU10_UMD_PSTATE_GFXCLK);
+ SMU10_UMD_PSTATE_GFXCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_FCLK);
+ SMU10_UMD_PSTATE_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_SOCCLK);
+ SMU10_UMD_PSTATE_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- min_sclk);
+ min_sclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
SMU10_UMD_PSTATE_PEAK_FCLK :
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
- SMU10_UMD_PSTATE_MIN_SOCCLK);
+ SMU10_UMD_PSTATE_MIN_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
- SMU10_UMD_PSTATE_MIN_VCE);
+ SMU10_UMD_PSTATE_MIN_VCE,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- SMU10_UMD_PSTATE_PEAK_FCLK);
+ SMU10_UMD_PSTATE_PEAK_FCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
- SMU10_UMD_PSTATE_PEAK_SOCCLK);
+ SMU10_UMD_PSTATE_PEAK_SOCCLK,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
- SMU10_UMD_PSTATE_VCE);
+ SMU10_UMD_PSTATE_VCE,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- min_mclk);
+ min_mclk,
+ NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
@@ -849,13 +877,15 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
low == 2 ? data->gfx_max_freq_limit/100 :
low == 1 ? SMU10_UMD_PSTATE_GFXCLK :
- data->gfx_min_freq_limit/100);
+ data->gfx_min_freq_limit/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxClk,
high == 0 ? data->gfx_min_freq_limit/100 :
high == 1 ? SMU10_UMD_PSTATE_GFXCLK :
- data->gfx_max_freq_limit/100);
+ data->gfx_max_freq_limit/100,
+ NULL);
break;
case PP_MCLK:
@@ -864,11 +894,13 @@ static int smu10_force_clock_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
- mclk_table->entries[low].clk/100);
+ mclk_table->entries[low].clk/100,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
- mclk_table->entries[high].clk/100);
+ mclk_table->entries[high].clk/100,
+ NULL);
break;
case PP_PCIE:
@@ -888,8 +920,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
/* driver only knows min/max gfx_clk, add level 1 for all other gfx clks */
if (now == data->gfx_max_freq_limit/100)
@@ -910,8 +941,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
i == 2 ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -1122,15 +1152,13 @@ static int smu10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
- sclk = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &sclk);
/* in units of 10KHz */
*((uint32_t *)value) = sclk * 100;
*size = 4;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency);
- mclk = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &mclk);
/* in units of 10KHz */
*((uint32_t *)value) = mclk * 100;
*size = 4;
@@ -1166,20 +1194,20 @@ static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
static int smu10_smus_notify_pwe(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SetRccPfcPmeRestoreRegister, NULL);
}
static int smu10_powergate_mmhub(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
}
static int smu10_powergate_sdma(struct pp_hwmgr *hwmgr, bool gate)
{
if (gate)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerDownSdma, NULL);
else
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerUpSdma, NULL);
}
static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
@@ -1191,11 +1219,11 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_GATE);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerDownVcn, 0);
+ PPSMC_MSG_PowerDownVcn, 0, NULL);
smu10_data->vcn_power_gated = true;
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PowerUpVcn, 0);
+ PPSMC_MSG_PowerUpVcn, 0, NULL);
amdgpu_device_ip_set_powergating_state(hwmgr->adev,
AMD_IP_BLOCK_TYPE_VCN,
AMD_PG_STATE_UNGATE);
@@ -1274,8 +1302,7 @@ static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
static bool smu10_is_raven1_refresh(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- if ((adev->asic_type == CHIP_RAVEN) &&
- (adev->rev_id != 0x15d8) &&
+ if ((adev->apu_flags & AMD_APU_IS_RAVEN) &&
(hwmgr->smu_version >= 0x41e2b))
return true;
else
@@ -1304,7 +1331,8 @@ static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uin
hwmgr->gfxoff_state_changed_by_workload = true;
}
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (!result)
hwmgr->power_profile_mode = input[size];
if (workload_type && hwmgr->gfxoff_state_changed_by_workload) {
@@ -1319,13 +1347,13 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DeviceDriverReset,
- mode);
+ mode,
+ NULL);
}
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
- .asic_setup = NULL,
.apply_state_adjust_rules = smu10_apply_state_adjust_rules,
.force_dpm_level = smu10_dpm_force_dpm_level,
.get_power_state_size = smu10_get_power_state_size,
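/*
 * Illustrative sketch (not part of this patch): the messaging API change
 * applied mechanically throughout these files.  The response readback
 * moves into the send call itself; callers that ignore the response
 * pass NULL instead of issuing a separate smum_get_argument().
 *
 *	before:
 *		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency);
 *		now = smum_get_argument(hwmgr);
 *
 *	after:
 *		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
 *		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PowerGateMmHub, NULL);
 */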
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
index 1fb296a996f3..0f969de10fab 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.h
@@ -192,7 +192,7 @@ struct smu10_clock_voltage_dependency_record {
struct smu10_voltage_dependency_table {
uint32_t count;
- struct smu10_clock_voltage_dependency_record entries[1];
+ struct smu10_clock_voltage_dependency_record entries[];
};
struct smu10_clock_voltage_information {
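/*
 * Illustrative sketch (not part of this patch): with entries[] now a
 * flexible array member, the allocation in smu10_hwmgr.c uses
 * struct_size(), which computes sizeof(*ptable) +
 * num_entry * sizeof(ptable->entries[0]) with overflow checking,
 * replacing the hand-rolled sizeof(uint32_t) + n * sizeof(record)
 * arithmetic:
 */
	struct smu10_voltage_dependency_table *ptable;

	ptable = kzalloc(struct_size(ptable, entries, num_entry), GFP_KERNEL);
	if (!ptable)
		return -ENOMEM;
	ptable->count = num_entry;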
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 683b29a99366..f2bda3bcbbde 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -29,14 +29,16 @@ static int smu7_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_UVDDPM_Enable :
- PPSMC_MSG_UVDDPM_Disable);
+ PPSMC_MSG_UVDDPM_Disable,
+ NULL);
}
static int smu7_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
return smum_send_msg_to_smc(hwmgr, enable ?
PPSMC_MSG_VCEDPM_Enable :
- PPSMC_MSG_VCEDPM_Disable);
+ PPSMC_MSG_VCEDPM_Disable,
+ NULL);
}
static int smu7_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
@@ -57,7 +59,8 @@ int smu7_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_uvd_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_UVDPowerOFF);
+ PPSMC_MSG_UVDPowerOFF,
+ NULL);
return 0;
}
@@ -67,10 +70,10 @@ static int smu7_powerup_uvd(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_UVDDynamicPowerGating)) {
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UVDPowerON, 1);
+ PPSMC_MSG_UVDPowerON, 1, NULL);
} else {
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_UVDPowerON, 0);
+ PPSMC_MSG_UVDPowerON, 0, NULL);
}
}
@@ -81,7 +84,8 @@ static int smu7_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerOFF);
+ PPSMC_MSG_VCEPowerOFF,
+ NULL);
return 0;
}
@@ -89,7 +93,8 @@ static int smu7_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (phm_cf_want_vce_power_gating(hwmgr))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerON);
+ PPSMC_MSG_VCEPowerON,
+ NULL);
return 0;
}
@@ -181,7 +186,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -191,7 +196,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -204,7 +209,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -215,7 +220,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_3DLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -228,7 +233,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_RLC_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -241,7 +246,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_GFX_CP_LS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -255,7 +260,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
CG_GFX_OTHERS_MGCG_MASK);
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -275,7 +280,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -285,7 +290,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_BIF_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -298,7 +303,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -309,7 +314,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_MC_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -322,7 +327,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
if (PP_STATE_SUPPORT_LS & *msg_id) {
@@ -332,7 +337,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_DRM_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -345,7 +350,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -356,7 +361,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_HDP_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -369,7 +374,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGCG_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
@@ -380,7 +385,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_SDMA_MGLS_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -393,7 +398,7 @@ int smu7_update_clock_gatings(struct pp_hwmgr *hwmgr,
value = CG_SYS_ROM_MASK;
if (smum_send_msg_to_smc_with_parameter(
- hwmgr, msg, value))
+ hwmgr, msg, value, NULL))
return -EINVAL;
}
break;
@@ -423,8 +428,10 @@ int smu7_powergate_gfx(struct pp_hwmgr *hwmgr, bool enable)
if (enable)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GFX_CU_PG_ENABLE,
- adev->gfx.cu_info.number);
+ adev->gfx.cu_info.number,
+ NULL);
else
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GFX_CU_PG_DISABLE);
+ PPSMC_MSG_GFX_CU_PG_DISABLE,
+ NULL);
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 7740488999df..753cb2cf6b77 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -186,7 +186,7 @@ static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
}
if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);
return 0;
}
@@ -493,7 +493,7 @@ static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}
/**
@@ -979,7 +979,8 @@ static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_RegulatorHot))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableVRHotGPIOInterrupt);
+ PPSMC_MSG_EnableVRHotGPIOInterrupt,
+ NULL);
return 0;
}
@@ -996,7 +997,7 @@ static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
return 0;
}
@@ -1006,7 +1007,7 @@ static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
if (data->ulv_supported)
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
return 0;
}
@@ -1015,13 +1016,14 @@ static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
PP_ASSERT_WITH_CODE(false,
"Attempt to enable Master Deep Sleep switch failed!",
return -EINVAL);
} else {
if (smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PPSMC_MSG_MASTER_DeepSleep_OFF,
+ NULL)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
return -EINVAL);
@@ -1036,7 +1038,8 @@ static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep)) {
if (smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MASTER_DeepSleep_OFF)) {
+ PPSMC_MSG_MASTER_DeepSleep_OFF,
+ NULL)) {
PP_ASSERT_WITH_CODE(false,
"Attempt to disable Master Deep Sleep switch failed!",
return -EINVAL);
@@ -1089,7 +1092,7 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
smu7_disable_sclk_vce_handshake(hwmgr);
PP_ASSERT_WITH_CODE(
- (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable)),
+ (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
"Failed to enable SCLK DPM during DPM Start Function!",
return -EINVAL);
}
@@ -1101,7 +1104,8 @@ static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_Enable)),
+ PPSMC_MSG_MCLKDPM_Enable,
+ NULL)),
"Failed to enable MCLK DPM during DPM Start Function!",
return -EINVAL);
@@ -1172,7 +1176,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (0 == data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_Enable)),
+ PPSMC_MSG_PCIeDPM_Enable,
+ NULL)),
"Failed to enable pcie DPM during DPM Start Function!",
return -EINVAL);
}
@@ -1180,7 +1185,8 @@ static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_Falcon_QuickTransition)) {
PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableACDCGPIOInterrupt)),
+ PPSMC_MSG_EnableACDCGPIOInterrupt,
+ NULL)),
"Failed to enable AC DC GPIO Interrupt!",
);
}
@@ -1197,7 +1203,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable SCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
}
/* disable MCLK dpm */
@@ -1205,7 +1211,7 @@ static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
"Trying to disable MCLK DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
}
return 0;
@@ -1226,7 +1232,8 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
if (!data->pcie_dpm_key_disabled) {
PP_ASSERT_WITH_CODE(
(smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_Disable) == 0),
+ PPSMC_MSG_PCIeDPM_Disable,
+ NULL) == 0),
"Failed to disable pcie DPM during DPM Stop Function!",
return -EINVAL);
}
@@ -1237,7 +1244,7 @@ static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
"Trying to disable voltage DPM when DPM is disabled",
return 0);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
return 0;
}
@@ -1388,7 +1395,7 @@ static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((0 == tmp_result),
"Failed to enable VR hot GPIO interrupt!", result = tmp_result);
- smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay);
+ smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
tmp_result = smu7_enable_sclk_control(hwmgr);
PP_ASSERT_WITH_CODE((0 == tmp_result),
@@ -1446,14 +1453,14 @@ static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_EnableAvfs),
+ hwmgr, PPSMC_MSG_EnableAvfs, NULL),
"Failed to enable AVFS!",
return -EINVAL);
}
} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
- hwmgr, PPSMC_MSG_DisableAvfs),
+ hwmgr, PPSMC_MSG_DisableAvfs, NULL),
"Failed to disable AVFS!",
return -EINVAL);
}
@@ -2609,7 +2616,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PCIeDPM_ForceLevel, level);
+ PPSMC_MSG_PCIeDPM_ForceLevel, level,
+ NULL);
}
}
@@ -2623,7 +2631,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2637,7 +2646,8 @@ static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
if (level)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2656,14 +2666,16 @@ static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask,
+ NULL);
}
if (!data->mclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask,
+ NULL);
}
return 0;
@@ -2678,7 +2690,8 @@ static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
if (!data->pcie_dpm_key_disabled) {
smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_PCIeDPM_UnForceLevel);
+ PPSMC_MSG_PCIeDPM_UnForceLevel,
+ NULL);
}
return smu7_upload_dpm_level_enable_mask(hwmgr);
@@ -2696,7 +2709,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.sclk_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
@@ -2706,7 +2720,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.mclk_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- (1 << level));
+ (1 << level),
+ NULL);
}
}
@@ -2716,7 +2731,8 @@ static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
data->dpm_level_enable_mask.pcie_dpm_enable_mask);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- (level));
+ (level),
+ NULL);
}
}
@@ -3495,21 +3511,20 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
(adev->asic_type != CHIP_BONAIRE) &&
(adev->asic_type != CHIP_FIJI) &&
(adev->asic_type != CHIP_TONGA)) {
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0);
- tmp = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
*query = tmp;
if (tmp != 0)
return 0;
}
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
ixSMU_PM_STATUS_95, 0);
for (i = 0; i < 10; i++) {
msleep(500);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
tmp = cgs_read_ind_register(hwmgr->device,
CGS_IND_REG__SMC,
ixSMU_PM_STATUS_95);
@@ -3534,14 +3549,12 @@ static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
- sclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
*((uint32_t *)value) = sclk;
*size = 4;
return 0;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
- mclk = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
*((uint32_t *)value) = mclk;
*size = 4;
return 0;
@@ -3730,7 +3743,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_FreezeLevel),
+ PPSMC_MSG_SCLKDPM_FreezeLevel,
+ NULL),
"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3742,7 +3756,8 @@ static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_FreezeLevel),
+ PPSMC_MSG_MCLKDPM_FreezeLevel,
+ NULL),
"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3804,9 +3819,12 @@ static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
{
uint32_t i;
+ /* force the trim if mclk_switching is disabled to prevent flicker */
+ bool force_trim = (low_limit == high_limit);
for (i = 0; i < dpm_table->count; i++) {
/* skip the trim if od is enabled */
- if (!hwmgr->od_enabled && (dpm_table->dpm_levels[i].value < low_limit
+ if ((!hwmgr->od_enabled || force_trim)
+ && (dpm_table->dpm_levels[i].value < low_limit
|| dpm_table->dpm_levels[i].value > high_limit))
dpm_table->dpm_levels[i].enabled = false;
else
@@ -3881,7 +3899,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_SCLKDPM_UnfreezeLevel,
+ NULL),
"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3893,7 +3912,8 @@ static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_MCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel,
+ NULL),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
@@ -3946,12 +3966,14 @@ static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
if (hwmgr->chip_id == CHIP_VEGAM)
smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2);
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
+ NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr,
- (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2);
+ (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
+ NULL);
}
- return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
+ return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
}
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
@@ -4037,7 +4059,8 @@ static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm);
+ PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
+ NULL);
}
static int
@@ -4045,7 +4068,7 @@ smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
{
PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
- return (smum_send_msg_to_smc(hwmgr, msg) == 0) ? 0 : -1;
+ return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ? 0 : -1;
}
static int
@@ -4129,7 +4152,8 @@ static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_f
advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm);
+ PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
+ NULL);
}
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
@@ -4259,14 +4283,14 @@ static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
if ((hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
} else {
data->mem_latency_high = 330;
data->mem_latency_low = 330;
if ((hwmgr->chip_id == CHIP_POLARIS10) ||
(hwmgr->chip_id == CHIP_POLARIS11) ||
(hwmgr->chip_id == CHIP_POLARIS12))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
}
return 0;
@@ -4410,13 +4434,15 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->sclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
+ data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
+ NULL);
break;
case PP_MCLK:
if (!data->mclk_dpm_key_disabled)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_MCLKDPM_SetEnabledMask,
- data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
+ data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
+ NULL);
break;
case PP_PCIE:
{
@@ -4424,11 +4450,13 @@ static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
if (!data->pcie_dpm_key_disabled) {
if (fls(tmp) != ffs(tmp))
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
+ NULL);
else
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_PCIeDPM_ForceLevel,
- fls(tmp) - 1);
+ fls(tmp) - 1,
+ NULL);
}
break;
}
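/*
 * Worked example (not part of this patch) for the fls()/ffs() test
 * above: the two differ exactly when more than one PCIe level bit is
 * requested.
 */
	uint32_t multi = 0x6;	/* levels 1 and 2: ffs = 2, fls = 3 -> unforce */
	uint32_t single = 0x4;	/* level 2 only: ffs = fls = 3 -> force fls - 1 = 2 */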
@@ -4454,8 +4482,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
switch (type) {
case PP_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
for (i = 0; i < sclk_table->count; i++) {
if (clock > sclk_table->dpm_levels[i].value)
@@ -4470,8 +4497,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
(i == now) ? "*" : "");
break;
case PP_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency);
- clock = cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
for (i = 0; i < mclk_table->count; i++) {
if (clock > mclk_table->dpm_levels[i].value)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 58f5589aaf12..5d4971576111 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -887,7 +887,10 @@ static int smu7_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
didt_block |= block_en << TCP_Enable_SHIFT;
if (enable)
- result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Didt_Block_Function, didt_block);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_Didt_Block_Function,
+ didt_block,
+ NULL);
return result;
}
@@ -1009,7 +1012,8 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_EnableDpmDidt));
+ (uint16_t)(PPSMC_MSG_EnableDpmDidt),
+ NULL);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to enable DPM DIDT.", goto error);
}
@@ -1042,7 +1046,8 @@ int smu7_disable_didt_config(struct pp_hwmgr *hwmgr)
goto error);
if (hwmgr->chip_id == CHIP_POLARIS11) {
result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableDpmDidt));
+ (uint16_t)(PPSMC_MSG_DisableDpmDidt),
+ NULL);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to disable DPM DIDT.", goto error);
}
@@ -1063,7 +1068,8 @@ int smu7_enable_smc_cac(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_CAC)) {
int smc_result;
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_EnableCac));
+ (uint16_t)(PPSMC_MSG_EnableCac),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable CAC in SMC.", result = -1);
@@ -1079,7 +1085,8 @@ int smu7_disable_smc_cac(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_CAC) && data->cac_enabled) {
int smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableCac));
+ (uint16_t)(PPSMC_MSG_DisableCac),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable CAC in SMC.", result = -1);
@@ -1095,7 +1102,9 @@ int smu7_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PkgPwrSetLimit, n<<8);
+ PPSMC_MSG_PkgPwrSetLimit,
+ n<<8,
+ NULL);
return 0;
}
@@ -1103,7 +1112,9 @@ static int smu7_set_overdriver_target_tdp(struct pp_hwmgr *hwmgr,
uint32_t target_tdp)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ PPSMC_MSG_OverDriveSetTargetTdp,
+ target_tdp,
+ NULL);
}
int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
@@ -1124,7 +1135,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
if (PP_CAP(PHM_PlatformCaps_PowerContainment)) {
if (data->enable_tdc_limit_feature) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_TDCLimitEnable));
+ (uint16_t)(PPSMC_MSG_TDCLimitEnable),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable TDCLimit in SMC.", result = -1;);
if (0 == smc_result)
@@ -1134,7 +1146,8 @@ int smu7_enable_power_containment(struct pp_hwmgr *hwmgr)
if (data->enable_pkg_pwr_tracking_feature) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable));
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitEnable),
+ NULL);
PP_ASSERT_WITH_CODE((0 == smc_result),
"Failed to enable PkgPwrTracking in SMC.", result = -1;);
if (0 == smc_result) {
@@ -1163,7 +1176,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_TDCLimit) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_TDCLimitDisable));
+ (uint16_t)(PPSMC_MSG_TDCLimitDisable),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable TDCLimit in SMC.",
result = smc_result);
@@ -1172,7 +1186,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_DTE) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_DisableDTE));
+ (uint16_t)(PPSMC_MSG_DisableDTE),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable DTE in SMC.",
result = smc_result);
@@ -1181,7 +1196,8 @@ int smu7_disable_power_containment(struct pp_hwmgr *hwmgr)
if (data->power_containment_features &
POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
smc_result = smum_send_msg_to_smc(hwmgr,
- (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
+ (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable),
+ NULL);
PP_ASSERT_WITH_CODE((smc_result == 0),
"Failed to disable PkgPwrTracking in SMC.",
result = smc_result);
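/*
 * Illustrative note (not part of this patch) on the n<<8 in
 * smu7_set_power_limit() above: the shift suggests the SMC takes the
 * package power limit as an 8.8 fixed-point value, so a limit of n
 * watts is sent as n * 256 (e.g. 150 W -> 150 << 8 = 0x9600).  The
 * fixed-point interpretation is an assumption drawn from the shift.
 */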
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
index 5bdc0df5a9f4..0b30f73649a8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_thermal.c
@@ -151,8 +151,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int result;
if (PP_CAP(PHM_PlatformCaps_ODFuzzyFanControlSupport)) {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_FUZZY);
- result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+ FAN_CONTROL_FUZZY, NULL);
if (PP_CAP(PHM_PlatformCaps_FanSpeedInTableIsRPM))
hwmgr->hwmgr_func->set_max_fan_rpm_output(hwmgr,
@@ -164,8 +164,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
advanceFanControlParameters.usMaxFanPWM);
} else {
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, FAN_CONTROL_TABLE);
- result = smum_send_msg_to_smc(hwmgr, PPSMC_StartFanControl);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_StartFanControl,
+ FAN_CONTROL_TABLE, NULL);
}
if (!result && hwmgr->thermal_controller.
@@ -173,7 +173,8 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucTargetTemperature);
+ advanceFanControlParameters.ucTargetTemperature,
+ NULL);
hwmgr->fan_ctrl_enabled = true;
return result;
@@ -183,7 +184,7 @@ int smu7_fan_ctrl_start_smc_fan_control(struct pp_hwmgr *hwmgr)
int smu7_fan_ctrl_stop_smc_fan_control(struct pp_hwmgr *hwmgr)
{
hwmgr->fan_ctrl_enabled = false;
- return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_StopFanControl, NULL);
}
/**
@@ -372,7 +373,7 @@ static void smu7_thermal_enable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to enable internal thermal interrupts */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Enable, NULL);
}
/**
@@ -390,7 +391,7 @@ int smu7_thermal_disable_alert(struct pp_hwmgr *hwmgr)
CG_THERMAL_INT, THERM_INT_MASK, alert);
/* send message to SMU to disable internal thermal interrupts */
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Thermal_Cntl_Disable, NULL);
}
/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 019d6a206492..a6c6a793e98e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -162,8 +162,10 @@ static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
if (data->max_sclk_level == 0) {
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
- data->max_sclk_level = smum_get_argument(hwmgr) + 1;
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetMaxSclkLevel,
+ &data->max_sclk_level);
+ data->max_sclk_level += 1;
}
return data->max_sclk_level;
@@ -580,7 +582,8 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_uvd_clock_voltage_dependency_table *table =
hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -588,8 +591,7 @@ static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
data->uvd_dpm.soft_min_clk = 0;
data->uvd_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel, &level);
if (level < table->count)
clock = table->entries[level].vclk;
@@ -607,7 +609,8 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_vce_clock_voltage_dependency_table *table =
hwmgr->dyn_state.vce_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -615,8 +618,7 @@ static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
data->vce_dpm.soft_min_clk = 0;
data->vce_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel, &level);
if (level < table->count)
clock = table->entries[level].ecclk;
@@ -634,7 +636,8 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
struct smu8_hwmgr *data = hwmgr->backend;
struct phm_acp_clock_voltage_dependency_table *table =
hwmgr->dyn_state.acp_clock_voltage_dependency_table;
- unsigned long clock = 0, level;
+ unsigned long clock = 0;
+ uint32_t level;
if (NULL == table || table->count <= 0)
return -EINVAL;
@@ -642,8 +645,7 @@ static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
data->acp_dpm.soft_min_clk = 0;
data->acp_dpm.hard_min_clk = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
- level = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel, &level);
if (level < table->count)
clock = table->entries[level].acpclk;
@@ -665,7 +667,7 @@ static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
#ifdef CONFIG_DRM_AMD_ACP
data->acp_power_gated = false;
#else
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
data->acp_power_gated = true;
#endif
@@ -708,7 +710,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkHardMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.hard_min_clk,
- PPSMC_MSG_SetSclkHardMin));
+ PPSMC_MSG_SetSclkHardMin),
+ NULL);
}
clock = data->sclk_dpm.soft_min_clk;
@@ -731,7 +734,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
}
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -742,7 +746,8 @@ static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
}
return 0;
@@ -760,7 +765,8 @@ static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepSclk,
- clks);
+ clks,
+ NULL);
}
return 0;
@@ -773,7 +779,8 @@ static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetWatermarkFrequency,
- data->sclk_dpm.soft_max_clk);
+ data->sclk_dpm.soft_max_clk,
+ NULL);
return 0;
}
@@ -788,13 +795,15 @@ static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable,
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableLowMemoryPstate,
- (lock ? 1 : 0));
+ (lock ? 1 : 0),
+ NULL);
} else {
PP_DBG_LOG("disable Low Memory PState.\n");
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableLowMemoryPstate,
- (lock ? 1 : 0));
+ (lock ? 1 : 0),
+ NULL);
}
}
@@ -814,7 +823,8 @@ static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
if (ret == 0)
data->is_nb_dpm_enabled = false;
}
@@ -835,7 +845,8 @@ static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
if (ret == 0)
data->is_nb_dpm_enabled = true;
}
@@ -953,7 +964,8 @@ static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableAllSmuFeatures,
- SCLK_DPM_MASK);
+ SCLK_DPM_MASK,
+ NULL);
}
static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
@@ -967,7 +979,8 @@ static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DisableAllSmuFeatures,
- dpm_features);
+ dpm_features,
+ NULL);
}
return ret;
}
@@ -983,13 +996,15 @@ static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1127,13 +1142,15 @@ static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1167,13 +1184,15 @@ static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_max_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
return 0;
}
@@ -1186,13 +1205,15 @@ static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetSclkSoftMax,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMax));
+ PPSMC_MSG_SetSclkSoftMax),
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
smu8_get_sclk_level(hwmgr,
data->sclk_dpm.soft_min_clk,
- PPSMC_MSG_SetSclkSoftMin));
+ PPSMC_MSG_SetSclkSoftMin),
+ NULL);
return 0;
}
@@ -1227,7 +1248,7 @@ static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF, NULL);
return 0;
}
@@ -1237,7 +1258,8 @@ static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(
hwmgr,
PPSMC_MSG_UVDPowerON,
- PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
+ PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0,
+ NULL);
}
return 0;
@@ -1259,15 +1281,20 @@ static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
PPSMC_MSG_SetEclkHardMin,
smu8_get_eclk_level(hwmgr,
data->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
+ PPSMC_MSG_SetEclkHardMin),
+ NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkHardMin, 0);
+ PPSMC_MSG_SetEclkHardMin,
+ 0,
+ NULL);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetEclkSoftMin, 1);
+ PPSMC_MSG_SetEclkSoftMin,
+ 1,
+ NULL);
}
return 0;
}
@@ -1276,7 +1303,8 @@ static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerOFF);
+ PPSMC_MSG_VCEPowerOFF,
+ NULL);
return 0;
}
@@ -1284,7 +1312,8 @@ static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
return smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_VCEPowerON);
+ PPSMC_MSG_VCEPowerON,
+ NULL);
return 0;
}
@@ -1435,7 +1464,8 @@ static void smu8_hw_print_display_cfg(
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDisplaySizePowerParams,
- data);
+ data,
+ NULL);
}
return 0;
@@ -1497,10 +1527,12 @@ static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
case PP_SCLK:
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMin,
- mask);
+ mask,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSclkSoftMax,
- mask);
+ mask,
+ NULL);
break;
default:
break;
@@ -1753,9 +1785,10 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*((uint32_t *)value) = 0;
return 0;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
+ result = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetAverageGraphicsActivity,
+ &activity_percent);
if (0 == result) {
- activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
activity_percent = activity_percent > 100 ? 100 : activity_percent;
} else {
activity_percent = 50;
@@ -1785,20 +1818,25 @@ static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiVirtual,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoVirtual,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrHiPhysical,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramAddrLoPhysical,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramBufferSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -1827,12 +1865,16 @@ static int smu8_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
data->dpm_flags |= DPMFlags_UVD_Enabled;
dpm_features |= UVD_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features,
+ NULL);
} else {
dpm_features |= UVD_DPM_MASK;
data->dpm_flags &= ~DPMFlags_UVD_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features,
+ NULL);
}
return 0;
}
@@ -1854,7 +1896,8 @@ int smu8_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
PPSMC_MSG_SetUvdHardMin,
smu8_get_uvd_level(hwmgr,
data->uvd_dpm.hard_min_clk,
- PPSMC_MSG_SetUvdHardMin));
+ PPSMC_MSG_SetUvdHardMin),
+ NULL);
smu8_enable_disable_uvd_dpm(hwmgr, true);
} else {
@@ -1878,12 +1921,16 @@ static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
data->dpm_flags |= DPMFlags_VCE_Enabled;
dpm_features |= VCE_DPM_MASK;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_EnableAllSmuFeatures,
+ dpm_features,
+ NULL);
} else {
dpm_features |= VCE_DPM_MASK;
data->dpm_flags &= ~DPMFlags_VCE_Enabled;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
+ PPSMC_MSG_DisableAllSmuFeatures,
+ dpm_features,
+ NULL);
}
return 0;
@@ -1898,9 +1945,9 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
return;
if (bgate)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerOFF, NULL);
else
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
}
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
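
Every smu8 hunk above is the same mechanical conversion: smum_send_msg_to_smc() and smum_send_msg_to_smc_with_parameter() grow a trailing response pointer, NULL where the reply is unused, and the separate read afterwards (smum_get_argument(), or the literal cgs_read_register() of mmSMU_MP1_SRBM2P_ARG_0 in the GPU-load sensor path) disappears. Folding the reply into the send lets the implementation hand it back from the same message transaction instead of a later, separate register read. A minimal user-space sketch of the calling convention only — fake_mailbox and the message IDs below are hypothetical stand-ins, not the driver's plumbing:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mailbox: replies with a canned value per message ID. */
static uint32_t fake_mailbox(uint32_t msg)
{
	return msg == 0x15 ? 7 : 0;	/* pretend "max eclk level" is 7 */
}

/*
 * New-style send: the optional @resp replaces the old separate
 * smum_get_argument() read, so request and reply form one transaction.
 */
static int send_msg_to_smc(uint32_t msg, uint32_t *resp)
{
	uint32_t reply = fake_mailbox(msg);

	if (resp)
		*resp = reply;
	return 0;
}

int main(void)
{
	uint32_t level = 0;

	send_msg_to_smc(0x15, &level);	/* caller consumes the reply */
	send_msg_to_smc(0x2a, NULL);	/* fire-and-forget message */
	printf("max eclk level: %u\n", level);
	return 0;
}
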
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
index d09690fca452..60b5ca974356 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu_helper.c
@@ -22,6 +22,7 @@
*/
#include <linux/pci.h>
+#include <linux/reboot.h>
#include "hwmgr.h"
#include "pp_debug.h"
@@ -557,7 +558,9 @@ void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
if (req_vddc <= vddc_table->entries[i].vddc) {
req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_VddC_Request, req_volt);
+ PPSMC_MSG_VddC_Request,
+ req_volt,
+ NULL);
return;
}
}
@@ -593,37 +596,43 @@ int phm_irq_process(struct amdgpu_device *adev,
uint32_t src_id = entry->src_id;
if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
- if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH)
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else if (src_id == VISLANDS30_IV_SRCID_GPIO_19)
- pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+ } else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW)
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
+ }
} else if (client_id == SOC15_IH_CLIENTID_THM) {
- if (src_id == 0)
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- else
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
- } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO)
- pr_warn("GPU Critical Temperature Fault detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ if (src_id == 0) {
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
+ } else
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
+ }
return 0;
}
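
The phm_irq_process() rewrite changes policy as well as logging: the over-temperature (SW CTF) and the GPIO/SMUIO critical-temperature-fault (HW CTF) interrupts escalate from a pr_warn() to a dev_emerg() plus orderly_poweroff(true) — the <linux/reboot.h> helper that asks userspace to power the machine down and forces it if that fails — while the under-temperature event stays log-only. A stand-alone model of that classification; the enum and the mapping are illustrative, not the driver's IH constants:

#include <stdbool.h>
#include <stdio.h>

enum thermal_event { UNDER_TEMP, SW_CTF, HW_CTF };

/* Mirror of the new policy: both CTF flavours power the machine off. */
static bool should_poweroff(enum thermal_event ev)
{
	return ev == SW_CTF || ev == HW_CTF;
}

int main(void)
{
	const char *name[] = { "under-temp", "sw-ctf", "hw-ctf" };

	for (int ev = UNDER_TEMP; ev <= HW_CTF; ev++)
		printf("%s -> %s\n", name[ev],
		       should_poweroff(ev) ? "orderly_poweroff(true)"
					   : "log only");
	return 0;
}
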
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index d168af4a4d78..46bb16c29cf6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -98,7 +98,7 @@ int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (state == BACO_STATE_IN) {
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnterBaco, NULL))
return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index f29f95be1e56..675c7cab7cfc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -484,8 +484,9 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
if (data->registry_data.vr0hot_enabled)
data->smu_features[GNLD_VR0HOT].supported = true;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetSmuVersion,
+ &hwmgr->smu_version);
/* ACG firmware has major version 5 */
if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
data->smu_features[GNLD_ACG].supported = true;
@@ -503,10 +504,8 @@ static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_PCC_LIMIT].supported = true;
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -993,7 +992,10 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to set up led dpm config!",
return -EINVAL);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_NumOfDisplays,
+ 0,
+ NULL);
return 0;
}
@@ -2303,16 +2305,15 @@ static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg, NULL);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
- agc_btc_response = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &agc_btc_response);
if (1 == agc_btc_response) {
if (1 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop, NULL);
else if (2 == data->acg_loop_state)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop, NULL);
if (0 == vega10_enable_smc_features(hwmgr, true,
data->smu_features[GNLD_ACG].smu_feature_bitmap))
data->smu_features[GNLD_ACG].enabled = true;
@@ -2429,11 +2430,9 @@ static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
struct vega10_hwmgr *data = hwmgr->backend;
AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
serial_number = ((uint64_t)bottom32 << 32) | top32;
@@ -2610,14 +2609,16 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
if (0 != boot_up_values.usVddc) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFloorSocVoltage,
- (boot_up_values.usVddc * 4));
+ (boot_up_values.usVddc * 4),
+ NULL);
data->vbios_boot_state.bsoc_vddc_lock = true;
} else {
data->vbios_boot_state.bsoc_vddc_lock = false;
}
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
}
result = vega10_populate_avfs_parameters(hwmgr);
@@ -2904,7 +2905,8 @@ static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
if (data->vbios_boot_state.bsoc_vddc_lock) {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetFloorSocVoltage, 0);
+ PPSMC_MSG_SetFloorSocVoltage, 0,
+ NULL);
data->vbios_boot_state.bsoc_vddc_lock = false;
}
@@ -2947,7 +2949,8 @@ static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
vega10_enable_disable_PCC_limit_feature(hwmgr, true);
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
+ PPSMC_MSG_ConfigureTelemetry, data->config_telemetry,
+ NULL);
tmp_result = vega10_construct_voltage_tables(hwmgr);
PP_ASSERT_WITH_CODE(!tmp_result,
@@ -3528,7 +3531,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.gfx_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinGfxclkByIndex,
- data->smc_state_table.gfx_boot_level);
+ data->smc_state_table.gfx_boot_level,
+ NULL);
data->dpm_table.gfx_table.dpm_state.soft_min_level =
data->smc_state_table.gfx_boot_level;
@@ -3543,11 +3547,13 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- socclk_idx);
+ socclk_idx,
+ NULL);
} else {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinUclkByIndex,
- data->smc_state_table.mem_boot_level);
+ data->smc_state_table.mem_boot_level,
+ NULL);
}
data->dpm_table.mem_table.dpm_state.soft_min_level =
data->smc_state_table.mem_boot_level;
@@ -3562,7 +3568,8 @@ static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_min_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinSocclkByIndex,
- data->smc_state_table.soc_boot_level);
+ data->smc_state_table.soc_boot_level,
+ NULL);
data->dpm_table.soc_table.dpm_state.soft_min_level =
data->smc_state_table.soc_boot_level;
}
@@ -3582,7 +3589,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.gfx_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxGfxclkByIndex,
- data->smc_state_table.gfx_max_level);
+ data->smc_state_table.gfx_max_level,
+ NULL);
data->dpm_table.gfx_table.dpm_state.soft_max_level =
data->smc_state_table.gfx_max_level;
}
@@ -3593,7 +3601,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.mem_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxUclkByIndex,
- data->smc_state_table.mem_max_level);
+ data->smc_state_table.mem_max_level,
+ NULL);
data->dpm_table.mem_table.dpm_state.soft_max_level =
data->smc_state_table.mem_max_level;
}
@@ -3607,7 +3616,8 @@ static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
data->dpm_table.soc_table.dpm_state.soft_max_level) {
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByIndex,
- data->smc_state_table.soc_max_level);
+ data->smc_state_table.soc_max_level,
+ NULL);
data->dpm_table.soc_table.dpm_state.soft_max_level =
data->smc_state_table.soc_max_level;
}
@@ -3694,7 +3704,8 @@ static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
/* This message will also enable SmcToHost Interrupt */
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetLowGfxclkInterruptThreshold,
- (uint32_t)low_sclk_interrupt_threshold);
+ (uint32_t)low_sclk_interrupt_threshold,
+ NULL);
}
return 0;
@@ -3801,8 +3812,7 @@ static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
if (!query)
return -EINVAL;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
- value = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr, &value);
/* SMC returning actual watts, keep consistent with legacy asics, low 8 bit as 8 fractional bits */
*query = value << 8;
@@ -3822,13 +3832,11 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
switch (idx) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
- sclk_mhz = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency, &sclk_mhz);
*((uint32_t *)value) = sclk_mhz * 100;
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- mclk_idx = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &mclk_idx);
if (mclk_idx < dpm_table->mem_table.count) {
*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
*size = 4;
@@ -3837,8 +3845,8 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
}
break;
case AMDGPU_PP_SENSOR_GPU_LOAD:
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
- activity_percent = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0,
+ &activity_percent);
*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
*size = 4;
break;
@@ -3847,14 +3855,14 @@ static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
*size = 4;
break;
case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
- *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot, (uint32_t *)value);
+ *((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
case AMDGPU_PP_SENSOR_MEM_TEMP:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
- *((uint32_t *)value) = smum_get_argument(hwmgr) *
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM, (uint32_t *)value);
+ *((uint32_t *)value) = *((uint32_t *)value) *
PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
*size = 4;
break;
@@ -3893,7 +3901,8 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
+ has_disp ? 1 : 0,
+ NULL);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3928,7 +3937,8 @@ int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_freq << 16) | clk_select;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_RequestDisplayClockByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
return result;
@@ -3990,7 +4000,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR / 100);
+ min_clocks.dcefClockInSR / 100,
+ NULL);
} else {
pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
}
@@ -4000,7 +4011,8 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
if (min_clocks.memoryClock != 0) {
idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx,
+ NULL);
data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
}
@@ -4541,8 +4553,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.sclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentGfxclkIndex, &now);
if (hwmgr->pp_one_vf &&
(hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK))
@@ -4558,8 +4569,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.mclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);
for (i = 0; i < mclk_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4570,8 +4580,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (data->registry_data.socclk_dpm_key_disabled)
break;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);
for (i = 0; i < soc_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4583,8 +4592,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK);
- now = smum_get_argument(hwmgr);
+ PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);
for (i = 0; i < dcef_table->count; i++)
size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -4593,8 +4601,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
"*" : "");
break;
case PP_PCIE:
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex);
- now = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentLinkIndex, &now);
for (i = 0; i < pcie_table->count; i++)
size += sprintf(buf + size, "%d: %s %s\n", i,
@@ -4658,7 +4665,8 @@ static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
if (data->water_marks_bitmap & WaterMarksLoaded) {
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+ NULL);
}
return result;
@@ -4924,21 +4932,26 @@ static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -5040,12 +5053,14 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetCustomGfxDpmParameters,
busy_set_point | FPS<<8 |
- use_rlc_busy << 16 | min_active_level<<24);
+ use_rlc_busy << 16 | min_active_level<<24,
+ NULL);
}
out:
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << power_profile_mode);
+ 1 << power_profile_mode,
+ NULL);
hwmgr->power_profile_mode = power_profile_mode;
return 0;
@@ -5302,7 +5317,7 @@ static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
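
Several of the vega10 hunks read a 64-bit quantity over the 32-bit mailbox by issuing two messages (ReadSerialNumTop32 / ReadSerialNumBottom32) and splicing the halves; note that bottom32 lands in the upper half of unique_id. A self-contained illustration of the splice and its inverse, with made-up serial values:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void)
{
	uint32_t top32 = 0x12345678;	/* reply to ReadSerialNumTop32 */
	uint32_t bottom32 = 0x9abcdef0;	/* reply to ReadSerialNumBottom32 */

	/* Same splice as the driver: bottom half goes into the high bits. */
	uint64_t unique_id = ((uint64_t)bottom32 << 32) | top32;

	printf("unique_id = 0x%016" PRIx64 "\n", unique_id);
	printf("recovered top32 = 0x%08" PRIx32 "\n", (uint32_t)unique_id);
	printf("recovered bottom32 = 0x%08" PRIx32 "\n",
	       (uint32_t)(unique_id >> 32));
	return 0;
}
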
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index 0a677d4bc87b..9757d47dd6b8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -651,18 +651,6 @@ static const struct vega10_didt_config_reg PSMSEEDCStallDelayConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMSEEDCThresholdConfig_Vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- /* SQ EDC THRESHOLD */
- { ixDIDT_SQ_EDC_THRESHOLD, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD_MASK, DIDT_SQ_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
static const struct vega10_didt_config_reg PSMSEEDCCtrlResetConfig_Vega10[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -707,17 +695,6 @@ static const struct vega10_didt_config_reg PSMSEEDCCtrlConfig_Vega10[] =
{ 0xFFFFFFFF } /* End of list */
};
-static const struct vega10_didt_config_reg PSMGCEDCThresholdConfig_vega10[] =
-{
-/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- * Offset Mask Shift Value
- * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- */
- { mmGC_EDC_THRESHOLD, GC_EDC_THRESHOLD__EDC_THRESHOLD_MASK, GC_EDC_THRESHOLD__EDC_THRESHOLD__SHIFT, 0x0000000 },
-
- { 0xFFFFFFFF } /* End of list */
-};
-
static const struct vega10_didt_config_reg PSMGCEDCDroopCtrlConfig_vega10[] =
{
/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
@@ -925,7 +902,8 @@ static void vega10_didt_set_mask(struct pp_hwmgr *hwmgr, const bool enable)
/* For Vega10, SMC does not support any mask yet. */
if (enable)
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ConfigureGfxDidt, didt_block_info,
+ NULL);
}
@@ -1327,7 +1305,8 @@ int vega10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->registry_data.enable_pkg_pwr_tracking_feature)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
+ PPSMC_MSG_SetPptLimit, n,
+ NULL);
return 0;
}
@@ -1393,7 +1372,8 @@ static void vega10_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
int vega10_power_control_set_level(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
index ba8763daa380..7783c7fd7ccb 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
@@ -31,8 +31,7 @@
static int vega10_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm);
- *current_rpm = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentRpm, current_rpm);
return 0;
}
@@ -520,7 +519,8 @@ int vega10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
table->FanPwmMin = hwmgr->thermal_controller.
advanceFanControlParameters.usPWMMin * 255 / 100;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
index 9d8ca94a8f0c..bc53cce4f32d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
@@ -96,7 +96,7 @@ int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
if (state == BACO_STATE_IN) {
if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
ARRAY_SIZE(pre_baco_tbl))) {
- if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index aca61d1ff3c2..f4d1692cccf3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -357,10 +357,8 @@ static void vega12_init_dpm_defaults(struct pp_hwmgr *hwmgr)
}
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -483,16 +481,12 @@ static int vega12_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- *num_of_levels = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*num_of_levels > 0,
- "[GetNumOfDpmLevel] number of clk levels is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -504,12 +498,11 @@ static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
*Lower 16 bits specify the level
*/
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index)) == 0,
+ PPSMC_MSG_GetDpmFreqByIndex, (clkID << 16 | index),
+ clock) == 0,
"[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
return -EINVAL);
- *clock = smum_get_argument(hwmgr);
-
return 0;
}
@@ -749,7 +742,8 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
}
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -767,11 +761,10 @@ static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
uint32_t result;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc, &result) == 0,
"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
return -EINVAL);
- result = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(result == 1,
"Failed to run ACG BTC!", return -EINVAL);
@@ -792,12 +785,14 @@ static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
(allowed_features_low |= ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT) & 0xFFFFFFFF));
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high,
+ NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
return -1);
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low,
+ NULL) == 0,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return -1);
@@ -828,7 +823,7 @@ static int vega12_enable_all_smu_features(struct pp_hwmgr *hwmgr)
bool enabled;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAllSmuFeatures, NULL) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return -1);
@@ -854,7 +849,7 @@ static int vega12_disable_all_smu_features(struct pp_hwmgr *hwmgr)
bool enabled;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures) == 0,
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableAllSmuFeatures, NULL) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return -1);
@@ -879,7 +874,8 @@ static int vega12_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
static int vega12_power_control_set_level(struct pp_hwmgr *hwmgr)
@@ -902,24 +898,24 @@ static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr *hwmgr,
{
/* AC Max */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMaxDpmFreq, (clkid << 16),
+ &(clock->ACMax)) == 0,
"[GetClockRanges] Failed to get max ac clock from SMC!",
return -EINVAL);
- clock->ACMax = smum_get_argument(hwmgr);
/* AC Min */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetMinDpmFreq, (clkid << 16),
+ &(clock->ACMin)) == 0,
"[GetClockRanges] Failed to get min ac clock from SMC!",
return -EINVAL);
- clock->ACMin = smum_get_argument(hwmgr);
/* DC Max */
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDcModeMaxDpmFreq, (clkid << 16),
+ &(clock->DCMax)) == 0,
"[GetClockRanges] Failed to get max dc clock from SMC!",
return -EINVAL);
- clock->DCMax = smum_get_argument(hwmgr);
return 0;
}
@@ -944,7 +940,7 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int tmp_result, result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega12_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(result == 0,
@@ -1043,7 +1039,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
@@ -1052,14 +1049,16 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min memclk !",
return ret);
min_freq = data->dpm_table.mem_table.dpm_state.hard_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min memclk !",
return ret);
}
@@ -1069,7 +1068,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min vclk!",
return ret);
@@ -1077,7 +1077,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min dclk!",
return ret);
}
@@ -1087,7 +1088,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min eclk!",
return ret);
}
@@ -1097,7 +1099,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min socclk!",
return ret);
}
@@ -1107,7 +1110,8 @@ static int vega12_upload_dpm_min_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
@@ -1127,7 +1131,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
@@ -1137,7 +1142,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max memclk!",
return ret);
}
@@ -1147,14 +1153,16 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max dclk!",
return ret);
}
@@ -1164,7 +1172,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max eclk!",
return ret);
}
@@ -1174,7 +1183,8 @@ static int vega12_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max socclk!",
return ret);
}
@@ -1287,10 +1297,10 @@ static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr *hwmgr, uint32_t *gfx
*gfx_freq = 0;
PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_GFXCLK << 16),
+ &gfx_clk) == 0,
"[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
return -EINVAL);
- gfx_clk = smum_get_argument(hwmgr);
*gfx_freq = gfx_clk * 100;
@@ -1304,10 +1314,10 @@ static int vega12_get_current_mclk_freq(struct pp_hwmgr *hwmgr, uint32_t *mclk_f
*mclk_freq = 0;
PP_ASSERT_WITH_CODE(
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16)) == 0,
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetDpmClockFreq, (PPCLK_UCLK << 16),
+ &mem_clk) == 0,
"[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
return -EINVAL);
- mem_clk = smum_get_argument(hwmgr);
*mclk_freq = mem_clk * 100;
@@ -1420,7 +1430,8 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
+ has_disp ? 1 : 0,
+ NULL);
return 0;
}
@@ -1459,7 +1470,8 @@ int vega12_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
}
@@ -1493,7 +1505,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
PP_ASSERT_WITH_CODE(
!smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR /100),
+ min_clocks.dcefClockInSR /100,
+ NULL),
"Attempt to set divider for DCEFCLK Failed!",
return -1);
} else {
@@ -2124,10 +2137,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
case PP_SOCCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_SOCCLK << 16),
+ &now) == 0,
"Attempt to get Current SOCCLK Frequency Failed!",
return -EINVAL);
- now = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(
vega12_get_socclocks(hwmgr, &clocks) == 0,
@@ -2142,10 +2155,10 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
case PP_DCEFCLK:
PP_ASSERT_WITH_CODE(
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16)) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (PPCLK_DCEFCLK << 16),
+ &now) == 0,
"Attempt to get Current DCEFCLK Frequency Failed!",
return -EINVAL);
- now = smum_get_argument(hwmgr);
PP_ASSERT_WITH_CODE(
vega12_get_dcefclocks(hwmgr, &clocks) == 0,
@@ -2343,7 +2356,8 @@ static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
@@ -2357,7 +2371,8 @@ static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0,
+ NULL);
ret = vega12_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
@@ -2383,7 +2398,8 @@ static int vega12_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_DCEFCLK].supported &&
data->smu_features[GNLD_DPM_SOCCLK].supported)
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
+ PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display,
+ NULL);
return result;
}
@@ -2555,21 +2571,26 @@ static int vega12_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -2605,7 +2626,7 @@ static int vega12_enable_gfx_off(struct pp_hwmgr *hwmgr)
int ret = 0;
if (data->gfxoff_controlled_by_driver)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_AllowGfxOff, NULL);
return ret;
}
@@ -2617,7 +2638,7 @@ static int vega12_disable_gfx_off(struct pp_hwmgr *hwmgr)
int ret = 0;
if (data->gfxoff_controlled_by_driver)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisallowGfxOff, NULL);
return ret;
}
@@ -2654,7 +2675,7 @@ static int vega12_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
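
The vega12 DPM messages (and the vega20 ones below) multiplex two values into the single 32-bit SMU argument: the clock ID in the upper 16 bits and a frequency or level index in the lower 16, with 0xFF in the low bits asking for the level count, as the GetNumOfDpmLevel hunks show. A runnable pack/unpack sketch of that encoding — PPCLK_UCLK's numeric value here is a placeholder, not the real enum:

#include <stdint.h>
#include <stdio.h>

#define PPCLK_UCLK	2U	/* placeholder value for illustration */
#define LEVEL_COUNT_IDX	0xFFU	/* index meaning "query number of levels" */

static uint32_t pack_clk_arg(uint32_t clk_id, uint32_t payload)
{
	return (clk_id << 16) | (payload & 0xffff);
}

int main(void)
{
	uint32_t arg = pack_clk_arg(PPCLK_UCLK, 800);	/* e.g. 800 MHz min */

	printf("arg = 0x%08x (clk %u, payload %u)\n",
	       arg, arg >> 16, arg & 0xffff);
	printf("level-count query = 0x%08x\n",
	       pack_clk_arg(PPCLK_UCLK, LEVEL_COUNT_IDX));
	return 0;
}
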
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
index 904eb2c9155b..c85806a6f62e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
@@ -32,10 +32,10 @@
static int vega12_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
{
PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm),
+ PPSMC_MSG_GetCurrentRpm,
+ current_rpm),
"Attempt to get current RPM from SMC Failed!",
return -EINVAL);
- *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -259,7 +259,8 @@ int vega12_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
return ret;
}
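
Most converted call sites sit inside PP_ASSERT_WITH_CODE(cond, msg, code), powerplay's check-log-and-bail macro; the rewrite simply moves the response pointer inside the asserted call, as in vega12_get_current_rpm() above, so the check guards the whole transaction. A stand-alone approximation of the macro's shape — the real definition lives in the driver's pp_debug.h, and this one only mimics its behaviour with printf:

#include <stdio.h>

/* Approximation: on failure, log the message and run the recovery code. */
#define PP_ASSERT_WITH_CODE(cond, msg, code)		\
	do {						\
		if (!(cond)) {				\
			printf("%s\n", msg);		\
			code;				\
		}					\
	} while (0)

static int get_current_rpm(unsigned int *rpm)
{
	*rpm = 1200;	/* pretend the mailbox replied 1200 */
	return 0;	/* pretend the send succeeded */
}

int main(void)
{
	unsigned int rpm;

	PP_ASSERT_WITH_CODE(!get_current_rpm(&rpm),
			"Attempt to get current RPM from SMC Failed!",
			return -1);
	printf("rpm = %u\n", rpm);
	return 0;
}
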
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 9b5e72bdceca..2a28c9df15a0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -91,16 +91,16 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
if(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnterBaco, 0))
+ PPSMC_MSG_EnterBaco, 0, NULL))
return -EINVAL;
} else {
if(smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnterBaco, 1))
+ PPSMC_MSG_EnterBaco, 1, NULL))
return -EINVAL;
}
} else if (state == BACO_STATE_OUT) {
- if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco))
+ if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco, NULL))
return -EINVAL;
if (!soc15_baco_program_registers(hwmgr, clean_baco_tbl,
ARRAY_SIZE(clean_baco_tbl)))
@@ -118,5 +118,5 @@ int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
if (ret)
return ret;
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI, NULL);
}
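
vega20's BACO (Bus Active, Chip Off) transitions bracket the messages with register programming and propagate -EINVAL on any failure; EnterBaco's parameter (0 or 1) selects between the two entry variants the driver distinguishes. A compact model of that sequencing with stubbed helpers — everything here is illustrative, only the ordering mirrors the hunks:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool program_registers(const char *table)	/* stub: succeeds */
{
	printf("programming %s\n", table);
	return true;
}

static int send_msg(const char *msg, unsigned int param)	/* stub */
{
	printf("msg %s(%u)\n", msg, param);
	return 0;
}

static int baco_set_state(bool enter, unsigned int flavour)
{
	if (enter) {
		/* program pre-BACO registers, then ask the SMU to enter */
		if (!program_registers("pre_baco_tbl"))
			return -EINVAL;
		if (send_msg("EnterBaco", flavour))
			return -EINVAL;
	} else {
		/* leave BACO first, then clean up the register state */
		if (send_msg("ExitBaco", 0))
			return -EINVAL;
		if (!program_registers("clean_baco_tbl"))
			return -EINVAL;
	}
	return 0;
}

int main(void)
{
	baco_set_state(true, 0);
	baco_set_state(false, 0);
	return 0;
}
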
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 08b6ba39a6d7..9ff470f1b826 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -92,8 +92,7 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
*/
data->registry_data.disallowed_features = 0xE0041C00;
/* ECC feature should be disabled on old SMUs */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
if (hwmgr->smu_version < 0x282100)
data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
@@ -400,10 +399,8 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
}
/* Get the SN to turn into a Unique ID */
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
- top32 = smum_get_argument(hwmgr);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
- bottom32 = smum_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32, &top32);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32, &bottom32);
adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
@@ -527,16 +524,12 @@ static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ num_of_levels);
PP_ASSERT_WITH_CODE(!ret,
"[GetNumOfDpmLevel] failed to get dpm levels!",
return ret);
- *num_of_levels = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*num_of_levels > 0,
- "[GetNumOfDpmLevel] number of clk levels is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -547,16 +540,12 @@ static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | index));
+ (clk_id << 16 | index),
+ clk);
PP_ASSERT_WITH_CODE(!ret,
"[GetDpmFreqByIndex] failed to get dpm freq by index!",
return ret);
- *clk = smum_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(*clk,
- "[GetDpmFreqByIndex] clk value is invalid!",
- return -EINVAL);
-
return ret;
}
@@ -813,7 +802,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
- (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
+ (uint32_t)(data->vbios_boot_state.dcef_clock / 100),
+ NULL);
memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));
@@ -868,7 +858,8 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
*/
smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
+ PPSMC_MSG_OverridePcieParameters, smu_pcie_arg,
+ NULL);
PP_ASSERT_WITH_CODE(!ret,
"[OverridePcieParameters] Attempt to override pcie params failed!",
return ret);
@@ -899,13 +890,13 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
& 0xFFFFFFFF));
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
+ PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high, NULL);
PP_ASSERT_WITH_CODE(!ret,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
return ret);
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
+ PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low, NULL);
PP_ASSERT_WITH_CODE(!ret,
"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
return ret);
@@ -915,12 +906,12 @@ static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc, NULL);
}
static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
- return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc, NULL);
}
static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -933,7 +924,8 @@ static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_EnableAllSmuFeatures)) == 0,
+ PPSMC_MSG_EnableAllSmuFeatures,
+ NULL)) == 0,
"[EnableAllSMUFeatures] Failed to enable all smu features!",
return ret);
@@ -966,7 +958,8 @@ static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- 1);
+ 1,
+ NULL);
return 0;
}
@@ -978,7 +971,8 @@ static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFclkGfxClkRatio,
- data->registry_data.fclk_gfxclk_ratio);
+ data->registry_data.fclk_gfxclk_ratio,
+ NULL);
}
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
@@ -991,7 +985,8 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_DisableAllSmuFeatures)) == 0,
+ PPSMC_MSG_DisableAllSmuFeatures,
+ NULL)) == 0,
"[DisableAllSMUFeatures] Failed to disable all smu features!",
return ret);
@@ -1199,12 +1194,12 @@ static int vega20_od8_get_gfx_clock_base_voltage(
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
PP_ASSERT_WITH_CODE(!ret,
"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
return ret);
- *voltage = smum_get_argument(hwmgr);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1560,19 +1555,19 @@ static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetDcModeMaxDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
/* if DC limit is zero, return AC limit */
if (*clock == 0) {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMaxDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -1641,7 +1636,8 @@ static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
int result;
result = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SetMGpuFanBoostLimitRpm);
+ PPSMC_MSG_SetMGpuFanBoostLimitRpm,
+ NULL);
PP_ASSERT_WITH_CODE(!result,
"[EnableMgpuFan] Failed to enable mgpu fan boost!",
return result);
@@ -1669,7 +1665,7 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
int result = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
result = vega20_set_allowed_featuresmask(hwmgr);
PP_ASSERT_WITH_CODE(!result,
@@ -1740,12 +1736,12 @@ static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
return result);
result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
- POWER_SOURCE_AC << 16);
+ POWER_SOURCE_AC << 16, &hwmgr->default_power_limit);
PP_ASSERT_WITH_CODE(!result,
"[GetPptLimit] get default PPT limit failed!",
return result);
hwmgr->power_limit =
- hwmgr->default_power_limit = smum_get_argument(hwmgr);
+ hwmgr->default_power_limit;
return 0;
}
@@ -1806,7 +1802,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min gfxclk !",
return ret);
}
@@ -1816,7 +1813,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_UCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min memclk !",
return ret);
}
@@ -1827,7 +1825,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_VCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min vclk!",
return ret);
@@ -1835,7 +1834,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_DCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min dclk!",
return ret);
}
@@ -1846,7 +1846,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_ECLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min eclk!",
return ret);
}
@@ -1857,7 +1858,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min socclk!",
return ret);
}
@@ -1868,7 +1870,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_FCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set soft min fclk!",
return ret);
}
@@ -1879,7 +1882,8 @@ static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
+ (PPCLK_DCEFCLK << 16) | (min_freq & 0xffff),
+ NULL)),
"Failed to set hard min dcefclk!",
return ret);
}
@@ -1900,7 +1904,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_GFXCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max gfxclk!",
return ret);
}
@@ -1911,7 +1916,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_UCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_UCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max memclk!",
return ret);
}
@@ -1922,14 +1928,16 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_VCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_VCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max vclk!",
return ret);
max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_DCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_DCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max dclk!",
return ret);
}
@@ -1940,7 +1948,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_ECLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_ECLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max eclk!",
return ret);
}
@@ -1951,7 +1960,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_SOCCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max socclk!",
return ret);
}
@@ -1962,7 +1972,8 @@ static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
- (PPCLK_FCLK << 16) | (max_freq & 0xffff))),
+ (PPCLK_FCLK << 16) | (max_freq & 0xffff),
+ NULL)),
"Failed to set soft max fclk!",
return ret);
}
@@ -2006,17 +2017,17 @@ static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
if (max) {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
+ PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16),
+ clock)) == 0,
"[GetClockRanges] Failed to get max clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
} else {
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_GetMinDpmFreq,
- (clock_select << 16))) == 0,
+ (clock_select << 16),
+ clock)) == 0,
"[GetClockRanges] Failed to get min clock from SMC!",
return ret);
- *clock = smum_get_argument(hwmgr);
}
return 0;
@@ -2122,10 +2133,10 @@ static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
*clk_freq = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
+ PPSMC_MSG_GetDpmClockFreq, (clk_id << 16),
+ clk_freq)) == 0,
"[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
return ret);
- *clk_freq = smum_get_argument(hwmgr);
*clk_freq = *clk_freq * 100;
@@ -2276,7 +2287,8 @@ int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
clk_request = (clk_select << 16) | clk_freq;
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- clk_request);
+ clk_request,
+ NULL);
}
}
@@ -2312,7 +2324,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
if (data->smu_features[GNLD_DS_DCEFCLK].supported)
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcefClockInSR / 100)) == 0,
+ min_clocks.dcefClockInSR / 100,
+ NULL)) == 0,
"Attempt to set divider for DCEFCLK Failed!",
return ret);
} else {
@@ -2324,7 +2337,8 @@ static int vega20_notify_smc_display_config_after_ps_adjustment(
dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetHardMinFreq] Set hard min uclk failed!",
return ret);
}
@@ -2656,7 +2670,8 @@ static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
return -EINVAL;
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level,
+ NULL);
PP_ASSERT_WITH_CODE(!ret,
"Failed to set min link dpm level!",
return ret);
@@ -3140,7 +3155,7 @@ static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
return 0;
}
- PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg, NULL)) == 0,
"[PrepareMp1] Failed!",
return ret);
@@ -3495,7 +3510,8 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level)),
+ (PPCLK_UCLK << 16 ) | dpm_table->dpm_state.hard_min_level,
+ NULL)),
"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
return ret);
}
@@ -3520,7 +3536,8 @@ static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMinByFreq,
- (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
+ (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level,
+ NULL)),
"[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
return ret);
}
@@ -3534,7 +3551,7 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
int ret = 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_NumOfDisplays, 0);
+ PPSMC_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
&data->dpm_table.mem_table);
@@ -3565,7 +3582,8 @@ static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
data->smu_features[GNLD_DPM_SOCCLK].supported) {
result = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_NumOfDisplays,
- hwmgr->display_config->num_display);
+ hwmgr->display_config->num_display,
+ NULL);
}
return result;
@@ -4082,7 +4100,8 @@ out:
workload_type =
conv_power_profile_to_pplib_workload(power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
hwmgr->power_profile_mode = power_profile_mode;
@@ -4098,21 +4117,26 @@ static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrHigh,
- virtual_addr_hi);
+ virtual_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSystemVirtualDramAddrLow,
- virtual_addr_low);
+ virtual_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrHigh,
- mc_addr_hi);
+ mc_addr_hi,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramAddrLow,
- mc_addr_low);
+ mc_addr_low,
+ NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DramLogSetDramSize,
- size);
+ size,
+ NULL);
return 0;
}
@@ -4153,7 +4177,8 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
(acquire ?
PPSMC_MSG_RequestI2CBus :
PPSMC_MSG_ReleaseI2CBus),
- 0);
+ 0,
+ NULL);
PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
return res;
@@ -4170,7 +4195,8 @@ static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state,
+ NULL);
if (ret)
pr_err("SetDfCstate failed!\n");
@@ -4184,7 +4210,8 @@ static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
if (ret)
pr_err("SetXgmiPstate failed!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
index a0bfb65cc5d6..d7cc3d2d9e17 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_powertune.c
@@ -36,7 +36,8 @@ int vega20_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
if (data->smu_features[GNLD_PPT].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetPptLimit, n);
+ PPSMC_MSG_SetPptLimit, n,
+ NULL);
return 0;
}
@@ -51,7 +52,8 @@ static int vega20_set_overdrive_target_percentage(struct pp_hwmgr *hwmgr,
uint32_t adjust_percent)
{
return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_OverDriveSetPercentage, adjust_percent);
+ PPSMC_MSG_OverDriveSetPercentage, adjust_percent,
+ NULL);
}
int vega20_power_control_set_level(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
index ede54e87e287..7add2f60f49c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
@@ -106,10 +106,10 @@ static int vega20_get_current_rpm(struct pp_hwmgr *hwmgr, uint32_t *current_rpm)
int ret = 0;
PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetCurrentRpm)) == 0,
+ PPSMC_MSG_GetCurrentRpm,
+ current_rpm)) == 0,
"Attempt to get current RPM from SMC Failed!",
return ret);
- *current_rpm = smum_get_argument(hwmgr);
return 0;
}
@@ -329,7 +329,8 @@ static int vega20_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanTemperatureTarget,
- (uint32_t)table->FanTargetTemperature);
+ (uint32_t)table->FanTargetTemperature,
+ NULL);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index ae2c318dd6fa..4d1c2a44a8b6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -405,7 +405,9 @@ struct smu_context
bool pm_enabled;
bool is_apu;
- uint32_t smc_if_version;
+ uint32_t smc_driver_if_version;
+ uint32_t smc_fw_if_version;
+ uint32_t smc_fw_version;
bool uploading_custom_pp_table;
bool dc_controlled_by_gpio;
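Splitting the old smc_if_version into three fields separates the interface version the driver was built against (smc_driver_if_version) from what the firmware actually reports. The two new fields are not assigned anywhere in this excerpt; a plausible population point, mirroring the reads in the smu_v11_0_check_fw_version() hunk below, would be (the assignments are an assumption, not taken from this diff):

	smu->smc_fw_if_version = if_version;   /* IF version read back from SMU */
	smu->smc_fw_version = smu_version;     /* running firmware version */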
@@ -489,6 +491,7 @@ struct pptable_funcs {
int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t dpm_level, uint32_t *freq);
int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state);
+ int (*allow_xgmi_power_down)(struct smu_context *smu, bool en);
int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap);
int (*i2c_eeprom_init)(struct i2c_adapter *control);
void (*i2c_eeprom_fini)(struct i2c_adapter *control);
@@ -580,11 +583,6 @@ int smu_check_fw_status(struct smu_context *smu);
int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
-#define smu_i2c_eeprom_init(smu, control) \
- ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL)
-#define smu_i2c_eeprom_fini(smu, control) \
- ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL)
-
int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed);
int smu_get_power_limit(struct smu_context *smu,
@@ -734,6 +732,7 @@ int smu_set_mp1_state(struct smu_context *smu,
enum pp_mp1_state mp1_state);
int smu_set_df_cstate(struct smu_context *smu,
enum pp_df_cstate state);
+int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
struct pp_smu_nv_clock_table *max_clocks);
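smu_allow_xgmi_power_down() is the external entry point for the GmiPwrDnControl message added in the Arcturus headers below. Its body is not part of this excerpt; a plausible shape, mirroring the neighbouring smu_set_df_cstate() wrapper, would be:

	/* Hedged sketch of the dispatcher, not the committed implementation. */
	int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
	{
		int ret = 0;

		mutex_lock(&smu->mutex);
		if (smu->ppt_funcs && smu->ppt_funcs->allow_xgmi_power_down)
			ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
		mutex_unlock(&smu->mutex);

		return ret;
	}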
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index f736d773f9d6..e07478b6ac04 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -114,7 +114,8 @@
#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
#define PPSMC_MSG_DFCstateControl 0x3B
-#define PPSMC_Message_Count 0x3C
+#define PPSMC_MSG_GmiPwrDnControl 0x3D
+#define PPSMC_Message_Count 0x3E
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
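Note the new message takes ID 0x3D rather than the previous PPSMC_Message_Count value of 0x3C, which is left unassigned here, presumably reserved on the firmware side. The message is surfaced to the driver through the __SMU_DUMMY_MAP(GmiPwrDnControl) entry in smu_types.h below. A hedged sketch of how an Arcturus callback might issue it (the 0/1 parameter encoding is an assumption, not taken from this diff):

	static int arcturus_allow_xgmi_power_down(struct smu_context *smu, bool en)
	{
		/* Assumed encoding: 1 allows GMI power down, 0 disallows it. */
		return smu_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
						   en ? 1 : 0, NULL);
	}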
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 2ffb666b97e6..15ed6cbdf366 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -743,6 +743,7 @@ struct pp_hwmgr {
bool pm_en;
bool pp_one_vf;
struct mutex smu_lock;
+ struct mutex msg_lock;
uint32_t pp_table_version;
void *device;
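The new msg_lock is what makes the resp-pointer API race-free: the common smum layer can hold it across the send and the response-register read, so a caller never observes another message's argument. A condensed sketch of the serialization (the real body lives in smumgr.c, which is not part of this excerpt):

	int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
	{
		int ret;

		mutex_lock(&hwmgr->msg_lock);
		ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
		if (!ret && resp)
			*resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
		mutex_unlock(&hwmgr->msg_lock);

		return ret;
	}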
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
index ce5b5011c122..8b82059d97e7 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
@@ -82,8 +82,8 @@
// Other
#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24
#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25
+#define FEATURE_PER_PART_VMIN_BIT 26
-#define FEATURE_SPARE_26_BIT 26
#define FEATURE_SPARE_27_BIT 27
#define FEATURE_SPARE_28_BIT 28
#define FEATURE_SPARE_29_BIT 29
@@ -154,6 +154,7 @@
#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT )
#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT )
+#define FEATURE_PER_PART_VMIN_MASK (1 << FEATURE_PER_PART_VMIN_BIT )
//FIXME need updating
@@ -628,8 +629,14 @@ typedef struct {
uint16_t BasePerformanceFrequencyCap; //In MHz
uint16_t MaxPerformanceFrequencyCap; //In MHz
+ // Per-Part Vmin
+ uint16_t VDDGFX_VminLow; // mv Q2
+ uint16_t VDDGFX_TVminLow; //Celsius
+ uint16_t VDDGFX_VminLow_HiTemp; // mv Q2
+ uint16_t VDDGFX_VminLow_LoTemp; // mv Q2
+
// SECTION: Reserved
- uint32_t Reserved[9];
+ uint32_t Reserved[7];
// SECTION: BOARD PARAMETERS
@@ -869,6 +876,10 @@ typedef struct {
uint8_t Mem_DownHystLimit;
uint16_t Mem_Fps;
+ uint32_t BusyThreshold; // Q16
+ uint32_t BusyHyst;
+ uint32_t IdleHyst;
+
uint32_t MmHubPadding[8]; // SMU internal use
} DpmActivityMonitorCoeffInt_t;
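The four per-part Vmin fields occupy 4 * sizeof(uint16_t) = 8 bytes, exactly the 2 * sizeof(uint32_t) trimmed from the Reserved array (9 down to 7), so the PPTable size and the offsets of everything after the reserved block are unchanged. DpmActivityMonitorCoeffInt_t, by contrast, genuinely grows by three uint32_t; together these changes are why SMU11_DRIVER_IF_VERSION_ARCT moves from 0x12 to 0x14 in smu_v11_0.h further down. A compile-time guard one could add to document the padding invariant (illustrative, not part of the patch; static_assert comes from <linux/build_bug.h>):

	static_assert(4 * sizeof(uint16_t) == 2 * sizeof(uint32_t));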
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
index 2f85a34c0591..e9315eb5b48e 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu12_driver_if.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU12_DRIVER_IF_VERSION 11
+#define SMU12_DRIVER_IF_VERSION 14
typedef struct {
int32_t value;
@@ -154,15 +154,19 @@ typedef enum {
} CLOCK_IDs_e;
// Throttler Status Bitmask
-#define THROTTLER_STATUS_BIT_SPL 0
-#define THROTTLER_STATUS_BIT_FPPT 1
-#define THROTTLER_STATUS_BIT_SPPT 2
-#define THROTTLER_STATUS_BIT_SPPT_APU 3
-#define THROTTLER_STATUS_BIT_THM_CORE 4
-#define THROTTLER_STATUS_BIT_THM_GFX 5
-#define THROTTLER_STATUS_BIT_THM_SOC 6
-#define THROTTLER_STATUS_BIT_TDC_VDD 7
-#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_SPL 0
+#define THROTTLER_STATUS_BIT_FPPT 1
+#define THROTTLER_STATUS_BIT_SPPT 2
+#define THROTTLER_STATUS_BIT_SPPT_APU 3
+#define THROTTLER_STATUS_BIT_THM_CORE 4
+#define THROTTLER_STATUS_BIT_THM_GFX 5
+#define THROTTLER_STATUS_BIT_THM_SOC 6
+#define THROTTLER_STATUS_BIT_TDC_VDD 7
+#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9
+#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10
+#define THROTTLER_STATUS_BIT_EDC_CPU 11
+#define THROTTLER_STATUS_BIT_EDC_GFX 12
typedef struct {
uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz]
@@ -180,7 +184,7 @@ typedef struct {
uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
uint16_t FanPwm; //[milli]
- uint16_t CurrentSocketPower; //[mW]
+ uint16_t CurrentSocketPower; //[W]
uint16_t CoreFrequency[8]; //[MHz]
uint16_t CorePower[8]; //[mW]
@@ -193,10 +197,16 @@ typedef struct {
uint16_t ThrottlerStatus;
uint16_t spare;
- uint16_t StapmOriginalLimit; //[mW]
- uint16_t StapmCurrentLimit; //[mW]
- uint16_t ApuPower; //[mW]
- uint16_t dGpuPower; //[mW]
+ uint16_t StapmOriginalLimit; //[W]
+ uint16_t StapmCurrentLimit; //[W]
+ uint16_t ApuPower; //[W]
+ uint16_t dGpuPower; //[W]
+
+ uint16_t VddTdcValue; //[mA]
+ uint16_t SocTdcValue; //[mA]
+ uint16_t VddEdcValue; //[mA]
+ uint16_t SocEdcValue; //[mA]
+ uint16_t reserve[2];
} SmuMetrics_t;
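Two interface changes land in SmuMetrics_t at once: the socket/STAPM/APU/dGPU power fields switch units from milliwatts to watts, and new TDC/EDC current telemetry plus four PROCHOT/EDC throttler bits are added, which is why SMU12_DRIVER_IF_VERSION jumps from 11 to 14 above. Any consumer that kept reporting these fields in mW would now need a x1000 conversion. An illustrative decode of the widened throttler mask (the helper name is hypothetical; BIT() is from <linux/bits.h>):

	static bool renoir_throttled_by_prochot(const SmuMetrics_t *m)
	{
		return m->ThrottlerStatus & (BIT(THROTTLER_STATUS_BIT_PROCHOT_CPU) |
					     BIT(THROTTLER_STATUS_BIT_PROCHOT_GFX));
	}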
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
index a5b4df146713..ee7dac4693d4 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h
@@ -170,6 +170,7 @@
__SMU_DUMMY_MAP(SetSoftMinJpeg), \
__SMU_DUMMY_MAP(SetHardMinFclkByFreq), \
__SMU_DUMMY_MAP(DFCstateControl), \
+ __SMU_DUMMY_MAP(GmiPwrDnControl), \
__SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \
__SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index 674e426ed59b..6b3b451a8018 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -27,8 +27,8 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
-#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_ARCT 0x14
+#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x33
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
@@ -37,7 +37,6 @@
#define MP0_SRAM 0x03900000
#define MP1_Public 0x03b00000
#define MP1_SRAM 0x03c00004
-#define MP1_SMC_SIZE 0x40000
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index c5288831aa15..ad100b533d04 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -81,16 +81,15 @@ enum SMU10_TABLE_ID {
SMU10_CLOCKTABLE,
};
-extern uint32_t smum_get_argument(struct pp_hwmgr *hwmgr);
-
extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table);
extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr);
-extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
+extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp);
extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter);
+ uint16_t msg, uint32_t parameter,
+ uint32_t *resp);
extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 15030284b444..0c9be864d072 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -423,6 +423,7 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *smc_pptable = table_context->driver_pptable;
struct atom_smc_dpm_info_v4_5 *smc_dpm_table;
+ struct atom_smc_dpm_info_v4_7 *smc_dpm_table_v4_7;
int index, ret;
index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -433,77 +434,33 @@ static int navi10_append_powerplay_table(struct smu_context *smu)
if (ret)
return ret;
- memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
- sizeof(I2cControllerConfig_t) * NUM_I2C_CONTROLLERS);
-
- /* SVI2 Board Parameters */
- smc_pptable->MaxVoltageStepGfx = smc_dpm_table->MaxVoltageStepGfx;
- smc_pptable->MaxVoltageStepSoc = smc_dpm_table->MaxVoltageStepSoc;
- smc_pptable->VddGfxVrMapping = smc_dpm_table->VddGfxVrMapping;
- smc_pptable->VddSocVrMapping = smc_dpm_table->VddSocVrMapping;
- smc_pptable->VddMem0VrMapping = smc_dpm_table->VddMem0VrMapping;
- smc_pptable->VddMem1VrMapping = smc_dpm_table->VddMem1VrMapping;
- smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->GfxUlvPhaseSheddingMask;
- smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->SocUlvPhaseSheddingMask;
- smc_pptable->ExternalSensorPresent = smc_dpm_table->ExternalSensorPresent;
- smc_pptable->Padding8_V = smc_dpm_table->Padding8_V;
-
- /* Telemetry Settings */
- smc_pptable->GfxMaxCurrent = smc_dpm_table->GfxMaxCurrent;
- smc_pptable->GfxOffset = smc_dpm_table->GfxOffset;
- smc_pptable->Padding_TelemetryGfx = smc_dpm_table->Padding_TelemetryGfx;
- smc_pptable->SocMaxCurrent = smc_dpm_table->SocMaxCurrent;
- smc_pptable->SocOffset = smc_dpm_table->SocOffset;
- smc_pptable->Padding_TelemetrySoc = smc_dpm_table->Padding_TelemetrySoc;
- smc_pptable->Mem0MaxCurrent = smc_dpm_table->Mem0MaxCurrent;
- smc_pptable->Mem0Offset = smc_dpm_table->Mem0Offset;
- smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->Padding_TelemetryMem0;
- smc_pptable->Mem1MaxCurrent = smc_dpm_table->Mem1MaxCurrent;
- smc_pptable->Mem1Offset = smc_dpm_table->Mem1Offset;
- smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->Padding_TelemetryMem1;
-
- /* GPIO Settings */
- smc_pptable->AcDcGpio = smc_dpm_table->AcDcGpio;
- smc_pptable->AcDcPolarity = smc_dpm_table->AcDcPolarity;
- smc_pptable->VR0HotGpio = smc_dpm_table->VR0HotGpio;
- smc_pptable->VR0HotPolarity = smc_dpm_table->VR0HotPolarity;
- smc_pptable->VR1HotGpio = smc_dpm_table->VR1HotGpio;
- smc_pptable->VR1HotPolarity = smc_dpm_table->VR1HotPolarity;
- smc_pptable->GthrGpio = smc_dpm_table->GthrGpio;
- smc_pptable->GthrPolarity = smc_dpm_table->GthrPolarity;
-
- /* LED Display Settings */
- smc_pptable->LedPin0 = smc_dpm_table->LedPin0;
- smc_pptable->LedPin1 = smc_dpm_table->LedPin1;
- smc_pptable->LedPin2 = smc_dpm_table->LedPin2;
- smc_pptable->padding8_4 = smc_dpm_table->padding8_4;
-
- /* GFXCLK PLL Spread Spectrum */
- smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->PllGfxclkSpreadEnabled;
- smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->PllGfxclkSpreadPercent;
- smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->PllGfxclkSpreadFreq;
-
- /* GFXCLK DFLL Spread Spectrum */
- smc_pptable->DfllGfxclkSpreadEnabled = smc_dpm_table->DfllGfxclkSpreadEnabled;
- smc_pptable->DfllGfxclkSpreadPercent = smc_dpm_table->DfllGfxclkSpreadPercent;
- smc_pptable->DfllGfxclkSpreadFreq = smc_dpm_table->DfllGfxclkSpreadFreq;
-
- /* UCLK Spread Spectrum */
- smc_pptable->UclkSpreadEnabled = smc_dpm_table->UclkSpreadEnabled;
- smc_pptable->UclkSpreadPercent = smc_dpm_table->UclkSpreadPercent;
- smc_pptable->UclkSpreadFreq = smc_dpm_table->UclkSpreadFreq;
-
- /* SOCCLK Spread Spectrum */
- smc_pptable->SoclkSpreadEnabled = smc_dpm_table->SoclkSpreadEnabled;
- smc_pptable->SocclkSpreadPercent = smc_dpm_table->SocclkSpreadPercent;
- smc_pptable->SocclkSpreadFreq = smc_dpm_table->SocclkSpreadFreq;
-
- /* Total board power */
- smc_pptable->TotalBoardPower = smc_dpm_table->TotalBoardPower;
- smc_pptable->BoardPadding = smc_dpm_table->BoardPadding;
-
- /* Mvdd Svi2 Div Ratio Setting */
- smc_pptable->MvddRatio = smc_dpm_table->MvddRatio;
+ pr_info("smc_dpm_info table revision(format.content): %d.%d\n",
+ smc_dpm_table->table_header.format_revision,
+ smc_dpm_table->table_header.content_revision);
+
+ if (smc_dpm_table->table_header.format_revision != 4) {
+ pr_err("smc_dpm_info table format revision is not 4!\n");
+ return -EINVAL;
+ }
+
+ switch (smc_dpm_table->table_header.content_revision) {
+ case 5: /* nv10 and nv14 */
+ memcpy(smc_pptable->I2cControllers, smc_dpm_table->I2cControllers,
+ sizeof(*smc_dpm_table) - sizeof(smc_dpm_table->table_header));
+ break;
+ case 7: /* nv12 */
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table_v4_7);
+ if (ret)
+ return ret;
+ memcpy(smc_pptable->I2cControllers, smc_dpm_table_v4_7->I2cControllers,
+ sizeof(*smc_dpm_table_v4_7) - sizeof(smc_dpm_table_v4_7->table_header));
+ break;
+ default:
+ pr_err("smc_dpm_info with unsupported content revision %d!\n",
+ smc_dpm_table->table_header.content_revision);
+ return -EINVAL;
+ }
if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
/* TODO: remove it once SMU fw fix it */
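The rewrite above replaces some sixty lines of field-by-field copying with a header-versioned bulk copy: the format revision must be 4, and the content revision (5 for navi10/14 via atom_smc_dpm_info_v4_5, 7 for navi12 via v4_7) selects which source struct to copy, everything past the atom table header in one memcpy. The idiom silently assumes I2cControllers is the first member after the header on both sides; a hedged guard one could add (illustrative only; static_assert from <linux/build_bug.h>, offsetof from <linux/stddef.h>):

	static_assert(offsetof(struct atom_smc_dpm_info_v4_5, I2cControllers) ==
		      sizeof(struct atom_common_table_header));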
@@ -1336,8 +1293,6 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
}
if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size < 0)
- return -EINVAL;
ret = smu_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
@@ -1860,7 +1815,8 @@ static int navi10_get_power_limit(struct smu_context *smu,
int power_src;
if (!smu->power_limit) {
- if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
+ if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT) &&
+ !amdgpu_sriov_vf(smu->adev)) {
power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
if (power_src < 0)
return -EINVAL;
@@ -2003,6 +1959,9 @@ static int navi10_set_default_od_settings(struct smu_context *smu, bool initiali
OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index ff73a735b888..67476047c067 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -296,6 +296,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
for (i = 0; i < count; i++) {
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
+ if (!value)
+ continue;
size += sprintf(buf + size, "%d: %uMHz %s\n", i, value,
cur_value == value ? "*" : "");
if (cur_value == value)
@@ -847,7 +849,7 @@ static int renoir_get_power_profile_mode(struct smu_context *smu,
uint32_t i, size = 0;
int16_t workload_type = 0;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
@@ -895,12 +897,17 @@ static int renoir_read_sensor(struct smu_context *smu,
static bool renoir_is_dpm_running(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
/*
- * Util now, the pmfw hasn't exported the interface of SMU
+ * Until now, the pmfw hasn't exported the interface of SMU
* feature mask to APU SKU so just force on all the feature
* at early initial stage.
*/
- return true;
+ if (adev->in_suspend)
+ return false;
+ else
+ return true;
}
@@ -950,6 +957,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
void renoir_set_ppt_funcs(struct smu_context *smu)
{
smu->ppt_funcs = &renoir_ppt_funcs;
- smu->smc_if_version = SMU12_DRIVER_IF_VERSION;
+ smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
smu->is_apu = true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 40c35bcc5a0a..c97444841abc 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -214,4 +214,9 @@ static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_typ
#define smu_set_power_source(smu, power_src) \
((smu)->ppt_funcs->set_power_source ? (smu)->ppt_funcs->set_power_source((smu), (power_src)) : 0)
+#define smu_i2c_eeprom_init(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : 0)
+#define smu_i2c_eeprom_fini(smu, control) \
+ ((smu)->ppt_funcs->i2c_eeprom_fini ? (smu)->ppt_funcs->i2c_eeprom_fini((control)) : 0)
+
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 541c932a6005..aa76c2cea747 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -23,6 +23,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/reboot.h>
#define SMU_11_0_PARTIAL_PPTABLE
@@ -57,7 +58,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
{
struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
return 0;
}
@@ -65,7 +66,7 @@ static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ *arg = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82);
return 0;
}
@@ -75,7 +76,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
uint32_t cur_value, i, timeout = adev->usec_timeout * 10;
for (i = 0; i < timeout; i++) {
- cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
return cur_value == 0x1 ? 0 : -EIO;
@@ -83,7 +84,10 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
}
/* timeout means wrong logic */
- return -ETIME;
+ if (i == timeout)
+ return -ETIME;
+
+ return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
}
int
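The old wait loop returned -ETIME on any fall-through, even if a response landed just as the deadline expired; the new tail samples the response register once more so a late response is still decoded as success or -EIO. Condensed shape of the resulting logic (register and mask names as in the hunk; the per-iteration delay is elided). As written the loop exits only by returning, so i == timeout always holds on fall-through and the trailing re-read is defensive; it becomes live only if the loop body is later changed to break out early:

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value == 0x1 ? 0 : -EIO;
	}
	if (i == timeout)
		return -ETIME;
	/* A response arrived on the final poll: 0x1 means success. */
	return RREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;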
@@ -107,9 +111,9 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
goto out;
}
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15_NO_KIQ(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
@@ -119,6 +123,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_get_message_name(smu, msg), index, param, ret);
goto out;
}
+
if (read_arg) {
ret = smu_v11_0_read_arg(smu, read_arg);
if (ret) {
@@ -201,13 +206,15 @@ int smu_v11_0_load_microcode(struct smu_context *smu)
const struct smc_firmware_header_v1_0 *hdr;
uint32_t addr_start = MP1_SRAM;
uint32_t i;
+ uint32_t smc_fw_size;
uint32_t mp1_fw_flags;
hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
src = (const uint32_t *)(adev->pm.fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes));
+ smc_fw_size = hdr->header.ucode_size_bytes;
- for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
+ for (i = 1; i < smc_fw_size/4 - 1; i++) {
WREG32_PCIE(addr_start, src[i]);
addr_start += 4;
}
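The copy loop is now bounded by the size recorded in the firmware header rather than the fixed 256 KiB MP1_SMC_SIZE, whose #define was dropped from smu_v11_0.h above. One wrinkle worth noting: ucode_array_offset_bytes is read through le32_to_cpu() while ucode_size_bytes is assigned raw; a byte-order-consistent read would be (hedged sketch):

	smc_fw_size = le32_to_cpu(hdr->header.ucode_size_bytes);

	/* Skip the first and last 32-bit words, as the existing loop does. */
	for (i = 1; i < smc_fw_size / 4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}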
@@ -264,23 +271,23 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
switch (smu->adev->asic_type) {
case CHIP_VEGA20:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VG20;
break;
case CHIP_ARCTURUS:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
break;
case CHIP_NAVI10:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
case CHIP_NAVI12:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
case CHIP_NAVI14:
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
default:
pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
- smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -292,10 +299,10 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
* Considering the above, we just leave the user a warning message instead
* of halting driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
@@ -479,8 +486,6 @@ int smu_v11_0_init_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (smu_power->power_context || smu_power->power_context_size != 0)
return -EINVAL;
@@ -497,8 +502,6 @@ int smu_v11_0_fini_power(struct smu_context *smu)
{
struct smu_power_context *smu_power = &smu->smu_power;
- if (!smu->pm_enabled)
- return 0;
if (!smu_power->power_context || smu_power->power_context_size == 0)
return -EINVAL;
@@ -730,8 +733,9 @@ int smu_v11_0_parse_pptable(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];
+ /* during TDR we need to free and alloc the pptable */
if (table_context->driver_pptable)
- return -EINVAL;
+ kfree(table_context->driver_pptable);
table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);
@@ -771,6 +775,9 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
int ret;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
@@ -783,8 +790,6 @@ int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
- if (!smu->pm_enabled)
- return 0;
if (!table_context)
return -EINVAL;
@@ -816,6 +821,9 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
int ret = 0;
struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
@@ -835,6 +843,9 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!smu->pm_enabled)
return ret;
@@ -849,6 +860,9 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
mutex_lock(&feature->mutex);
if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
goto failed;
@@ -877,6 +891,9 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev) && !amdgpu_sriov_is_pp_one_vf(smu->adev))
+ return 0;
+
if (!feature_mask || num < 2)
return -EINVAL;
@@ -932,8 +949,12 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
{
int ret = 0;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (!smu->pm_enabled)
return ret;
+
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
@@ -948,9 +969,6 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
int ret = 0;
int clk_id;
- if (!smu->pm_enabled)
- return ret;
-
if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
(smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
return 0;
@@ -1096,6 +1114,9 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
int ret = 0;
uint32_t max_power_limit;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
max_power_limit = smu_v11_0_get_max_power_limit(smu);
if (n > max_power_limit) {
@@ -1205,9 +1226,6 @@ int smu_v11_0_start_thermal_control(struct smu_context *smu)
struct smu_temperature_range range;
struct amdgpu_device *adev = smu->adev;
- if (!smu->pm_enabled)
- return ret;
-
memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));
ret = smu_get_thermal_temperature_range(smu, &range);
@@ -1321,9 +1339,6 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
enum smu_clk_type clk_select = 0;
uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
- if (!smu->pm_enabled)
- return -EINVAL;
-
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
switch (clk_type) {
@@ -1533,39 +1548,65 @@ static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
#define THM_11_0__SRCID__THM_DIG_THERM_L2H 0 /* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L 1 /* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
+#define SMUIO_11_0__SRCID__SMUIO_GPIO19 83
+
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
+ /*
+ * ctxid is used to distinguish different
+ * events for SMCToHost interrupt.
+ */
+ uint32_t ctxid = entry->src_data[0];
+ uint32_t data;
if (client_id == SOC15_IH_CLIENTID_THM) {
switch (src_id) {
case THM_11_0__SRCID__THM_DIG_THERM_L2H:
- pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU over temperature range(SW CTF) detected!\n");
+ /*
+ * SW CTF just occurred.
+ * Try to do a graceful shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU SW CTF!\n");
+ orderly_poweroff(true);
break;
case THM_11_0__SRCID__THM_DIG_THERM_H2L:
- pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
break;
default:
- pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
- src_id,
- PCI_BUS_NUM(adev->pdev->devfn),
- PCI_SLOT(adev->pdev->devfn),
- PCI_FUNC(adev->pdev->devfn));
+ dev_emerg(adev->dev, "ERROR: GPU under temperature range unknown src id (%d)\n",
+ src_id);
break;
-
}
+ } else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
+ dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
+ /*
+ * HW CTF just occurred. Shutdown to prevent further damage.
+ */
+ dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
+ orderly_poweroff(true);
} else if (client_id == SOC15_IH_CLIENTID_MP1) {
- if (src_id == 0xfe)
- smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ if (src_id == 0xfe) {
+ /* ACK SMUToHost interrupt */
+ data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
+ data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);
+
+ switch (ctxid) {
+ case 0x3:
+ dev_dbg(adev->dev, "Switched to AC mode!\n");
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ break;
+ case 0x4:
+ dev_dbg(adev->dev, "Switched to DC mode!\n");
+ smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+ break;
+ }
+ }
}
return 0;
@@ -1605,6 +1646,13 @@ int smu_v11_0_register_irq_handler(struct smu_context *smu)
if (ret)
return ret;
+ /* Register CTF(GPIO_19) interrupt */
+ ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
+ SMUIO_11_0__SRCID__SMUIO_GPIO19,
+ irq_src);
+ if (ret)
+ return ret;
+
ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
0xfe,
irq_src);
@@ -1718,6 +1766,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (ret)
goto out;
+ if (ras && ras->supported) {
+ ret = smu_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+ if (ret)
+ goto out;
+ }
+
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
@@ -1827,6 +1881,9 @@ int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
uint32_t pcie_gen = 0, pcie_width = 0;
int ret;
+ if (amdgpu_sriov_vf(smu->adev))
+ return 0;
+
if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
pcie_gen = 3;
else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 169ebdad87b8..4023d10fb49b 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -32,13 +32,15 @@
#include "asic_reg/mp/mp_12_0_0_offset.h"
#include "asic_reg/mp/mp_12_0_0_sh_mask.h"
+#include "asic_reg/smuio/smuio_12_0_0_offset.h"
+#include "asic_reg/smuio/smuio_12_0_0_sh_mask.h"
-#define smnMP1_FIRMWARE_FLAGS 0x3010024
+// because some SMU12 based ASICs use older ip offset tables
+// we should undefine this register from the smuio12 header
+// to prevent confusion down the road
+#undef mmPWR_MISC_CNTL_STATUS
-#define mmSMUIO_GFX_MISC_CNTL 0x00c8
-#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
-#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg)
@@ -158,10 +160,10 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
* Considering the above, we just leave the user a warning message instead
* of halting driver loading.
*/
- if (if_version != smu->smc_if_version) {
+ if (if_version != smu->smc_driver_if_version) {
pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
"smu fw version = 0x%08x (%d.%d.%d)\n",
- smu->smc_if_version, if_version,
+ smu->smc_driver_if_version, if_version,
smu_version, smu_major, smu_minor, smu_debug);
pr_warn("SMU driver if version not matched\n");
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 868e2d5f6e62..56923a96b450 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
switch (dev_id) {
case 0x67BA:
- case 0x66B1:
+ case 0x67B1:
smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
case 0x67B8:
@@ -2780,7 +2780,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2810,12 +2810,12 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2845,7 +2845,7 @@ static int ci_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2881,8 +2881,9 @@ static int ci_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_UVDDPM))
break;
}
- ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
- data->dpm_level_enable_mask.uvd_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_UVDDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.uvd_dpm_enable_mask,
+ NULL);
return 0;
}
@@ -2912,8 +2913,9 @@ static int ci_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (hwmgr->dpm_level & profile_mode_mask || !PP_CAP(PHM_PlatformCaps_VCEDPM))
break;
}
- ci_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
- data->dpm_level_enable_mask.vce_dpm_enable_mask);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_VCEDPM_SetEnabledMask,
+ data->dpm_level_enable_mask.vce_dpm_enable_mask,
+ NULL);
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
index 32ebb383c456..ecb9ee46d6b3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c
@@ -137,9 +137,7 @@ static int fiji_start_smu_in_protection_mode(struct pp_hwmgr *hwmgr)
PHM_WAIT_VFPF_INDIRECT_FIELD(hwmgr, SMC_IND, RCU_UC_EVENTS,
INTERRUPTS_ENABLED, 1);
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
- PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
/* Wait for done bit to be set */
PHM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL(hwmgr, SMC_IND,
@@ -203,8 +201,9 @@ static int fiji_start_avfs_btc(struct pp_hwmgr *hwmgr)
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+ if (0 != smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+ NULL)) {
pr_info("[AVFS][Fiji_PerformBtc] PerformBTC SMU msg failed");
result = -EINVAL;
}
@@ -1913,7 +1912,8 @@ static int fiji_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
if (mask)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_LedConfig,
- mask);
+ mask,
+ NULL);
return 0;
}
@@ -2220,14 +2220,16 @@ static int fiji_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
+ advanceFanControlParameters.ucMinimumPWMLimit,
+ NULL);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+ NULL);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2242,7 +2244,7 @@ static int fiji_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
if (!hwmgr->avfs_supported)
return 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
return 0;
}
@@ -2390,7 +2392,8 @@ static int fiji_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2422,7 +2425,8 @@ static int fiji_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -2569,7 +2573,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2599,12 +2603,12 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2634,7 +2638,7 @@ static int fiji_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2649,6 +2653,7 @@ const struct pp_smumgr_func fiji_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = fiji_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
index 732005c03a82..431ad2fd38df 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/iceland_smumgr.c
@@ -2669,6 +2669,7 @@ const struct pp_smumgr_func iceland_smu_funcs = {
.request_smu_load_specific_fw = &iceland_request_smu_load_specific_fw,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.get_offsetof = iceland_get_offsetof,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 23c12018dbc1..c3d2e6dcf62a 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -99,7 +99,8 @@ static int polaris10_perform_btc(struct pp_hwmgr *hwmgr)
struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
if (0 != smu_data->avfs_btc_param) {
- if (0 != smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param)) {
+ if (0 != smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_PerformBtc, smu_data->avfs_btc_param,
+ NULL)) {
pr_info("[AVFS][SmuPolaris10_PerformBtc] PerformBTC SMU msg failed");
result = -1;
}
@@ -2049,15 +2050,16 @@ int polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
return 0;
smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+ PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting,
+ NULL);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
/* Apply avfs cks-off voltages to avoid the overshoot
* when switching to the highest sclk frequency
*/
if (data->apply_avfs_cks_off_voltage)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage, NULL);
return 0;
}
@@ -2158,14 +2160,16 @@ static int polaris10_thermal_setup_fan_table(struct pp_hwmgr *hwmgr)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanMinPwm,
hwmgr->thermal_controller.
- advanceFanControlParameters.ucMinimumPWMLimit);
+ advanceFanControlParameters.ucMinimumPWMLimit,
+ NULL);
if (!res && hwmgr->thermal_controller.
advanceFanControlParameters.ulMinFanSCLKAcousticLimit)
res = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetFanSclkTarget,
hwmgr->thermal_controller.
- advanceFanControlParameters.ulMinFanSCLKAcousticLimit);
+ advanceFanControlParameters.ulMinFanSCLKAcousticLimit,
+ NULL);
if (res)
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
@@ -2202,7 +2206,8 @@ static int polaris10_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2234,7 +2239,8 @@ static int polaris10_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -2485,7 +2491,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -2515,12 +2521,12 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -2550,7 +2556,7 @@ static int polaris10_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -2565,6 +2571,7 @@ const struct pp_smumgr_func polaris10_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = polaris10_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index 2319400a3fcb..ea2279bb8cbf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -126,15 +126,18 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL;);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL;);
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
/* flush hdp cache */
amdgpu_asic_flush_hdp(adev, NULL);
@@ -164,15 +167,18 @@ static int smu10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu10_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
return 0;
}
@@ -181,9 +187,9 @@ static int smu10_verify_smc_interface(struct pp_hwmgr *hwmgr)
{
uint32_t smc_driver_if_version;
- smu10_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion);
- smc_driver_if_version = smu10_read_arg_from_smc(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion,
+ &smc_driver_if_version);
if ((smc_driver_if_version != SMU10_DRIVER_IF_VERSION) &&
(smc_driver_if_version != SMU10_DRIVER_IF_VERSION + 1)) {
@@ -217,11 +223,11 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
- hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion, &hwmgr->smu_version);
adev->pm.fw_version = hwmgr->smu_version >> 8;
- if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2) &&
+ (adev->apu_flags & AMD_APU_IS_RAVEN) &&
adev->pm.fw_version < 0x1e45)
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
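
The smu10 hunks above show the new read-back path end to end: the SMC reply now arrives through the wrapper's resp pointer instead of a separate smu10_read_arg_from_smc() call, and the GFXOFF quirk keys off apu_flags rather than raw device/revision IDs. A minimal sketch of the version check, using only names visible in the hunks:

	uint32_t if_version;

	/* resp != NULL makes the wrapper read the argument register back
	 * through the ASIC's get_argument hook once the message completes */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetDriverIfVersion, &if_version);

	if (if_version != SMU10_DRIVER_IF_VERSION &&
	    if_version != SMU10_DRIVER_IF_VERSION + 1)
		return -EINVAL;
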
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index 3f51d545e8ff..aae25243eb10 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -191,13 +191,6 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
return 0;
}
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
-{
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
-
- return 0;
-}
-
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -207,25 +200,14 @@ int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, ui
return smu7_send_msg_to_smc(hwmgr, msg);
}
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);
-
- return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
+ return cgs_read_register(hwmgr->device, mmSMC_MSG_ARG_0);
}
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
- cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);
-
- cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);
-
- PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
-
- if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
- pr_info("Failed to send Message.\n");
-
- return 0;
+ return smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_Test, 0x20000, NULL);
}
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
@@ -353,12 +335,14 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
if (hwmgr->not_vf) {
- smu7_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_HI,
- upper_32_bits(smu_data->smu_buffer.mc_addr));
- smu7_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(smu_data->smu_buffer.mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SMU_DRAM_ADDR_LO,
- lower_32_bits(smu_data->smu_buffer.mc_addr));
+ lower_32_bits(smu_data->smu_buffer.mc_addr),
+ NULL);
}
fw_to_load = UCODE_ID_RLC_G_MASK
+ UCODE_ID_SDMA0_MASK
@@ -423,10 +407,16 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
}
memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
sizeof(struct SMU_DRAMData_TOC));
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
-
- smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DRV_DRAM_ADDR_HI,
+ upper_32_bits(smu_data->header_buffer.mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DRV_DRAM_ADDR_LO,
+ lower_32_bits(smu_data->header_buffer.mc_addr),
+ NULL);
+
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load, NULL);
r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
if (!r)
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
index 01f0538fba6b..e7303dc8c260 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.h
@@ -60,11 +60,9 @@ int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr);
bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr);
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg);
-int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg);
int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg,
uint32_t parameter);
-int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter);
+uint32_t smu7_get_argument(struct pp_hwmgr *hwmgr);
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr);
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type);
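
Each SMU7-family backend now exports its argument-register reader through the func table so the common wrappers can service a resp pointer; a sketch of the wiring, matching the iceland/polaris/tonga/vegam hunks in this patch (example_smu7_funcs is a hypothetical name):

static const struct pp_smumgr_func example_smu7_funcs = {
	.send_msg_to_smc = smu7_send_msg_to_smc,
	.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
	.get_argument = smu7_get_argument,	/* reads mmSMC_MSG_ARG_0 */
};
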
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
index 7dca04a89217..76d4f12ceedf 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c
@@ -610,18 +610,21 @@ static int smu8_download_pptable_settings(struct pp_hwmgr *hwmgr, void **table)
*table = (struct SMU8_Fusion_ClkTable *)smu8_smu->scratch_buffer[i].kaddr;
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_clock_table);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table,
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToDram, NULL);
return 0;
}
@@ -637,18 +640,21 @@ static int smu8_upload_pptable_settings(struct pp_hwmgr *hwmgr)
break;
}
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrHi,
- upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ upper_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetClkTableAddrLo,
- lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr));
+ lower_32_bits(smu8_smu->scratch_buffer[i].mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_clock_table);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_clock_table,
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ClkTableXferToSmu, NULL);
return 0;
}
@@ -671,25 +677,30 @@ static int smu8_request_smu_load_fw(struct pp_hwmgr *hwmgr)
smu8_write_smc_sram_dword(hwmgr, smc_address, 0, smc_address+4);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrHi,
- upper_32_bits(smu8_smu->toc_buffer.mc_addr));
+ upper_32_bits(smu8_smu->toc_buffer.mc_addr),
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_DriverDramAddrLo,
- lower_32_bits(smu8_smu->toc_buffer.mc_addr));
+ lower_32_bits(smu8_smu->toc_buffer.mc_addr),
+ NULL);
- smu8_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitJobs, NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_aram);
- smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_power_profiling_index);
+ smu8_smu->toc_entry_aram,
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ExecuteJob,
+ smu8_smu->toc_entry_power_profiling_index,
+ NULL);
- smu8_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_ExecuteJob,
- smu8_smu->toc_entry_initialize_index);
+ smu8_smu->toc_entry_initialize_index,
+ NULL);
fw_to_check = UCODE_ID_RLC_G_MASK |
UCODE_ID_SDMA0_MASK |
@@ -860,11 +871,13 @@ static bool smu8_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
unsigned long check_feature)
{
int result;
- unsigned long features;
+ uint32_t features;
- result = smu8_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
+ result = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_GetFeatureStatus,
+ 0,
+ &features);
if (result == 0) {
- features = smum_get_argument(hwmgr);
if (features & check_feature)
return true;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 4240aeec9000..b6fb48066841 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -103,14 +103,6 @@ int smum_process_firmware_header(struct pp_hwmgr *hwmgr)
return 0;
}
-uint32_t smum_get_argument(struct pp_hwmgr *hwmgr)
-{
- if (NULL != hwmgr->smumgr_funcs->get_argument)
- return hwmgr->smumgr_funcs->get_argument(hwmgr);
-
- return 0;
-}
-
uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value)
{
if (NULL != hwmgr->smumgr_funcs->get_mac_definition)
@@ -135,22 +127,58 @@ int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr)
return 0;
}
-int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
+int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp)
{
- if (hwmgr == NULL || hwmgr->smumgr_funcs->send_msg_to_smc == NULL)
+ int ret = 0;
+
+ if (hwmgr == NULL ||
+ hwmgr->smumgr_funcs->send_msg_to_smc == NULL ||
+ (resp && !hwmgr->smumgr_funcs->get_argument))
return -EINVAL;
- return hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+ mutex_lock(&hwmgr->msg_lock);
+
+ ret = hwmgr->smumgr_funcs->send_msg_to_smc(hwmgr, msg);
+ if (ret) {
+ mutex_unlock(&hwmgr->msg_lock);
+ return ret;
+ }
+
+ if (resp)
+ *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+ mutex_unlock(&hwmgr->msg_lock);
+
+ return ret;
}
int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
- uint16_t msg, uint32_t parameter)
+ uint16_t msg,
+ uint32_t parameter,
+ uint32_t *resp)
{
+ int ret = 0;
+
if (hwmgr == NULL ||
- hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL)
+ hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter == NULL ||
+ (resp && !hwmgr->smumgr_funcs->get_argument))
return -EINVAL;
- return hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
+
+ mutex_lock(&hwmgr->msg_lock);
+
+ ret = hwmgr->smumgr_funcs->send_msg_to_smc_with_parameter(
hwmgr, msg, parameter);
+ if (ret) {
+ mutex_unlock(&hwmgr->msg_lock);
+ return ret;
+ }
+
+ if (resp)
+ *resp = hwmgr->smumgr_funcs->get_argument(hwmgr);
+
+ mutex_unlock(&hwmgr->msg_lock);
+
+ return ret;
}
int smum_init_smc_table(struct pp_hwmgr *hwmgr)
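
The wrappers above fold the old send-then-smum_get_argument() idiom into a single call serialized by hwmgr->msg_lock (initialized outside this hunk). A minimal sketch of the new calling convention, with PPSMC message names as used elsewhere in this patch:

	uint32_t features = 0;
	int ret;

	/* reply wanted: pass a resp pointer, filled under msg_lock */
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
						  PPSMC_MSG_GetFeatureStatus,
						  0, &features);
	if (ret)
		return ret;

	/* reply not needed: pass NULL */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
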
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index f19bac7ef7ba..398e7e3587de 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -2702,7 +2702,8 @@ static int tonga_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -2733,7 +2734,8 @@ static int tonga_update_vce_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -3168,7 +3170,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
if (setting->bupdate_sclk) {
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.GraphicsDpmLevelCount; i++) {
if (levels[i].ActivityLevel !=
cpu_to_be16(setting->sclk_activity)) {
@@ -3198,12 +3200,12 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->sclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_SCLKDPM_UnfreezeLevel, NULL);
}
if (setting->bupdate_mclk) {
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_FreezeLevel, NULL);
for (i = 0; i < smu_data->smc_state_table.MemoryDpmLevelCount; i++) {
if (mclk_levels[i].ActivityLevel !=
cpu_to_be16(setting->mclk_activity)) {
@@ -3233,7 +3235,7 @@ static int tonga_update_dpm_settings(struct pp_hwmgr *hwmgr,
}
}
if (!data->mclk_dpm_key_disabled)
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_UnfreezeLevel, NULL);
}
return 0;
}
@@ -3248,6 +3250,7 @@ const struct pp_smumgr_func tonga_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = &smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = &smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.download_pptable_settings = NULL,
.upload_pptable_settings = NULL,
.update_smc_table = tonga_update_smc_table,
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
index 715564009089..1e222c5d91a4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
@@ -47,15 +47,18 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
/* flush hdp cache */
amdgpu_asic_flush_hdp(adev, NULL);
@@ -90,15 +93,18 @@ static int vega10_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- priv->smu_tables.entry[table_id].table_id);
+ priv->smu_tables.entry[table_id].table_id,
+ NULL);
return 0;
}
@@ -118,17 +124,21 @@ int vega10_enable_smc_features(struct pp_hwmgr *hwmgr,
return 0;
return smum_send_msg_to_smc_with_parameter(hwmgr,
- msg, feature_mask);
+ msg, feature_mask, NULL);
}
int vega10_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
uint64_t *features_enabled)
{
+ uint32_t enabled_features;
+
if (features_enabled == NULL)
return -EINVAL;
- smu9_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeatures);
- *features_enabled = smu9_get_argument(hwmgr);
+ smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeatures,
+ &enabled_features);
+ *features_enabled = enabled_features;
return 0;
}
@@ -150,12 +160,14 @@ static int vega10_set_tools_address(struct pp_hwmgr *hwmgr)
struct vega10_smumgr *priv = hwmgr->smu_backend;
if (priv->smu_tables.entry[TOOLSTABLE].mc_addr) {
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+ NULL);
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TOOLSTABLE].mc_addr),
+ NULL);
}
return 0;
}
@@ -167,11 +179,11 @@ static int vega10_verify_smc_interface(struct pp_hwmgr *hwmgr)
uint32_t dev_id;
uint32_t rev_id;
- PP_ASSERT_WITH_CODE(!smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetDriverIfVersion),
+ PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetDriverIfVersion,
+ &smc_driver_if_version),
"Attempt to get SMC IF Version Number Failed!",
return -EINVAL);
- smc_driver_if_version = smu9_get_argument(hwmgr);
dev_id = adev->pdev->device;
rev_id = adev->pdev->revision;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
index 275dbf65f1a0..f54df76537e4 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
@@ -50,18 +50,21 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr,
"Invalid SMU Table version!", return -EINVAL);
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!", return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- table_id) == 0,
+ table_id,
+ NULL) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return -EINVAL);
@@ -98,19 +101,22 @@ static int vega12_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return -EINVAL;);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr)) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableDram2Smu,
- table_id) == 0,
+ table_id,
+ NULL) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return -EINVAL);
@@ -126,21 +132,21 @@ int vega12_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return -EINVAL);
} else {
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return -EINVAL);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return -EINVAL);
}
@@ -156,17 +162,17 @@ int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow,
+ &smc_features_low) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return -EINVAL);
- smc_features_low = smu9_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0,
+ PP_ASSERT_WITH_CODE(smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+ &smc_features_high) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return -EINVAL);
- smc_features_high = smu9_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -192,12 +198,14 @@ static int vega12_set_tools_address(struct pp_hwmgr *hwmgr)
(struct vega12_smumgr *)(hwmgr->smu_backend);
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- if (!smu9_send_msg_to_smc_with_parameter(hwmgr,
+ if (!smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr)))
- smu9_send_msg_to_smc_with_parameter(hwmgr,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL))
+ smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
}
return 0;
}
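
The 64-bit enabled-feature mask is still stitched together from two 32-bit SMC replies; a sketch of the combine, assuming the SMU_FEATURES_* shift/mask macros from the vega12 headers:

	uint32_t lo = 0, hi = 0;
	uint64_t mask;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesLow, &lo);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetEnabledSmuFeaturesHigh, &hi);

	mask = (((uint64_t)lo << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
	       (((uint64_t)hi << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK);
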
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 16aa171971d3..2fb97554134f 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -175,18 +175,20 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr,
PP_ASSERT_WITH_CODE(priv->smu_tables.entry[table_id].size != 0,
"Invalid SMU Table Length!", return -EINVAL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableFromSMC] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableSmu2Dram, table_id)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableSmu2Dram, table_id, NULL)) == 0,
"[CopyTableFromSMC] Attempt to Transfer Table From SMU Failed!",
return ret);
@@ -224,18 +226,20 @@ static int vega20_copy_table_to_smc(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[table_id].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[table_id].mc_addr),
+ NULL)) == 0,
"[CopyTableToSMC] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu, table_id)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu, table_id, NULL)) == 0,
"[CopyTableToSMC] Attempt to Transfer Table To SMU Failed!",
return ret);
@@ -255,18 +259,22 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
amdgpu_asic_flush_hdp(adev, NULL);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_TransferTableDram2Smu, TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_TransferTableDram2Smu,
+ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16),
+ NULL)) == 0,
"[SetActivityMonitor] Attempt to Transfer Table To SMU Failed!",
return ret);
@@ -281,19 +289,21 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
struct amdgpu_device *adev = hwmgr->adev;
int ret = 0;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[GetActivityMonitor] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].mc_addr),
+ NULL)) == 0,
"[GetActivityMonitor] Attempt to Set Dram Addr Low Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_TransferTableSmu2Dram,
- TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16))) == 0,
+ TABLE_ACTIVITY_MONITOR_COEFF | (workload_type << 16), NULL)) == 0,
"[GetActivityMonitor] Attempt to Transfer Table From SMU Failed!",
return ret);
@@ -316,21 +326,21 @@ int vega20_enable_smc_features(struct pp_hwmgr *hwmgr,
smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >> SMU_FEATURES_HIGH_SHIFT);
if (enable) {
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!",
return ret);
} else {
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high, NULL)) == 0,
"[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!",
return ret);
}
@@ -347,16 +357,16 @@ int vega20_get_enabled_smc_features(struct pp_hwmgr *hwmgr,
if (features_enabled == NULL)
return -EINVAL;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesLow,
+ &smc_features_low)) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!",
return ret);
- smc_features_low = vega20_get_argument(hwmgr);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr,
- PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_GetEnabledSmuFeaturesHigh,
+ &smc_features_high)) == 0,
"[GetEnabledSMCFeatures] Attempt to get SMU features High failed!",
return ret);
- smc_features_high = vega20_get_argument(hwmgr);
*features_enabled = ((((uint64_t)smc_features_low << SMU_FEATURES_LOW_SHIFT) & SMU_FEATURES_LOW_MASK) |
(((uint64_t)smc_features_high << SMU_FEATURES_HIGH_SHIFT) & SMU_FEATURES_HIGH_MASK));
@@ -371,13 +381,15 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
int ret = 0;
if (priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr) {
- ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ upper_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
if (!ret)
- ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetToolsDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr));
+ lower_32_bits(priv->smu_tables.entry[TABLE_PMSTATUSLOG].mc_addr),
+ NULL);
}
return ret;
@@ -389,14 +401,16 @@ int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
int ret = 0;
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrHigh,
- upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+ NULL)) == 0,
"[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
return ret);
- PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetDriverDramAddrLow,
- lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr),
+ NULL)) == 0,
"[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
return ret);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index b0e0d67cd54b..3da71a088b92 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -356,7 +356,8 @@ static int vegam_update_uvd_smc_table(struct pp_hwmgr *hwmgr)
PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_UVDDPM_SetEnabledMask,
- (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel));
+ (uint32_t)(1 << smu_data->smc_state_table.UvdBootLevel),
+ NULL);
return 0;
}
@@ -388,7 +389,8 @@ static int vegam_update_vce_smc_table(struct pp_hwmgr *hwmgr)
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_VCEDPM_SetEnabledMask,
- (uint32_t)1 << smu_data->smc_state_table.VceBootLevel);
+ (uint32_t)1 << smu_data->smc_state_table.VceBootLevel,
+ NULL);
return 0;
}
@@ -1906,7 +1908,8 @@ static int vegam_enable_reconfig_cus(struct pp_hwmgr *hwmgr)
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_EnableModeSwitchRLCNotification,
- adev->gfx.cu_info.number);
+ adev->gfx.cu_info.number,
+ NULL);
return 0;
}
@@ -2060,7 +2063,7 @@ static int vegam_init_smc_table(struct pp_hwmgr *hwmgr)
table->AcDcGpio = gpio_pin.uc_gpio_pin_bit_shift;
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_AutomaticDCTransition) &&
- !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme))
+ !smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UseNewGPIOScheme, NULL))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
} else {
@@ -2250,10 +2253,12 @@ int vegam_thermal_avfs_enable(struct pp_hwmgr *hwmgr)
if (!hwmgr->avfs_supported)
return 0;
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableAvfs, NULL);
if (!ret) {
if (data->apply_avfs_cks_off_voltage)
- ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ApplyAvfsCksOffVoltage);
+ ret = smum_send_msg_to_smc(hwmgr,
+ PPSMC_MSG_ApplyAvfsCksOffVoltage,
+ NULL);
}
return ret;
@@ -2279,6 +2284,7 @@ const struct pp_smumgr_func vegam_smu_funcs = {
.request_smu_load_specific_fw = NULL,
.send_msg_to_smc = smu7_send_msg_to_smc,
.send_msg_to_smc_with_parameter = smu7_send_msg_to_smc_with_parameter,
+ .get_argument = smu7_get_argument,
.process_firmware_header = vegam_process_firmware_header,
.is_dpm_running = vegam_is_dpm_running,
.get_mac_definition = vegam_get_mac_definition,
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 3f1044326dcb..61923530b2e4 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -1796,7 +1796,7 @@ static int vega20_get_power_profile_mode(struct smu_context *smu, char *buf)
"PD_Data_error_rate_coeff"};
int result = 0;
- if (!smu->pm_enabled || !buf)
+ if (!buf)
return -EINVAL;
size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
@@ -1887,8 +1887,6 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
smu->power_profile_mode = input[size];
- if (!smu->pm_enabled)
- return ret;
if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
return -EINVAL;
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index d6a6692db0ac..f164818ec477 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -137,10 +137,11 @@ static struct drm_info_list arcpgu_debugfs_list[] = {
{ "clocks", arcpgu_show_pxlclock, 0 },
};
-static int arcpgu_debugfs_init(struct drm_minor *minor)
+static void arcpgu_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(arcpgu_debugfs_list,
- ARRAY_SIZE(arcpgu_debugfs_list), minor->debugfs_root, minor);
+ drm_debugfs_create_files(arcpgu_debugfs_list,
+ ARRAY_SIZE(arcpgu_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -153,17 +154,7 @@ static struct drm_driver arcpgu_drm_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_print_info = drm_gem_cma_print_info,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
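
DRM_GEM_CMA_DRIVER_OPS stands in for the block of per-driver GEM/PRIME callbacks deleted above, and debugfs_init now returns void since debugfs setup errors are no longer propagated. The driver struct shrinks to roughly the shape below (example_* names are hypothetical):

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops = &example_fops,		/* from DEFINE_DRM_GEM_CMA_FOPS(example_fops) */
	DRM_GEM_CMA_DRIVER_OPS,		/* dumb_create + PRIME import/export, CMA-backed */
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = example_debugfs_init,
#endif
};
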
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index a204103b3efb..3a9e966e0e78 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -3,7 +3,7 @@ menu "ARM devices"
config DRM_HDLCD
tristate "ARM HDLCD"
- depends on DRM && OF && (ARM || ARM64)
+ depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
@@ -24,7 +24,7 @@ config DRM_HDLCD_SHOW_UNDERRUN
config DRM_MALI_DISPLAY
tristate "ARM Mali Display Processor"
- depends on DRM && OF && (ARM || ARM64)
+ depends on DRM && OF && (ARM || ARM64 || COMPILE_TEST)
depends on COMMON_CLK
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
index 1b01a625f40e..170f9dc8ec19 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c
@@ -19,7 +19,7 @@ static void komeda_fb_destroy(struct drm_framebuffer *fb)
u32 i;
for (i = 0; i < fb->format->num_planes; i++)
- drm_gem_object_put_unlocked(fb->obj[i]);
+ drm_gem_object_put(fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(kfb);
@@ -103,7 +103,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
return 0;
check_failed:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EINVAL;
}
@@ -199,7 +199,7 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file,
err_cleanup:
for (i = 0; i < kfb->base.format->num_planes; i++)
- drm_gem_object_put_unlocked(kfb->base.obj[i]);
+ drm_gem_object_put(kfb->base.obj[i]);
kfree(kfb);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 442d4656150a..1f6682032ca4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -14,6 +14,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -60,16 +61,7 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
static struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.lastclose = drm_fb_helper_lastclose,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = komeda_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_cma_dumb_create),
.fops = &komeda_cma_fops,
.name = "komeda",
.desc = "Arm Komeda Display Processor driver",
@@ -260,17 +252,16 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
{
- struct komeda_kms_dev *kms = kzalloc(sizeof(*kms), GFP_KERNEL);
+ struct komeda_kms_dev *kms;
struct drm_device *drm;
int err;
- if (!kms)
- return ERR_PTR(-ENOMEM);
+ kms = devm_drm_dev_alloc(mdev->dev, &komeda_kms_driver,
+ struct komeda_kms_dev, base);
+ if (IS_ERR(kms))
+ return kms;
drm = &kms->base;
- err = drm_dev_init(drm, &komeda_kms_driver, mdev->dev);
- if (err)
- goto free_kms;
drm->dev_private = mdev;
@@ -327,9 +318,6 @@ cleanup_mode_config:
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
- drm_dev_put(drm);
-free_kms:
- kfree(kms);
return ERR_PTR(err);
}
@@ -346,5 +334,4 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
drm_mode_config_cleanup(drm);
komeda_kms_cleanup_private_objs(kms);
drm->dev_private = NULL;
- drm_dev_put(drm);
}
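
The devm_drm_dev_alloc() conversion above removes the manual drm_dev_init()/drm_dev_put()/kfree() unwinding; a minimal sketch of the pattern, assuming the driver state embeds the drm_device as a member named base:

struct example_kms {
	struct drm_device base;		/* embedded, named in the alloc below */
	/* driver-private state ... */
};

static struct example_kms *example_attach(struct device *dev)
{
	struct example_kms *kms;

	/* allocates example_kms, initializes kms->base, and ties the
	 * allocation's lifetime to the drm_device's final reference */
	kms = devm_drm_dev_alloc(dev, &example_driver,
				 struct example_kms, base);
	return kms;	/* ERR_PTR() on failure, no kfree() needed */
}
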
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index 2e053815b54a..faa8a5a752da 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -224,10 +224,11 @@ static struct drm_info_list hdlcd_debugfs_list[] = {
{ "clocks", hdlcd_show_pxlclock, 0 },
};
-static int hdlcd_debugfs_init(struct drm_minor *minor)
+static void hdlcd_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(hdlcd_debugfs_list,
- ARRAY_SIZE(hdlcd_debugfs_list), minor->debugfs_root, minor);
+ drm_debugfs_create_files(hdlcd_debugfs_list,
+ ARRAY_SIZE(hdlcd_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -239,17 +240,7 @@ static struct drm_driver hdlcd_driver = {
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
.irq_uninstall = hdlcd_irq_uninstall,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_print_info = drm_gem_cma_print_info,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = hdlcd_debugfs_init,
#endif
@@ -346,9 +337,8 @@ static void hdlcd_drm_unbind(struct device *dev)
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
pm_runtime_get_sync(dev);
- drm_crtc_vblank_off(&hdlcd->crtc);
- drm_irq_uninstall(drm);
drm_atomic_helper_shutdown(drm);
+ drm_irq_uninstall(drm);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
pm_runtime_disable(dev);
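
The reorder in hdlcd_drm_unbind() is deliberate: drm_atomic_helper_shutdown() disables the CRTCs (which covers the dropped drm_crtc_vblank_off() call) and may still need working interrupts to do so, e.g. to wait out pending vblanks, so the IRQ is uninstalled only afterwards. A sketch of the resulting teardown order:

	pm_runtime_get_sync(dev);
	drm_atomic_helper_shutdown(drm);	/* disable pipes while IRQs still work */
	drm_irq_uninstall(drm);			/* then tear down the interrupt */
	pm_runtime_put(dev);
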
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 37d92a06318e..a76aa3fb8d3c 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -349,11 +349,11 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
if (objs->size < afbc_size) {
DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
objs->size, afbc_size);
- drm_gem_object_put_unlocked(objs);
+ drm_gem_object_put(objs);
return false;
}
- drm_gem_object_put_unlocked(objs);
+ drm_gem_object_put(objs);
return true;
}
@@ -548,7 +548,7 @@ static const struct file_operations malidp_debugfs_fops = {
.release = single_release,
};
-static int malidp_debugfs_init(struct drm_minor *minor)
+static void malidp_debugfs_init(struct drm_minor *minor)
{
struct malidp_drm *malidp = minor->dev->dev_private;
@@ -557,23 +557,13 @@ static int malidp_debugfs_init(struct drm_minor *minor)
spin_lock_init(&malidp->errors_lock);
debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &malidp_debugfs_fops);
- return 0;
}
#endif //CONFIG_DEBUG_FS
static struct drm_driver malidp_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = malidp_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
.debugfs_init = malidp_debugfs_init,
#endif
@@ -667,20 +657,11 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR_RO(core_id);
-static int malidp_init_sysfs(struct device *dev)
-{
- int ret = device_create_file(dev, &dev_attr_core_id);
-
- if (ret)
- DRM_ERROR("failed to create device file for core_id\n");
-
- return ret;
-}
-
-static void malidp_fini_sysfs(struct device *dev)
-{
- device_remove_file(dev, &dev_attr_core_id);
-}
+static struct attribute *mali_dp_attrs[] = {
+ &dev_attr_core_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(mali_dp);
#define MAX_OUTPUT_CHANNELS 3
@@ -842,10 +823,6 @@ static int malidp_bind(struct device *dev)
if (ret < 0)
goto query_hw_fail;
- ret = malidp_init_sysfs(dev);
- if (ret)
- goto init_fail;
-
/* Set the CRTC's port so that the encoder component can find it */
malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);
@@ -903,8 +880,6 @@ irq_init_fail:
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
-init_fail:
- malidp_fini_sysfs(dev);
malidp_fini(drm);
query_hw_fail:
pm_runtime_put(dev);
@@ -930,15 +905,13 @@ static void malidp_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
pm_runtime_get_sync(dev);
- drm_crtc_vblank_off(&malidp->crtc);
+ drm_atomic_helper_shutdown(drm);
malidp_se_irq_fini(hwdev);
malidp_de_irq_fini(hwdev);
drm->irq_enabled = false;
- drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- malidp_fini_sysfs(dev);
malidp_fini(drm);
pm_runtime_put(dev);
if (pm_runtime_enabled(dev))
@@ -1034,6 +1007,7 @@ static struct platform_driver malidp_platform_driver = {
.name = "mali-dp",
.pm = &malidp_pm_ops,
.of_match_table = malidp_drm_of_match,
+ .dev_groups = mali_dp_groups,
},
};
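
Declaring the attribute through ATTRIBUTE_GROUPS() and driver.dev_groups moves sysfs file creation and removal into the driver core, which is what lets malidp_init_sysfs()/malidp_fini_sysfs() and their error unwinding go away. A sketch (example_* names are hypothetical):

static DEVICE_ATTR_RO(core_id);

static struct attribute *example_attrs[] = {
	&dev_attr_core_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);	/* emits example_groups */

static struct platform_driver example_platform_driver = {
	.driver = {
		.name		= "example",
		.dev_groups	= example_groups, /* managed by the driver core */
	},
};
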
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index c2b92acd1e9a..38dfaa46d306 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -710,13 +710,13 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
/* Must be a kernel-mapped object */
if (!obj->addr) {
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
return -EINVAL;
}
if (obj->obj.size < w * h * 4) {
DRM_ERROR("buffer is too small\n");
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
return -ENOMEM;
}
}
@@ -724,7 +724,7 @@ static int armada_drm_crtc_cursor_set(struct drm_crtc *crtc,
if (dcrtc->cursor_obj) {
dcrtc->cursor_obj->update = NULL;
dcrtc->cursor_obj->update_data = NULL;
- drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
+ drm_gem_object_put(&dcrtc->cursor_obj->obj);
}
dcrtc->cursor_obj = obj;
dcrtc->cursor_w = w;
@@ -760,7 +760,7 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
struct armada_private *priv = crtc->dev->dev_private;
if (dcrtc->cursor_obj)
- drm_gem_object_put_unlocked(&dcrtc->cursor_obj->obj);
+ drm_gem_object_put(&dcrtc->cursor_obj->obj);
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 197dca3fc84c..5fc25c3f445c 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_fb_helper.h>
@@ -103,6 +104,7 @@ static int armada_drm_bind(struct device *dev)
kfree(priv);
return ret;
}
+ drmm_add_final_kfree(&priv->drm, priv);
/* Remove early framebuffers */
ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
@@ -311,7 +313,7 @@ static void __exit armada_drm_exit(void)
}
module_exit(armada_drm_exit);
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_DESCRIPTION("Armada DRM Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:armada-drm");
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index 426ca383d696..b87c71703c85 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -129,12 +129,12 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
goto err;
}
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
return &dfb->fb;
err_unref:
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
err:
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index f2dc371bd8e5..0c4601275507 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -51,13 +51,13 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
ret = armada_gem_linear_back(dev, obj);
if (ret) {
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
return ret;
}
ptr = armada_gem_map_object(dev, obj);
if (!ptr) {
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
return -ENOMEM;
}
@@ -67,7 +67,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
* A reference is now held by the framebuffer object if
* successful, otherwise this drops the ref for the error path.
*/
- drm_gem_object_put_unlocked(&obj->obj);
+ drm_gem_object_put(&obj->obj);
if (IS_ERR(dfb))
return PTR_ERR(dfb);
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 976685f2939e..8005614d2e6b 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -256,7 +256,7 @@ int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
- drm_gem_object_put_unlocked(&dobj->obj);
+ drm_gem_object_put(&dobj->obj);
return ret;
}
@@ -288,7 +288,7 @@ int armada_gem_create_ioctl(struct drm_device *dev, void *data,
/* drop reference from allocate - handle holds it now */
DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
err:
- drm_gem_object_put_unlocked(&dobj->obj);
+ drm_gem_object_put(&dobj->obj);
return ret;
}
@@ -305,13 +305,13 @@ int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
if (!dobj->obj.filp) {
- drm_gem_object_put_unlocked(&dobj->obj);
+ drm_gem_object_put(&dobj->obj);
return -EINVAL;
}
addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
MAP_SHARED, args->offset);
- drm_gem_object_put_unlocked(&dobj->obj);
+ drm_gem_object_put(&dobj->obj);
if (IS_ERR_VALUE(addr))
return addr;
@@ -366,7 +366,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
unref:
- drm_gem_object_put_unlocked(&dobj->obj);
+ drm_gem_object_put(&dobj->obj);
return ret;
}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
index a10358bb61ec..e7ca95827ae8 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx.h
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -5,6 +5,7 @@
#include <drm/drm_simple_kms_helper.h>
struct aspeed_gfx {
+ struct drm_device drm;
void __iomem *base;
struct clk *clk;
struct reset_control *rst;
@@ -12,8 +13,8 @@ struct aspeed_gfx {
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
- struct drm_fbdev_cma *fbdev;
};
+#define to_aspeed_gfx(x) container_of(x, struct aspeed_gfx, drm)
int aspeed_gfx_create_pipe(struct drm_device *drm);
int aspeed_gfx_create_output(struct drm_device *drm);
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
index 2184b8be6fd4..e54686c31a90 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -231,7 +231,7 @@ static const uint32_t aspeed_gfx_formats[] = {
int aspeed_gfx_create_pipe(struct drm_device *drm)
{
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
aspeed_gfx_formats,
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index ada2f6aca906..5e7ea0459d01 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -77,7 +77,7 @@ static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
{
struct drm_device *drm = data;
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
u32 reg;
reg = readl(priv->base + CRT_CTRL1);
@@ -96,15 +96,10 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
static int aspeed_gfx_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
- struct aspeed_gfx *priv;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct resource *res;
int ret;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- drm->dev_private = priv;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(drm->dev, res);
if (IS_ERR(priv->base))
@@ -187,15 +182,13 @@ static void aspeed_gfx_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_mode_config_cleanup(drm);
-
- drm->dev_private = NULL;
}
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver aspeed_gfx_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_create_object = drm_cma_gem_create_object_default_funcs,
+ .gem_create_object = drm_gem_cma_create_object_default_funcs,
.dumb_create = drm_gem_cma_dumb_create,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -216,27 +209,26 @@ static const struct of_device_id aspeed_gfx_match[] = {
static int aspeed_gfx_probe(struct platform_device *pdev)
{
- struct drm_device *drm;
+ struct aspeed_gfx *priv;
int ret;
- drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
+ priv = devm_drm_dev_alloc(&pdev->dev, &aspeed_gfx_driver,
+ struct aspeed_gfx, drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
- ret = aspeed_gfx_load(drm);
+ ret = aspeed_gfx_load(&priv->drm);
if (ret)
- goto err_free;
+ return ret;
- ret = drm_dev_register(drm, 0);
+ ret = drm_dev_register(&priv->drm, 0);
if (ret)
goto err_unload;
return 0;
err_unload:
- aspeed_gfx_unload(drm);
-err_free:
- drm_dev_put(drm);
+ aspeed_gfx_unload(&priv->drm);
return ret;
}
@@ -247,7 +239,6 @@ static int aspeed_gfx_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
aspeed_gfx_unload(drm);
- drm_dev_put(drm);
return 0;
}
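/*
 * Sketch of the devm_drm_dev_alloc() conversion shown above, reduced to a
 * hypothetical "foo" platform driver (foo_driver assumed). The macro
 * allocates the containing structure, initializes the embedded drm_device
 * and ties the final drm_dev_put() to the parent device's unbind, which is
 * why probe() loses its err_free/drm_dev_put() path and remove() loses
 * drm_dev_put() entirely.
 */
#include <linux/platform_device.h>
#include <drm/drm_drv.h>

struct foo_gfx {
	struct drm_device drm;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_gfx *priv;

	priv = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
				  struct foo_gfx, drm);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	/* no drm_dev_put() on failure: devres handles the release */
	return drm_dev_register(&priv->drm, 0);
}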
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
index 67ee5fa10055..6759cb88415a 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -28,7 +28,7 @@ static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
int aspeed_gfx_create_output(struct drm_device *drm)
{
- struct aspeed_gfx *priv = drm->dev_private;
+ struct aspeed_gfx *priv = to_aspeed_gfx(drm);
int ret;
priv->connector.dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 30aa73a5d9b7..48a9cc4e080a 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_probe_helper.h>
@@ -90,15 +91,13 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ast_kick_out_firmware_fb(pdev);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
dev->pdev = pdev;
pci_set_drvdata(pdev, dev);
@@ -111,14 +110,14 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_ast_driver_unload;
+ drm_fbdev_generic_setup(dev, 32);
+
return 0;
err_ast_driver_unload:
ast_driver_unload(dev);
err_drm_dev_put:
drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
return ret;
}
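/*
 * pcim_enable_device() is the device-managed form of pci_enable_device():
 * the PCI core disables the device again when the driver unbinds, which is
 * what lets the err_pci_disable_device label above disappear. Hedged sketch
 * (hypothetical driver, foo_driver assumed):
 */
#include <linux/pci.h>
#include <drm/drm_drv.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_device *dev;
	int ret;

	ret = pcim_enable_device(pdev);	/* no explicit disable needed */
	if (ret)
		return ret;

	dev = drm_dev_alloc(&foo_driver, &pdev->dev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);	/* managed enable unwinds itself */

	return drm_dev_register(dev, 0);
}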
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 18a0a4ce00f6..1b35728ad871 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
@@ -512,10 +511,6 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags)
drm_mode_config_reset(dev);
- ret = drm_fbdev_generic_setup(dev, 32);
- if (ret)
- goto out_free;
-
return 0;
out_free:
kfree(ast);
@@ -536,8 +531,5 @@ void ast_driver_unload(struct drm_device *dev)
drm_mode_config_cleanup(dev);
ast_mm_fini(ast);
- if (ast->ioregs != ast->regs + AST_IO_MM_OFFSET)
- pci_iounmap(dev->pdev, ast->ioregs);
- pci_iounmap(dev->pdev, ast->regs);
kfree(ast);
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index cdd6c46d6557..3a3a511670c9 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -226,6 +226,7 @@ static void ast_set_vbios_color_reg(struct ast_private *ast,
case 3:
case 4:
color_index = TrueCModeIndex;
+ break;
default:
return;
}
@@ -561,8 +562,9 @@ static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
return 0;
}
-void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void
+ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
struct ast_private *ast = plane->dev->dev_private;
struct drm_plane_state *state = plane->state;
@@ -801,6 +803,9 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
return -EINVAL;
}
+ if (!state->enable)
+ return 0; /* no mode checks if CRTC is being disabled */
+
ast_state = to_ast_crtc_state(state);
format = ast_state->format;
@@ -881,6 +886,17 @@ static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.atomic_disable = ast_crtc_helper_atomic_disable,
};
+static void ast_crtc_reset(struct drm_crtc *crtc)
+{
+ struct ast_crtc_state *ast_state =
+ kzalloc(sizeof(*ast_state), GFP_KERNEL);
+
+ if (crtc->state)
+ crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ __drm_atomic_helper_crtc_reset(crtc, &ast_state->base);
+}
+
static void ast_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
@@ -919,8 +935,7 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc,
}
static const struct drm_crtc_funcs ast_crtc_funcs = {
- .reset = drm_atomic_helper_crtc_reset,
- .set_config = drm_crtc_helper_set_config,
+ .reset = ast_crtc_reset,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = ast_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
@@ -1069,7 +1084,6 @@ static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
ast_i2c_destroy(ast_connector->i2c);
- drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1112,8 +1126,6 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- drm_connector_register(connector);
-
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
encoder = list_first_entry(&dev->mode_config.encoder_list, struct drm_encoder, head);
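/*
 * Pattern behind ast_crtc_reset() above: a driver that subclasses
 * drm_crtc_state cannot keep drm_atomic_helper_crtc_reset(), which would
 * allocate only the base structure. Instead it frees any old state and
 * hands a zeroed subclass to __drm_atomic_helper_crtc_reset(), which also
 * tolerates a NULL state if the allocation fails. Hypothetical sketch:
 */
#include <linux/slab.h>
#include <drm/drm_atomic_state_helper.h>

struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 format;			/* driver-private extra state */
};

static void foo_crtc_reset(struct drm_crtc *crtc)
{
	struct foo_crtc_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (crtc->state)
		crtc->funcs->atomic_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, state ? &state->base : NULL);
}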
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 112aa5066cee..871293d1aeeb 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -821,16 +821,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
.irq_preinstall = atmel_hlcdc_dc_irq_uninstall,
.irq_postinstall = atmel_hlcdc_dc_irq_postinstall,
.irq_uninstall = atmel_hlcdc_dc_irq_uninstall,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
- .dumb_create = drm_gem_cma_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "atmel-hlcdc",
.desc = "Atmel HLCD Controller DRM",
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index e2019fe97fff..43bc709e3523 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -11,9 +11,10 @@
#include <linux/media-bus-format.h>
#include <linux/of_graph.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
-#include <drm/drm_bridge.h>
+#include <drm/drm_simple_kms_helper.h>
#include "atmel_hlcdc_dc.h"
@@ -22,10 +23,6 @@ struct atmel_hlcdc_rgb_output {
int bus_fmt;
};
-static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static struct atmel_hlcdc_rgb_output *
atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
{
@@ -98,9 +95,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
return -EINVAL;
}
- ret = drm_encoder_init(dev, &output->encoder,
- &atmel_hlcdc_panel_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, &output->encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 917767173ee6..e5bd1d517a18 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -92,7 +92,6 @@ void bochs_mm_fini(struct bochs_device *bochs);
/* bochs_kms.c */
int bochs_kms_init(struct bochs_device *bochs);
-void bochs_kms_fini(struct bochs_device *bochs);
/* bochs_fbdev.c */
extern const struct drm_mode_config_funcs bochs_mode_funcs;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index addb0568c1af..e18c51de1196 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_managed.h>
#include "bochs.h"
@@ -21,10 +22,7 @@ static void bochs_unload(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
- bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- kfree(bochs);
- dev->dev_private = NULL;
}
static int bochs_load(struct drm_device *dev)
@@ -32,7 +30,7 @@ static int bochs_load(struct drm_device *dev)
struct bochs_device *bochs;
int ret;
- bochs = kzalloc(sizeof(*bochs), GFP_KERNEL);
+ bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL);
if (bochs == NULL)
return -ENOMEM;
dev->dev_private = bochs;
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 8066d7d370d5..05d8373888e8 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -104,7 +104,6 @@ static void bochs_connector_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_VIRTUAL);
drm_connector_helper_add(connector,
&bochs_connector_connector_helper_funcs);
- drm_connector_register(connector);
bochs_hw_load_edid(bochs);
if (bochs->edid) {
@@ -134,7 +133,11 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
int bochs_kms_init(struct bochs_device *bochs)
{
- drm_mode_config_init(bochs->dev);
+ int ret;
+
+ ret = drmm_mode_config_init(bochs->dev);
+ if (ret)
+ return ret;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
@@ -160,12 +163,3 @@ int bochs_kms_init(struct bochs_device *bochs)
return 0;
}
-
-void bochs_kms_fini(struct bochs_device *bochs)
-{
- if (!bochs->dev->mode_config.num_connector)
- return;
-
- drm_atomic_helper_shutdown(bochs->dev);
- drm_mode_config_cleanup(bochs->dev);
-}
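/*
 * The bochs changes above lean on two drm_managed helpers: drmm_kzalloc()
 * memory is freed when the drm_device is released, and
 * drmm_mode_config_init() registers drm_mode_config_cleanup() as a managed
 * release action, so bochs_kms_fini() and the manual kfree() can go.
 * Reduced sketch (hypothetical driver; struct foo_device assumed):
 */
#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>

static int foo_load(struct drm_device *dev)
{
	struct foo_device *foo;
	int ret;

	foo = drmm_kzalloc(dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;		/* nothing to unwind */

	ret = drmm_mode_config_init(dev);	/* cleanup is automatic */
	if (ret)
		return ret;

	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;
	return 0;
}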
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index aaed2347ace9..43271c21d3fc 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,6 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
+config DRM_CHRONTEL_CH7033
+ tristate "Chrontel CH7033 Video Encoder"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Enable support for the Chrontel CH7033 VGA/DVI/HDMI Encoder, as
+ found in the Dell Wyse 3020 thin client.
+
+ If in doubt, say "N".
+
config DRM_DISPLAY_CONNECTOR
tristate "Display connector support"
depends on OF
@@ -52,18 +62,34 @@ config DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL
- ---help---
+ help
This is a driver for the display bridges of
GE B850v3 that convert dual channel LVDS
to DP++. This is used with the i.MX6 imx-ldb
driver. You are likely to say N here.
+config DRM_NWL_MIPI_DSI
+ tristate "Northwest Logic MIPI DSI Host controller"
+ depends on DRM
+ depends on COMMON_CLK
+ depends on OF && HAS_IOMEM
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL_BRIDGE
+ select GENERIC_PHY_MIPI_DPHY
+ select MFD_SYSCON
+ select MULTIPLEXER
+ select REGMAP_MMIO
+ help
+	  This enables the Northwest Logic MIPI DSI host controller found,
+	  for example, on NXP's i.MX8 processors.
+
config DRM_NXP_PTN3460
tristate "NXP PTN3460 DP/LVDS bridge"
depends on OF
select DRM_KMS_HELPER
select DRM_PANEL
- ---help---
+ help
NXP PTN3460 eDP-LVDS bridge chip driver.
config DRM_PARADE_PS8622
@@ -72,7 +98,7 @@ config DRM_PARADE_PS8622
select DRM_PANEL
select DRM_KMS_HELPER
select BACKLIGHT_CLASS_DEVICE
- ---help---
+ help
Parade eDP-LVDS bridge chip driver.
config DRM_PARADE_PS8640
@@ -102,13 +128,13 @@ config DRM_SII902X
select REGMAP_I2C
select I2C_MUX
select SND_SOC_HDMI_CODEC if SND_SOC
- ---help---
+ help
Silicon Image sii902x bridge chip driver.
config DRM_SII9234
tristate "Silicon Image SII9234 HDMI/MHL bridge"
depends on OF
- ---help---
+ help
Say Y here if you want support for the MHL interface.
It is an I2C driver, that detects connection of MHL bridge
and starts encapsulation of HDMI signal.
@@ -124,7 +150,7 @@ config DRM_SIMPLE_BRIDGE
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
- ---help---
+ help
Thine THC63LVD1024 LVDS/parallel converter driver.
config DRM_TOSHIBA_TC358764
@@ -142,7 +168,7 @@ config DRM_TOSHIBA_TC358767
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
- ---help---
+ help
Toshiba TC358767 eDP bridge chip driver.
config DRM_TOSHIBA_TC358768
@@ -159,7 +185,7 @@ config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
select DRM_KMS_HELPER
- ---help---
+ help
Texas Instruments TFP410 DVI/HDMI Transmitter driver
config DRM_TI_SN65DSI86
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 6fb062b5b0f0..d63d4b7e4347 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
+obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
@@ -18,6 +19,7 @@ obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
+obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 47d4eb9e845d..f46a5e26b5dd 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -6,7 +6,7 @@ config DRM_I2C_ADV7511
select REGMAP_I2C
select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W)/13/33/35 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index a428185be2c1..f101dd2819b5 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -19,13 +19,15 @@ static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs,
{
switch (fs) {
case 32000:
- *n = 4096;
+ case 48000:
+ case 96000:
+ case 192000:
+ *n = fs * 128 / 1000;
break;
case 44100:
- *n = 6272;
- break;
- case 48000:
- *n = 6144;
+ case 88200:
+ case 176400:
+ *n = fs * 128 / 900;
break;
}
@@ -119,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data,
audio_source = ADV7511_AUDIO_SOURCE_I2S;
i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
break;
+ case HDMI_SPDIF:
+ audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+ break;
default:
return -EINVAL;
}
@@ -175,11 +180,21 @@ static int audio_startup(struct device *dev, void *data)
/* use Audio infoframe updated info */
regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
BIT(5), 0);
+ /* enable SPDIF receiver */
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), BIT(7));
+
return 0;
}
static void audio_shutdown(struct device *dev, void *data)
{
+ struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+ BIT(7), 0);
}
static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -213,6 +228,7 @@ static const struct hdmi_codec_pdata codec_data = {
.ops = &adv7511_codec_ops,
.max_i2s_channels = 2,
.i2s = 1,
+ .spdif = 1,
};
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)
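/*
 * The recomputed N values above match the HDMI-recommended audio clock
 * regeneration constants the old switch hardcoded (fs in Hz):
 *
 *   1 kHz multiples:  N = 128 * fs / 1000
 *       32000 -> 4096, 48000 -> 6144, 96000 -> 12288, 192000 -> 24576
 *   44.1 kHz family:  N = 128 * fs / 900
 *       44100 -> 6272, 88200 -> 12544, 176400 -> 25088
 *
 * so the refactor adds the 88.2/96/176.4/192 kHz rates without changing
 * the 32/44.1/48 kHz results.
 */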
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 2bc6e4f85171..9af39ec958db 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -485,6 +485,9 @@ static int anx6345_get_modes(struct drm_connector *connector)
num_modes += drm_add_edid_modes(connector, anx6345->edid);
+ /* Driver currently supports only 6bpc */
+ connector->display_info.bpc = 6;
+
unlock:
if (power_off)
anx6345_poweroff(anx6345);
diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c
new file mode 100644
index 000000000000..f8675d82974b
--- /dev/null
+++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Chrontel CH7033 Video Encoder Driver
+ *
+ * Copyright (C) 2019,2020 Lubomir Rintel
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_of.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+/* Page 0, Register 0x07 */
+enum {
+ DRI_PD = BIT(3),
+ IO_PD = BIT(5),
+};
+
+/* Page 0, Register 0x08 */
+enum {
+ DRI_PDDRI = GENMASK(7, 4),
+ PDDAC = GENMASK(3, 1),
+ PANEN = BIT(0),
+};
+
+/* Page 0, Register 0x09 */
+enum {
+ DPD = BIT(7),
+ GCKOFF = BIT(6),
+ TV_BP = BIT(5),
+ SCLPD = BIT(4),
+ SDPD = BIT(3),
+ VGA_PD = BIT(2),
+ HDBKPD = BIT(1),
+ HDMI_PD = BIT(0),
+};
+
+/* Page 0, Register 0x0a */
+enum {
+ MEMINIT = BIT(7),
+ MEMIDLE = BIT(6),
+ MEMPD = BIT(5),
+ STOP = BIT(4),
+ LVDS_PD = BIT(3),
+ HD_DVIB = BIT(2),
+ HDCP_PD = BIT(1),
+ MCU_PD = BIT(0),
+};
+
+/* Page 0, Register 0x18 */
+enum {
+ IDF = GENMASK(7, 4),
+ INTEN = BIT(3),
+ SWAP = GENMASK(2, 0),
+};
+
+enum {
+ BYTE_SWAP_RGB = 0,
+ BYTE_SWAP_RBG = 1,
+ BYTE_SWAP_GRB = 2,
+ BYTE_SWAP_GBR = 3,
+ BYTE_SWAP_BRG = 4,
+ BYTE_SWAP_BGR = 5,
+};
+
+/* Page 0, Register 0x19 */
+enum {
+ HPO_I = BIT(5),
+ VPO_I = BIT(4),
+ DEPO_I = BIT(3),
+ CRYS_EN = BIT(2),
+ GCLKFREQ = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2e */
+enum {
+ HFLIP = BIT(7),
+ VFLIP = BIT(6),
+ DEPO_O = BIT(5),
+ HPO_O = BIT(4),
+ VPO_O = BIT(3),
+ TE = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x2b */
+enum {
+ SWAPS = GENMASK(7, 4),
+ VFMT = GENMASK(3, 0),
+};
+
+/* Page 0, Register 0x54 */
+enum {
+ COMP_BP = BIT(7),
+ DAC_EN_T = BIT(6),
+ HWO_HDMI_HI = GENMASK(5, 3),
+ HOO_HDMI_HI = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x57 */
+enum {
+ FLDSEN = BIT(7),
+ VWO_HDMI_HI = GENMASK(5, 3),
+ VOO_HDMI_HI = GENMASK(2, 0),
+};
+
+/* Page 0, Register 0x7e */
+enum {
+ HDMI_LVDS_SEL = BIT(7),
+ DE_GEN = BIT(6),
+ PWM_INDEX_HI = BIT(5),
+ USE_DE = BIT(4),
+ R_INT = GENMASK(3, 0),
+};
+
+/* Page 1, Register 0x07 */
+enum {
+ BPCKSEL = BIT(7),
+ DRI_CMFB_EN = BIT(6),
+ CEC_PUEN = BIT(5),
+ CEC_T = BIT(3),
+ CKINV = BIT(2),
+ CK_TVINV = BIT(1),
+ DRI_CKS2 = BIT(0),
+};
+
+/* Page 1, Register 0x08 */
+enum {
+ DACG = BIT(6),
+ DACKTST = BIT(5),
+ DEDGEB = BIT(4),
+ SYO = BIT(3),
+ DRI_IT_LVDS = GENMASK(2, 1),
+ DISPON = BIT(0),
+};
+
+/* Page 1, Register 0x0c */
+enum {
+ DRI_PLL_CP = GENMASK(7, 6),
+ DRI_PLL_DIVSEL = BIT(5),
+ DRI_PLL_N1_1 = BIT(4),
+ DRI_PLL_N1_0 = BIT(3),
+ DRI_PLL_N3_1 = BIT(2),
+ DRI_PLL_N3_0 = BIT(1),
+ DRI_PLL_CKTSTEN = BIT(0),
+};
+
+/* Page 1, Register 0x6b */
+enum {
+ VCO3CS = GENMASK(7, 6),
+ ICPGBK2_0 = GENMASK(5, 3),
+ DRI_VCO357SC = BIT(2),
+ PDPLL2 = BIT(1),
+ DRI_PD_SER = BIT(0),
+};
+
+/* Page 1, Register 0x6c */
+enum {
+ PLL2N11 = GENMASK(7, 4),
+ PLL2N5_4 = BIT(3),
+ PLL2N5_TOP = BIT(2),
+ DRI_PLL_PD = BIT(1),
+ PD_I2CM = BIT(0),
+};
+
+/* Page 3, Register 0x28 */
+enum {
+ DIFF_EN = GENMASK(7, 6),
+ CORREC_EN = GENMASK(5, 4),
+ VGACLK_BP = BIT(3),
+ HM_LV_SEL = BIT(2),
+ HD_VGA_SEL = BIT(1),
+};
+
+/* Page 3, Register 0x2a */
+enum {
+ LVDSCLK_BP = BIT(7),
+ HDTVCLK_BP = BIT(6),
+ HDMICLK_BP = BIT(5),
+ HDTV_BP = BIT(4),
+ HDMI_BP = BIT(3),
+ THRWL = GENMASK(2, 0),
+};
+
+/* Page 4, Register 0x52 */
+enum {
+ PGM_ARSTB = BIT(7),
+ MCU_ARSTB = BIT(6),
+ MCU_RETB = BIT(2),
+ RESETIB = BIT(1),
+ RESETDB = BIT(0),
+};
+
+struct ch7033_priv {
+ struct regmap *regmap;
+ struct drm_bridge *next_bridge;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+};
+
+#define conn_to_ch7033_priv(x) \
+ container_of(x, struct ch7033_priv, connector)
+#define bridge_to_ch7033_priv(x) \
+ container_of(x, struct ch7033_priv, bridge)
+
+
+static enum drm_connector_status ch7033_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+ return drm_bridge_detect(priv->next_bridge);
+}
+
+static const struct drm_connector_funcs ch7033_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ch7033_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int ch7033_connector_get_modes(struct drm_connector *connector)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+ struct edid *edid;
+ int ret;
+
+ edid = drm_bridge_get_edid(priv->next_bridge, connector);
+ drm_connector_update_edid_property(connector, edid);
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ } else {
+ ret = drm_add_modes_noedid(connector, 1920, 1080);
+ drm_set_preferred_mode(connector, 1024, 768);
+ }
+
+ return ret;
+}
+
+static struct drm_encoder *ch7033_connector_best_encoder(
+ struct drm_connector *connector)
+{
+ struct ch7033_priv *priv = conn_to_ch7033_priv(connector);
+
+ return priv->bridge.encoder;
+}
+
+static const struct drm_connector_helper_funcs ch7033_connector_helper_funcs = {
+ .get_modes = ch7033_connector_get_modes,
+ .best_encoder = ch7033_connector_best_encoder,
+};
+
+static void ch7033_hpd_event(void *arg, enum drm_connector_status status)
+{
+ struct ch7033_priv *priv = arg;
+
+ if (priv->bridge.dev)
+ drm_helper_hpd_irq_event(priv->connector.dev);
+}
+
+static int ch7033_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+ struct drm_connector *connector = &priv->connector;
+ int ret;
+
+ ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_DETECT) {
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_enable(priv->next_bridge, ch7033_hpd_event,
+ priv);
+ }
+
+ drm_connector_helper_add(connector,
+ &ch7033_connector_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &priv->connector,
+ &ch7033_connector_funcs,
+ priv->next_bridge->type,
+ priv->next_bridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ return drm_connector_attach_encoder(&priv->connector, bridge->encoder);
+}
+
+static void ch7033_bridge_detach(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ if (priv->next_bridge->ops & DRM_BRIDGE_OP_HPD)
+ drm_bridge_hpd_disable(priv->next_bridge);
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status ch7033_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
+ if (mode->hdisplay >= 1920)
+ return MODE_BAD_HVALUE;
+ if (mode->vdisplay >= 1080)
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void ch7033_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ regmap_update_bits(priv->regmap, 0x52, RESETDB, 0x00);
+}
+
+static void ch7033_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ regmap_update_bits(priv->regmap, 0x52, RESETDB, RESETDB);
+}
+
+static void ch7033_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge);
+ int hbporch = mode->hsync_start - mode->hdisplay;
+ int hsynclen = mode->hsync_end - mode->hsync_start;
+ int vbporch = mode->vsync_start - mode->vdisplay;
+ int vsynclen = mode->vsync_end - mode->vsync_start;
+
+ /*
+ * Page 4
+ */
+ regmap_write(priv->regmap, 0x03, 0x04);
+
+ /* Turn everything off to set all the registers to their defaults. */
+ regmap_write(priv->regmap, 0x52, 0x00);
+ /* Bring I/O block up. */
+ regmap_write(priv->regmap, 0x52, RESETIB);
+
+ /*
+ * Page 0
+ */
+ regmap_write(priv->regmap, 0x03, 0x00);
+
+ /* Bring up parts we need from the power down. */
+ regmap_update_bits(priv->regmap, 0x07, DRI_PD | IO_PD, 0);
+ regmap_update_bits(priv->regmap, 0x08, DRI_PDDRI | PDDAC | PANEN, 0);
+ regmap_update_bits(priv->regmap, 0x09, DPD | GCKOFF |
+ HDMI_PD | VGA_PD, 0);
+ regmap_update_bits(priv->regmap, 0x0a, HD_DVIB, 0);
+
+ /* Horizontal input timing. */
+ regmap_write(priv->regmap, 0x0b, (mode->htotal >> 8) << 3 |
+ (mode->hdisplay >> 8));
+ regmap_write(priv->regmap, 0x0c, mode->hdisplay);
+ regmap_write(priv->regmap, 0x0d, mode->htotal);
+ regmap_write(priv->regmap, 0x0e, (hsynclen >> 8) << 3 |
+ (hbporch >> 8));
+ regmap_write(priv->regmap, 0x0f, hbporch);
+ regmap_write(priv->regmap, 0x10, hsynclen);
+
+ /* Vertical input timing. */
+ regmap_write(priv->regmap, 0x11, (mode->vtotal >> 8) << 3 |
+ (mode->vdisplay >> 8));
+ regmap_write(priv->regmap, 0x12, mode->vdisplay);
+ regmap_write(priv->regmap, 0x13, mode->vtotal);
+ regmap_write(priv->regmap, 0x14, ((vsynclen >> 8) << 3) |
+ (vbporch >> 8));
+ regmap_write(priv->regmap, 0x15, vbporch);
+ regmap_write(priv->regmap, 0x16, vsynclen);
+
+ /* Input color swap. */
+ regmap_update_bits(priv->regmap, 0x18, SWAP, BYTE_SWAP_BGR);
+
+ /* Input clock and sync polarity. */
+ regmap_update_bits(priv->regmap, 0x19, 0x1, mode->clock >> 16);
+	regmap_update_bits(priv->regmap, 0x19, HPO_I | VPO_I | GCLKFREQ,
+			   ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_I : 0) |
+			   ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_I : 0) |
+			   mode->clock >> 16);
+ regmap_write(priv->regmap, 0x1a, mode->clock >> 8);
+ regmap_write(priv->regmap, 0x1b, mode->clock);
+
+ /* Horizontal output timing. */
+ regmap_write(priv->regmap, 0x1f, (mode->htotal >> 8) << 3 |
+ (mode->hdisplay >> 8));
+ regmap_write(priv->regmap, 0x20, mode->hdisplay);
+ regmap_write(priv->regmap, 0x21, mode->htotal);
+
+ /* Vertical output timing. */
+ regmap_write(priv->regmap, 0x25, (mode->vtotal >> 8) << 3 |
+ (mode->vdisplay >> 8));
+ regmap_write(priv->regmap, 0x26, mode->vdisplay);
+ regmap_write(priv->regmap, 0x27, mode->vtotal);
+
+ /* VGA channel bypass */
+ regmap_update_bits(priv->regmap, 0x2b, VFMT, 9);
+
+ /* Output sync polarity. */
+	regmap_update_bits(priv->regmap, 0x2e, HPO_O | VPO_O,
+			   ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? HPO_O : 0) |
+			   ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? VPO_O : 0));
+
+ /* HDMI horizontal output timing. */
+ regmap_update_bits(priv->regmap, 0x54, HWO_HDMI_HI | HOO_HDMI_HI,
+ (hsynclen >> 8) << 3 |
+ (hbporch >> 8));
+ regmap_write(priv->regmap, 0x55, hbporch);
+ regmap_write(priv->regmap, 0x56, hsynclen);
+
+ /* HDMI vertical output timing. */
+ regmap_update_bits(priv->regmap, 0x57, VWO_HDMI_HI | VOO_HDMI_HI,
+ (vsynclen >> 8) << 3 |
+ (vbporch >> 8));
+ regmap_write(priv->regmap, 0x58, vbporch);
+ regmap_write(priv->regmap, 0x59, vsynclen);
+
+ /* Pick HDMI, not LVDS. */
+ regmap_update_bits(priv->regmap, 0x7e, HDMI_LVDS_SEL, HDMI_LVDS_SEL);
+
+ /*
+ * Page 1
+ */
+ regmap_write(priv->regmap, 0x03, 0x01);
+
+ /* No idea what these do, but VGA is wobbly and blinky without them. */
+ regmap_update_bits(priv->regmap, 0x07, CKINV, CKINV);
+ regmap_update_bits(priv->regmap, 0x08, DISPON, DISPON);
+
+ /* DRI PLL */
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_DIVSEL, DRI_PLL_DIVSEL);
+ if (mode->clock <= 40000) {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ 0);
+ } else if (mode->clock < 80000) {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ DRI_PLL_N3_0 |
+ DRI_PLL_N1_0);
+ } else {
+ regmap_update_bits(priv->regmap, 0x0c, DRI_PLL_N1_1 |
+ DRI_PLL_N1_0 |
+ DRI_PLL_N3_1 |
+ DRI_PLL_N3_0,
+ DRI_PLL_N3_1 |
+ DRI_PLL_N1_1);
+ }
+
+ /* This seems to be color calibration for VGA. */
+ regmap_write(priv->regmap, 0x64, 0x29); /* LSB Blue */
+ regmap_write(priv->regmap, 0x65, 0x29); /* LSB Green */
+ regmap_write(priv->regmap, 0x66, 0x29); /* LSB Red */
+ regmap_write(priv->regmap, 0x67, 0x00); /* MSB Blue */
+ regmap_write(priv->regmap, 0x68, 0x00); /* MSB Green */
+ regmap_write(priv->regmap, 0x69, 0x00); /* MSB Red */
+
+ regmap_update_bits(priv->regmap, 0x6b, DRI_PD_SER, 0x00);
+ regmap_update_bits(priv->regmap, 0x6c, DRI_PLL_PD, 0x00);
+
+ /*
+ * Page 3
+ */
+ regmap_write(priv->regmap, 0x03, 0x03);
+
+ /* More bypasses and apparently another HDMI/LVDS selector. */
+ regmap_update_bits(priv->regmap, 0x28, VGACLK_BP | HM_LV_SEL,
+ VGACLK_BP | HM_LV_SEL);
+ regmap_update_bits(priv->regmap, 0x2a, HDMICLK_BP | HDMI_BP,
+ HDMICLK_BP | HDMI_BP);
+
+ /*
+ * Page 4
+ */
+ regmap_write(priv->regmap, 0x03, 0x04);
+
+ /* Output clock. */
+ regmap_write(priv->regmap, 0x10, mode->clock >> 16);
+ regmap_write(priv->regmap, 0x11, mode->clock >> 8);
+ regmap_write(priv->regmap, 0x12, mode->clock);
+}
+
+static const struct drm_bridge_funcs ch7033_bridge_funcs = {
+ .attach = ch7033_bridge_attach,
+ .detach = ch7033_bridge_detach,
+ .mode_valid = ch7033_bridge_mode_valid,
+ .disable = ch7033_bridge_disable,
+ .enable = ch7033_bridge_enable,
+ .mode_set = ch7033_bridge_mode_set,
+};
+
+static const struct regmap_config ch7033_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x7f,
+};
+
+static int ch7033_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ch7033_priv *priv;
+ unsigned int val;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 1, -1, NULL,
+ &priv->next_bridge);
+ if (ret)
+ return ret;
+
+ priv->regmap = devm_regmap_init_i2c(client, &ch7033_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(&client->dev, "regmap init failed\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ ret = regmap_read(priv->regmap, 0x00, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading the model id: %d\n", ret);
+ return ret;
+ }
+ if ((val & 0xf7) != 0x56) {
+ dev_err(&client->dev, "the device is not a ch7033\n");
+ return -ENODEV;
+ }
+
+ regmap_write(priv->regmap, 0x03, 0x04);
+ ret = regmap_read(priv->regmap, 0x51, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "error reading the model id: %d\n", ret);
+ return ret;
+ }
+ if ((val & 0x0f) != 3) {
+ dev_err(&client->dev, "unknown revision %u\n", val);
+ return -ENODEV;
+ }
+
+ INIT_LIST_HEAD(&priv->bridge.list);
+ priv->bridge.funcs = &ch7033_bridge_funcs;
+ priv->bridge.of_node = dev->of_node;
+ drm_bridge_add(&priv->bridge);
+
+ dev_info(dev, "Chrontel CH7033 Video Encoder\n");
+ return 0;
+}
+
+static int ch7033_remove(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct ch7033_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ch7033_dt_ids[] = {
+ { .compatible = "chrontel,ch7033", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ch7033_dt_ids);
+
+static const struct i2c_device_id ch7033_ids[] = {
+ { "ch7033", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ch7033_ids);
+
+static struct i2c_driver ch7033_driver = {
+ .probe = ch7033_probe,
+ .remove = ch7033_remove,
+ .driver = {
+ .name = "ch7033",
+ .of_match_table = of_match_ptr(ch7033_dt_ids),
+ },
+ .id_table = ch7033_ids,
+};
+
+module_i2c_driver(ch7033_driver);
+
+MODULE_AUTHOR("Lubomir Rintel <lkundrak@v3.sk>");
+MODULE_DESCRIPTION("Chrontel CH7033 Video Encoder Driver");
+MODULE_LICENSE("GPL v2");
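/*
 * The CH7033 register file is paged: writing the page number to register
 * 0x03 selects which page subsequent accesses hit, which is why the driver
 * above writes 0x03 before each group of register accesses. A hypothetical
 * helper making that explicit (sketch, not part of the driver):
 */
static int ch7033_write_paged(struct regmap *map, unsigned int page,
			      unsigned int reg, unsigned int val)
{
	int ret;

	ret = regmap_write(map, 0x03, page);	/* select register page */
	if (ret)
		return ret;

	return regmap_write(map, reg, val);	/* access within that page */
}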
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
new file mode 100644
index 000000000000..b14d725bf609
--- /dev/null
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -0,0 +1,1213 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * i.MX8 NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2020 Purism SPC
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/irq.h>
+#include <linux/math64.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mux/consumer.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+#include <linux/time64.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <video/mipi_display.h>
+
+#include "nwl-dsi.h"
+
+#define DRV_NAME "nwl-dsi"
+
+/* i.MX8 NWL quirks */
+/* i.MX8MQ errata E11418 */
+#define E11418_HS_MODE_QUIRK BIT(0)
+
+#define NWL_DSI_MIPI_FIFO_TIMEOUT msecs_to_jiffies(500)
+
+enum transfer_direction {
+ DSI_PACKET_SEND,
+ DSI_PACKET_RECEIVE,
+};
+
+#define NWL_DSI_ENDPOINT_LCDIF 0
+#define NWL_DSI_ENDPOINT_DCSS 1
+
+struct nwl_dsi_plat_clk_config {
+ const char *id;
+ struct clk *clk;
+ bool present;
+};
+
+struct nwl_dsi_transfer {
+ const struct mipi_dsi_msg *msg;
+ struct mipi_dsi_packet packet;
+ struct completion completed;
+
+ int status; /* status of transmission */
+ enum transfer_direction direction;
+ bool need_bta;
+ u8 cmd;
+ u16 rx_word_count;
+ size_t tx_len; /* in bytes */
+ size_t rx_len; /* in bytes */
+};
+
+struct nwl_dsi {
+ struct drm_bridge bridge;
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge *panel_bridge;
+ struct device *dev;
+ struct phy *phy;
+ union phy_configure_opts phy_cfg;
+ unsigned int quirks;
+
+ struct regmap *regmap;
+ int irq;
+ /*
+ * The DSI host controller needs this reset sequence according to NWL:
+ * 1. Deassert pclk reset to get access to DSI regs
+ * 2. Configure DSI Host and DPHY and enable DPHY
+	 * 3. Deassert ESC and BYTE resets to allow host TX operations
+ * 4. Send DSI cmds to configure peripheral (handled by panel drv)
+ * 5. Deassert DPI reset so DPI receives pixels and starts sending
+ * DSI data
+ *
+ * TODO: Since panel_bridges do their DSI setup in enable we
+ * currently have 4. and 5. swapped.
+ */
+ struct reset_control *rst_byte;
+ struct reset_control *rst_esc;
+ struct reset_control *rst_dpi;
+ struct reset_control *rst_pclk;
+ struct mux_control *mux;
+
+ /* DSI clocks */
+ struct clk *phy_ref_clk;
+ struct clk *rx_esc_clk;
+ struct clk *tx_esc_clk;
+ struct clk *core_clk;
+ /*
+ * hardware bug: the i.MX8MQ needs this clock on during reset
+ * even when not using LCDIF.
+ */
+ struct clk *lcdif_clk;
+
+ /* dsi lanes */
+ u32 lanes;
+ enum mipi_dsi_pixel_format format;
+ struct drm_display_mode mode;
+ unsigned long dsi_mode_flags;
+ int error;
+
+ struct nwl_dsi_transfer *xfer;
+};
+
+static const struct regmap_config nwl_dsi_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = NWL_DSI_IRQ_MASK2,
+ .name = DRV_NAME,
+};
+
+static inline struct nwl_dsi *bridge_to_dsi(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct nwl_dsi, bridge);
+}
+
+static int nwl_dsi_clear_error(struct nwl_dsi *dsi)
+{
+ int ret = dsi->error;
+
+ dsi->error = 0;
+ return ret;
+}
+
+static void nwl_dsi_write(struct nwl_dsi *dsi, unsigned int reg, u32 val)
+{
+ int ret;
+
+ if (dsi->error)
+ return;
+
+ ret = regmap_write(dsi->regmap, reg, val);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev,
+ "Failed to write NWL DSI reg 0x%x: %d\n", reg,
+ ret);
+ dsi->error = ret;
+ }
+}
+
+static u32 nwl_dsi_read(struct nwl_dsi *dsi, u32 reg)
+{
+ unsigned int val;
+ int ret;
+
+ if (dsi->error)
+ return 0;
+
+ ret = regmap_read(dsi->regmap, reg, &val);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to read NWL DSI reg 0x%x: %d\n",
+ reg, ret);
+ dsi->error = ret;
+ }
+ return val;
+}
+
+static int nwl_dsi_get_dpi_pixel_format(enum mipi_dsi_pixel_format format)
+{
+ switch (format) {
+ case MIPI_DSI_FMT_RGB565:
+ return NWL_DSI_PIXEL_FORMAT_16;
+ case MIPI_DSI_FMT_RGB666:
+ return NWL_DSI_PIXEL_FORMAT_18L;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ return NWL_DSI_PIXEL_FORMAT_18;
+ case MIPI_DSI_FMT_RGB888:
+ return NWL_DSI_PIXEL_FORMAT_24;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * ps2bc - Picoseconds to byte clock cycles
+ */
+static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
+{
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ return DIV64_U64_ROUND_UP(ps * dsi->mode.clock * bpp,
+ dsi->lanes * 8 * NSEC_PER_SEC);
+}
+
+/*
+ * ui2bc - UI time periods to byte clock cycles
+ */
+static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+{
+ u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ return DIV64_U64_ROUND_UP(ui * dsi->lanes,
+ dsi->mode.clock * 1000 * bpp);
+}
+
+/*
+ * us2lp - microseconds to LP clock cycles
+ */
+static u32 us2lp(u32 lp_clk_rate, unsigned long us)
+{
+ return DIV_ROUND_UP(us * lp_clk_rate, USEC_PER_SEC);
+}
+
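/*
 * Worked example for the conversions above (illustrative numbers, not from
 * the patch): 1080p60 RGB888 over 4 lanes has mode.clock = 148500 (kHz) and
 * bpp = 24, giving a byte clock of 148500 * 24 / (4 * 8) = 111375 kHz
 * (~111.4 MHz). A 100000 ps (100 ns) hs_exit then becomes
 * DIV64_U64_ROUND_UP(100000 * 148500 * 24, 4 * 8 * NSEC_PER_SEC) = 12 byte
 * clock cycles in ps2bc().
 */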
+static int nwl_dsi_config_host(struct nwl_dsi *dsi)
+{
+ u32 cycles;
+ struct phy_configure_opts_mipi_dphy *cfg = &dsi->phy_cfg.mipi_dphy;
+
+ if (dsi->lanes < 1 || dsi->lanes > 4)
+ return -EINVAL;
+
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "DSI Lanes %d\n", dsi->lanes);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NUM_LANES, dsi->lanes - 1);
+
+ if (dsi->dsi_mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x01);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x01);
+ } else {
+ nwl_dsi_write(dsi, NWL_DSI_CFG_NONCONTINUOUS_CLK, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_AUTOINSERT_EOTP, 0x00);
+ }
+
+ /* values in byte clock cycles */
+ cycles = ui2bc(dsi, cfg->clk_pre);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
+ cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
+ cycles += ui2bc(dsi, cfg->clk_pre);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
+ cycles = ps2bc(dsi, cfg->hs_exit);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_TX_GAP, cycles);
+
+ nwl_dsi_write(dsi, NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP, 0x01);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_HTX_TO_COUNT, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_LRX_H_TO_COUNT, 0x00);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_BTA_H_TO_COUNT, 0x00);
+ /* In LP clock cycles */
+ cycles = us2lp(cfg->lp_clk_rate, cfg->wakeup);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_twakeup: 0x%x\n", cycles);
+ nwl_dsi_write(dsi, NWL_DSI_CFG_TWAKEUP, cycles);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_config_dpi(struct nwl_dsi *dsi)
+{
+ u32 mode;
+ int color_format;
+ bool burst_mode;
+ int hfront_porch, hback_porch, vfront_porch, vback_porch;
+ int hsync_len, vsync_len;
+
+ hfront_porch = dsi->mode.hsync_start - dsi->mode.hdisplay;
+ hsync_len = dsi->mode.hsync_end - dsi->mode.hsync_start;
+ hback_porch = dsi->mode.htotal - dsi->mode.hsync_end;
+
+ vfront_porch = dsi->mode.vsync_start - dsi->mode.vdisplay;
+ vsync_len = dsi->mode.vsync_end - dsi->mode.vsync_start;
+ vback_porch = dsi->mode.vtotal - dsi->mode.vsync_end;
+
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hfront_porch = %d\n", hfront_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hback_porch = %d\n", hback_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hsync_len = %d\n", hsync_len);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "hdisplay = %d\n", dsi->mode.hdisplay);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vfront_porch = %d\n", vfront_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vback_porch = %d\n", vback_porch);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vsync_len = %d\n", vsync_len);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "vactive = %d\n", dsi->mode.vdisplay);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "clock = %d kHz\n", dsi->mode.clock);
+
+ color_format = nwl_dsi_get_dpi_pixel_format(dsi->format);
+ if (color_format < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Invalid color format 0x%x\n",
+ dsi->format);
+ return color_format;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "pixel fmt = %d\n", dsi->format);
+
+ nwl_dsi_write(dsi, NWL_DSI_INTERFACE_COLOR_CODING, NWL_DSI_DPI_24_BIT);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FORMAT, color_format);
+ /*
+ * Adjusting input polarity based on the video mode results in
+ * a black screen so always pick active low:
+ */
+ nwl_dsi_write(dsi, NWL_DSI_VSYNC_POLARITY,
+ NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW);
+ nwl_dsi_write(dsi, NWL_DSI_HSYNC_POLARITY,
+ NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW);
+
+ burst_mode = (dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
+ !(dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE);
+
+ if (burst_mode) {
+ nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, NWL_DSI_VM_BURST_MODE);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL, 256);
+ } else {
+ mode = ((dsi->dsi_mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ?
+ NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES :
+ NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS);
+ nwl_dsi_write(dsi, NWL_DSI_VIDEO_MODE, mode);
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_FIFO_SEND_LEVEL,
+ dsi->mode.hdisplay);
+ }
+
+ nwl_dsi_write(dsi, NWL_DSI_HFP, hfront_porch);
+ nwl_dsi_write(dsi, NWL_DSI_HBP, hback_porch);
+ nwl_dsi_write(dsi, NWL_DSI_HSA, hsync_len);
+
+ nwl_dsi_write(dsi, NWL_DSI_ENABLE_MULT_PKTS, 0x0);
+ nwl_dsi_write(dsi, NWL_DSI_BLLP_MODE, 0x1);
+ nwl_dsi_write(dsi, NWL_DSI_USE_NULL_PKT_BLLP, 0x0);
+ nwl_dsi_write(dsi, NWL_DSI_VC, 0x0);
+
+ nwl_dsi_write(dsi, NWL_DSI_PIXEL_PAYLOAD_SIZE, dsi->mode.hdisplay);
+ nwl_dsi_write(dsi, NWL_DSI_VACTIVE, dsi->mode.vdisplay - 1);
+ nwl_dsi_write(dsi, NWL_DSI_VBP, vback_porch);
+ nwl_dsi_write(dsi, NWL_DSI_VFP, vfront_porch);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_init_interrupts(struct nwl_dsi *dsi)
+{
+ u32 irq_enable;
+
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, 0xffffffff);
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK2, 0x7);
+
+ irq_enable = ~(u32)(NWL_DSI_TX_PKT_DONE_MASK |
+ NWL_DSI_RX_PKT_HDR_RCVD_MASK |
+ NWL_DSI_TX_FIFO_OVFLW_MASK |
+ NWL_DSI_HS_TX_TIMEOUT_MASK);
+
+ nwl_dsi_write(dsi, NWL_DSI_IRQ_MASK, irq_enable);
+
+ return nwl_dsi_clear_error(dsi);
+}
+
+static int nwl_dsi_host_attach(struct mipi_dsi_host *dsi_host,
+ struct mipi_dsi_device *device)
+{
+ struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+ struct device *dev = dsi->dev;
+
+ DRM_DEV_INFO(dev, "lanes=%u, format=0x%x flags=0x%lx\n", device->lanes,
+ device->format, device->mode_flags);
+
+ if (device->lanes < 1 || device->lanes > 4)
+ return -EINVAL;
+
+ dsi->lanes = device->lanes;
+ dsi->format = device->format;
+ dsi->dsi_mode_flags = device->mode_flags;
+
+ return 0;
+}
+
+static bool nwl_dsi_read_packet(struct nwl_dsi *dsi, u32 status)
+{
+ struct device *dev = dsi->dev;
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ int err;
+ u8 *payload = xfer->msg->rx_buf;
+ u32 val;
+ u16 word_count;
+ u8 channel;
+ u8 data_type;
+
+ xfer->status = 0;
+
+ if (xfer->rx_word_count == 0) {
+ if (!(status & NWL_DSI_RX_PKT_HDR_RCVD))
+ return false;
+ /* Get the RX header and parse it */
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER);
+ err = nwl_dsi_clear_error(dsi);
+ if (err)
+ xfer->status = err;
+ word_count = NWL_DSI_WC(val);
+ channel = NWL_DSI_RX_VC(val);
+ data_type = NWL_DSI_RX_DT(val);
+
+ if (channel != xfer->msg->channel) {
+ DRM_DEV_ERROR(dev,
+ "[%02X] Channel mismatch (%u != %u)\n",
+ xfer->cmd, channel, xfer->msg->channel);
+ xfer->status = -EINVAL;
+ return true;
+ }
+
+ switch (data_type) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ fallthrough;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ if (xfer->msg->rx_len > 1) {
+ /* read second byte */
+ payload[1] = word_count >> 8;
+ ++xfer->rx_len;
+ }
+ fallthrough;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ fallthrough;
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ if (xfer->msg->rx_len > 0) {
+ /* read first byte */
+ payload[0] = word_count & 0xff;
+ ++xfer->rx_len;
+ }
+ xfer->status = xfer->rx_len;
+ return true;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ word_count &= 0xff;
+ DRM_DEV_ERROR(dev, "[%02X] DSI error report: 0x%02x\n",
+ xfer->cmd, word_count);
+ xfer->status = -EPROTO;
+ return true;
+ }
+
+ if (word_count > xfer->msg->rx_len) {
+ DRM_DEV_ERROR(dev,
+ "[%02X] Receive buffer too small: %zu (< %u)\n",
+ xfer->cmd, xfer->msg->rx_len, word_count);
+ xfer->status = -EINVAL;
+ return true;
+ }
+
+ xfer->rx_word_count = word_count;
+ } else {
+ /* Set word_count from previous header read */
+ word_count = xfer->rx_word_count;
+ }
+
+ /* If RX payload is not yet received, wait for it */
+ if (!(status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD))
+ return false;
+
+ /* Read the RX payload */
+ while (word_count >= 4) {
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+ payload[0] = (val >> 0) & 0xff;
+ payload[1] = (val >> 8) & 0xff;
+ payload[2] = (val >> 16) & 0xff;
+ payload[3] = (val >> 24) & 0xff;
+ payload += 4;
+ xfer->rx_len += 4;
+ word_count -= 4;
+ }
+
+ if (word_count > 0) {
+ val = nwl_dsi_read(dsi, NWL_DSI_RX_PAYLOAD);
+ switch (word_count) {
+ case 3:
+ payload[2] = (val >> 16) & 0xff;
+ ++xfer->rx_len;
+ fallthrough;
+ case 2:
+ payload[1] = (val >> 8) & 0xff;
+ ++xfer->rx_len;
+ fallthrough;
+ case 1:
+ payload[0] = (val >> 0) & 0xff;
+ ++xfer->rx_len;
+ break;
+ }
+ }
+
+ xfer->status = xfer->rx_len;
+ err = nwl_dsi_clear_error(dsi);
+ if (err)
+ xfer->status = err;
+
+ return true;
+}
+
+static void nwl_dsi_finish_transmission(struct nwl_dsi *dsi, u32 status)
+{
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ bool end_packet = false;
+
+ if (!xfer)
+ return;
+
+ if (xfer->direction == DSI_PACKET_SEND &&
+ status & NWL_DSI_TX_PKT_DONE) {
+ xfer->status = xfer->tx_len;
+ end_packet = true;
+ } else if (status & NWL_DSI_DPHY_DIRECTION &&
+ ((status & (NWL_DSI_RX_PKT_HDR_RCVD |
+ NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)))) {
+ end_packet = nwl_dsi_read_packet(dsi, status);
+ }
+
+ if (end_packet)
+ complete(&xfer->completed);
+}
+
+static void nwl_dsi_begin_transmission(struct nwl_dsi *dsi)
+{
+ struct nwl_dsi_transfer *xfer = dsi->xfer;
+ struct mipi_dsi_packet *pkt = &xfer->packet;
+ const u8 *payload;
+ size_t length;
+ u16 word_count;
+ u8 hs_mode;
+ u32 val;
+ u32 hs_workaround = 0;
+
+ /* Send the payload, if any */
+ length = pkt->payload_length;
+ payload = pkt->payload;
+
+ while (length >= 4) {
+ val = *(u32 *)payload;
+ hs_workaround |= !(val & 0xFFFF00);
+ nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+ payload += 4;
+ length -= 4;
+ }
+ /* Send the rest of the payload */
+ val = 0;
+ switch (length) {
+ case 3:
+ val |= payload[2] << 16;
+ fallthrough;
+ case 2:
+ val |= payload[1] << 8;
+ hs_workaround |= !(val & 0xFFFF00);
+ fallthrough;
+ case 1:
+ val |= payload[0];
+ nwl_dsi_write(dsi, NWL_DSI_TX_PAYLOAD, val);
+ break;
+ }
+ xfer->tx_len = pkt->payload_length;
+
+ /*
+ * Send the header
+ * header[0] = Virtual Channel + Data Type
+ * header[1] = Word Count LSB (LP) or first param (SP)
+ * header[2] = Word Count MSB (LP) or second param (SP)
+ */
+ word_count = pkt->header[1] | (pkt->header[2] << 8);
+ if (hs_workaround && (dsi->quirks & E11418_HS_MODE_QUIRK)) {
+ DRM_DEV_DEBUG_DRIVER(dsi->dev,
+ "Using hs mode workaround for cmd 0x%x\n",
+ xfer->cmd);
+ hs_mode = 1;
+ } else {
+ hs_mode = (xfer->msg->flags & MIPI_DSI_MSG_USE_LPM) ? 0 : 1;
+ }
+ val = NWL_DSI_WC(word_count) | NWL_DSI_TX_VC(xfer->msg->channel) |
+ NWL_DSI_TX_DT(xfer->msg->type) | NWL_DSI_HS_SEL(hs_mode) |
+ NWL_DSI_BTA_TX(xfer->need_bta);
+ nwl_dsi_write(dsi, NWL_DSI_PKT_CONTROL, val);
+
+ /* Send packet command */
+ nwl_dsi_write(dsi, NWL_DSI_SEND_PACKET, 0x1);
+}
+
+static ssize_t nwl_dsi_host_transfer(struct mipi_dsi_host *dsi_host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct nwl_dsi *dsi = container_of(dsi_host, struct nwl_dsi, dsi_host);
+ struct nwl_dsi_transfer xfer;
+ ssize_t ret = 0;
+
+ /* Create packet to be sent */
+ dsi->xfer = &xfer;
+ ret = mipi_dsi_create_packet(&xfer.packet, msg);
+ if (ret < 0) {
+ dsi->xfer = NULL;
+ return ret;
+ }
+
+ if ((msg->type & MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM ||
+ msg->type & MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM ||
+ msg->type & MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM ||
+ msg->type & MIPI_DSI_DCS_READ) &&
+ msg->rx_len > 0 && msg->rx_buf)
+ xfer.direction = DSI_PACKET_RECEIVE;
+ else
+ xfer.direction = DSI_PACKET_SEND;
+
+ xfer.need_bta = (xfer.direction == DSI_PACKET_RECEIVE);
+ xfer.need_bta |= (msg->flags & MIPI_DSI_MSG_REQ_ACK) ? 1 : 0;
+ xfer.msg = msg;
+ xfer.status = -ETIMEDOUT;
+ xfer.rx_word_count = 0;
+ xfer.rx_len = 0;
+ xfer.cmd = 0x00;
+ if (msg->tx_len > 0)
+ xfer.cmd = ((u8 *)(msg->tx_buf))[0];
+ init_completion(&xfer.completed);
+
+ ret = clk_prepare_enable(dsi->rx_esc_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable rx_esc clk: %zd\n",
+ ret);
+ return ret;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled rx_esc clk @%lu Hz\n",
+ clk_get_rate(dsi->rx_esc_clk));
+
+	/* Initiate the DSI packet transmission */
+ nwl_dsi_begin_transmission(dsi);
+
+ if (!wait_for_completion_timeout(&xfer.completed,
+ NWL_DSI_MIPI_FIFO_TIMEOUT)) {
+ DRM_DEV_ERROR(dsi_host->dev, "[%02X] DSI transfer timed out\n",
+ xfer.cmd);
+ ret = -ETIMEDOUT;
+ } else {
+ ret = xfer.status;
+ }
+
+ clk_disable_unprepare(dsi->rx_esc_clk);
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops nwl_dsi_host_ops = {
+ .attach = nwl_dsi_host_attach,
+ .transfer = nwl_dsi_host_transfer,
+};
+
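/*
 * Usage sketch: a downstream panel driver reaches nwl_dsi_host_transfer()
 * through the generic mipi_dsi helpers, e.g. (hypothetical panel code):
 *
 *	err = mipi_dsi_dcs_set_display_on(dsi_device);
 *
 * mipi_dsi_create_packet() builds the packet, the host queues it and then
 * blocks on xfer.completed until the IRQ handler sees NWL_DSI_TX_PKT_DONE
 * (or the RX data, for reads), or the 500 ms NWL_DSI_MIPI_FIFO_TIMEOUT
 * expires.
 */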
+static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
+{
+ u32 irq_status;
+ struct nwl_dsi *dsi = data;
+
+ irq_status = nwl_dsi_read(dsi, NWL_DSI_IRQ_STATUS);
+
+ if (irq_status & NWL_DSI_TX_FIFO_OVFLW)
+ DRM_DEV_ERROR_RATELIMITED(dsi->dev, "tx fifo overflow\n");
+
+ if (irq_status & NWL_DSI_HS_TX_TIMEOUT)
+ DRM_DEV_ERROR_RATELIMITED(dsi->dev, "HS tx timeout\n");
+
+ if (irq_status & NWL_DSI_TX_PKT_DONE ||
+ irq_status & NWL_DSI_RX_PKT_HDR_RCVD ||
+ irq_status & NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD)
+ nwl_dsi_finish_transmission(dsi, irq_status);
+
+ return IRQ_HANDLED;
+}
+
+static int nwl_dsi_enable(struct nwl_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
+ int ret;
+
+ if (!dsi->lanes) {
+ DRM_DEV_ERROR(dev, "Need DSI lanes: %d\n", dsi->lanes);
+ return -EINVAL;
+ }
+
+ ret = phy_init(dsi->phy);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to init DSI phy: %d\n", ret);
+ return ret;
+ }
+
+ ret = phy_configure(dsi->phy, phy_cfg);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to configure DSI phy: %d\n", ret);
+ goto uninit_phy;
+ }
+
+ ret = clk_prepare_enable(dsi->tx_esc_clk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to enable tx_esc clk: %d\n",
+ ret);
+ goto uninit_phy;
+ }
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "Enabled tx_esc clk @%lu Hz\n",
+ clk_get_rate(dsi->tx_esc_clk));
+
+ ret = nwl_dsi_config_host(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set up DSI: %d", ret);
+ goto disable_clock;
+ }
+
+ ret = nwl_dsi_config_dpi(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to set up DPI: %d", ret);
+ goto disable_clock;
+ }
+
+ ret = phy_power_on(dsi->phy);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to power on DPHY (%d)\n", ret);
+ goto disable_clock;
+ }
+
+ ret = nwl_dsi_init_interrupts(dsi);
+ if (ret < 0)
+ goto power_off_phy;
+
+ return ret;
+
+power_off_phy:
+ phy_power_off(dsi->phy);
+disable_clock:
+ clk_disable_unprepare(dsi->tx_esc_clk);
+uninit_phy:
+ phy_exit(dsi->phy);
+
+ return ret;
+}
+
+static int nwl_dsi_disable(struct nwl_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Disabling clocks and phy\n");
+
+ phy_power_off(dsi->phy);
+ phy_exit(dsi->phy);
+
+ /* Disabling the clock before the phy breaks enabling dsi again */
+ clk_disable_unprepare(dsi->tx_esc_clk);
+
+ return 0;
+}
+
+static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ nwl_dsi_disable(dsi);
+
+ ret = reset_control_assert(dsi->rst_dpi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert DPI: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_byte);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert ESC: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_esc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert BYTE: %d\n", ret);
+ return;
+ }
+ ret = reset_control_assert(dsi->rst_pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to assert PCLK: %d\n", ret);
+ return;
+ }
+
+ clk_disable_unprepare(dsi->core_clk);
+ clk_disable_unprepare(dsi->lcdif_clk);
+
+ pm_runtime_put(dsi->dev);
+}
+
+static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
+ const struct drm_display_mode *mode,
+ union phy_configure_opts *phy_opts)
+{
+ unsigned long rate;
+ int ret;
+
+ if (dsi->lanes < 1 || dsi->lanes > 4)
+ return -EINVAL;
+
+ /*
+ * So far the DPHY spec minimal timings work for both mixel
+ * dphy and nwl dsi host
+ */
+ ret = phy_mipi_dphy_get_default_config(mode->clock * 1000,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes,
+ &phy_opts->mipi_dphy);
+ if (ret < 0)
+ return ret;
+
+ rate = clk_get_rate(dsi->tx_esc_clk);
+ DRM_DEV_DEBUG_DRIVER(dsi->dev, "LP clk is @%lu Hz\n", rate);
+ phy_opts->mipi_dphy.lp_clk_rate = rate;
+
+ return 0;
+}
+
+static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* At least LCDIF + NWL needs active high sync */
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+ return true;
+}
+
+static enum drm_mode_status
+nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
+
+ if (mode->clock * bpp > 15000000 * dsi->lanes)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock * bpp < 80000 * dsi->lanes)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
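+
+/*
+ * Worked example (illustrative, values assumed): a 148500 kHz mode at
+ * 24 bpp on four lanes gives 148500 * 24 = 3564000, which falls between
+ * 80000 * 4 and 15000000 * 4, so the checks above return MODE_OK.
+ */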
+
+static void
+nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ struct device *dev = dsi->dev;
+ union phy_configure_opts new_cfg;
+ unsigned long phy_ref_rate;
+ int ret;
+
+ ret = nwl_dsi_get_dphy_params(dsi, adjusted_mode, &new_cfg);
+ if (ret < 0)
+ return;
+
+ /*
+ * If hs clock is unchanged, we're all good - all parameters are
+	 * derived from it at the moment.
+ */
+ if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate)
+ return;
+
+ phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
+ DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
+ /* Save the new desired phy config */
+ memcpy(&dsi->phy_cfg, &new_cfg, sizeof(new_cfg));
+
+ memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
+ drm_mode_debug_printmodeline(adjusted_mode);
+}
+
+static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ pm_runtime_get_sync(dsi->dev);
+
+ if (clk_prepare_enable(dsi->lcdif_clk) < 0)
+ return;
+ if (clk_prepare_enable(dsi->core_clk) < 0)
+ return;
+
+ /* Step 1 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_pclk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret);
+ return;
+ }
+
+ /* Step 2 from DSI reset-out instructions */
+ nwl_dsi_enable(dsi);
+
+ /* Step 3 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_esc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret);
+ return;
+ }
+ ret = reset_control_deassert(dsi->rst_byte);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret);
+ return;
+ }
+}
+
+static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ int ret;
+
+ /* Step 5 from DSI reset-out instructions */
+ ret = reset_control_deassert(dsi->rst_dpi);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to deassert DPI: %d\n", ret);
+}
+
+static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+ struct drm_bridge *panel_bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+		DRM_ERROR("Fix bridge driver to make connector optional!\n");
+ return -EINVAL;
+ }
+
+ ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
+ &panel_bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ panel_bridge = drm_panel_bridge_add(panel);
+ if (IS_ERR(panel_bridge))
+ return PTR_ERR(panel_bridge);
+ }
+ dsi->panel_bridge = panel_bridge;
+
+ if (!dsi->panel_bridge)
+ return -EPROBE_DEFER;
+
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
+}
+
+static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
+{
+	struct nwl_dsi *dsi = bridge_to_dsi(bridge);
+
+ drm_of_panel_bridge_remove(dsi->dev->of_node, 1, 0);
+}
+
+static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
+ .pre_enable = nwl_dsi_bridge_pre_enable,
+ .enable = nwl_dsi_bridge_enable,
+ .disable = nwl_dsi_bridge_disable,
+ .mode_fixup = nwl_dsi_bridge_mode_fixup,
+ .mode_set = nwl_dsi_bridge_mode_set,
+ .mode_valid = nwl_dsi_bridge_mode_valid,
+ .attach = nwl_dsi_bridge_attach,
+ .detach = nwl_dsi_bridge_detach,
+};
+
+static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
+{
+ struct platform_device *pdev = to_platform_device(dsi->dev);
+ struct clk *clk;
+ void __iomem *base;
+ int ret;
+
+ dsi->phy = devm_phy_get(dsi->dev, "dphy");
+ if (IS_ERR(dsi->phy)) {
+ ret = PTR_ERR(dsi->phy);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dsi->dev, "Could not get PHY: %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dsi->dev, "lcdif");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get lcdif clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->lcdif_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "core");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get core clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->core_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "phy_ref");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get phy_ref clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->phy_ref_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "rx_esc");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get rx_esc clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->rx_esc_clk = clk;
+
+ clk = devm_clk_get(dsi->dev, "tx_esc");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ DRM_DEV_ERROR(dsi->dev, "Failed to get tx_esc clock: %d\n",
+ ret);
+ return ret;
+ }
+ dsi->tx_esc_clk = clk;
+
+ dsi->mux = devm_mux_control_get(dsi->dev, NULL);
+ if (IS_ERR(dsi->mux)) {
+ ret = PTR_ERR(dsi->mux);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dsi->dev, "Failed to get mux: %d\n", ret);
+ return ret;
+ }
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ dsi->regmap =
+ devm_regmap_init_mmio(dsi->dev, base, &nwl_dsi_regmap_config);
+ if (IS_ERR(dsi->regmap)) {
+ ret = PTR_ERR(dsi->regmap);
+ DRM_DEV_ERROR(dsi->dev, "Failed to create NWL DSI regmap: %d\n",
+ ret);
+ return ret;
+ }
+
+ dsi->irq = platform_get_irq(pdev, 0);
+ if (dsi->irq < 0) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get device IRQ: %d\n",
+ dsi->irq);
+ return dsi->irq;
+ }
+
+ dsi->rst_pclk = devm_reset_control_get_exclusive(dsi->dev, "pclk");
+ if (IS_ERR(dsi->rst_pclk)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get pclk reset: %ld\n",
+ PTR_ERR(dsi->rst_pclk));
+ return PTR_ERR(dsi->rst_pclk);
+ }
+ dsi->rst_byte = devm_reset_control_get_exclusive(dsi->dev, "byte");
+ if (IS_ERR(dsi->rst_byte)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get byte reset: %ld\n",
+ PTR_ERR(dsi->rst_byte));
+ return PTR_ERR(dsi->rst_byte);
+ }
+ dsi->rst_esc = devm_reset_control_get_exclusive(dsi->dev, "esc");
+ if (IS_ERR(dsi->rst_esc)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get esc reset: %ld\n",
+ PTR_ERR(dsi->rst_esc));
+ return PTR_ERR(dsi->rst_esc);
+ }
+ dsi->rst_dpi = devm_reset_control_get_exclusive(dsi->dev, "dpi");
+ if (IS_ERR(dsi->rst_dpi)) {
+ DRM_DEV_ERROR(dsi->dev, "Failed to get dpi reset: %ld\n",
+ PTR_ERR(dsi->rst_dpi));
+ return PTR_ERR(dsi->rst_dpi);
+ }
+ return 0;
+}
+
+static int nwl_dsi_select_input(struct nwl_dsi *dsi)
+{
+ struct device_node *remote;
+ u32 use_dcss = 1;
+ int ret;
+
+ remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+ NWL_DSI_ENDPOINT_LCDIF);
+ if (remote) {
+ use_dcss = 0;
+ } else {
+ remote = of_graph_get_remote_node(dsi->dev->of_node, 0,
+ NWL_DSI_ENDPOINT_DCSS);
+ if (!remote) {
+ DRM_DEV_ERROR(dsi->dev,
+ "No valid input endpoint found\n");
+ return -EINVAL;
+ }
+ }
+
+ DRM_DEV_INFO(dsi->dev, "Using %s as input source\n",
+ (use_dcss) ? "DCSS" : "LCDIF");
+ ret = mux_control_try_select(dsi->mux, use_dcss);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to select input: %d\n", ret);
+
+ of_node_put(remote);
+ return ret;
+}
+
+static int nwl_dsi_deselect_input(struct nwl_dsi *dsi)
+{
+ int ret;
+
+ ret = mux_control_deselect(dsi->mux);
+ if (ret < 0)
+ DRM_DEV_ERROR(dsi->dev, "Failed to deselect input: %d\n", ret);
+
+ return ret;
+}
+
+static const struct drm_bridge_timings nwl_dsi_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_DE_LOW,
+};
+
+static const struct of_device_id nwl_dsi_dt_ids[] = {
+ { .compatible = "fsl,imx8mq-nwl-dsi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nwl_dsi_dt_ids);
+
+static const struct soc_device_attribute nwl_dsi_quirks_match[] = {
+ { .soc_id = "i.MX8MQ", .revision = "2.0",
+ .data = (void *)E11418_HS_MODE_QUIRK },
+ { /* sentinel. */ },
+};
+
+static int nwl_dsi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct soc_device_attribute *attr;
+ struct nwl_dsi *dsi;
+ int ret;
+
+ dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
+ if (!dsi)
+ return -ENOMEM;
+
+ dsi->dev = dev;
+
+ ret = nwl_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, dsi->irq, nwl_dsi_irq_handler, 0,
+ dev_name(dev), dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to request IRQ %d: %d\n", dsi->irq,
+ ret);
+ return ret;
+ }
+
+ dsi->dsi_host.ops = &nwl_dsi_host_ops;
+ dsi->dsi_host.dev = dev;
+ ret = mipi_dsi_host_register(&dsi->dsi_host);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "Failed to register MIPI host: %d\n", ret);
+ return ret;
+ }
+
+ attr = soc_device_match(nwl_dsi_quirks_match);
+ if (attr)
+ dsi->quirks = (uintptr_t)attr->data;
+
+ dsi->bridge.driver_private = dsi;
+ dsi->bridge.funcs = &nwl_dsi_bridge_funcs;
+ dsi->bridge.of_node = dev->of_node;
+ dsi->bridge.timings = &nwl_dsi_timings;
+
+ dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
+
+ ret = nwl_dsi_select_input(dsi);
+ if (ret < 0) {
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ return ret;
+ }
+
+ drm_bridge_add(&dsi->bridge);
+ return 0;
+}
+
+static int nwl_dsi_remove(struct platform_device *pdev)
+{
+ struct nwl_dsi *dsi = platform_get_drvdata(pdev);
+
+ nwl_dsi_deselect_input(dsi);
+ mipi_dsi_host_unregister(&dsi->dsi_host);
+ drm_bridge_remove(&dsi->bridge);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver nwl_dsi_driver = {
+ .probe = nwl_dsi_probe,
+ .remove = nwl_dsi_remove,
+ .driver = {
+ .of_match_table = nwl_dsi_dt_ids,
+ .name = DRV_NAME,
+ },
+};
+
+module_platform_driver(nwl_dsi_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_AUTHOR("Purism SPC");
+MODULE_DESCRIPTION("Northwest Logic MIPI-DSI driver");
+MODULE_LICENSE("GPL"); /* GPLv2 or later */
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.h b/drivers/gpu/drm/bridge/nwl-dsi.h
new file mode 100644
index 000000000000..a247a8a11c7c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/nwl-dsi.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * NWL MIPI DSI host driver
+ *
+ * Copyright (C) 2017 NXP
+ * Copyright (C) 2019 Purism SPC
+ */
+#ifndef __NWL_DSI_H__
+#define __NWL_DSI_H__
+
+/* DSI HOST registers */
+#define NWL_DSI_CFG_NUM_LANES 0x0
+#define NWL_DSI_CFG_NONCONTINUOUS_CLK 0x4
+#define NWL_DSI_CFG_T_PRE 0x8
+#define NWL_DSI_CFG_T_POST 0xc
+#define NWL_DSI_CFG_TX_GAP 0x10
+#define NWL_DSI_CFG_AUTOINSERT_EOTP 0x14
+#define NWL_DSI_CFG_EXTRA_CMDS_AFTER_EOTP 0x18
+#define NWL_DSI_CFG_HTX_TO_COUNT 0x1c
+#define NWL_DSI_CFG_LRX_H_TO_COUNT 0x20
+#define NWL_DSI_CFG_BTA_H_TO_COUNT 0x24
+#define NWL_DSI_CFG_TWAKEUP 0x28
+#define NWL_DSI_CFG_STATUS_OUT 0x2c
+#define NWL_DSI_RX_ERROR_STATUS 0x30
+
+/* DSI DPI registers */
+#define NWL_DSI_PIXEL_PAYLOAD_SIZE 0x200
+#define NWL_DSI_PIXEL_FIFO_SEND_LEVEL 0x204
+#define NWL_DSI_INTERFACE_COLOR_CODING 0x208
+#define NWL_DSI_PIXEL_FORMAT 0x20c
+#define NWL_DSI_VSYNC_POLARITY 0x210
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_LOW 0
+#define NWL_DSI_VSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+
+#define NWL_DSI_HSYNC_POLARITY 0x214
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_LOW 0
+#define NWL_DSI_HSYNC_POLARITY_ACTIVE_HIGH BIT(1)
+
+#define NWL_DSI_VIDEO_MODE 0x218
+#define NWL_DSI_HFP 0x21c
+#define NWL_DSI_HBP 0x220
+#define NWL_DSI_HSA 0x224
+#define NWL_DSI_ENABLE_MULT_PKTS 0x228
+#define NWL_DSI_VBP 0x22c
+#define NWL_DSI_VFP 0x230
+#define NWL_DSI_BLLP_MODE 0x234
+#define NWL_DSI_USE_NULL_PKT_BLLP 0x238
+#define NWL_DSI_VACTIVE 0x23c
+#define NWL_DSI_VC 0x240
+
+/* DSI APB PKT control */
+#define NWL_DSI_TX_PAYLOAD 0x280
+#define NWL_DSI_PKT_CONTROL 0x284
+#define NWL_DSI_SEND_PACKET 0x288
+#define NWL_DSI_PKT_STATUS 0x28c
+#define NWL_DSI_PKT_FIFO_WR_LEVEL 0x290
+#define NWL_DSI_PKT_FIFO_RD_LEVEL 0x294
+#define NWL_DSI_RX_PAYLOAD 0x298
+#define NWL_DSI_RX_PKT_HEADER 0x29c
+
+/* DSI IRQ handling */
+#define NWL_DSI_IRQ_STATUS 0x2a0
+#define NWL_DSI_SM_NOT_IDLE BIT(0)
+#define NWL_DSI_TX_PKT_DONE BIT(1)
+#define NWL_DSI_DPHY_DIRECTION BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD BIT(8)
+#define NWL_DSI_BTA_TIMEOUT BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT BIT(31)
+
+#define NWL_DSI_IRQ_STATUS2 0x2a4
+#define NWL_DSI_SINGLE_BIT_ECC_ERR BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR BIT(1)
+#define NWL_DSI_CRC_ERR BIT(2)
+
+#define NWL_DSI_IRQ_MASK 0x2a8
+#define NWL_DSI_SM_NOT_IDLE_MASK BIT(0)
+#define NWL_DSI_TX_PKT_DONE_MASK BIT(1)
+#define NWL_DSI_DPHY_DIRECTION_MASK BIT(2)
+#define NWL_DSI_TX_FIFO_OVFLW_MASK BIT(3)
+#define NWL_DSI_TX_FIFO_UDFLW_MASK BIT(4)
+#define NWL_DSI_RX_FIFO_OVFLW_MASK BIT(5)
+#define NWL_DSI_RX_FIFO_UDFLW_MASK BIT(6)
+#define NWL_DSI_RX_PKT_HDR_RCVD_MASK BIT(7)
+#define NWL_DSI_RX_PKT_PAYLOAD_DATA_RCVD_MASK BIT(8)
+#define NWL_DSI_BTA_TIMEOUT_MASK BIT(29)
+#define NWL_DSI_LP_RX_TIMEOUT_MASK BIT(30)
+#define NWL_DSI_HS_TX_TIMEOUT_MASK BIT(31)
+
+#define NWL_DSI_IRQ_MASK2 0x2ac
+#define NWL_DSI_SINGLE_BIT_ECC_ERR_MASK BIT(0)
+#define NWL_DSI_MULTI_BIT_ECC_ERR_MASK BIT(1)
+#define NWL_DSI_CRC_ERR_MASK BIT(2)
+
+/*
+ * PKT_CONTROL format:
+ * [15: 0] - word count
+ * [17:16] - virtual channel
+ * [23:18] - data type
+ * [24] - LP or HS select (0 - LP, 1 - HS)
+ * [25] - perform BTA after packet is sent
+ * [26] - perform BTA only, no packet tx
+ */
+#define NWL_DSI_WC(x) FIELD_PREP(GENMASK(15, 0), (x))
+#define NWL_DSI_TX_VC(x) FIELD_PREP(GENMASK(17, 16), (x))
+#define NWL_DSI_TX_DT(x) FIELD_PREP(GENMASK(23, 18), (x))
+#define NWL_DSI_HS_SEL(x) FIELD_PREP(GENMASK(24, 24), (x))
+#define NWL_DSI_BTA_TX(x) FIELD_PREP(GENMASK(25, 25), (x))
+#define NWL_DSI_BTA_NO_TX(x) FIELD_PREP(GENMASK(26, 26), (x))
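+
+/*
+ * Illustrative example (values assumed, not mandated by the IP): a
+ * 4-byte DCS long write on virtual channel 0, sent in HS mode with no
+ * BTA, would be encoded as
+ *
+ *   NWL_DSI_WC(4) | NWL_DSI_TX_VC(0) |
+ *   NWL_DSI_TX_DT(MIPI_DSI_DCS_LONG_WRITE) |
+ *   NWL_DSI_HS_SEL(1) | NWL_DSI_BTA_TX(0)
+ */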
+
+/*
+ * RX_PKT_HEADER format:
+ * [15: 0] - word count
+ * [21:16] - data type
+ * [23:22] - virtual channel
+ */
+#define NWL_DSI_RX_DT(x) FIELD_GET(GENMASK(21, 16), (x))
+#define NWL_DSI_RX_VC(x) FIELD_GET(GENMASK(23, 22), (x))
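+
+/*
+ * Illustrative decode: given hdr = nwl_dsi_read(dsi, NWL_DSI_RX_PKT_HEADER),
+ * NWL_DSI_RX_DT(hdr) yields the data type and NWL_DSI_RX_VC(hdr) the
+ * virtual channel; bits [15:0] carry the word count (or, for short
+ * packets, the payload bytes).
+ */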
+
+/* DSI Video mode */
+#define NWL_DSI_VM_BURST_MODE_WITH_SYNC_PULSES 0
+#define NWL_DSI_VM_NON_BURST_MODE_WITH_SYNC_EVENTS BIT(0)
+#define NWL_DSI_VM_BURST_MODE BIT(1)
+
+/* DPI color coding */
+#define NWL_DSI_DPI_16_BIT_565_PACKED 0
+#define NWL_DSI_DPI_16_BIT_565_ALIGNED 1
+#define NWL_DSI_DPI_16_BIT_565_SHIFTED 2
+#define NWL_DSI_DPI_18_BIT_PACKED 3
+#define NWL_DSI_DPI_18_BIT_ALIGNED 4
+#define NWL_DSI_DPI_24_BIT 5
+
+/* DPI Pixel format */
+#define NWL_DSI_PIXEL_FORMAT_16 0
+#define NWL_DSI_PIXEL_FORMAT_18 BIT(0)
+#define NWL_DSI_PIXEL_FORMAT_18L BIT(1)
+#define NWL_DSI_PIXEL_FORMAT_24 (BIT(0) | BIT(1))
+
+#endif /* __NWL_DSI_H__ */
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index 8461ee8304ba..1e63ed6b18aa 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -166,7 +166,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
*
* The connector type is set to @panel->connector_type, which must be set to a
* known type. Calling this function with a panel whose connector type is
- * DRM_MODE_CONNECTOR_Unknown will return NULL.
+ * DRM_MODE_CONNECTOR_Unknown will return ERR_PTR(-EINVAL).
*
* See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
@@ -174,7 +174,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
{
if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
- return NULL;
+ return ERR_PTR(-EINVAL);
return drm_panel_bridge_add_typed(panel, panel->connector_type);
}
@@ -265,7 +265,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
struct drm_panel *panel)
{
if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown))
- return NULL;
+ return ERR_PTR(-EINVAL);
return devm_drm_panel_bridge_add_typed(dev, panel,
panel->connector_type);
@@ -311,6 +311,7 @@ EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed);
/**
* drm_panel_bridge_connector - return the connector for the panel bridge
+ * @bridge: The drm_bridge.
*
* drm_panel_bridge creates the connector.
* This function gives external access to the connector.
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index d3a53442d449..4b099196afeb 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -268,8 +268,6 @@ static int ps8640_probe(struct i2c_client *client)
if (!panel)
return -ENODEV;
- panel->connector_type = DRM_MODE_CONNECTOR_eDP;
-
ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
if (IS_ERR(ps_bridge->panel_bridge))
return PTR_ERR(ps_bridge->panel_bridge);
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index 6dad025f8da7..19d8ae59ea03 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -360,7 +360,7 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
buf[0] = pixel_clock_10kHz & 0xff;
buf[1] = pixel_clock_10kHz >> 8;
- buf[2] = adj->vrefresh;
+ buf[2] = drm_mode_vrefresh(adj);
buf[3] = 0x00;
buf[4] = adj->hdisplay;
buf[5] = adj->hdisplay >> 8;
diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c
index f81f81b7051f..b1258f0ed205 100644
--- a/drivers/gpu/drm/bridge/sii9234.c
+++ b/drivers/gpu/drm/bridge/sii9234.c
@@ -836,7 +836,8 @@ static int sii9234_init_resources(struct sii9234 *ctx,
ctx->supplies[3].supply = "cvcc12";
ret = devm_regulator_bulk_get(ctx->dev, 4, ctx->supplies);
if (ret) {
- dev_err(ctx->dev, "regulator_bulk failed\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(ctx->dev, "regulator_bulk failed\n");
return ret;
}
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
index dd56996fe9c7..d0db1acf11d7 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c
@@ -630,7 +630,7 @@ static struct platform_driver snd_dw_hdmi_driver = {
module_platform_driver(snd_dw_hdmi_driver);
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_DESCRIPTION("Synopsis Designware HDMI AHB ALSA interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 383b1073d7de..30681398cfb0 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -92,6 +92,12 @@ static const u16 csc_coeff_rgb_in_eitu709[3][4] = {
{ 0x6756, 0x78ab, 0x2000, 0x0200 }
};
+static const u16 csc_coeff_rgb_full_to_rgb_limited[3][4] = {
+ { 0x1b7c, 0x0000, 0x0000, 0x0020 },
+ { 0x0000, 0x1b7c, 0x0000, 0x0020 },
+ { 0x0000, 0x0000, 0x1b7c, 0x0020 }
+};
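+
+/*
+ * Note: these coefficients follow the same fixed-point convention as
+ * csc_coeff_default, where 0x2000 represents 1.0; 0x1b7c / 0x2000 is
+ * approximately 219/255, the scale factor from full-range to
+ * limited-range RGB.
+ */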
+
struct hdmi_vmode {
bool mdataenablepolarity;
@@ -109,6 +115,7 @@ struct hdmi_data_info {
unsigned int pix_repet_factor;
unsigned int hdcp_enable;
struct hdmi_vmode video_mode;
+ bool rgb_limited_range;
};
struct dw_hdmi_i2c {
@@ -956,7 +963,14 @@ static void hdmi_video_sample(struct dw_hdmi *hdmi)
static int is_color_space_conversion(struct dw_hdmi *hdmi)
{
- return hdmi->hdmi_data.enc_in_bus_format != hdmi->hdmi_data.enc_out_bus_format;
+ struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
+ bool is_input_rgb, is_output_rgb;
+
+ is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_in_bus_format);
+ is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi_data->enc_out_bus_format);
+
+ return (is_input_rgb != is_output_rgb) ||
+ (is_input_rgb && is_output_rgb && hdmi_data->rgb_limited_range);
}
static int is_color_space_decimation(struct dw_hdmi *hdmi)
@@ -983,28 +997,37 @@ static int is_color_space_interpolation(struct dw_hdmi *hdmi)
return 0;
}
+static bool is_csc_needed(struct dw_hdmi *hdmi)
+{
+ return is_color_space_conversion(hdmi) ||
+ is_color_space_decimation(hdmi) ||
+ is_color_space_interpolation(hdmi);
+}
+
static void dw_hdmi_update_csc_coeffs(struct dw_hdmi *hdmi)
{
const u16 (*csc_coeff)[3][4] = &csc_coeff_default;
+ bool is_input_rgb, is_output_rgb;
unsigned i;
u32 csc_scale = 1;
- if (is_color_space_conversion(hdmi)) {
- if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
- if (hdmi->hdmi_data.enc_out_encoding ==
- V4L2_YCBCR_ENC_601)
- csc_coeff = &csc_coeff_rgb_out_eitu601;
- else
- csc_coeff = &csc_coeff_rgb_out_eitu709;
- } else if (hdmi_bus_fmt_is_rgb(
- hdmi->hdmi_data.enc_in_bus_format)) {
- if (hdmi->hdmi_data.enc_out_encoding ==
- V4L2_YCBCR_ENC_601)
- csc_coeff = &csc_coeff_rgb_in_eitu601;
- else
- csc_coeff = &csc_coeff_rgb_in_eitu709;
- csc_scale = 0;
- }
+ is_input_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_in_bus_format);
+ is_output_rgb = hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format);
+
+ if (!is_input_rgb && is_output_rgb) {
+ if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+ csc_coeff = &csc_coeff_rgb_out_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_out_eitu709;
+ } else if (is_input_rgb && !is_output_rgb) {
+ if (hdmi->hdmi_data.enc_out_encoding == V4L2_YCBCR_ENC_601)
+ csc_coeff = &csc_coeff_rgb_in_eitu601;
+ else
+ csc_coeff = &csc_coeff_rgb_in_eitu709;
+ csc_scale = 0;
+ } else if (is_input_rgb && is_output_rgb &&
+ hdmi->hdmi_data.rgb_limited_range) {
+ csc_coeff = &csc_coeff_rgb_full_to_rgb_limited;
}
/* The CSC registers are sequential, alternating MSB then LSB */
@@ -1614,6 +1637,18 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
drm_hdmi_avi_infoframe_from_display_mode(&frame,
&hdmi->connector, mode);
+ if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format)) {
+ drm_hdmi_avi_infoframe_quant_range(&frame, &hdmi->connector,
+ mode,
+ hdmi->hdmi_data.rgb_limited_range ?
+ HDMI_QUANTIZATION_RANGE_LIMITED :
+ HDMI_QUANTIZATION_RANGE_FULL);
+ } else {
+ frame.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT;
+ frame.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ }
+
if (hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format))
frame.colorspace = HDMI_COLORSPACE_YUV444;
else if (hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format))
@@ -1654,8 +1689,6 @@ static void hdmi_config_AVI(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
HDMI_EXTENDED_COLORIMETRY_XV_YCC_601;
}
- frame.scan_mode = HDMI_SCAN_MODE_NONE;
-
/*
* The Designware IP uses a different byte format from standard
* AVI info frames, though generally the bits are in the correct
@@ -2010,18 +2043,19 @@ static void dw_hdmi_enable_video_path(struct dw_hdmi *hdmi)
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
/* Enable csc path */
- if (is_color_space_conversion(hdmi)) {
+ if (is_csc_needed(hdmi)) {
hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE;
hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
- }
- /* Enable color space conversion if needed */
- if (is_color_space_conversion(hdmi))
hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_IN_PATH,
HDMI_MC_FLOWCTRL);
- else
+ } else {
+ hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CSCCLK_DISABLE;
+ hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS);
+
hdmi_writeb(hdmi, HDMI_MC_FLOWCTRL_FEED_THROUGH_OFF_CSC_BYPASS,
HDMI_MC_FLOWCTRL);
+ }
}
/* Workaround to clear the overflow condition */
@@ -2119,6 +2153,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ hdmi->hdmi_data.rgb_limited_range = hdmi->sink_is_hdmi &&
+ drm_default_rgb_quant_range(mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
hdmi->hdmi_data.video_mode.mdataenablepolarity = true;
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
index 1b39e8d37834..6650fe4cfc20 100644
--- a/drivers/gpu/drm/bridge/tc358768.c
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -178,6 +178,8 @@ static int tc358768_clear_error(struct tc358768_priv *priv)
static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
{
+ /* work around https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 */
+ int tmpval = val;
size_t count = 2;
if (priv->error)
@@ -187,7 +189,7 @@ static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
if (reg < 0x100 || reg >= 0x600)
count = 1;
- priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+ priv->error = regmap_bulk_write(priv->regmap, reg, &tmpval, count);
}
static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 6ad688b320ae..bd3eb0a09732 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -4,9 +4,11 @@
* datasheet: http://www.ti.com/lit/ds/symlink/sn65dsi86.pdf
*/
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
#include <linux/i2c.h>
#include <linux/iopoll.h>
#include <linux/module.h>
@@ -48,12 +50,24 @@
#define SN_CHA_VERTICAL_BACK_PORCH_REG 0x36
#define SN_CHA_HORIZONTAL_FRONT_PORCH_REG 0x38
#define SN_CHA_VERTICAL_FRONT_PORCH_REG 0x3A
+#define SN_LN_ASSIGN_REG 0x59
+#define LN_ASSIGN_WIDTH 2
#define SN_ENH_FRAME_REG 0x5A
#define VSTREAM_ENABLE BIT(3)
+#define LN_POLRS_OFFSET 4
+#define LN_POLRS_MASK 0xf0
#define SN_DATA_FORMAT_REG 0x5B
#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
+#define SN_GPIO_IO_REG 0x5E
+#define SN_GPIO_INPUT_SHIFT 4
+#define SN_GPIO_OUTPUT_SHIFT 0
+#define SN_GPIO_CTRL_REG 0x5F
+#define SN_GPIO_MUX_INPUT 0
+#define SN_GPIO_MUX_OUTPUT 1
+#define SN_GPIO_MUX_SPECIAL 2
+#define SN_GPIO_MUX_MASK 0x3
#define SN_AUX_WDATA_REG(x) (0x64 + (x))
#define SN_AUX_ADDR_19_16_REG 0x74
#define SN_AUX_ADDR_15_8_REG 0x75
@@ -88,6 +102,38 @@
#define SN_REGULATOR_SUPPLY_NUM 4
+#define SN_MAX_DP_LANES 4
+#define SN_NUM_GPIOS 4
+#define SN_GPIO_PHYSICAL_OFFSET 1
+
+/**
+ * struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver.
+ * @dev: Pointer to our device.
+ * @regmap: Regmap for accessing i2c.
+ * @aux: Our aux channel.
+ * @bridge: Our bridge.
+ * @connector: Our connector.
+ * @debugfs: Used for managing our debugfs.
+ * @host_node: Remote DSI node.
+ * @dsi: Our MIPI DSI source.
+ * @refclk: Our reference clock.
+ * @panel: Our panel.
+ * @enable_gpio: The GPIO we toggle to enable the bridge.
+ * @supplies: Data for bulk enabling/disabling our regulators.
+ * @dp_lanes: Count of dp_lanes we're using.
+ * @ln_assign: Value to program to the LN_ASSIGN register.
+ * @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
+ *
+ * @gchip: If we expose our GPIOs, this is used.
+ * @gchip_output: A cache of whether we've set GPIOs to output. This
+ * serves double-duty of keeping track of the direction and
+ * also keeping track of whether we've incremented the
+ * pm_runtime reference count for this pin, which we do
+ * whenever a pin is configured as an output. This is a
+ * bitmap so we can do atomic ops on it without an extra
+ * lock so concurrent users of our 4 GPIOs don't stomp on
+ * each other's read-modify-write.
+ */
struct ti_sn_bridge {
struct device *dev;
struct regmap *regmap;
@@ -102,6 +148,13 @@ struct ti_sn_bridge {
struct gpio_desc *enable_gpio;
struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
int dp_lanes;
+ u8 ln_assign;
+ u8 ln_polrs;
+
+#if defined(CONFIG_OF_GPIO)
+ struct gpio_chip gchip;
+ DECLARE_BITMAP(gchip_output, SN_NUM_GPIOS);
+#endif
};
static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
@@ -451,7 +504,7 @@ static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
return 24;
}
-/**
+/*
* LUT index corresponds to register value and
* LUT values corresponds to dp data rate supported
* by the bridge in Mbps unit.
@@ -475,7 +528,7 @@ static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);
for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
- if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
+ if (ti_sn_bridge_dp_rate_lut[i] >= dp_rate_mhz)
break;
return i;
@@ -666,26 +719,20 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
int dp_rate_idx;
unsigned int val;
int ret = -EINVAL;
+ int max_dp_lanes;
- /*
- * Run with the maximum number of lanes that the DP sink supports.
- *
- * Depending use cases, we might want to revisit this later because:
- * - It's plausible that someone may have run fewer lines to the
- * sink than the sink actually supports, assuming that the lines
- * will just be driven at a higher rate.
- * - The DP spec seems to indicate that it's more important to minimize
- * the number of lanes than the link rate.
- *
- * If we do revisit, it would be important to measure the power impact.
- */
- pdata->dp_lanes = ti_sn_get_max_lanes(pdata);
+ max_dp_lanes = ti_sn_get_max_lanes(pdata);
+ pdata->dp_lanes = min(pdata->dp_lanes, max_dp_lanes);
/* DSI_A lane config */
- val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ val = CHA_DSI_LANES(SN_MAX_DP_LANES - pdata->dsi->lanes);
regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
CHA_DSI_LANES_MASK, val);
+ regmap_write(pdata->regmap, SN_LN_ASSIGN_REG, pdata->ln_assign);
+ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, LN_POLRS_MASK,
+ pdata->ln_polrs << LN_POLRS_OFFSET);
+
/* set dsi clk frequency value */
ti_sn_bridge_set_dsi_rate(pdata);
@@ -827,6 +874,12 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
buf[i]);
}
+ /* Clear old status bits before start so we don't get confused */
+ regmap_write(pdata->regmap, SN_AUX_CMD_STATUS_REG,
+ AUX_IRQ_STATUS_NAT_I2C_FAIL |
+ AUX_IRQ_STATUS_AUX_RPLY_TOUT |
+ AUX_IRQ_STATUS_AUX_SHORT);
+
regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
@@ -874,6 +927,236 @@ static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata)
return 0;
}
+#if defined(CONFIG_OF_GPIO)
+
+static int ti_sn_bridge_of_xlate(struct gpio_chip *chip,
+ const struct of_phandle_args *gpiospec,
+ u32 *flags)
+{
+ if (WARN_ON(gpiospec->args_count < chip->of_gpio_n_cells))
+ return -EINVAL;
+
+ if (gpiospec->args[0] > chip->ngpio || gpiospec->args[0] < 1)
+ return -EINVAL;
+
+ if (flags)
+ *flags = gpiospec->args[1];
+
+ return gpiospec->args[0] - SN_GPIO_PHYSICAL_OFFSET;
+}
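+
+/*
+ * Example (illustrative, node label assumed): a consumer phandle such as
+ * <&sn65dsi86 1 GPIO_ACTIVE_HIGH> names the pin the datasheet calls
+ * GPIO1, which the xlate above maps to gpiochip offset 0.
+ */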
+
+static int ti_sn_bridge_gpio_get_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+
+ /*
+ * We already have to keep track of the direction because we use
+ * that to figure out whether we've powered the device. We can
+ * just return that rather than (maybe) powering up the device
+ * to ask its direction.
+ */
+ return test_bit(offset, pdata->gchip_output) ?
+ GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+}
+
+static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ unsigned int val;
+ int ret;
+
+ /*
+ * When the pin is an input we don't forcibly keep the bridge
+ * powered--we just power it on to read the pin. NOTE: part of
+ * the reason this works is that the bridge defaults (when
+ * powered back on) to all 4 GPIOs being configured as GPIO input.
+ * Also note that if something else is keeping the chip powered the
+ * pm_runtime functions are lightweight increments of a refcount.
+ */
+ pm_runtime_get_sync(pdata->dev);
+ ret = regmap_read(pdata->regmap, SN_GPIO_IO_REG, &val);
+ pm_runtime_put(pdata->dev);
+
+ if (ret)
+ return ret;
+
+ return !!(val & BIT(SN_GPIO_INPUT_SHIFT + offset));
+}
+
+static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int val)
+{
+ struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ int ret;
+
+ if (!test_bit(offset, pdata->gchip_output)) {
+ dev_err(pdata->dev, "Ignoring GPIO set while input\n");
+ return;
+ }
+
+ val &= 1;
+ ret = regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG,
+ BIT(SN_GPIO_OUTPUT_SHIFT + offset),
+ val << (SN_GPIO_OUTPUT_SHIFT + offset));
+ if (ret)
+ dev_warn(pdata->dev,
+ "Failed to set bridge GPIO %u: %d\n", offset, ret);
+}
+
+static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ int shift = offset * 2;
+ int ret;
+
+ if (!test_and_clear_bit(offset, pdata->gchip_output))
+ return 0;
+
+ ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG,
+ SN_GPIO_MUX_MASK << shift,
+ SN_GPIO_MUX_INPUT << shift);
+ if (ret) {
+ set_bit(offset, pdata->gchip_output);
+ return ret;
+ }
+
+ /*
+ * NOTE: if nobody else is powering the device this may fully power
+ * it off and when it comes back it will have lost all state, but
+ * that's OK because the default is input and we're now an input.
+ */
+ pm_runtime_put(pdata->dev);
+
+ return 0;
+}
+
+static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ int shift = offset * 2;
+ int ret;
+
+ if (test_and_set_bit(offset, pdata->gchip_output))
+ return 0;
+
+ pm_runtime_get_sync(pdata->dev);
+
+ /* Set value first to avoid glitching */
+ ti_sn_bridge_gpio_set(chip, offset, val);
+
+ /* Set direction */
+ ret = regmap_update_bits(pdata->regmap, SN_GPIO_CTRL_REG,
+ SN_GPIO_MUX_MASK << shift,
+ SN_GPIO_MUX_OUTPUT << shift);
+ if (ret) {
+ clear_bit(offset, pdata->gchip_output);
+ pm_runtime_put(pdata->dev);
+ }
+
+ return ret;
+}
+
+static void ti_sn_bridge_gpio_free(struct gpio_chip *chip, unsigned int offset)
+{
+ /* We won't keep pm_runtime if we're input, so switch there on free */
+ ti_sn_bridge_gpio_direction_input(chip, offset);
+}
+
+static const char * const ti_sn_bridge_gpio_names[SN_NUM_GPIOS] = {
+ "GPIO1", "GPIO2", "GPIO3", "GPIO4"
+};
+
+static int ti_sn_setup_gpio_controller(struct ti_sn_bridge *pdata)
+{
+ int ret;
+
+ /* Only init if someone is going to use us as a GPIO controller */
+ if (!of_property_read_bool(pdata->dev->of_node, "gpio-controller"))
+ return 0;
+
+ pdata->gchip.label = dev_name(pdata->dev);
+ pdata->gchip.parent = pdata->dev;
+ pdata->gchip.owner = THIS_MODULE;
+	pdata->gchip.of_xlate = ti_sn_bridge_of_xlate;
+ pdata->gchip.of_gpio_n_cells = 2;
+ pdata->gchip.free = ti_sn_bridge_gpio_free;
+ pdata->gchip.get_direction = ti_sn_bridge_gpio_get_direction;
+ pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input;
+ pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output;
+ pdata->gchip.get = ti_sn_bridge_gpio_get;
+ pdata->gchip.set = ti_sn_bridge_gpio_set;
+ pdata->gchip.can_sleep = true;
+ pdata->gchip.names = ti_sn_bridge_gpio_names;
+ pdata->gchip.ngpio = SN_NUM_GPIOS;
+ pdata->gchip.base = -1;
+ ret = devm_gpiochip_add_data(pdata->dev, &pdata->gchip, pdata);
+ if (ret)
+ dev_err(pdata->dev, "can't add gpio chip\n");
+
+ return ret;
+}
+
+#else
+
+static inline int ti_sn_setup_gpio_controller(struct ti_sn_bridge *pdata)
+{
+ return 0;
+}
+
+#endif
+
+static void ti_sn_bridge_parse_lanes(struct ti_sn_bridge *pdata,
+ struct device_node *np)
+{
+ u32 lane_assignments[SN_MAX_DP_LANES] = { 0, 1, 2, 3 };
+ u32 lane_polarities[SN_MAX_DP_LANES] = { };
+ struct device_node *endpoint;
+ u8 ln_assign = 0;
+ u8 ln_polrs = 0;
+ int dp_lanes;
+ int i;
+
+ /*
+ * Read config from the device tree about lane remapping and lane
+ * polarities. These are optional and we assume identity map and
+	 * normal polarity if nothing is specified. It's OK to specify just
+	 * data-lanes without lane-polarities, but not vice versa.
+ *
+ * Error checking is light (we just make sure we don't crash or
+ * buffer overrun) and we assume dts is well formed and specifying
+ * mappings that the hardware supports.
+ */
+ endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
+ dp_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
+ if (dp_lanes > 0 && dp_lanes <= SN_MAX_DP_LANES) {
+ of_property_read_u32_array(endpoint, "data-lanes",
+ lane_assignments, dp_lanes);
+ of_property_read_u32_array(endpoint, "lane-polarities",
+ lane_polarities, dp_lanes);
+ } else {
+ dp_lanes = SN_MAX_DP_LANES;
+ }
+ of_node_put(endpoint);
+
+ /*
+ * Convert into register format. Loop over all lanes even if
+ * data-lanes had fewer elements so that we nicely initialize
+ * the LN_ASSIGN register.
+ */
+ for (i = SN_MAX_DP_LANES - 1; i >= 0; i--) {
+ ln_assign = ln_assign << LN_ASSIGN_WIDTH | lane_assignments[i];
+ ln_polrs = ln_polrs << 1 | lane_polarities[i];
+ }
+
+ /* Stash in our struct for when we power on */
+ pdata->dp_lanes = dp_lanes;
+ pdata->ln_assign = ln_assign;
+ pdata->ln_polrs = ln_polrs;
+}
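+
+/*
+ * Worked example (illustrative): with the default identity mapping
+ * { 0, 1, 2, 3 }, the loop above packs ln_assign as
+ * (3 << 6) | (2 << 4) | (1 << 2) | 0 = 0xe4, and all-normal polarities
+ * pack ln_polrs as 0x0.
+ */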
+
static int ti_sn_bridge_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -916,6 +1199,8 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
return ret;
}
+ ti_sn_bridge_parse_lanes(pdata, client->dev.of_node);
+
ret = ti_sn_bridge_parse_regulators(pdata);
if (ret) {
DRM_ERROR("failed to parse regulators\n");
@@ -937,6 +1222,12 @@ static int ti_sn_bridge_probe(struct i2c_client *client,
pm_runtime_enable(pdata->dev);
+ ret = ti_sn_setup_gpio_controller(pdata);
+ if (ret) {
+ pm_runtime_disable(pdata->dev);
+ return ret;
+ }
+
i2c_set_clientdata(client, pdata);
pdata->aux.name = "ti-sn65dsi86-aux";
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
deleted file mode 100644
index c6bbd988b0e5..000000000000
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_CIRRUS_QEMU
- tristate "Cirrus driver for QEMU emulated device"
- depends on DRM && PCI && MMU
- select DRM_KMS_HELPER
- select DRM_GEM_SHMEM_HELPER
- help
- This is a KMS driver for emulated cirrus device in qemu.
- It is *NOT* intended for real cirrus devices. This requires
- the modesetting userspace X.org driver.
-
- Cirrus is obsolete, the hardware was designed in the 90ies
- and can't keep up with todays needs. More background:
- https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
-
- Better alternatives are:
- - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
- - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
- - virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
deleted file mode 100644
index 0c1ed3f99725..000000000000
--- a/drivers/gpu/drm/cirrus/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9ccfbf213d72..965173fd0ac2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1641,10 +1641,10 @@ static const struct drm_info_list drm_atomic_debugfs_list[] = {
{"state", drm_state_info, 0},
};
-int drm_atomic_debugfs_init(struct drm_minor *minor)
+void drm_atomic_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_atomic_debugfs_list,
- ARRAY_SIZE(drm_atomic_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_atomic_debugfs_list,
+ ARRAY_SIZE(drm_atomic_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 85d163f16801..a1898c58ae3c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1097,7 +1097,7 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
else if (funcs->dpms)
funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
- if (!(dev->irq_enabled && dev->num_crtcs))
+ if (!drm_dev_has_vblank(dev))
continue;
ret = drm_crtc_vblank_get(crtc);
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 531b876d0ed8..f2d46b7ac6f9 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -122,26 +122,19 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return master;
}
-static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
- bool new_master)
+static void drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
+ bool new_master)
{
- int ret = 0;
-
dev->master = drm_master_get(fpriv->master);
- if (dev->driver->master_set) {
- ret = dev->driver->master_set(dev, fpriv, new_master);
- if (unlikely(ret != 0)) {
- drm_master_put(&dev->master);
- }
- }
+ if (dev->driver->master_set)
+ dev->driver->master_set(dev, fpriv, new_master);
- return ret;
+ fpriv->was_master = true;
}
static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
{
struct drm_master *old_master;
- int ret;
lockdep_assert_held_once(&dev->master_mutex);
@@ -156,35 +149,85 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
fpriv->is_master = 1;
fpriv->authenticated = 1;
- ret = drm_set_master(dev, fpriv, true);
- if (ret)
- goto out_err;
+ drm_set_master(dev, fpriv, true);
if (old_master)
drm_master_put(&old_master);
return 0;
+}
-out_err:
- /* drop references and restore old master on failure */
- drm_master_put(&fpriv->master);
- fpriv->master = old_master;
- fpriv->is_master = 0;
+/*
+ * In the olden days the SET/DROP_MASTER ioctls used to return EACCES when
+ * CAP_SYS_ADMIN was not set. This was used to prevent rogue applications
+ * from becoming master and/or failing to release it.
+ *
+ * At the same time, the first client (for a given VT) is _always_ master.
+ * Thus in order for the ioctls to succeed, one had to _explicitly_ run the
+ * application as root or flip the setuid bit.
+ *
+ * If the CAP_SYS_ADMIN was missing, no other client could become master...
+ * EVER :-( Leading to a) the graphics session dying badly or b) a completely
+ * locked session.
+ *
+ *
+ * At some point systemd-logind was introduced to orchestrate and delegate
+ * master as applicable. It does so by opening the fd and passing it to users
+ * while logind itself a) does the set/drop master per the user's request
+ * and b) implicitly drops master on VT switch.
+ *
+ * Even though logind looks like the future, there are a few issues:
+ * - some platforms don't have an equivalent (Android, CrOS, some BSDs) so
+ * root is required _solely_ for SET/DROP MASTER.
+ * - applications may not be updated to use it,
+ * - any client which fails to drop master* can DoS the application using
+ * logind, to a varying degree.
+ *
+ * * Either due to missing CAP_SYS_ADMIN or simply not calling DROP_MASTER.
+ *
+ *
+ * Here we implement the next best thing:
+ * - ensure the logind style of fd passing works unchanged, and
+ * - allow a client to drop/set master, iff it is/was master at a given point
+ * in time.
+ *
+ * Note: DROP_MASTER cannot be free for all, as an arbitrary user could:
+ * - DoS/crash the arbitrator - details would be implementation specific
+ * - open the node, become master implicitly and cause issues
+ *
+ * As a result this fixes the following when using a root-less build w/o logind:
+ * - startx
+ * - weston
+ * - various compositors based on wlroots
+ */
+static int
+drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
+{
+ if (file_priv->pid == task_pid(current) && file_priv->was_master)
+ return 0;
- return ret;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return 0;
}
int drm_setmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret = 0;
+ int ret;
mutex_lock(&dev->master_mutex);
+
+ ret = drm_master_check_perm(dev, file_priv);
+ if (ret)
+ goto out_unlock;
+
if (drm_is_current_master(file_priv))
goto out_unlock;
if (dev->master) {
- ret = -EINVAL;
+ ret = -EBUSY;
goto out_unlock;
}
@@ -204,7 +247,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = drm_set_master(dev, file_priv, false);
+ drm_set_master(dev, file_priv, false);
out_unlock:
mutex_unlock(&dev->master_mutex);
return ret;
@@ -221,14 +264,23 @@ static void drm_drop_master(struct drm_device *dev,
int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- int ret = -EINVAL;
+ int ret;
mutex_lock(&dev->master_mutex);
- if (!drm_is_current_master(file_priv))
+
+ ret = drm_master_check_perm(dev, file_priv);
+ if (ret)
goto out_unlock;
- if (!dev->master)
+ if (!drm_is_current_master(file_priv)) {
+ ret = -EINVAL;
goto out_unlock;
+ }
+
+ if (!dev->master) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
if (file_priv->master->lessor != NULL) {
DRM_DEBUG_LEASE("Attempt to drop lessee %d as master\n", file_priv->master->lessee_id);
@@ -236,7 +288,6 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = 0;
drm_drop_master(dev, file_priv);
out_unlock:
mutex_unlock(&dev->master_mutex);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 121481f6aa71..f1dcad96f341 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -135,7 +135,9 @@
* are underneath planes with higher Z position values. Two planes with the
* same Z position value have undefined ordering. Note that the Z position
* value can also be immutable, to inform userspace about the hard-coded
- * stacking of planes, see drm_plane_create_zpos_immutable_property().
+ * stacking of planes, see drm_plane_create_zpos_immutable_property(). If
+ * any plane has a zpos property (either mutable or immutable), then all
+ * planes shall have a zpos property.
*
* pixel blend mode:
* Pixel blend mode is set up with drm_plane_create_blend_mode_property().
@@ -183,6 +185,12 @@
* plane does not expose the "alpha" property, then this is
* assumed to be 1.0
*
+ * IN_FORMATS:
+ * Blob property which contains the set of buffer format and modifier
+ * pairs supported by this plane. The blob is a drm_format_modifier_blob
+ * struct. Without this property the plane doesn't support buffers with
+ * modifiers. Userspace cannot change this property.
+ *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
@@ -338,10 +346,10 @@ EXPORT_SYMBOL(drm_rotation_simplify);
* should be set to 0 and max to maximal number of planes for given crtc - 1.
*
* If zpos of some planes cannot be changed (like fixed background or
- * cursor/topmost planes), driver should adjust min/max values and assign those
- * planes immutable zpos property with lower or higher values (for more
+ * cursor/topmost planes), drivers shall adjust the min/max values and assign
+ * those planes immutable zpos properties with lower or higher values (for more
* information, see drm_plane_create_zpos_immutable_property() function). In such
- * case driver should also assign proper initial zpos values for all planes in
+ * case drivers shall also assign proper initial zpos values for all planes in
* its plane_reset() callback, so the planes will be always sorted properly.
*
* See also drm_atomic_normalize_zpos().
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index dcabf5698333..ef26ac57f039 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/nospec.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
@@ -43,7 +44,6 @@
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 6b0c6ef8b9b3..495f47d23d87 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -237,7 +237,7 @@ static void drm_client_buffer_delete(struct drm_client_buffer *buffer)
drm_gem_vunmap(buffer->gem, buffer->vaddr);
if (buffer->gem)
- drm_gem_object_put_unlocked(buffer->gem);
+ drm_gem_object_put(buffer->gem);
if (buffer->handle)
drm_mode_destroy_dumb(dev, buffer->handle, buffer->client->file);
@@ -437,6 +437,39 @@ void drm_client_framebuffer_delete(struct drm_client_buffer *buffer)
}
EXPORT_SYMBOL(drm_client_framebuffer_delete);
+/**
+ * drm_client_framebuffer_flush - Manually flush client framebuffer
+ * @buffer: DRM client buffer (can be NULL)
+ * @rect: Damage rectangle (if NULL flushes all)
+ *
+ * This calls &drm_framebuffer_funcs->dirty (if present) to flush buffer changes
+ * for drivers that need it.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_framebuffer_flush(struct drm_client_buffer *buffer, struct drm_rect *rect)
+{
+ if (!buffer || !buffer->fb || !buffer->fb->funcs->dirty)
+ return 0;
+
+ if (rect) {
+ struct drm_clip_rect clip = {
+ .x1 = rect->x1,
+ .y1 = rect->y1,
+ .x2 = rect->x2,
+ .y2 = rect->y2,
+ };
+
+ return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file,
+ 0, 0, &clip, 1);
+ }
+
+ return buffer->fb->funcs->dirty(buffer->fb, buffer->client->file,
+ 0, 0, NULL, 0);
+}
+EXPORT_SYMBOL(drm_client_framebuffer_flush);
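+
+/*
+ * Minimal usage sketch (illustrative): flush a 16x16 damage rectangle
+ * after drawing into a client framebuffer.
+ *
+ *   struct drm_rect rect = { .x1 = 0, .y1 = 0, .x2 = 16, .y2 = 16 };
+ *   int ret = drm_client_framebuffer_flush(buffer, &rect);
+ */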
+
#ifdef CONFIG_DEBUG_FS
static int drm_client_debugfs_internal_clients(struct seq_file *m, void *data)
{
@@ -457,10 +490,10 @@ static const struct drm_info_list drm_client_debugfs_list[] = {
{ "internal_clients", drm_client_debugfs_internal_clients, 0 },
};
-int drm_client_debugfs_init(struct drm_minor *minor)
+void drm_client_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_client_debugfs_list,
- ARRAY_SIZE(drm_client_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_client_debugfs_list,
+ ARRAY_SIZE(drm_client_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 7443114bd713..b7e9e1c2564c 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -186,7 +186,7 @@ again:
continue;
if (cmdline_mode->refresh_specified) {
- if (mode->vrefresh != cmdline_mode->refresh)
+ if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
continue;
}
@@ -563,7 +563,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
struct drm_client_offset *offsets,
bool *enabled, int width, int height)
{
- unsigned int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
+ const int count = min_t(unsigned int, connector_count, BITS_PER_LONG);
unsigned long conn_configured, conn_seq, mask;
struct drm_device *dev = client->dev;
int i, j;
@@ -577,6 +577,9 @@ static bool drm_client_firmware_config(struct drm_client_dev *client,
if (!drm_drv_uses_atomic_modeset(dev))
return false;
+ if (WARN_ON(count <= 0))
+ return false;
+
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
@@ -966,7 +969,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
}
EXPORT_SYMBOL(drm_client_rotation);
-static int drm_client_modeset_commit_atomic(struct drm_client_dev *client, bool active)
+static int drm_client_modeset_commit_atomic(struct drm_client_dev *client, bool active, bool check)
{
struct drm_device *dev = client->dev;
struct drm_plane *plane;
@@ -1033,7 +1036,10 @@ retry:
}
}
- ret = drm_atomic_commit(state);
+ if (check)
+ ret = drm_atomic_check_only(state);
+ else
+ ret = drm_atomic_commit(state);
out_state:
if (ret == -EDEADLK)
@@ -1095,6 +1101,30 @@ out:
}
/**
+ * drm_client_modeset_check() - Check modeset configuration
+ * @client: DRM client
+ *
+ * Check modeset configuration.
+ *
+ * Returns:
+ * Zero on success or negative error code on failure.
+ */
+int drm_client_modeset_check(struct drm_client_dev *client)
+{
+ int ret;
+
+ if (!drm_drv_uses_atomic_modeset(client->dev))
+ return 0;
+
+ mutex_lock(&client->modeset_mutex);
+ ret = drm_client_modeset_commit_atomic(client, true, true);
+ mutex_unlock(&client->modeset_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_client_modeset_check);
+
+/**
* drm_client_modeset_commit_locked() - Force commit CRTC configuration
* @client: DRM client
*
@@ -1112,7 +1142,7 @@ int drm_client_modeset_commit_locked(struct drm_client_dev *client)
mutex_lock(&client->modeset_mutex);
if (drm_drv_uses_atomic_modeset(dev))
- ret = drm_client_modeset_commit_atomic(client, true);
+ ret = drm_client_modeset_commit_atomic(client, true, false);
else
ret = drm_client_modeset_commit_legacy(client);
mutex_unlock(&client->modeset_mutex);
@@ -1188,7 +1218,7 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode)
mutex_lock(&client->modeset_mutex);
if (drm_drv_uses_atomic_modeset(dev))
- ret = drm_client_modeset_commit_atomic(client, mode == DRM_MODE_DPMS_ON);
+ ret = drm_client_modeset_commit_atomic(client, mode == DRM_MODE_DPMS_ON, false);
else
drm_client_modeset_dpms_legacy(client, mode);
mutex_unlock(&client->modeset_mutex);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 644f0ad10671..b7bd46033807 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -27,6 +27,7 @@
#include <drm/drm_print.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_sysfs.h>
#include <linux/uaccess.h>
@@ -523,6 +524,10 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registration_state = DRM_CONNECTOR_REGISTERED;
+
+ /* Let userspace know we have a new connector */
+ drm_sysfs_hotplug_event(connector->dev);
+
goto unlock;
err_debugfs:
@@ -948,8 +953,7 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* connector is linked to. Drivers should never set this property directly,
* it is handled by the DRM core by calling the &drm_connector_funcs.dpms
* callback. For atomic drivers the remapping to the "ACTIVE" property is
- * implemented in the DRM core. This is the only standard connector
- * property that userspace can change.
+ * implemented in the DRM core.
*
* Note that this property cannot be set through the MODE_ATOMIC ioctl,
* userspace must use "ACTIVE" on the CRTC instead.
@@ -995,6 +999,32 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* after modeset, the kernel driver may set this to "BAD" and issue a
* hotplug uevent. Drivers should update this value using
* drm_connector_set_link_status_property().
+ *
+ * When user-space receives the hotplug uevent and detects a "BAD"
+ * link-status, the sink doesn't receive pixels anymore (e.g. the screen
+ * becomes completely black). The list of available modes may have
+ * changed. User-space is expected to pick a new mode if the current one
+ * has disappeared and perform a new modeset with link-status set to
+ * "GOOD" to re-enable the connector.
+ *
+ * If multiple connectors share the same CRTC and one of them gets a "BAD"
+ * link-status, the others are unaffected (i.e. the sinks still continue to
+ * receive pixels).
+ *
+ * When user-space performs an atomic commit on a connector with a "BAD"
+ * link-status without resetting the property to "GOOD", the sink may
+ * still not receive pixels. When user-space performs an atomic commit
+ * which resets the link-status property to "GOOD" without the
+ * ALLOW_MODESET flag set, it might fail because a modeset is required.
+ *
+ * User-space can only change link-status to "GOOD", changing it to "BAD"
+ * is a no-op.
+ *
+ * For backwards compatibility with non-atomic userspace the kernel
+ * tries to automatically set the link-status back to "GOOD" in the
+ * SETCRTC IOCTL. This might fail if the mode is no longer valid, similar
+ * to how it might fail if a different screen has been connected in the
+ * interim.
* non_desktop:
* Indicates the output should be ignored for purposes of displaying a
* standard desktop environment or console. This is most likely because
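To make the link-status recovery flow documented above concrete, here is a hedged userspace sketch using libdrm. conn_id and link_status_prop are assumed to have been discovered via drmModeObjectGetProperties(), and a complete request would also carry the desired mode and plane state:

        #include <xf86drm.h>
        #include <xf86drmMode.h>

        static int reset_link_status(int fd, uint32_t conn_id,
                                     uint32_t link_status_prop)
        {
                drmModeAtomicReq *req = drmModeAtomicAlloc();
                int ret;

                drmModeAtomicAddProperty(req, conn_id, link_status_prop,
                                         DRM_MODE_LINK_STATUS_GOOD);
                /* retraining may need a full modeset, so allow one */
                ret = drmModeAtomicCommit(fd, req,
                                          DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
                drmModeAtomicFree(req);
                return ret;
        }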
@@ -1970,6 +2000,8 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
else
drm_reset_display_info(connector);
+ drm_update_tile_info(connector, edid);
+
drm_object_property_set_value(&connector->base,
dev->mode_config.non_desktop_property,
connector->display_info.non_desktop);
@@ -2392,7 +2424,7 @@ EXPORT_SYMBOL(drm_mode_put_tile_group);
* tile group or NULL if not found.
*/
struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
- char topology[8])
+ const char topology[8])
{
struct drm_tile_group *tg;
int id;
@@ -2422,7 +2454,7 @@ EXPORT_SYMBOL(drm_mode_get_tile_group);
* new tile group or NULL.
*/
struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
- char topology[8])
+ const char topology[8])
{
struct drm_tile_group *tg;
int ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 4936e1080e41..f1216088f65f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -205,6 +205,33 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
}
/**
+ * DOC: standard CRTC properties
+ *
+ * DRM CRTCs have a few standardized properties:
+ *
+ * ACTIVE:
+ * Atomic property for setting the power state of the CRTC. When set to 1
+ * the CRTC will actively display content. When set to 0 the CRTC will be
+ * powered off. There is no expectation that user-space will reset CRTC
+ * resources like the mode and planes when setting ACTIVE to 0.
+ *
+ * User-space can rely on an ACTIVE change to 1 to never fail an atomic
+ * test as long as no other property has changed. If a change to ACTIVE
+ * fails an atomic test, this is a driver bug. For this reason setting
+ * ACTIVE to 0 must not release internal resources (like reserved memory
+ * bandwidth or clock generators).
+ *
+ * Note that the legacy DPMS property on connectors is internally routed
+ * to control this property for atomic drivers.
+ * MODE_ID:
+ * Atomic property for setting the CRTC display timings. The value is the
+ * ID of a blob containing the DRM mode info. To disable the CRTC,
+ * user-space must set this property to 0.
+ *
+ * Setting MODE_ID to 0 will release reserved resources for the CRTC.
+ */
+
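As a userspace illustration of the two CRTC properties documented above, a sketch assuming the property IDs and a valid drmModeModeInfo have been looked up elsewhere; a complete request would also set CRTC_ID on a connector and configure a primary plane:

        #include <xf86drm.h>
        #include <xf86drmMode.h>

        static int enable_crtc(int fd, uint32_t crtc_id, uint32_t prop_active,
                               uint32_t prop_mode_id, const drmModeModeInfo *mode)
        {
                drmModeAtomicReq *req = drmModeAtomicAlloc();
                uint32_t blob_id = 0;
                int ret;

                /* MODE_ID takes a blob handle, not the mode struct itself */
                drmModeCreatePropertyBlob(fd, mode, sizeof(*mode), &blob_id);
                drmModeAtomicAddProperty(req, crtc_id, prop_mode_id, blob_id);
                drmModeAtomicAddProperty(req, crtc_id, prop_active, 1);
                ret = drmModeAtomicCommit(fd, req,
                                          DRM_MODE_ATOMIC_ALLOW_MODESET, NULL);
                drmModeAtomicFree(req);
                return ret;
        }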
+/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
* @dev: DRM device
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 16f2413403aa..da96b2f64d7e 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -82,6 +82,7 @@ int drm_mode_setcrtc(struct drm_device *dev,
/* drm_mode_config.c */
int drm_modeset_register_all(struct drm_device *dev);
void drm_modeset_unregister_all(struct drm_device *dev);
+void drm_mode_config_validate(struct drm_device *dev);
/* drm_modes.c */
const char *drm_get_mode_status_name(enum drm_mode_status status);
@@ -224,7 +225,7 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
/* drm_atomic.c */
#ifdef CONFIG_DEBUG_FS
struct drm_minor;
-int drm_atomic_debugfs_init(struct drm_minor *minor);
+void drm_atomic_debugfs_init(struct drm_minor *minor);
#endif
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
@@ -278,3 +279,4 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
void drm_mode_fixup_1366x768(struct drm_display_mode *mode);
void drm_reset_display_info(struct drm_connector *connector);
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid);
+void drm_update_tile_info(struct drm_connector *connector, const struct edid *edid);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 4e673d318503..bfe4602f206b 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -172,8 +172,8 @@ static const struct file_operations drm_debugfs_fops = {
* &struct drm_info_list in the given root directory. These files will be removed
* automatically on drm_debugfs_cleanup().
*/
-int drm_debugfs_create_files(const struct drm_info_list *files, int count,
- struct dentry *root, struct drm_minor *minor)
+void drm_debugfs_create_files(const struct drm_info_list *files, int count,
+ struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct drm_info_node *tmp;
@@ -199,7 +199,6 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
list_add(&tmp->list, &minor->debugfs_list);
mutex_unlock(&minor->debugfs_lock);
}
- return 0;
}
EXPORT_SYMBOL(drm_debugfs_create_files);
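With the error path gone, a driver-side debugfs hook reduces to an unconditional call. A sketch with a made-up entry table; foo_info_show() is hypothetical, and the void hook signature assumes the debugfs_init conversion that accompanies this series:

        static int foo_info_show(struct seq_file *m, void *data);

        static const struct drm_info_list foo_debugfs_list[] = {
                { "foo_info", foo_info_show, 0, NULL },
        };

        static void foo_debugfs_init(struct drm_minor *minor)
        {
                /* no error to propagate anymore */
                drm_debugfs_create_files(foo_debugfs_list,
                                         ARRAY_SIZE(foo_debugfs_list),
                                         minor->debugfs_root, minor);
        }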
@@ -208,52 +207,28 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
{
struct drm_device *dev = minor->dev;
char name[64];
- int ret;
INIT_LIST_HEAD(&minor->debugfs_list);
mutex_init(&minor->debugfs_lock);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
- ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret) {
- debugfs_remove(minor->debugfs_root);
- minor->debugfs_root = NULL;
- DRM_ERROR("Failed to create core drm debugfs files\n");
- return ret;
- }
+ drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
if (drm_drv_uses_atomic_modeset(dev)) {
- ret = drm_atomic_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create atomic debugfs files\n");
- return ret;
- }
+ drm_atomic_debugfs_init(minor);
}
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_framebuffer_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create framebuffer debugfs file\n");
- return ret;
- }
+ drm_framebuffer_debugfs_init(minor);
- ret = drm_client_debugfs_init(minor);
- if (ret) {
- DRM_ERROR("Failed to create client debugfs file\n");
- return ret;
- }
+ drm_client_debugfs_init(minor);
}
- if (dev->driver->debugfs_init) {
- ret = dev->driver->debugfs_init(minor);
- if (ret) {
- DRM_ERROR("DRM: Driver failed to initialize "
- "/sys/kernel/debug/dri.\n");
- return ret;
- }
- }
+ if (dev->driver->debugfs_init)
+ dev->driver->debugfs_init(minor);
+
return 0;
}
@@ -336,13 +311,13 @@ static ssize_t connector_write(struct file *file, const char __user *ubuf,
buf[len] = '\0';
- if (!strcmp(buf, "on"))
+ if (sysfs_streq(buf, "on"))
connector->force = DRM_FORCE_ON;
- else if (!strcmp(buf, "digital"))
+ else if (sysfs_streq(buf, "digital"))
connector->force = DRM_FORCE_ON_DIGITAL;
- else if (!strcmp(buf, "off"))
+ else if (sysfs_streq(buf, "off"))
connector->force = DRM_FORCE_OFF;
- else if (!strcmp(buf, "unspecified"))
+ else if (sysfs_streq(buf, "unspecified"))
connector->force = DRM_FORCE_UNSPECIFIED;
else
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index a7add55a85b4..d07ba54ec945 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -34,9 +34,9 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_legacy.h"
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 43e57632b00a..6d716dcb432c 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -1315,6 +1315,7 @@ static const struct edid_quirk edid_quirk_list[] = {
{ MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
{ MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4c, 0x83), PROD_ID(0x47, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
};
#undef MFG
@@ -1364,7 +1365,7 @@ EXPORT_SYMBOL(drm_dp_get_edid_quirks);
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
- * @desc: Device decriptor to fill from DPCD
+ * @desc: Device descriptor to fill from DPCD
* @is_branch: true for branch devices, false for sink devices
*
* Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the
@@ -1590,6 +1591,7 @@ EXPORT_SYMBOL(drm_dp_get_phy_test_pattern);
* drm_dp_set_phy_test_pattern() - set the pattern to the sink.
* @aux: DisplayPort AUX channel
* @data: DP phy compliance test parameters.
+ * @dp_rev: DP revision to use for compliance testing
*
* Returns 0 on success or a negative error code on failure.
*/
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 70c4b7afed12..b2f5a84b4cfb 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
+#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -87,8 +88,8 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
u8 *guid);
-static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
-static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port);
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
#define DBG_PREFIX "[dp_mst]"
@@ -687,51 +688,45 @@ static void drm_dp_encode_sideband_reply(struct drm_dp_sideband_msg_reply_body *
raw->cur_len = idx;
}
-/* this adds a chunk of msg to the builder to get the final msg */
-static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
- u8 *replybuf, u8 replybuflen, bool hdr)
+static bool drm_dp_sideband_msg_set_header(struct drm_dp_sideband_msg_rx *msg,
+ struct drm_dp_sideband_msg_hdr *hdr,
+ u8 hdrlen)
{
- int ret;
- u8 crc4;
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!hdr->somt && !msg->have_somt)
+ return false;
- if (hdr) {
- u8 hdrlen;
- struct drm_dp_sideband_msg_hdr recv_hdr;
- ret = drm_dp_decode_sideband_msg_hdr(&recv_hdr, replybuf, replybuflen, &hdrlen);
- if (ret == false) {
- print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16, 1, replybuf, replybuflen, false);
- return false;
- }
+ /* get length contained in this portion */
+ msg->curchunk_idx = 0;
+ msg->curchunk_len = hdr->msg_len;
+ msg->curchunk_hdrlen = hdrlen;
- /*
- * ignore out-of-order messages or messages that are part of a
- * failed transaction
- */
- if (!recv_hdr.somt && !msg->have_somt)
- return false;
+ /* we have already gotten an somt - don't bother parsing */
+ if (hdr->somt && msg->have_somt)
+ return false;
- /* get length contained in this portion */
- msg->curchunk_len = recv_hdr.msg_len;
- msg->curchunk_hdrlen = hdrlen;
+ if (hdr->somt) {
+ memcpy(&msg->initial_hdr, hdr,
+ sizeof(struct drm_dp_sideband_msg_hdr));
+ msg->have_somt = true;
+ }
+ if (hdr->eomt)
+ msg->have_eomt = true;
- /* we have already gotten an somt - don't bother parsing */
- if (recv_hdr.somt && msg->have_somt)
- return false;
+ return true;
+}
- if (recv_hdr.somt) {
- memcpy(&msg->initial_hdr, &recv_hdr, sizeof(struct drm_dp_sideband_msg_hdr));
- msg->have_somt = true;
- }
- if (recv_hdr.eomt)
- msg->have_eomt = true;
+/* this adds a chunk of msg to the builder to get the final msg */
+static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
+ u8 *replybuf, u8 replybuflen)
+{
+ u8 crc4;
- /* copy the bytes for the remainder of this header chunk */
- msg->curchunk_idx = min(msg->curchunk_len, (u8)(replybuflen - hdrlen));
- memcpy(&msg->chunk[0], replybuf + hdrlen, msg->curchunk_idx);
- } else {
- memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
- msg->curchunk_idx += replybuflen;
- }
+ memcpy(&msg->chunk[msg->curchunk_idx], replybuf, replybuflen);
+ msg->curchunk_idx += replybuflen;
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
@@ -1060,13 +1055,12 @@ static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
drm_dp_encode_sideband_req(&req, msg);
}
-static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
+static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@@ -1184,12 +1178,38 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
struct drm_dp_sideband_msg_tx *txmsg)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
+ unsigned long wait_timeout = msecs_to_jiffies(4000);
+ unsigned long wait_expires = jiffies + wait_timeout;
int ret;
- ret = wait_event_timeout(mgr->tx_waitq,
- check_txmsg_state(mgr, txmsg),
- (4 * HZ));
- mutex_lock(&mstb->mgr->qlock);
+ for (;;) {
+ /*
+ * If the driver provides a way for this, change to
+ * poll-waiting for the MST reply interrupt if we didn't receive
+ * it for 50 msec. This would cater for cases where the HPD
+ * pulse signal got lost somewhere, even though the sink raised
+ * the corresponding MST interrupt correctly. One example is the
+ * Club 3D CAC-1557 TypeC -> DP adapter which for some reason
+ * filters out short pulses with a duration less than ~540 usec.
+ *
+ * The poll period is 50 msec to avoid missing an interrupt
+ * after the sink has cleared it (after a 110 msec timeout
+ * since it raised the interrupt).
+ */
+ ret = wait_event_timeout(mgr->tx_waitq,
+ check_txmsg_state(mgr, txmsg),
+ mgr->cbs->poll_hpd_irq ?
+ msecs_to_jiffies(50) :
+ wait_timeout);
+
+ if (ret || !mgr->cbs->poll_hpd_irq ||
+ time_after(jiffies, wait_expires))
+ break;
+
+ mgr->cbs->poll_hpd_irq(mgr);
+ }
+
+ mutex_lock(&mgr->qlock);
if (ret > 0) {
if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) {
ret = -EIO;
@@ -1203,16 +1223,9 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
/* remove from q */
if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
- txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) {
+ txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
+ txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
list_del(&txmsg->next);
- }
-
- if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
- txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
- mstb->tx_slots[txmsg->seqno] = NULL;
- }
- mgr->is_waiting_for_dwn_reply = false;
-
}
out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
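A hedged sketch of what a driver's poll_hpd_irq hook could look like: re-read the ESI registers exactly as the normal short-pulse handler would. All names except the DPCD helpers are hypothetical; drm_dp_mst_hpd_irq() is the existing entry point for processing ESI events, and in practice a driver may simply kick its regular HPD work instead:

        static void foo_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
        {
                u8 esi[DP_DPRX_ESI_LEN];
                bool handled;

                if (drm_dp_dpcd_read(mgr->aux, DP_SINK_COUNT_ESI, esi,
                                     DP_DPRX_ESI_LEN) != DP_DPRX_ESI_LEN)
                        return;

                drm_dp_mst_hpd_irq(mgr, esi, &handled);
        }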
@@ -1617,7 +1630,7 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
mutex_lock(&mgr->delayed_destroy_lock);
list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list);
mutex_unlock(&mgr->delayed_destroy_lock);
- schedule_work(&mgr->delayed_destroy_work);
+ queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}
/**
@@ -1734,7 +1747,7 @@ static void drm_dp_destroy_port(struct kref *kref)
mutex_lock(&mgr->delayed_destroy_lock);
list_add(&port->next, &mgr->destroy_port_list);
mutex_unlock(&mgr->delayed_destroy_lock);
- schedule_work(&mgr->delayed_destroy_work);
+ queue_work(mgr->delayed_destroy_wq, &mgr->delayed_destroy_work);
}
/**
@@ -1980,7 +1993,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
}
/* remove i2c over sideband */
- drm_dp_mst_unregister_i2c_bus(&port->aux);
+ drm_dp_mst_unregister_i2c_bus(port);
} else {
mutex_lock(&mgr->lock);
drm_dp_mst_topology_put_mstb(port->mstb);
@@ -1995,7 +2008,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
if (port->pdt != DP_PEER_DEVICE_NONE) {
if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
/* add i2c over sideband */
- ret = drm_dp_mst_register_i2c_bus(&port->aux);
+ ret = drm_dp_mst_register_i2c_bus(port);
} else {
lct = drm_dp_calculate_rad(port, rad);
mstb = drm_dp_add_mst_branch_device(lct, rad);
@@ -2691,22 +2704,6 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
struct drm_dp_mst_branch *mstb = txmsg->dst;
u8 req_type;
- /* both msg slots are full */
- if (txmsg->seqno == -1) {
- if (mstb->tx_slots[0] && mstb->tx_slots[1]) {
- DRM_DEBUG_KMS("%s: failed to find slot\n", __func__);
- return -EAGAIN;
- }
- if (mstb->tx_slots[0] == NULL && mstb->tx_slots[1] == NULL) {
- txmsg->seqno = mstb->last_seqno;
- mstb->last_seqno ^= 1;
- } else if (mstb->tx_slots[0] == NULL)
- txmsg->seqno = 0;
- else
- txmsg->seqno = 1;
- mstb->tx_slots[txmsg->seqno] = txmsg;
- }
-
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY)
@@ -2718,7 +2715,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
hdr->lcr = mstb->lct - 1;
if (mstb->lct > 1)
memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
- hdr->seqno = txmsg->seqno;
+
return 0;
}
/*
@@ -2733,15 +2730,15 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
int len, space, idx, tosend;
int ret;
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
+ return 0;
+
memset(&hdr, 0, sizeof(struct drm_dp_sideband_msg_hdr));
- if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
- txmsg->seqno = -1;
+ if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED)
txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
- }
- /* make hdr from dst mst - for replies use seqno
- otherwise assign one */
+ /* make hdr from dst mst */
ret = set_hdr_from_dst_qlock(&hdr, txmsg);
if (ret < 0)
return ret;
@@ -2794,42 +2791,17 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
if (list_empty(&mgr->tx_msg_downq))
return;
- txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
+ txmsg = list_first_entry(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
- if (ret == 1) {
- /* txmsg is sent it should be in the slots now */
- mgr->is_waiting_for_dwn_reply = true;
- list_del(&txmsg->next);
- } else if (ret) {
+ if (ret < 0) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
- mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next);
- if (txmsg->seqno != -1)
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up_all(&mgr->tx_waitq);
}
}
-/* called holding qlock */
-static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_sideband_msg_tx *txmsg)
-{
- int ret;
-
- /* construct a chunk from the first msg in the tx_msg queue */
- ret = process_single_tx_qlock(mgr, txmsg, true);
-
- if (ret != 1)
- DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
-
- if (txmsg->seqno != -1) {
- WARN_ON((unsigned int)txmsg->seqno >
- ARRAY_SIZE(txmsg->dst->tx_slots));
- txmsg->dst->tx_slots[txmsg->seqno] = NULL;
- }
-}
-
static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg)
{
@@ -2842,8 +2814,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
}
- if (list_is_singular(&mgr->tx_msg_downq) &&
- !mgr->is_waiting_for_dwn_reply)
+ if (list_is_singular(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
@@ -2950,8 +2921,9 @@ out:
return ret < 0 ? ret : changed;
}
-void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb)
+static void
+drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
int ret;
@@ -3442,8 +3414,12 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- ret = -EIO;
+ if (ret > 0) {
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ ret = -EIO;
+ else
+ ret = size;
+ }
kfree(txmsg);
fail_put:
@@ -3463,7 +3439,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
- int req_type, int seqno, bool broadcast)
+ int req_type, bool broadcast)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3472,13 +3448,11 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- txmsg->seqno = seqno;
drm_dp_encode_up_ack_reply(txmsg, req_type);
mutex_lock(&mgr->qlock);
-
- process_single_up_tx_qlock(mgr, txmsg);
-
+ /* construct a chunk from the first msg in the tx_msg queue */
+ process_single_tx_qlock(mgr, txmsg, true);
mutex_unlock(&mgr->qlock);
kfree(txmsg);
@@ -3703,31 +3677,63 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool
+drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
+ struct drm_dp_mst_branch **mstb)
{
int len;
u8 replyblock[32];
int replylen, curreply;
int ret;
- struct drm_dp_sideband_msg_rx *msg;
- int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
- msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+ u8 hdrlen;
+ struct drm_dp_sideband_msg_hdr hdr;
+ struct drm_dp_sideband_msg_rx *msg =
+ up ? &mgr->up_req_recv : &mgr->down_rep_recv;
+ int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE :
+ DP_SIDEBAND_MSG_DOWN_REP_BASE;
+
+ if (!up)
+ *mstb = NULL;
len = min(mgr->max_dpcd_transaction_bytes, 16);
- ret = drm_dp_dpcd_read(mgr->aux, basereg,
- replyblock, len);
+ ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
+
+ ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+ if (ret == false) {
+ print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
+ 1, replyblock, len, false);
+ DRM_DEBUG_KMS("ERROR: failed header\n");
+ return false;
+ }
+
+ if (!up) {
+ /* Caller is responsible for giving back this reference */
+ *mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
+ if (!*mstb) {
+ DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
+ hdr.lct);
+ return false;
+ }
+ }
+
+ if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
+ DRM_DEBUG_KMS("sideband msg set header failed %d\n",
+ replyblock[0]);
+ return false;
+ }
+
+ replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
+ ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
return false;
}
- replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- replylen -= len;
+ replylen = msg->curchunk_len + msg->curchunk_hdrlen - len;
curreply = len;
while (replylen > 0) {
len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16);
@@ -3739,7 +3745,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
return false;
}
- ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
+ ret = drm_dp_sideband_append_payload(msg, replyblock, len);
if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
return false;
@@ -3754,67 +3760,60 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
- struct drm_dp_mst_branch *mstb;
- struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr;
- int slot = -1;
-
- if (!drm_dp_get_one_sb_msg(mgr, false))
- goto clear_down_rep_recv;
+ struct drm_dp_mst_branch *mstb = NULL;
+ struct drm_dp_sideband_msg_rx *msg = &mgr->down_rep_recv;
- if (!mgr->down_rep_recv.have_eomt)
- return 0;
+ if (!drm_dp_get_one_sb_msg(mgr, false, &mstb))
+ goto out;
- mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
- if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr->lct);
- goto clear_down_rep_recv;
- }
+ /* Multi-packet message transmission, don't clear the reply */
+ if (!msg->have_eomt)
+ goto out;
/* find the message */
- slot = hdr->seqno;
mutex_lock(&mgr->qlock);
- txmsg = mstb->tx_slots[slot];
- /* remove from slots */
+ txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
+ struct drm_dp_sideband_msg_tx, next);
mutex_unlock(&mgr->qlock);
- if (!txmsg) {
+ /* Were we actually expecting a response, and from this mstb? */
+ if (!txmsg || txmsg->dst != mstb) {
+ struct drm_dp_sideband_msg_hdr *hdr;
+ hdr = &msg->initial_hdr;
DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0],
- mgr->down_rep_recv.msg[0]);
- goto no_msg;
+ msg->msg[0]);
+ goto out_clear_reply;
}
- drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
+ drm_dp_sideband_parse_reply(msg, &txmsg->reply);
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
txmsg->reply.req_type,
drm_dp_mst_req_type_str(txmsg->reply.req_type),
txmsg->reply.u.nak.reason,
drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
txmsg->reply.u.nak.nak_data);
+ }
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
- mstb->tx_slots[slot] = NULL;
- mgr->is_waiting_for_dwn_reply = false;
+ list_del(&txmsg->next);
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
return 0;
-no_msg:
- drm_dp_mst_topology_put_mstb(mstb);
-clear_down_rep_recv:
- mutex_lock(&mgr->qlock);
- mgr->is_waiting_for_dwn_reply = false;
- mutex_unlock(&mgr->qlock);
- memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out_clear_reply:
+ memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+out:
+ if (mstb)
+ drm_dp_mst_topology_put_mstb(mstb);
return 0;
}
@@ -3890,11 +3889,9 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
- struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr;
struct drm_dp_pending_up_req *up_req;
- bool seqno;
- if (!drm_dp_get_one_sb_msg(mgr, true))
+ if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
goto out;
if (!mgr->up_req_recv.have_eomt)
@@ -3907,7 +3904,6 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
INIT_LIST_HEAD(&up_req->next);
- seqno = hdr->seqno;
drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
@@ -3919,7 +3915,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
- seqno, false);
+ false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
@@ -3941,7 +3937,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
res_stat->available_pbn);
}
- up_req->hdr = *hdr;
+ up_req->hdr = mgr->up_req_recv.initial_hdr;
mutex_lock(&mgr->up_req_lock);
list_add_tail(&up_req->next, &mgr->up_req_list);
mutex_unlock(&mgr->up_req_lock);
@@ -4047,27 +4043,6 @@ out:
EXPORT_SYMBOL(drm_dp_mst_detect_port);
/**
- * drm_dp_mst_port_has_audio() - Check whether port has audio capability or not
- * @mgr: manager for this port
- * @port: unverified pointer to a port.
- *
- * This returns whether the port supports audio or not.
- */
-bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port)
-{
- bool ret = false;
-
- port = drm_dp_mst_topology_get_port_validated(mgr, port);
- if (!port)
- return ret;
- ret = port->has_audio;
- drm_dp_mst_topology_put_port(port);
- return ret;
-}
-EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
-
-/**
* drm_dp_mst_get_edid() - get EDID for an MST port
* @connector: toplevel connector to get EDID for
* @mgr: manager for this port
@@ -4295,6 +4270,7 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
if (pos->vcpi) {
drm_dp_mst_put_port_malloc(port);
pos->vcpi = 0;
+ pos->pbn = 0;
}
return 0;
@@ -4443,42 +4419,58 @@ fail:
return ret;
}
+static int do_get_act_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
/**
- * drm_dp_check_act_status() - Check ACT handled status.
+ * drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
*
- * Check the payload status bits in the DPCD for ACT handled completion.
+ * Tries waiting for the MST hub to finish updating its payload table by
+ * polling for the ACT handled bit for up to 3 seconds (yes, some hubs really
+ * take that long).
+ *
+ * Returns:
+ * 0 if the ACT was handled in time, negative error code on failure.
*/
int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
{
- u8 status;
- int ret;
- int count = 0;
-
- do {
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
-
- if (ret < 0) {
- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (status & DP_PAYLOAD_ACT_HANDLED)
- break;
- count++;
- udelay(100);
-
- } while (count < 30);
-
- if (!(status & DP_PAYLOAD_ACT_HANDLED)) {
- DRM_DEBUG_KMS("failed to get ACT bit %d after %d retries\n", status, count);
- ret = -EINVAL;
- goto fail;
+ /*
+ * There doesn't seem to be any recommended retry count or timeout in
+ * the MST specification. Since some hubs have been observed to take
+ * over 1 second to update their payload allocations under certain
+ * conditions, we use a rather large timeout value.
+ */
+ const int timeout_ms = 3000;
+ int ret, status;
+
+ ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
+ status & DP_PAYLOAD_ACT_HANDLED || status < 0,
+ 200, timeout_ms * USEC_PER_MSEC);
+ if (ret < 0 && status >= 0) {
+ DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
+ timeout_ms, status);
+ return -EINVAL;
+ } else if (status < 0) {
+ /*
+ * Failure here isn't unexpected - the hub may have
+ * just been unplugged
+ */
+ DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
+ status);
+ return status;
}
+
return 0;
-fail:
- return ret;
}
EXPORT_SYMBOL(drm_dp_check_act_status);
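readx_poll_timeout() from <linux/iopoll.h> takes over the sleep/retry/deadline bookkeeping the removed open-coded loop did by hand: it repeatedly assigns status = op(addr), sleeps between attempts and returns -ETIMEDOUT once the deadline passes without the condition holding. A generic sketch of the same pattern against a hypothetical status reader:

        /* foo_read_status() is made up; negative returns abort the poll. */
        static int foo_read_status(void *ctx);

        static int foo_wait_ready(void *ctx)
        {
                int status, ret;

                /* poll every 200 us, give up after 3 s */
                ret = readx_poll_timeout(foo_read_status, ctx, status,
                                         status < 0 || (status & BIT(0)),
                                         200, 3 * USEC_PER_SEC);
                if (ret)
                        return ret;             /* -ETIMEDOUT */

                return status < 0 ? status : 0;
        }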
@@ -4669,30 +4661,21 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock);
- if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
+ if (!list_empty(&mgr->tx_msg_downq))
process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock);
}
-static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+static inline void
+drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (!port->connector)
- return;
+ drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
- if (port->mgr->cbs->destroy_connector) {
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
- } else {
+ if (port->connector) {
drm_connector_unregister(port->connector);
drm_connector_put(port->connector);
}
-}
-
-static inline void
-drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
-{
- drm_dp_destroy_connector(port);
- drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
}
@@ -4700,26 +4683,25 @@ static inline void
drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
- struct drm_dp_mst_port *port, *tmp;
+ struct drm_dp_mst_port *port, *port_tmp;
+ struct drm_dp_sideband_msg_tx *txmsg, *txmsg_tmp;
bool wake_tx = false;
mutex_lock(&mgr->lock);
- list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
+ list_for_each_entry_safe(port, port_tmp, &mstb->ports, next) {
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
}
mutex_unlock(&mgr->lock);
- /* drop any tx slots msg */
+ /* drop any tx slot msg */
mutex_lock(&mstb->mgr->qlock);
- if (mstb->tx_slots[0]) {
- mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[0] = NULL;
- wake_tx = true;
- }
- if (mstb->tx_slots[1]) {
- mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
- mstb->tx_slots[1] = NULL;
+ list_for_each_entry_safe(txmsg, txmsg_tmp, &mgr->tx_msg_downq, next) {
+ if (txmsg->dst != mstb)
+ continue;
+
+ txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
+ list_del(&txmsg->next);
wake_tx = true;
}
mutex_unlock(&mstb->mgr->qlock);
@@ -5226,6 +5208,15 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
INIT_LIST_HEAD(&mgr->destroy_port_list);
INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
INIT_LIST_HEAD(&mgr->up_req_list);
+
+ /*
+ * delayed_destroy_work will be queued on a dedicated WQ, so that any
+ * requeuing will also be flushed when deinitializing the topology manager.
+ */
+ mgr->delayed_destroy_wq = alloc_ordered_workqueue("drm_dp_mst_wq", 0);
+ if (mgr->delayed_destroy_wq == NULL)
+ return -ENOMEM;
+
INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
@@ -5270,7 +5261,11 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
- cancel_work_sync(&mgr->delayed_destroy_work);
+ /* The following will also drain any requeued work on the WQ. */
+ if (mgr->delayed_destroy_wq) {
+ destroy_workqueue(mgr->delayed_destroy_wq);
+ mgr->delayed_destroy_wq = NULL;
+ }
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
mgr->payloads = NULL;
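The move from schedule_work() to a dedicated ordered workqueue matters precisely at teardown: destroy_workqueue() drains the queue, including work items the handler requeues onto the same queue, which a bare cancel_work_sync() against the system workqueue cannot guarantee. The pattern in isolation, with hypothetical names (the queue itself coming from alloc_ordered_workqueue() as above):

        struct foo {
                struct workqueue_struct *wq;
                struct work_struct destroy_work;
        };

        static bool foo_destroy_one(struct foo *foo);   /* made-up helper */

        static void foo_destroy_work_fn(struct work_struct *work)
        {
                struct foo *foo = container_of(work, struct foo, destroy_work);

                /* requeue ourselves until all pending objects are gone */
                if (!foo_destroy_one(foo))
                        queue_work(foo->wq, &foo->destroy_work);
        }

        static void foo_teardown(struct foo *foo)
        {
                /* flushes the running item and anything it requeued */
                destroy_workqueue(foo->wq);
                foo->wq = NULL;
        }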
@@ -5393,22 +5388,26 @@ static const struct i2c_algorithm drm_dp_mst_i2c_algo = {
/**
* drm_dp_mst_register_i2c_bus() - register an I2C adapter for I2C-over-AUX
- * @aux: DisplayPort AUX channel
+ * @port: The port to add the I2C bus on
*
* Returns 0 on success or a negative error code on failure.
*/
-static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
+static int drm_dp_mst_register_i2c_bus(struct drm_dp_mst_port *port)
{
+ struct drm_dp_aux *aux = &port->aux;
+ struct device *parent_dev = port->mgr->dev->dev;
+
aux->ddc.algo = &drm_dp_mst_i2c_algo;
aux->ddc.algo_data = aux;
aux->ddc.retries = 3;
aux->ddc.class = I2C_CLASS_DDC;
aux->ddc.owner = THIS_MODULE;
- aux->ddc.dev.parent = aux->dev;
- aux->ddc.dev.of_node = aux->dev->of_node;
+ /* FIXME: set the kdev of the port's connector as parent */
+ aux->ddc.dev.parent = parent_dev;
+ aux->ddc.dev.of_node = parent_dev->of_node;
- strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(aux->dev),
+ strlcpy(aux->ddc.name, aux->name ? aux->name : dev_name(parent_dev),
sizeof(aux->ddc.name));
return i2c_add_adapter(&aux->ddc);
@@ -5416,11 +5415,11 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux)
/**
* drm_dp_mst_unregister_i2c_bus() - unregister an I2C-over-AUX adapter
- * @aux: DisplayPort AUX channel
+ * @port: The port to remove the I2C bus from
*/
-static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
+static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_mst_port *port)
{
- i2c_del_adapter(&aux->ddc);
+ i2c_del_adapter(&port->aux.ddc);
}
/**
@@ -5494,7 +5493,7 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *immediate_upstream_port;
struct drm_dp_mst_port *fec_port;
- struct drm_dp_desc desc = { 0 };
+ struct drm_dp_desc desc = {};
u8 endpoint_fec;
u8 endpoint_dsc;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7b1a628d1f6e..bc38322f306e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
@@ -92,13 +93,27 @@ static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
}
}
+static void drm_minor_alloc_release(struct drm_device *dev, void *data)
+{
+ struct drm_minor *minor = data;
+ unsigned long flags;
+
+ WARN_ON(dev != minor->dev);
+
+ put_device(minor->kdev);
+
+ spin_lock_irqsave(&drm_minor_lock, flags);
+ idr_remove(&drm_minors_idr, minor->index);
+ spin_unlock_irqrestore(&drm_minor_lock, flags);
+}
+
static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
{
struct drm_minor *minor;
unsigned long flags;
int r;
- minor = kzalloc(sizeof(*minor), GFP_KERNEL);
+ minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
if (!minor)
return -ENOMEM;
@@ -116,46 +131,20 @@ static int drm_minor_alloc(struct drm_device *dev, unsigned int type)
idr_preload_end();
if (r < 0)
- goto err_free;
+ return r;
minor->index = r;
+ r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
+ if (r)
+ return r;
+
minor->kdev = drm_sysfs_minor_alloc(minor);
- if (IS_ERR(minor->kdev)) {
- r = PTR_ERR(minor->kdev);
- goto err_index;
- }
+ if (IS_ERR(minor->kdev))
+ return PTR_ERR(minor->kdev);
*drm_minor_get_slot(dev, type) = minor;
return 0;
-
-err_index:
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-err_free:
- kfree(minor);
- return r;
-}
-
-static void drm_minor_free(struct drm_device *dev, unsigned int type)
-{
- struct drm_minor **slot, *minor;
- unsigned long flags;
-
- slot = drm_minor_get_slot(dev, type);
- minor = *slot;
- if (!minor)
- return;
-
- put_device(minor->kdev);
-
- spin_lock_irqsave(&drm_minor_lock, flags);
- idr_remove(&drm_minors_idr, minor->index);
- spin_unlock_irqrestore(&drm_minor_lock, flags);
-
- kfree(minor);
- *slot = NULL;
}
static int drm_minor_register(struct drm_device *dev, unsigned int type)
@@ -270,17 +259,22 @@ void drm_minor_release(struct drm_minor *minor)
* any other resources allocated at device initialization and drop the driver's
* reference to &drm_device using drm_dev_put().
*
- * Note that the lifetime rules for &drm_device instance has still a lot of
- * historical baggage. Hence use the reference counting provided by
- * drm_dev_get() and drm_dev_put() only carefully.
+ * Note that any allocation or resource which is visible to userspace must be
+ * released only when the final drm_dev_put() is called, and not when the
+ * driver is unbound from the underlying physical struct &device. Best to use
+ * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
+ * related functions.
+ *
+ * devres managed resources like devm_kmalloc() can only be used for resources
+ * directly related to the underlying hardware device, and only used in code
+ * paths fully protected by drm_dev_enter() and drm_dev_exit().
*
* Display driver example
* ~~~~~~~~~~~~~~~~~~~~~~
*
* The following example shows a typical structure of a DRM display driver.
* The example focuses on the probe() function and the other functions that are
- * almost always present and serves as a demonstration of devm_drm_dev_init()
- * usage with its accompanying drm_driver->release callback.
+ * almost always present. It serves as a demonstration of devm_drm_dev_init().
*
* .. code-block:: c
*
@@ -290,19 +284,8 @@ void drm_minor_release(struct drm_minor *minor)
* struct clk *pclk;
* };
*
- * static void driver_drm_release(struct drm_device *drm)
- * {
- * struct driver_device *priv = container_of(...);
- *
- * drm_mode_config_cleanup(drm);
- * drm_dev_fini(drm);
- * kfree(priv->userspace_facing);
- * kfree(priv);
- * }
- *
* static struct drm_driver driver_drm_driver = {
* [...]
- * .release = driver_drm_release,
* };
*
* static int driver_probe(struct platform_device *pdev)
@@ -322,13 +305,16 @@ void drm_minor_release(struct drm_minor *minor)
*
* ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
* if (ret) {
- * kfree(drm);
+ * kfree(priv);
* return ret;
* }
+ * drmm_add_final_kfree(drm, priv);
*
- * drm_mode_config_init(drm);
+ * ret = drmm_mode_config_init(drm);
+ * if (ret)
+ * return ret;
*
- * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ * priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
* if (!priv->userspace_facing)
* return -ENOMEM;
*
@@ -580,6 +566,23 @@ static void drm_fs_inode_free(struct inode *inode)
* used.
*/
+static void drm_dev_init_release(struct drm_device *dev, void *res)
+{
+ drm_legacy_ctxbitmap_cleanup(dev);
+ drm_legacy_remove_map_hash(dev);
+ drm_fs_inode_free(dev->anon_inode);
+
+ put_device(dev->dev);
+ /* Prevent use-after-free in drm_managed_release when debugging is
+ * enabled. Slightly awkward, but can't really be helped. */
+ dev->dev = NULL;
+ mutex_destroy(&dev->master_mutex);
+ mutex_destroy(&dev->clientlist_mutex);
+ mutex_destroy(&dev->filelist_mutex);
+ mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
+}
+
/**
* drm_dev_init - Initialise new DRM device
* @dev: DRM device
@@ -608,6 +611,9 @@ static void drm_fs_inode_free(struct inode *inode)
* arbitrary offset, you must supply a &drm_driver.release callback and control
* the finalization explicitly.
*
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
+ *
* RETURNS:
* 0 on success, or error code on failure.
*/
@@ -629,6 +635,9 @@ int drm_dev_init(struct drm_device *dev,
dev->dev = get_device(parent);
dev->driver = driver;
+ INIT_LIST_HEAD(&dev->managed.resources);
+ spin_lock_init(&dev->managed.lock);
+
/* no per-device feature limits by default */
dev->driver_features = ~0u;
@@ -644,26 +653,30 @@ int drm_dev_init(struct drm_device *dev,
mutex_init(&dev->clientlist_mutex);
mutex_init(&dev->master_mutex);
+ ret = drmm_add_action(dev, drm_dev_init_release, NULL);
+ if (ret)
+ return ret;
+
dev->anon_inode = drm_fs_inode_new();
if (IS_ERR(dev->anon_inode)) {
ret = PTR_ERR(dev->anon_inode);
DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
- goto err_free;
+ goto err;
}
if (drm_core_check_feature(dev, DRIVER_RENDER)) {
ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
if (ret)
- goto err_minors;
+ goto err;
}
ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
if (ret)
- goto err_minors;
+ goto err;
ret = drm_legacy_create_map_hash(dev);
if (ret)
- goto err_minors;
+ goto err;
drm_legacy_ctxbitmap_init(dev);
@@ -671,33 +684,19 @@ int drm_dev_init(struct drm_device *dev,
ret = drm_gem_init(dev);
if (ret) {
DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
- goto err_ctxbitmap;
+ goto err;
}
}
ret = drm_dev_set_unique(dev, dev_name(parent));
if (ret)
- goto err_setunique;
+ goto err;
return 0;
-err_setunique:
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-err_ctxbitmap:
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
-err_minors:
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
- drm_fs_inode_free(dev->anon_inode);
-err_free:
- put_device(dev->dev);
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->clientlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
+err:
+ drm_managed_release(dev);
+
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
@@ -714,8 +713,10 @@ static void devm_drm_dev_init_release(void *data)
* @driver: DRM driver
*
* Managed drm_dev_init(). The DRM device initialized with this function is
- * automatically put on driver detach using drm_dev_put(). You must supply a
- * &drm_driver.release callback to control the finalization explicitly.
+ * automatically put on driver detach using drm_dev_put().
+ *
+ * Note that drivers must call drmm_add_final_kfree() after this function has
+ * completed successfully.
*
* RETURNS:
* 0 on success, or error code on failure.
@@ -726,9 +727,6 @@ int devm_drm_dev_init(struct device *parent,
{
int ret;
- if (WARN_ON(!driver->release))
- return -EINVAL;
-
ret = drm_dev_init(dev, driver, parent);
if (ret)
return ret;
@@ -741,42 +739,28 @@ int devm_drm_dev_init(struct device *parent,
}
EXPORT_SYMBOL(devm_drm_dev_init);
-/**
- * drm_dev_fini - Finalize a dead DRM device
- * @dev: DRM device
- *
- * Finalize a dead DRM device. This is the converse to drm_dev_init() and
- * frees up all data allocated by it. All driver private data should be
- * finalized first. Note that this function does not free the @dev, that is
- * left to the caller.
- *
- * The ref-count of @dev must be zero, and drm_dev_fini() should only be called
- * from a &drm_driver.release callback.
- */
-void drm_dev_fini(struct drm_device *dev)
+void *__devm_drm_dev_alloc(struct device *parent, struct drm_driver *driver,
+ size_t size, size_t offset)
{
- drm_vblank_cleanup(dev);
-
- if (drm_core_check_feature(dev, DRIVER_GEM))
- drm_gem_destroy(dev);
-
- drm_legacy_ctxbitmap_cleanup(dev);
- drm_legacy_remove_map_hash(dev);
- drm_fs_inode_free(dev->anon_inode);
+ void *container;
+ struct drm_device *drm;
+ int ret;
- drm_minor_free(dev, DRM_MINOR_PRIMARY);
- drm_minor_free(dev, DRM_MINOR_RENDER);
+ container = kzalloc(size, GFP_KERNEL);
+ if (!container)
+ return ERR_PTR(-ENOMEM);
- put_device(dev->dev);
+ drm = container + offset;
+ ret = devm_drm_dev_init(parent, drm, driver);
+ if (ret) {
+ kfree(container);
+ return ERR_PTR(ret);
+ }
+ drmm_add_final_kfree(drm, container);
- mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->clientlist_mutex);
- mutex_destroy(&dev->filelist_mutex);
- mutex_destroy(&dev->struct_mutex);
- drm_legacy_destroy_members(dev);
- kfree(dev->unique);
+ return container;
}
-EXPORT_SYMBOL(drm_dev_fini);
+EXPORT_SYMBOL(__devm_drm_dev_alloc);
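The size/offset pair exists so a wrapper macro can hand back the driver's own structure with &drm_device embedded at an arbitrary member. Assuming the devm_drm_dev_alloc() macro that accompanies this helper, the probe sketch from the documentation above shrinks further:

        static int driver_probe(struct platform_device *pdev)
        {
                struct driver_device *priv;

                /* allocates the container and wires up all cleanup actions */
                priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
                                          struct driver_device, drm);
                if (IS_ERR(priv))
                        return PTR_ERR(priv);

                /* no explicit kfree(), drmm_add_final_kfree() or drm_dev_put() */
                return drm_dev_register(&priv->drm, 0);
        }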
/**
* drm_dev_alloc - Allocate new DRM device
@@ -816,6 +800,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
return ERR_PTR(ret);
}
+ drmm_add_final_kfree(dev, dev);
+
return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);
@@ -824,12 +810,13 @@ static void drm_dev_release(struct kref *ref)
{
struct drm_device *dev = container_of(ref, struct drm_device, ref);
- if (dev->driver->release) {
+ if (dev->driver->release)
dev->driver->release(dev);
- } else {
- drm_dev_fini(dev);
- kfree(dev);
- }
+
+ drm_managed_release(dev);
+
+ if (dev->managed.final_kfree)
+ kfree(dev->managed.final_kfree);
}
/**
@@ -946,6 +933,11 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
+ if (!driver->load)
+ drm_mode_config_validate(dev);
+
+ WARN_ON(!dev->managed.final_kfree);
+
if (drm_dev_needs_global_mutex(dev))
mutex_lock(&drm_global_mutex);
@@ -1046,8 +1038,8 @@ EXPORT_SYMBOL(drm_dev_unregister);
*/
int drm_dev_set_unique(struct drm_device *dev, const char *name)
{
- kfree(dev->unique);
- dev->unique = kstrdup(name, GFP_KERNEL);
+ drmm_kfree(dev, dev->unique);
+ dev->unique = drmm_kstrdup(dev, name, GFP_KERNEL);
return dev->unique ? 0 : -ENOMEM;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 116451101426..d8372d63851b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -191,10 +191,11 @@ static const struct edid_quirk {
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
- /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
{ "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
{ "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
/* Windows Mixed Reality Headsets */
{ "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
@@ -719,662 +720,662 @@ static const struct drm_display_mode edid_cea_modes_1[] = {
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 2 - 720x480@60Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 3 - 720x480@60Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 1280x720@60Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 5 - 1920x1080i@60Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 6 - 720(1440)x480i@60Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 7 - 720(1440)x480i@60Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 8 - 720(1440)x240@60Hz 4:3 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 9 - 720(1440)x240@60Hz 16:9 */
{ DRM_MODE("720x240", DRM_MODE_TYPE_DRIVER, 13500, 720, 739,
801, 858, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 10 - 2880x480i@60Hz 4:3 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 11 - 2880x480i@60Hz 16:9 */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 12 - 2880x240@60Hz 4:3 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 13 - 2880x240@60Hz 16:9 */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 14 - 1440x480@60Hz 4:3 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 15 - 1440x480@60Hz 16:9 */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 16 - 1920x1080@60Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 17 - 720x576@50Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 18 - 720x576@50Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 19 - 1280x720@50Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 20 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 21 - 720(1440)x576i@50Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 22 - 720(1440)x576i@50Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 23 - 720(1440)x288@50Hz 4:3 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 24 - 720(1440)x288@50Hz 16:9 */
{ DRM_MODE("720x288", DRM_MODE_TYPE_DRIVER, 13500, 720, 732,
795, 864, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 25 - 2880x576i@50Hz 4:3 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 26 - 2880x576i@50Hz 16:9 */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 27 - 2880x288@50Hz 4:3 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 28 - 2880x288@50Hz 16:9 */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 29 - 1440x576@50Hz 4:3 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 30 - 1440x576@50Hz 16:9 */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 31 - 1920x1080@50Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 32 - 1920x1080@24Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 33 - 1920x1080@25Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 34 - 1920x1080@30Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 35 - 2880x480@60Hz 4:3 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 36 - 2880x480@60Hz 16:9 */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 37 - 2880x576@50Hz 4:3 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 38 - 2880x576@50Hz 16:9 */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 39 - 1920x1080i@50Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 40 - 1920x1080i@100Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 41 - 1280x720@100Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 42 - 720x576@100Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 43 - 720x576@100Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 44 - 720(1440)x576i@100Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 45 - 720(1440)x576i@100Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 46 - 1920x1080i@120Hz 16:9 */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 47 - 1280x720@120Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 48 - 720x480@120Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 49 - 720x480@120Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 50 - 720(1440)x480i@120Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 51 - 720(1440)x480i@120Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 27000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 52 - 720x576@200Hz 4:3 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 53 - 720x576@200Hz 16:9 */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 54 - 720(1440)x576i@200Hz 4:3 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 55 - 720(1440)x576i@200Hz 16:9 */
{ DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 200, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 56 - 720x480@240Hz 4:3 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 57 - 720x480@240Hz 16:9 */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
- .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 58 - 720(1440)x480i@240Hz 4:3 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3, },
/* 59 - 720(1440)x480i@240Hz 16:9 */
{ DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 54000, 720, 739,
801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK),
- .vrefresh = 240, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 60 - 1280x720@24Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 61 - 1280x720@25Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 62 - 1280x720@30Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 63 - 1920x1080@120Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 64 - 1920x1080@100Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 65 - 1280x720@24Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 66 - 1280x720@25Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 67 - 1280x720@30Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 68 - 1280x720@50Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 69 - 1280x720@60Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 70 - 1280x720@100Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 71 - 1280x720@120Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 72 - 1920x1080@24Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 73 - 1920x1080@25Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 74 - 1920x1080@30Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 75 - 1920x1080@50Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 76 - 1920x1080@60Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 77 - 1920x1080@100Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 78 - 1920x1080@120Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 79 - 1680x720@24Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 80 - 1680x720@25Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2908,
2948, 3168, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 81 - 1680x720@30Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 59400, 1680, 2380,
2420, 2640, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 82 - 1680x720@50Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 82500, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 83 - 1680x720@60Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 1940,
1980, 2200, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 84 - 1680x720@100Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 165000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 85 - 1680x720@120Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 198000, 1680, 1740,
1780, 2000, 0, 720, 725, 730, 825, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 86 - 2560x1080@24Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 99000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 87 - 2560x1080@25Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 90000, 2560, 3008,
3052, 3200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 88 - 2560x1080@30Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 118800, 2560, 3328,
3372, 3520, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 89 - 2560x1080@50Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 185625, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 90 - 2560x1080@60Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 2808,
2852, 3000, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 91 - 2560x1080@100Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 371250, 2560, 2778,
2822, 2970, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 92 - 2560x1080@120Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 495000, 2560, 3108,
3152, 3300, 0, 1080, 1084, 1089, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 93 - 3840x2160@24Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 94 - 3840x2160@25Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 95 - 3840x2160@30Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 96 - 3840x2160@50Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 97 - 3840x2160@60Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 98 - 4096x2160@24Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 99 - 4096x2160@25Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 100 - 4096x2160@30Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 101 - 4096x2160@50Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5064,
5152, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 102 - 4096x2160@60Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 103 - 3840x2160@24Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 104 - 3840x2160@25Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 105 - 3840x2160@30Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 106 - 3840x2160@50Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 107 - 3840x2160@60Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 108 - 1280x720@48Hz 16:9 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
2280, 2500, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 109 - 1280x720@48Hz 64:27 */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240,
2280, 2500, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 110 - 1680x720@48Hz 64:27 */
{ DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490,
2530, 2750, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 111 - 1920x1080@48Hz 16:9 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 112 - 1920x1080@48Hz 64:27 */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 113 - 2560x1080@48Hz 64:27 */
{ DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558,
3602, 3750, 0, 1080, 1084, 1089, 1100, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 114 - 3840x2160@48Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 115 - 4096x2160@48Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 116 - 3840x2160@48Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116,
5204, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 117 - 3840x2160@100Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 118 - 3840x2160@120Hz 16:9 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 119 - 3840x2160@100Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 120 - 3840x2160@120Hz 64:27 */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016,
4104, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 121 - 5120x2160@24Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116,
7204, 7500, 0, 2160, 2168, 2178, 2200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 122 - 5120x2160@25Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816,
6904, 7200, 0, 2160, 2168, 2178, 2200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 123 - 5120x2160@30Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784,
5872, 6000, 0, 2160, 2168, 2178, 2200, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 124 - 5120x2160@48Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866,
5954, 6250, 0, 2160, 2168, 2178, 2475, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 125 - 5120x2160@50Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216,
6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 126 - 5120x2160@60Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284,
5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 127 - 5120x2160@100Hz 64:27 */
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216,
6304, 6600, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
};
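
The .vrefresh removals throughout these tables rely on the refresh rate
being derivable from the timings themselves. A minimal sketch of that
derivation, assuming the semantics of drm_mode_vrefresh() (not code taken
from this diff):

	/*
	 * Sketch: derive the vertical refresh rate from the mode timings.
	 * mode->clock is in kHz, so scale by 1000 before dividing by the
	 * total pixel count per frame.
	 */
	static int sketch_vrefresh(const struct drm_display_mode *mode)
	{
		unsigned int num, den;

		if (mode->htotal == 0 || mode->vtotal == 0)
			return 0;

		num = mode->clock * 1000;
		den = mode->htotal * mode->vtotal;

		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			num *= 2;
		if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
			den *= 2;
		if (mode->vscan > 1)
			den *= mode->vscan;

		return DIV_ROUND_CLOSEST(num, den);
	}

For the 1920x1080@50Hz entry above (148500 kHz clock, 2640x1125 total)
this gives DIV_ROUND_CLOSEST(148500000, 2970000) = 50, matching the
dropped .vrefresh value.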
/*
@@ -1387,137 +1388,137 @@ static const struct drm_display_mode edid_cea_modes_193[] = {
{ DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 5284,
5372, 5500, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 194 - 7680x4320@24Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 195 - 7680x4320@25Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 196 - 7680x4320@30Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 197 - 7680x4320@48Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 198 - 7680x4320@50Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 199 - 7680x4320@60Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 200 - 7680x4320@100Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 201 - 7680x4320@120Hz 16:9 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 202 - 7680x4320@24Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 203 - 7680x4320@25Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 204 - 7680x4320@30Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 1188000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 205 - 7680x4320@48Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10232,
10408, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 206 - 7680x4320@50Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 10032,
10208, 10800, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 207 - 7680x4320@60Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 2376000, 7680, 8232,
8408, 9000, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 208 - 7680x4320@100Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 9792,
9968, 10560, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 209 - 7680x4320@120Hz 64:27 */
{ DRM_MODE("7680x4320", DRM_MODE_TYPE_DRIVER, 4752000, 7680, 8032,
8208, 8800, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 210 - 10240x4320@24Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 11732,
11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 211 - 10240x4320@25Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 12732,
12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 212 - 10240x4320@30Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 1485000, 10240, 10528,
10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 213 - 10240x4320@48Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 11732,
11908, 12500, 0, 4320, 4336, 4356, 4950, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 214 - 10240x4320@50Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 12732,
12908, 13500, 0, 4320, 4336, 4356, 4400, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 215 - 10240x4320@60Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 2970000, 10240, 10528,
10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 216 - 10240x4320@100Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 12432,
12608, 13200, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 217 - 10240x4320@120Hz 64:27 */
{ DRM_MODE("10240x4320", DRM_MODE_TYPE_DRIVER, 5940000, 10240, 10528,
10704, 11000, 0, 4320, 4336, 4356, 4500, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, },
/* 218 - 4096x2160@100Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4896,
4984, 5280, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
/* 219 - 4096x2160@120Hz 256:135 */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 1188000, 4096, 4184,
4272, 4400, 0, 2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
/*
@@ -1531,25 +1532,25 @@ static const struct drm_display_mode edid_4k_modes[] = {
3840, 4016, 4104, 4400, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 2 - 3840x2160@25Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 4896, 4984, 5280, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 3 - 3840x2160@24Hz */
{ DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
3840, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, },
/* 4 - 4096x2160@24Hz (SMPTE) */
{ DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
4096, 5116, 5204, 5500, 0,
2160, 2168, 2178, 2250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
- .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
+ .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, },
};
/*** DDC fetch and block validation ***/
@@ -1583,8 +1584,6 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
-static void drm_get_displayid(struct drm_connector *connector,
- struct edid *edid);
static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
@@ -2018,18 +2017,13 @@ EXPORT_SYMBOL(drm_probe_ddc);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
- struct edid *edid;
-
if (connector->force == DRM_FORCE_OFF)
return NULL;
if (connector->force == DRM_FORCE_UNSPECIFIED && !drm_probe_ddc(adapter))
return NULL;
- edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
- if (edid)
- drm_get_displayid(connector, edid);
- return edid;
+ return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
}
EXPORT_SYMBOL(drm_get_edid);
@@ -2152,10 +2146,8 @@ static void edid_fixup_preferred(struct drm_connector *connector,
if (MODE_SIZE(cur_mode) > MODE_SIZE(preferred_mode))
preferred_mode = cur_mode;
- cur_vrefresh = cur_mode->vrefresh ?
- cur_mode->vrefresh : drm_mode_vrefresh(cur_mode);
- preferred_vrefresh = preferred_mode->vrefresh ?
- preferred_mode->vrefresh : drm_mode_vrefresh(preferred_mode);
+ cur_vrefresh = drm_mode_vrefresh(cur_mode);
+ preferred_vrefresh = drm_mode_vrefresh(preferred_mode);
/* At a given size, try to get closest to target refresh */
if ((MODE_SIZE(cur_mode) == MODE_SIZE(preferred_mode)) &&
MODE_REFRESH_DIFF(cur_vrefresh, target_refresh) <
@@ -2387,6 +2379,14 @@ bad_std_timing(u8 a, u8 b)
(a == 0x20 && b == 0x20);
}
+static int drm_mode_hsync(const struct drm_display_mode *mode)
+{
+ if (mode->htotal <= 0)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
+}
+
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @connector: connector of for the EDID block
@@ -2652,7 +2652,6 @@ set_size:
}
mode->type = DRM_MODE_TYPE_DRIVER;
- mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
return mode;
@@ -3212,16 +3211,33 @@ static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id)
}
-static u8 *drm_find_displayid_extension(const struct edid *edid)
+static u8 *drm_find_displayid_extension(const struct edid *edid,
+ int *length, int *idx)
{
- return drm_find_edid_extension(edid, DISPLAYID_EXT);
+ u8 *displayid = drm_find_edid_extension(edid, DISPLAYID_EXT);
+ struct displayid_hdr *base;
+ int ret;
+
+ if (!displayid)
+ return NULL;
+
+ /* EDID extensions block checksum isn't for us */
+ *length = EDID_LENGTH - 1;
+ *idx = 1;
+
+ ret = validate_displayid(displayid, *length, *idx);
+ if (ret)
+ return NULL;
+
+ base = (struct displayid_hdr *)&displayid[*idx];
+ *length = *idx + sizeof(*base) + base->bytes;
+
+ return displayid;
}
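
With this change the helper both validates the DisplayID section and
reports its bounds to the caller. A hypothetical caller sketch (variable
names invented here, not from the diff) showing the contract of the two
out-parameters:

	/*
	 * On success, *idx points at the struct displayid_hdr inside the
	 * extension block and *length covers the header plus its payload,
	 * so block iteration needs no separate re-validation.
	 */
	int length, idx;
	u8 *did = drm_find_displayid_extension(edid, &length, &idx);

	if (did) {
		struct displayid_block *block;

		idx += sizeof(struct displayid_hdr);
		for_each_displayid_db(did, block, idx, length)
			DRM_DEBUG_KMS("found block tag 0x%x\n", block->tag);
	}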
static u8 *drm_find_cea_extension(const struct edid *edid)
{
- int ret;
- int idx = 1;
- int length = EDID_LENGTH;
+ int length, idx;
struct displayid_block *block;
u8 *cea;
u8 *displayid;
@@ -3232,14 +3248,10 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
/* CEA blocks can also be found embedded in a DisplayID block */
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid)
return NULL;
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return NULL;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
if (block->tag == DATA_BLOCK_CTA) {
@@ -3284,7 +3296,7 @@ cea_mode_alternate_clock(const struct drm_display_mode *cea_mode)
{
unsigned int clock = cea_mode->clock;
- if (cea_mode->vrefresh % 6 != 0)
+ if (drm_mode_vrefresh(cea_mode) % 6 != 0)
return clock;
/*
@@ -3611,8 +3623,6 @@ drm_display_mode_from_vic_index(struct drm_connector *connector,
if (!newmode)
return NULL;
- newmode->vrefresh = 0;
-
return newmode;
}
@@ -5084,7 +5094,7 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
static int validate_displayid(u8 *displayid, int length, int idx)
{
- int i;
+ int i, dispid_length;
u8 csum = 0;
struct displayid_hdr *base;
@@ -5093,15 +5103,18 @@ static int validate_displayid(u8 *displayid, int length, int idx)
DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
base->rev, base->bytes, base->prod_id, base->ext_count);
- if (base->bytes + 5 > length - idx)
+ /* +1 for DispID checksum */
+ dispid_length = sizeof(*base) + base->bytes + 1;
+ if (dispid_length > length - idx)
return -EINVAL;
- for (i = idx; i <= base->bytes + 5; i++) {
- csum += displayid[i];
- }
+
+ for (i = 0; i < dispid_length; i++)
+ csum += displayid[idx + i];
if (csum) {
DRM_NOTE("DisplayID checksum invalid, remainder is %d\n", csum);
return -EINVAL;
}
+
return 0;
}
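
The rewritten loop implements the DisplayID checksum rule: every byte of
the section, including the trailing checksum byte itself, must sum to
zero modulo 256. A standalone sketch of the same rule (assumed from the
DisplayID spec, not code from this diff):

	/* Return true if a DisplayID section of len bytes checksums to 0. */
	static bool displayid_section_ok(const u8 *section, int len)
	{
		u8 sum = 0;
		int i;

		for (i = 0; i < len; i++)
			sum += section[i];

		return sum == 0;
	}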
@@ -5111,7 +5124,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16));
+ (timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
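
The "+ 1" added to the pixel clock mirrors the adjacent fields: DisplayID
detailed timings appear to store each value minus one, with the 24-bit
clock field in 10 kHz units. A sketch of the decode under that
assumption (helper name invented):

	/* Decode a DisplayID detailed-timing pixel clock field to kHz. */
	static unsigned int displayid_pixel_clock_khz(const u8 pc[3])
	{
		unsigned int raw = pc[0] | (pc[1] << 8) | (pc[2] << 16);

		return (raw + 1) * 10;	/* stored as (clock / 10 kHz) - 1 */
	}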
@@ -5144,7 +5157,6 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
if (timings->flags & 0x80)
mode->type |= DRM_MODE_TYPE_PREFERRED;
- mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
return mode;
@@ -5180,20 +5192,14 @@ static int add_displayid_detailed_modes(struct drm_connector *connector,
struct edid *edid)
{
u8 *displayid;
- int ret;
- int idx = 1;
- int length = EDID_LENGTH;
+ int length, idx;
struct displayid_block *block;
int num_modes = 0;
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid)
return 0;
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return 0;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
switch (block->tag) {
@@ -5782,9 +5788,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
static int drm_parse_tiled_block(struct drm_connector *connector,
- struct displayid_block *block)
+ const struct displayid_block *block)
{
- struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+ const struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
u16 w, h;
u8 tile_v_loc, tile_h_loc;
u8 num_v_tile, num_h_tile;
@@ -5835,22 +5841,12 @@ static int drm_parse_tiled_block(struct drm_connector *connector,
return 0;
}
-static int drm_parse_display_id(struct drm_connector *connector,
- u8 *displayid, int length,
- bool is_edid_extension)
+static int drm_displayid_parse_tiled(struct drm_connector *connector,
+ const u8 *displayid, int length, int idx)
{
- /* if this is an EDID extension the first byte will be 0x70 */
- int idx = 0;
- struct displayid_block *block;
+ const struct displayid_block *block;
int ret;
- if (is_edid_extension)
- idx = 1;
-
- ret = validate_displayid(displayid, length, idx);
- if (ret)
- return ret;
-
idx += sizeof(struct displayid_hdr);
for_each_displayid_db(displayid, block, idx, length) {
DRM_DEBUG_KMS("block id 0x%x, rev %d, len %d\n",
@@ -5862,12 +5858,6 @@ static int drm_parse_display_id(struct drm_connector *connector,
if (ret)
return ret;
break;
- case DATA_BLOCK_TYPE_1_DETAILED_TIMING:
- /* handled in mode gathering code. */
- break;
- case DATA_BLOCK_CTA:
- /* handled in the cea parser code. */
- break;
default:
DRM_DEBUG_KMS("found DisplayID tag 0x%x, unhandled\n", block->tag);
break;
@@ -5876,19 +5866,21 @@ static int drm_parse_display_id(struct drm_connector *connector,
return 0;
}
-static void drm_get_displayid(struct drm_connector *connector,
- struct edid *edid)
+void drm_update_tile_info(struct drm_connector *connector,
+ const struct edid *edid)
{
- void *displayid = NULL;
+ const void *displayid = NULL;
+ int length, idx;
int ret;
+
connector->has_tile = false;
- displayid = drm_find_displayid_extension(edid);
+ displayid = drm_find_displayid_extension(edid, &length, &idx);
if (!displayid) {
/* drop reference to any tile group we had */
goto out_drop_ref;
}
- ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+ ret = drm_displayid_parse_tiled(connector, displayid, length, idx);
if (ret < 0)
goto out_drop_ref;
if (!connector->has_tile)
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index cf804389f5ec..e464429d32df 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -61,13 +61,8 @@ int drm_i2c_encoder_init(struct drm_device *dev,
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
- client = i2c_new_device(adap, info);
- if (!client) {
- err = -ENOMEM;
- goto fail;
- }
-
- if (!client->dev.driver) {
+ client = i2c_new_client_device(adap, info);
+ if (!i2c_client_has_driver(client)) {
err = -ENODEV;
goto fail_unregister;
}
@@ -84,7 +79,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
- goto fail_unregister;
+ goto fail_module_put;
if (info->platform_data)
encoder->slave_funcs->set_config(&encoder->base,
@@ -92,10 +87,10 @@ int drm_i2c_encoder_init(struct drm_device *dev,
return 0;
+fail_module_put:
+ module_put(module);
fail_unregister:
i2c_unregister_device(client);
- module_put(module);
-fail:
return err;
}
EXPORT_SYMBOL(drm_i2c_encoder_init);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9801c0333eca..cb2349ad338d 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -2,7 +2,7 @@
/*
* drm kms/fb cma (contiguous memory allocator) helper functions
*
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
*
* Based on udl_fbdev.c
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a9771de4d17e..170aa7689110 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -307,13 +307,13 @@ static void drm_fb_helper_sysrq(int dummy1)
schedule_work(&drm_fb_helper_restore_work);
}
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
-static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
+static const struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
@@ -514,6 +514,14 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
if (ret)
goto err_release;
+ /*
+ * TODO: We really should be smarter here and alloc an aperture
+ * for each IORESOURCE_MEM resource helper->dev->dev has and also
+ * init the ranges of the apertures based on the resources.
+ * Note some drivers currently count on there being only 1 empty
+ * aperture and fill this themselves; these will need to be dealt
+ * with somehow when fixing this.
+ */
info->apertures = alloc_apertures(1);
if (!info->apertures) {
ret = -ENOMEM;
@@ -2162,6 +2170,8 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
*
* This function sets up generic fbdev emulation for drivers that supports
* dumb buffers with a virtual address and that can be mmap'ed.
+ * drm_fbdev_generic_setup() shall be called after the DRM driver has
+ * registered the new DRM device with drm_dev_register().
*
* Restore, hotplug events and teardown are all taken care of. Drivers that do
* suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
@@ -2178,29 +2188,30 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = {
* Setup will be retried on the next hotplug event.
*
* The fbdev is destroyed by drm_dev_unregister().
- *
- * Returns:
- * Zero on success or negative error code on failure.
*/
-int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+void drm_fbdev_generic_setup(struct drm_device *dev,
+ unsigned int preferred_bpp)
{
struct drm_fb_helper *fb_helper;
int ret;
- WARN(dev->fb_helper, "fb_helper is already set!\n");
+ drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
+ drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");
if (!drm_fbdev_emulation)
- return 0;
+ return;
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
- if (!fb_helper)
- return -ENOMEM;
+ if (!fb_helper) {
+ drm_err(dev, "Failed to allocate fb_helper\n");
+ return;
+ }
ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
if (ret) {
kfree(fb_helper);
drm_err(dev, "Failed to register client: %d\n", ret);
- return ret;
+ return;
}
if (!preferred_bpp)
@@ -2214,8 +2225,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);
drm_client_register(&fb_helper->client);
-
- return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
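
Since setup errors are now absorbed internally, callers no longer check a
return value. A hypothetical driver probe tail under this API (the "foo"
driver name is invented for illustration):

	ret = drm_dev_register(&foo->drm, 0);
	if (ret)
		return ret;

	/* Must come after drm_dev_register(); failures are only logged. */
	drm_fbdev_generic_setup(&foo->drm, 32);

	return 0;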
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index eb009d3ab48f..02b5ab626edb 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -253,8 +253,8 @@ void drm_file_free(struct drm_file *file)
dev = file->minor->dev;
- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- task_pid_nr(current),
+ DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, open_count=%d\n",
+ current->comm, task_pid_nr(current),
(long)old_encode_dev(file->minor->kdev->devt),
atomic_read(&dev->open_count));
@@ -342,10 +342,12 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
- if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
+ dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
- DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);
+ DRM_DEBUG("comm=\"%s\", pid=%d, minor=%d\n", current->comm,
+ task_pid_nr(current), minor->index);
priv = drm_file_alloc(minor);
if (IS_ERR(priv))
@@ -569,9 +571,6 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
struct drm_device *dev = file_priv->minor->dev;
ssize_t ret;
- if (!access_ok(buffer, count))
- return -EFAULT;
-
ret = mutex_lock_interruptible(&file_priv->event_read_lock);
if (ret)
return ret;
@@ -613,7 +612,8 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
- wake_up_interruptible(&file_priv->event_wait);
+ wake_up_interruptible_poll(&file_priv->event_wait,
+ EPOLLIN | EPOLLRDNORM);
break;
}
@@ -809,7 +809,8 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
list_del(&e->pending_link);
list_add_tail(&e->link,
&e->file_priv->event_list);
- wake_up_interruptible(&e->file_priv->event_wait);
+ wake_up_interruptible_poll(&e->file_priv->event_wait,
+ EPOLLIN | EPOLLRDNORM);
}
EXPORT_SYMBOL(drm_send_event_locked);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 3b818f2b2392..c043ca364c86 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -79,39 +79,60 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
/**
- * drm_fb_swab16 - Swap bytes into clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: RGB565 source buffer
+ * drm_fb_swab - Swap bytes into clip buffer
+ * @dst: Destination buffer
+ * @src: Source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
+ * @cached: Source buffer is mapped cached (e.g. not write-combined)
+ *
+ * If @cached is false a temporary buffer is used to cache one pixel line at a
+ * time to speed up slow uncached reads.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
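+ *
+ * A minimal caller-side sketch (the variable names are assumptions, not part
+ * of this API):
+ *
+ * .. code-block:: c
+ *
+ *	// dst holds drm_rect_width(&clip) * cpp bytes per clip line
+ *	drm_fb_swab(dst, src, fb, &clip, false);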
*/
-void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
+void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool cached)
{
- size_t len = (clip->x2 - clip->x1) * sizeof(u16);
+ u8 cpp = fb->format->cpp[0];
+ size_t len = drm_rect_width(clip) * cpp;
+ u16 *src16, *dst16 = dst;
+ u32 *src32, *dst32 = dst;
unsigned int x, y;
- u16 *src, *buf;
+ void *buf = NULL;
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
+ if (WARN_ON_ONCE(cpp != 2 && cpp != 4))
return;
+ if (!cached)
+ buf = kmalloc(len, GFP_KERNEL);
+
+ src += clip_offset(clip, fb->pitches[0], cpp);
+
for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++)
- *dst++ = swab16(*src++);
+ if (buf) {
+ memcpy(buf, src, len);
+ src16 = buf;
+ src32 = buf;
+ } else {
+ src16 = src;
+ src32 = src;
+ }
+
+ for (x = clip->x1; x < clip->x2; x++) {
+ if (cpp == 4)
+ *dst32++ = swab32(*src32++);
+ else
+ *dst16++ = swab16(*src16++);
+ }
+
+ src += fb->pitches[0];
}
kfree(buf);
}
-EXPORT_SYMBOL(drm_fb_swab16);
+EXPORT_SYMBOL(drm_fb_swab);
static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
unsigned int pixels,
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57ac94ce9b9e..0375b3d7f8d0 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -1207,10 +1207,10 @@ static const struct drm_info_list drm_framebuffer_debugfs_list[] = {
{ "framebuffer", drm_framebuffer_info, 0 },
};
-int drm_framebuffer_debugfs_init(struct drm_minor *minor)
+void drm_framebuffer_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(drm_framebuffer_debugfs_list,
- ARRAY_SIZE(drm_framebuffer_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(drm_framebuffer_debugfs_list,
+ ARRAY_SIZE(drm_framebuffer_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 37627d06fb06..a57f5379fc08 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -44,6 +44,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>
@@ -77,6 +78,12 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
+static void
+drm_gem_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+}
+
/**
* drm_gem_init - Initialize the GEM device fields
* @dev: drm_device structure to initialize
@@ -89,7 +96,8 @@ drm_gem_init(struct drm_device *dev)
mutex_init(&dev->object_name_lock);
idr_init_base(&dev->object_name_idr, 1);
- vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+ vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
+ GFP_KERNEL);
if (!vma_offset_manager) {
DRM_ERROR("out of memory\n");
return -ENOMEM;
@@ -100,16 +108,7 @@ drm_gem_init(struct drm_device *dev)
DRM_FILE_PAGE_OFFSET_START,
DRM_FILE_PAGE_OFFSET_SIZE);
- return 0;
-}
-
-void
-drm_gem_destroy(struct drm_device *dev)
-{
-
- drm_vma_offset_manager_destroy(dev->vma_offset_manager);
- kfree(dev->vma_offset_manager);
- dev->vma_offset_manager = NULL;
+ return drmm_add_action(dev, drm_gem_init_release, NULL);
}
/**
@@ -236,7 +235,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
mutex_unlock(&dev->object_name_lock);
if (final)
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
}
/*
@@ -332,7 +331,7 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -432,7 +431,7 @@ err_unref:
* drm_gem_handle_create - create a gem handle for an object
* @file_priv: drm file-private structure to register the handle for
* @obj: object to register
- * @handlep: pionter to return the created handle to the caller
+ * @handlep: pointer to return the created handle to the caller
*
* Create a handle for this object. This adds a handle reference to the object,
* which includes a regular reference count. Callers will likely want to
@@ -549,6 +548,10 @@ static void drm_gem_check_release_pagevec(struct pagevec *pvec)
* set during initialization. If you have special zone constraints, set them
* after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
* to keep pages in the required zone during swap-in.
+ *
+ * This function is only valid on objects initialized with
+ * drm_gem_object_init(), but not for those initialized with
+ * drm_gem_private_object_init() only.
*/
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
@@ -557,6 +560,10 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)
struct pagevec pvec;
int i, npages;
+
+ if (WARN_ON(!obj->filp))
+ return ERR_PTR(-EINVAL);
+
/* This is the shared memory object that backs the GEM resource */
mapping = obj->filp->f_mapping;
@@ -710,6 +717,8 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
if (!objs)
return -ENOMEM;
+ *objs_out = objs;
+
handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
if (!handles) {
ret = -ENOMEM;
@@ -723,8 +732,6 @@ int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
}
ret = objects_lookup(filp, handles, count, objs);
- *objs_out = objs;
-
out:
kvfree(handles);
return ret;
@@ -786,7 +793,7 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
else if (ret > 0)
ret = 0;
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -861,7 +868,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
err:
mutex_unlock(&dev->object_name_lock);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -899,7 +906,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
if (ret)
return ret;
@@ -966,7 +973,6 @@ EXPORT_SYMBOL(drm_gem_object_release);
* @kref: kref of the object to free
*
* Called after the last reference to the object has been lost.
- * Must be called holding &drm_device.struct_mutex.
*
* Frees the object
*/
@@ -977,50 +983,15 @@ drm_gem_object_free(struct kref *kref)
container_of(kref, struct drm_gem_object, refcount);
struct drm_device *dev = obj->dev;
- if (obj->funcs) {
+ if (obj->funcs)
obj->funcs->free(obj);
- } else if (dev->driver->gem_free_object_unlocked) {
+ else if (dev->driver->gem_free_object_unlocked)
dev->driver->gem_free_object_unlocked(obj);
- } else if (dev->driver->gem_free_object) {
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- dev->driver->gem_free_object(obj);
- }
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
- * drm_gem_object_put_unlocked - drop a GEM buffer object reference
- * @obj: GEM buffer object
- *
- * This releases a reference to @obj. Callers must not hold the
- * &drm_device.struct_mutex lock when calling this function.
- *
- * See also __drm_gem_object_put().
- */
-void
-drm_gem_object_put_unlocked(struct drm_gem_object *obj)
-{
- struct drm_device *dev;
-
- if (!obj)
- return;
-
- dev = obj->dev;
-
- if (dev->driver->gem_free_object) {
- might_lock(&dev->struct_mutex);
- if (kref_put_mutex(&obj->refcount, drm_gem_object_free,
- &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
- } else {
- kref_put(&obj->refcount, drm_gem_object_free);
- }
-}
-EXPORT_SYMBOL(drm_gem_object_put_unlocked);
-
-/**
- * drm_gem_object_put - release a GEM buffer object reference
+ * drm_gem_object_put_locked - release a GEM buffer object reference
* @obj: GEM buffer object
*
* This releases a reference to @obj. Callers must hold the
@@ -1028,10 +999,10 @@ EXPORT_SYMBOL(drm_gem_object_put_unlocked);
* driver doesn't use &drm_device.struct_mutex for anything.
*
* For drivers not encumbered with legacy locking use
- * drm_gem_object_put_unlocked() instead.
+ * drm_gem_object_put() instead.
*/
void
-drm_gem_object_put(struct drm_gem_object *obj)
+drm_gem_object_put_locked(struct drm_gem_object *obj)
{
if (obj) {
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -1039,7 +1010,7 @@ drm_gem_object_put(struct drm_gem_object *obj)
kref_put(&obj->refcount, drm_gem_object_free);
}
}
-EXPORT_SYMBOL(drm_gem_object_put);
+EXPORT_SYMBOL(drm_gem_object_put_locked);
/**
* drm_gem_vm_open - vma->ops->open implementation for GEM
@@ -1067,7 +1038,7 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
@@ -1116,7 +1087,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
if (obj->funcs && obj->funcs->mmap) {
ret = obj->funcs->mmap(obj, vma);
if (ret) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
@@ -1126,7 +1097,7 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
else if (dev->driver->gem_vm_ops)
vma->vm_ops = dev->driver->gem_vm_ops;
else {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EINVAL;
}
@@ -1192,13 +1163,13 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EACCES;
}
if (node->readonly) {
if (vma->vm_flags & VM_WRITE) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EINVAL;
}
@@ -1208,7 +1179,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
vma);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -1228,8 +1199,6 @@ void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
if (obj->funcs && obj->funcs->print_info)
obj->funcs->print_info(p, indent, obj);
- else if (obj->dev->driver->gem_print_info)
- obj->dev->driver->gem_print_info(p, indent, obj);
}
int drm_gem_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 12e98fb28229..06a5b9ee1fe0 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -114,7 +114,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
return cma_obj;
error:
- drm_gem_object_put_unlocked(&cma_obj->base);
+ drm_gem_object_put(&cma_obj->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -156,7 +156,7 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
*/
ret = drm_gem_handle_create(file_priv, gem_obj, handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
if (ret)
return ERR_PTR(ret);
@@ -380,13 +380,13 @@ unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
return -EINVAL;
if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return -EACCES;
}
cma_obj = to_drm_gem_cma_obj(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
}
@@ -572,7 +572,7 @@ void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vunmap);
-static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
+static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = {
.free = drm_gem_cma_free_object,
.print_info = drm_gem_cma_print_info,
.get_sg_table = drm_gem_cma_prime_get_sg_table,
@@ -581,7 +581,7 @@ static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
};
/**
- * drm_cma_gem_create_object_default_funcs - Create a CMA GEM object with a
+ * drm_gem_cma_create_object_default_funcs - Create a CMA GEM object with a
* default function table
* @dev: DRM device
* @size: Size of the object to allocate
@@ -593,7 +593,7 @@ static const struct drm_gem_object_funcs drm_cma_gem_default_funcs = {
* A pointer to a allocated GEM object or an error pointer on failure.
*/
struct drm_gem_object *
-drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
+drm_gem_cma_create_object_default_funcs(struct drm_device *dev, size_t size)
{
struct drm_gem_cma_object *cma_obj;
@@ -601,11 +601,11 @@ drm_cma_gem_create_object_default_funcs(struct drm_device *dev, size_t size)
if (!cma_obj)
return NULL;
- cma_obj->base.funcs = &drm_cma_gem_default_funcs;
+ cma_obj->base.funcs = &drm_gem_cma_default_funcs;
return &cma_obj->base;
}
-EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
+EXPORT_SYMBOL(drm_gem_cma_create_object_default_funcs);
/**
* drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's
@@ -620,7 +620,7 @@ EXPORT_SYMBOL(drm_cma_gem_create_object_default_funcs);
* address set. This address is released when the object is freed.
*
* This function can be used as the &drm_driver.gem_prime_import_sg_table
- * callback. The DRM_GEM_CMA_VMAP_DRIVER_OPS() macro provides a shortcut to set
+ * callback. The &DRM_GEM_CMA_DRIVER_OPS_VMAP macro provides a shortcut to set
* the necessary DRM driver operations.
*
* Returns:
diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
index 3a7ace19a902..109d11fb4cd4 100644
--- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c
+++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c
@@ -21,6 +21,13 @@
#include <drm/drm_modeset_helper.h>
#include <drm/drm_simple_kms_helper.h>
+#define AFBC_HEADER_SIZE 16
+#define AFBC_TH_LAYOUT_ALIGNMENT 8
+#define AFBC_HDR_ALIGN 64
+#define AFBC_SUPERBLOCK_PIXELS 256
+#define AFBC_SUPERBLOCK_ALIGNMENT 128
+#define AFBC_TH_BODY_START_ALIGNMENT 4096
+
/**
* DOC: overview
*
@@ -54,32 +61,25 @@ struct drm_gem_object *drm_gem_fb_get_obj(struct drm_framebuffer *fb,
}
EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj);
-static struct drm_framebuffer *
-drm_gem_fb_alloc(struct drm_device *dev,
+static int
+drm_gem_fb_init(struct drm_device *dev,
+ struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **obj, unsigned int num_planes,
const struct drm_framebuffer_funcs *funcs)
{
- struct drm_framebuffer *fb;
int ret, i;
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb)
- return ERR_PTR(-ENOMEM);
-
drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
for (i = 0; i < num_planes; i++)
fb->obj[i] = obj[i];
ret = drm_framebuffer_init(dev, fb, funcs);
- if (ret) {
+ if (ret)
drm_err(dev, "Failed to init framebuffer: %d\n", ret);
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
+ return ret;
}
/**
@@ -95,7 +95,7 @@ void drm_gem_fb_destroy(struct drm_framebuffer *fb)
int i;
for (i = 0; i < 4; i++)
- drm_gem_object_put_unlocked(fb->obj[i]);
+ drm_gem_object_put(fb->obj[i]);
drm_framebuffer_cleanup(fb);
kfree(fb);
@@ -123,10 +123,13 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
EXPORT_SYMBOL(drm_gem_fb_create_handle);
/**
- * drm_gem_fb_create_with_funcs() - Helper function for the
- * &drm_mode_config_funcs.fb_create
- * callback
+ * drm_gem_fb_init_with_funcs() - Helper function for implementing
+ * &drm_mode_config_funcs.fb_create
+ * callback in cases when the driver
+ * allocates a subclass of
+ * struct drm_framebuffer
* @dev: DRM device
+ * @fb: framebuffer object
* @file: DRM file that holds the GEM handle(s) backing the framebuffer
* @mode_cmd: Metadata from the userspace framebuffer creation request
* @funcs: vtable to be used for the new framebuffer object
@@ -134,23 +137,26 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle);
* This function can be used to set &drm_framebuffer_funcs for drivers that need
* custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
* change &drm_framebuffer_funcs. The function does buffer size validation.
+ * The buffer size validation covers only a general case, though, so users
+ * should verify that the checks are appropriate for their use case, or at
+ * least do not conflict with it.
*
* Returns:
- * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ * Zero or a negative error code.
*/
-struct drm_framebuffer *
-drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- const struct drm_framebuffer_funcs *funcs)
+int drm_gem_fb_init_with_funcs(struct drm_device *dev,
+ struct drm_framebuffer *fb,
+ struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
{
const struct drm_format_info *info;
struct drm_gem_object *objs[4];
- struct drm_framebuffer *fb;
int ret, i;
info = drm_get_format_info(dev, mode_cmd);
if (!info)
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
for (i = 0; i < info->num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
@@ -169,25 +175,61 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ mode_cmd->offsets[i];
if (objs[i]->size < min_size) {
- drm_gem_object_put_unlocked(objs[i]);
+ drm_gem_object_put(objs[i]);
ret = -EINVAL;
goto err_gem_object_put;
}
}
- fb = drm_gem_fb_alloc(dev, mode_cmd, objs, i, funcs);
- if (IS_ERR(fb)) {
- ret = PTR_ERR(fb);
+ ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs);
+ if (ret)
goto err_gem_object_put;
- }
- return fb;
+ return 0;
err_gem_object_put:
for (i--; i >= 0; i--)
- drm_gem_object_put_unlocked(objs[i]);
+ drm_gem_object_put(objs[i]);
- return ERR_PTR(ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs);
+
+/**
+ * drm_gem_fb_create_with_funcs() - Helper function for the
+ * &drm_mode_config_funcs.fb_create
+ * callback
+ * @dev: DRM device
+ * @file: DRM file that holds the GEM handle(s) backing the framebuffer
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @funcs: vtable to be used for the new framebuffer object
+ *
+ * This function can be used to set &drm_framebuffer_funcs for drivers that need
+ * custom framebuffer callbacks. Use drm_gem_fb_create() if you don't need to
+ * change &drm_framebuffer_funcs. The function does buffer size validation.
+ *
+ * Returns:
+ * Pointer to a &drm_framebuffer on success or an error pointer on failure.
+ */
+struct drm_framebuffer *
+drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ const struct drm_framebuffer_funcs *funcs)
+{
+ struct drm_framebuffer *fb;
+ int ret;
+
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs);
+ if (ret) {
+ kfree(fb);
+ return ERR_PTR(ret);
+ }
+
+ return fb;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_funcs);
@@ -265,6 +307,132 @@ drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
}
EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty);
+static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ const struct drm_format_info *info;
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ /* use whatever a driver has set */
+ if (info->cpp[0])
+ return info->cpp[0] * 8;
+
+ /* guess otherwise */
+ switch (info->format) {
+ case DRM_FORMAT_YUV420_8BIT:
+ return 12;
+ case DRM_FORMAT_YUV420_10BIT:
+ return 15;
+ case DRM_FORMAT_VUY101010:
+ return 30;
+ default:
+ break;
+ }
+
+ /* all attempts failed */
+ return 0;
+}
+
+static int drm_gem_afbc_min_size(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb)
+{
+ __u32 n_blocks, w_alignment, h_alignment, hdr_alignment;
+ /* remove bpp when all users properly encode cpp in drm_format_info */
+ __u32 bpp;
+
+ switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ afbc_fb->block_width = 16;
+ afbc_fb->block_height = 16;
+ break;
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8:
+ afbc_fb->block_width = 32;
+ afbc_fb->block_height = 8;
+ break;
+ /* no user exists yet - fall through */
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_64x4:
+ case AFBC_FORMAT_MOD_BLOCK_SIZE_32x8_64x4:
+ default:
+ drm_dbg_kms(dev, "Invalid AFBC_FORMAT_MOD_BLOCK_SIZE: %lld.\n",
+ mode_cmd->modifier[0]
+ & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK);
+ return -EINVAL;
+ }
+
+ /* tiled header afbc */
+ w_alignment = afbc_fb->block_width;
+ h_alignment = afbc_fb->block_height;
+ hdr_alignment = AFBC_HDR_ALIGN;
+ if (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_TILED) {
+ w_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+ h_alignment *= AFBC_TH_LAYOUT_ALIGNMENT;
+ hdr_alignment = AFBC_TH_BODY_START_ALIGNMENT;
+ }
+
+ afbc_fb->aligned_width = ALIGN(mode_cmd->width, w_alignment);
+ afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment);
+ afbc_fb->offset = mode_cmd->offsets[0];
+
+ bpp = drm_gem_afbc_get_bpp(dev, mode_cmd);
+ if (!bpp) {
+ drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp);
+ return -EINVAL;
+ }
+
+ n_blocks = (afbc_fb->aligned_width * afbc_fb->aligned_height)
+ / AFBC_SUPERBLOCK_PIXELS;
+ afbc_fb->afbc_size = ALIGN(n_blocks * AFBC_HEADER_SIZE, hdr_alignment);
+ afbc_fb->afbc_size += n_blocks * ALIGN(bpp * AFBC_SUPERBLOCK_PIXELS / 8,
+ AFBC_SUPERBLOCK_ALIGNMENT);
+
+ return 0;
+}
+
+/**
+ * drm_gem_fb_afbc_init() - Helper function for drivers using afbc to
+ * fill and validate all the afbc-specific
+ * struct drm_afbc_framebuffer members
+ *
+ * @dev: DRM device
+ * @mode_cmd: Metadata from the userspace framebuffer creation request
+ * @afbc_fb: afbc-specific framebuffer
+ *
+ * This function can be used by drivers that support afbc to complete
+ * the preparation of struct drm_afbc_framebuffer. It must be called after
+ * allocating that structure and calling drm_gem_fb_init_with_funcs().
+ * It is the caller's responsibility to put afbc_fb->base.obj objects in case
+ * the call is unsuccessful.
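+ *
+ * A sketch of the expected call sequence in a driver's
+ * &drm_mode_config_funcs.fb_create implementation (error unwinding omitted,
+ * names are assumptions):
+ *
+ * .. code-block:: c
+ *
+ *	afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
+ *	if (!afbc_fb)
+ *		return ERR_PTR(-ENOMEM);
+ *
+ *	ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file,
+ *					 mode_cmd, &funcs);
+ *	if (!ret)
+ *		ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);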
+ *
+ * Returns:
+ * Zero on success or a negative error value on failure.
+ */
+int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_afbc_framebuffer *afbc_fb)
+{
+ const struct drm_format_info *info;
+ struct drm_gem_object **objs;
+ int ret;
+
+ objs = afbc_fb->base.obj;
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return -EINVAL;
+
+ ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb);
+ if (ret < 0)
+ return ret;
+
+ if (objs[0]->size < afbc_fb->afbc_size)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
+
/**
* drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index df31e5782eed..4b7cfbac4daa 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.mmap = drm_gem_shmem_mmap,
};
-/**
- * drm_gem_shmem_create - Allocate an object with the given size
- * @dev: DRM device
- * @size: Size of the object to allocate
- *
- * This function creates a shmem GEM object.
- *
- * Returns:
- * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
- * error code on failure.
- */
-struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+static struct drm_gem_shmem_object *
+__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- int ret;
+ int ret = 0;
size = PAGE_ALIGN(size);
@@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
- ret = drm_gem_object_init(dev, obj, size);
+ if (private)
+ drm_gem_private_object_init(dev, obj, size);
+ else
+ ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto err_free;
@@ -77,15 +70,17 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list);
- /*
- * Our buffers are kept pinned, so allocating them
- * from the MOVABLE zone is a really bad idea, and
- * conflicts with CMA. See comments above new_inode()
- * why this is required _and_ expected if you're
- * going to pin these pages.
- */
- mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ if (!private) {
+ /*
+ * Our buffers are kept pinned, so allocating them
+ * from the MOVABLE zone is a really bad idea, and
+ * conflicts with CMA. See comments above new_inode()
+ * why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ }
return shmem;
@@ -96,6 +91,21 @@ err_free:
return ERR_PTR(ret);
}
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+ return __drm_gem_shmem_create(dev, size, false);
+}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
/**
@@ -103,7 +113,8 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
* @obj: GEM object to free
*
* This function cleans up the GEM object state and frees the memory used to
- * store the object itself.
+ * store the object itself. It should be used to implement
+ * &drm_gem_object_funcs.free.
*/
void drm_gem_shmem_free_object(struct drm_gem_object *obj)
{
@@ -112,9 +123,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
WARN_ON(shmem->vmap_use_count);
if (obj->import_attach) {
- shmem->pages_use_count--;
drm_prime_gem_destroy(obj, shmem->sgt);
- kvfree(shmem->pages);
} else {
if (shmem->sgt) {
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
@@ -169,6 +178,8 @@ int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
{
int ret;
+ WARN_ON(shmem->base.import_attach);
+
ret = mutex_lock_interruptible(&shmem->pages_lock);
if (ret)
return ret;
@@ -214,7 +225,8 @@ EXPORT_SYMBOL(drm_gem_shmem_put_pages);
* @obj: GEM object
*
* This function makes sure the backing pages are pinned in memory while the
- * buffer is exported.
+ * buffer is exported. It should only be used to implement
+ * &drm_gem_object_funcs.pin.
*
* Returns:
* 0 on success or a negative error code on failure.
@@ -223,6 +235,8 @@ int drm_gem_shmem_pin(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ WARN_ON(shmem->base.import_attach);
+
return drm_gem_shmem_get_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_pin);
@@ -232,12 +246,14 @@ EXPORT_SYMBOL(drm_gem_shmem_pin);
* @obj: GEM object
*
* This function removes the requirement that the backing pages are pinned in
- * memory.
+ * memory. It should only be used to implement &drm_gem_object_funcs.unpin.
*/
void drm_gem_shmem_unpin(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ WARN_ON(shmem->base.import_attach);
+
drm_gem_shmem_put_pages(shmem);
}
EXPORT_SYMBOL(drm_gem_shmem_unpin);
@@ -250,15 +266,15 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (shmem->vmap_use_count++ > 0)
return shmem->vaddr;
- ret = drm_gem_shmem_get_pages(shmem);
- if (ret)
- goto err_zero_use;
-
if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
} else {
pgprot_t prot = PAGE_KERNEL;
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
if (!shmem->map_cached)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
@@ -274,7 +290,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
return shmem->vaddr;
err_put_pages:
- drm_gem_shmem_put_pages(shmem);
+ if (!obj->import_attach)
+ drm_gem_shmem_put_pages(shmem);
err_zero_use:
shmem->vmap_use_count = 0;
@@ -285,8 +302,14 @@ err_zero_use:
* drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
* @shmem: shmem GEM object
*
- * This function makes sure that a virtual address exists for the buffer backing
- * the shmem GEM object.
+ * This function makes sure that a contiguous kernel virtual address mapping
+ * exists for the buffer backing the shmem GEM object.
+ *
+ * This function can be used to implement &drm_gem_object_funcs.vmap. But it can
+ * also be called by drivers directly, in which case it will hide the
+ * differences between dma-buf imported and natively allocated objects.
+ *
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap().
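+ *
+ * A minimal sketch of direct driver use (hypothetical caller):
+ *
+ * .. code-block:: c
+ *
+ *	void *vaddr = drm_gem_shmem_vmap(obj);
+ *
+ *	if (IS_ERR(vaddr))
+ *		return PTR_ERR(vaddr);
+ *	// ... access the buffer through vaddr ...
+ *	drm_gem_shmem_vunmap(obj, vaddr);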
*
* Returns:
* A pointer to the buffer's kernel virtual address on success or an
* ERR_PTR()-encoded negative error code on failure.
@@ -330,7 +353,13 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
* drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
* @shmem: shmem GEM object
*
- * This function removes the virtual address when use count drops to zero.
+ * This function cleans up a kernel virtual address mapping acquired by
+ * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to
+ * zero.
+ *
+ * This function can be used to implement &drm_gem_object_funcs.vunmap. But it can
+ * also be called by drivers directly, in which case it will hide the
+ * differences between dma-buf imported and natively allocated objects.
*/
void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
{
@@ -360,7 +389,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
*/
ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(&shmem->base);
+ drm_gem_object_put(&shmem->base);
if (ret)
return ERR_PTR(ret);
@@ -434,6 +463,33 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj)
EXPORT_SYMBOL(drm_gem_shmem_purge);
/**
+ * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
+ * cached mappings
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * By default, shmem buffer objects use writecombine mappings. This
+ * function implements struct drm_driver.gem_create_object for shmem
+ * buffer objects with cached mappings.
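+ *
+ * A sketch of hooking this up in a driver (the driver struct is
+ * hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	static struct drm_driver example_driver = {
+ *		.gem_create_object = drm_gem_shmem_create_object_cached,
+ *		// ...
+ *	};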
+ *
+ * Returns:
+ * A struct drm_gem_object * on success or NULL on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem;
+
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
+ return NULL;
+ shmem->map_cached = true;
+
+ return &shmem->base;
+}
+EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
+
+/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
* @dev: DRM device
@@ -495,6 +551,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
int ret;
+ WARN_ON(shmem->base.import_attach);
+
ret = drm_gem_shmem_get_pages(shmem);
WARN_ON_ONCE(ret != 0);
@@ -536,6 +594,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
/* Remove the fake offset */
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
+ if (obj->import_attach)
+ return dma_buf_mmap(obj->dma_buf, vma, 0);
+
shmem = to_drm_gem_shmem_obj(obj);
ret = drm_gem_shmem_get_pages(shmem);
@@ -559,6 +620,8 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
* @p: DRM printer
* @indent: Tab indentation level
* @obj: GEM object
+ *
+ * This implements the &drm_gem_object_funcs.print_info callback.
*/
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj)
@@ -577,7 +640,12 @@ EXPORT_SYMBOL(drm_gem_shmem_print_info);
* @obj: GEM object
*
* This function exports a scatter/gather table suitable for PRIME usage by
- * calling the standard DMA mapping API.
+ * calling the standard DMA mapping API. Drivers should not call this function
+ * directly; instead, it should only be used as an implementation for
+ * &drm_gem_object_funcs.get_sg_table.
+ *
+ * Drivers that need to acquire a scatter/gather table for objects need to call
+ * drm_gem_shmem_get_pages_sgt() instead.
*
* Returns:
* A pointer to the scatter/gather table of pinned pages or NULL on failure.
@@ -586,6 +654,8 @@ struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
{
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ WARN_ON(shmem->base.import_attach);
+
return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
@@ -599,6 +669,10 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
* the sg table doesn't exist, the pages are pinned, dma-mapped, and a sg
* table created.
*
+ * This is the main function for drivers to get at backing storage, and it hides
+ * any difference between dma-buf imported and natively allocated objects.
+ * drm_gem_shmem_get_sg_table() should not be directly called by drivers.
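+ *
+ * A minimal caller-side sketch (hypothetical surrounding code):
+ *
+ * .. code-block:: c
+ *
+ *	struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+ *
+ *	if (IS_ERR(sgt))
+ *		return PTR_ERR(sgt);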
+ *
* Returns:
* A pointer to the scatter/gather table of pinned pages or errno on failure.
*/
@@ -656,36 +730,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
size_t size = PAGE_ALIGN(attach->dmabuf->size);
- size_t npages = size >> PAGE_SHIFT;
struct drm_gem_shmem_object *shmem;
- int ret;
- shmem = drm_gem_shmem_create(dev, size);
+ shmem = __drm_gem_shmem_create(dev, size, true);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
- shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!shmem->pages) {
- ret = -ENOMEM;
- goto err_free_gem;
- }
-
- ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
- if (ret < 0)
- goto err_free_array;
-
shmem->sgt = sgt;
- shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
DRM_DEBUG_PRIME("size = %zu\n", size);
return &shmem->base;
-
-err_free_array:
- kvfree(shmem->pages);
-err_free_gem:
- drm_gem_object_put_unlocked(&shmem->base);
-
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index 605a8a3da7f9..892b2288a104 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -74,7 +74,7 @@ int drm_gem_ttm_mmap(struct drm_gem_object *gem,
* ttm has its own object refcounting, so drop gem reference
* to avoid double accounting.
*/
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 92a11bb42365..0023ce1d2cf7 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1,10 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
+#include <linux/module.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode.h>
@@ -18,13 +21,93 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
/**
* DOC: overview
*
- * This library provides a GEM buffer object that is backed by video RAM
- * (VRAM). It can be used for framebuffer devices with dedicated memory.
+ * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
+ * buffer object that is backed by video RAM (VRAM). It can be used for
+ * framebuffer devices with dedicated memory.
*
* The data structure &struct drm_vram_mm and its helpers implement a memory
- * manager for simple framebuffer devices with dedicated video memory. Buffer
- * objects are either placed in video RAM or evicted to system memory. The rsp.
- * buffer object is provided by &struct drm_gem_vram_object.
+ * manager for simple framebuffer devices with dedicated video memory. GEM
+ * VRAM buffer objects are either placed in the video memory or remain evicted
+ * to system memory.
+ *
+ * With the GEM interface userspace applications create, manage and destroy
+ * graphics buffers, such as an on-screen framebuffer. GEM does not provide
+ * an implementation of these interfaces. It's up to the DRM driver to
+ * provide an implementation that suits the hardware. If the hardware device
+ * contains dedicated video memory, the DRM driver can use the VRAM helper
+ * library. Each active buffer object is stored in video RAM. Active
+ * buffers are used for drawing the current frame, typically something like
+ * the frame's scanout buffer or the cursor image. If there's no more space
+ * left in VRAM, inactive GEM objects can be moved to system memory.
+ *
+ * The easiest way to use the VRAM helper library is to call
+ * drm_vram_helper_alloc_mm(). The function allocates and initializes an
+ * instance of &struct drm_vram_mm in &struct drm_device.vram_mm. Use
+ * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
+ * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations,
+ * as illustrated below.
+ *
+ * .. code-block:: c
+ *
+ *	struct file_operations fops = {
+ *		.owner = THIS_MODULE,
+ *		DRM_VRAM_MM_FILE_OPERATIONS
+ *	};
+ *	struct drm_driver drv = {
+ *		.driver_features = DRM_ ... ,
+ *		.fops = &fops,
+ *		DRM_GEM_VRAM_DRIVER
+ *	};
+ *
+ *	int init_drm_driver()
+ *	{
+ *		struct drm_device *dev;
+ *		uint64_t vram_base;
+ *		unsigned long vram_size;
+ *		int ret;
+ *
+ *		// setup device, vram base and size
+ *		// ...
+ *
+ *		ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
+ *		if (ret)
+ *			return ret;
+ *		return 0;
+ *	}
+ *
+ * This creates an instance of &struct drm_vram_mm, exports DRM userspace
+ * interfaces for GEM buffer management and initializes file operations to
+ * allow for accessing created GEM buffers. With this setup, the DRM driver
+ * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
+ * to userspace.
+ *
+ * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
+ * in the driver's clean-up code.
+ *
+ * .. code-block:: c
+ *
+ *	void fini_drm_driver()
+ *	{
+ *		struct drm_device *dev = ...;
+ *
+ *		drm_vram_helper_release_mm(dev);
+ *	}
+ *
+ * For drawing or scanout operations, buffer objects have to be pinned in video
+ * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
+ * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
+ * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
+ *
+ * A buffer object that is pinned in video RAM has a fixed address within that
+ * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
+ * it's used to program the hardware's scanout engine for framebuffers, set
+ * the cursor overlay's image for a mouse cursor, or use it as input to the
+ * hardware's drawing engine.
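+ *
+ * A sketch of the pin/offset/unpin sequence around a scanout update (the
+ * hardware write is a hypothetical placeholder):
+ *
+ * .. code-block:: c
+ *
+ *	ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
+ *	if (ret)
+ *		return ret;
+ *	example_hw_set_scanout_address(hw, drm_gem_vram_offset(gbo));
+ *	// ... once the buffer is no longer scanned out ...
+ *	drm_gem_vram_unpin(gbo);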
+ *
+ * To access a buffer object's memory from the DRM driver, call
+ * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
+ * space and returns the memory address. Use drm_gem_vram_kunmap() to
+ * release the mapping.
*/
/*
@@ -535,9 +618,9 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
ret = drm_gem_handle_create(file, &gbo->bo.base, &handle);
if (ret)
- goto err_drm_gem_object_put_unlocked;
+ goto err_drm_gem_object_put;
- drm_gem_object_put_unlocked(&gbo->bo.base);
+ drm_gem_object_put(&gbo->bo.base);
args->pitch = pitch;
args->size = size;
@@ -545,8 +628,8 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
return 0;
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(&gbo->bo.base);
+err_drm_gem_object_put:
+ drm_gem_object_put(&gbo->bo.base);
return ret;
}
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
@@ -654,7 +737,7 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
gbo = drm_gem_vram_of_gem(gem);
*offset = drm_gem_vram_mmap_offset(gbo);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
@@ -670,9 +753,9 @@ EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
* @plane: a DRM plane
* @new_state: the plane's new state
*
- * During plane updates, this function pins the GEM VRAM
- * objects of the plane's new framebuffer to VRAM. Call
- * drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
+ * During plane updates, this function sets the plane's fence and
+ * pins the GEM VRAM objects of the plane's new framebuffer to VRAM.
+ * Call drm_gem_vram_plane_helper_cleanup_fb() to unpin them.
*
* Returns:
* 0 on success, or
@@ -698,6 +781,10 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
goto err_drm_gem_vram_unpin;
}
+ ret = drm_gem_fb_prepare_fb(plane, new_state);
+ if (ret)
+ goto err_drm_gem_vram_unpin;
+
return 0;
err_drm_gem_vram_unpin:
@@ -1018,7 +1105,6 @@ static struct ttm_bo_driver bo_driver = {
* struct drm_vram_mm
*/
-#if defined(CONFIG_DEBUG_FS)
static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1035,27 +1121,18 @@ static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
static const struct drm_info_list drm_vram_mm_debugfs_list[] = {
{ "vram-mm", drm_vram_mm_debugfs, 0, NULL },
};
-#endif
/**
* drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file.
*
* @minor: drm minor device.
*
- * Returns:
- * 0 on success, or
- * a negative error code otherwise.
*/
-int drm_vram_mm_debugfs_init(struct drm_minor *minor)
+void drm_vram_mm_debugfs_init(struct drm_minor *minor)
{
- int ret = 0;
-
-#if defined(CONFIG_DEBUG_FS)
- ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list,
- ARRAY_SIZE(drm_vram_mm_debugfs_list),
- minor->debugfs_root, minor);
-#endif
- return ret;
+ drm_debugfs_create_files(drm_vram_mm_debugfs_list,
+ ARRAY_SIZE(drm_vram_mm_debugfs_list),
+ minor->debugfs_root, minor);
}
EXPORT_SYMBOL(drm_vram_mm_debugfs_init);
@@ -1202,3 +1279,6 @@ drm_vram_helper_mode_valid(struct drm_device *dev,
return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
}
EXPORT_SYMBOL(drm_vram_helper_mode_valid);
+
+MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 7f386adcf872..910108ccaae1 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -241,8 +241,12 @@ static int drm_hdcp_request_srm(struct drm_device *drm_dev,
ret = request_firmware_direct(&fw, (const char *)fw_name,
drm_dev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ *revoked_ksv_cnt = 0;
+ *revoked_ksv_list = NULL;
+ ret = 0;
goto exit;
+ }
if (fw->size && fw->data)
ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
@@ -287,6 +291,8 @@ int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
&revoked_ksv_cnt);
+ if (ret)
+ return ret;
/* revoked_ksv_cnt will be zero when above function failed */
for (i = 0; i < revoked_ksv_cnt; i++)
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 5714a78365ac..2470a352730b 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -89,9 +89,11 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
struct drm_minor *drm_minor_acquire(unsigned int minor_id);
void drm_minor_release(struct drm_minor *minor);
+/* drm_managed.c */
+void drm_managed_release(struct drm_device *dev);
+
/* drm_vblank.c */
void drm_vblank_disable_and_save(struct drm_device *dev, unsigned int pipe);
-void drm_vblank_cleanup(struct drm_device *dev);
/* IOCTLS */
int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
@@ -141,7 +143,6 @@ void drm_sysfs_lease_event(struct drm_device *dev);
/* drm_gem.c */
struct drm_gem_object;
int drm_gem_init(struct drm_device *dev);
-void drm_gem_destroy(struct drm_device *dev);
int drm_gem_handle_create_tail(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
@@ -235,4 +236,4 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
/* drm_framebuffer.c */
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
-int drm_framebuffer_debugfs_init(struct drm_minor *minor);
+void drm_framebuffer_debugfs_init(struct drm_minor *minor);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 22c7fd7196c8..ff5d40036e21 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -985,8 +985,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (!fn)
return drm_ioctl(filp, cmd, arg);
- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
- task_pid_nr(current),
+ DRM_DEBUG("comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated,
drm_compat_ioctls[nr].name);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9e41972c4bbc..789ee65ac1f5 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -599,8 +599,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, 0),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -741,7 +741,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
* };
*
* Please make sure that you follow all the best practices from
- * ``Documentation/ioctl/botching-up-ioctls.rst``. Note that drm_ioctl()
+ * ``Documentation/process/botching-up-ioctls.rst``. Note that drm_ioctl()
* automatically zero-extends structures, hence make sure you can add more stuff
* at the end, i.e. don't put a variable sized array there.
*
@@ -852,8 +852,8 @@ long drm_ioctl(struct file *filp,
out_size = 0;
ksize = max(max(in_size, out_size), drv_size);
- DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
- task_pid_nr(current),
+ DRM_DEBUG("comm=\"%s\" pid=%d, dev=0x%lx, auth=%d, %s\n",
+ current->comm, task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated, ioctl->name);
@@ -890,15 +890,16 @@ long drm_ioctl(struct file *filp,
err_i1:
if (!ioctl)
- DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
- task_pid_nr(current),
+ DRM_DEBUG("invalid ioctl: comm=\"%s\", pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
+ current->comm, task_pid_nr(current),
(long)old_encode_dev(file_priv->minor->kdev->devt),
file_priv->authenticated, cmd, nr);
if (kdata != stack_kdata)
kfree(kdata);
if (retcode)
- DRM_DEBUG("pid=%d, ret = %d\n", task_pid_nr(current), retcode);
+ DRM_DEBUG("comm=\"%s\", pid=%d, ret=%d\n", current->comm,
+ task_pid_nr(current), retcode);
return retcode;
}
EXPORT_SYMBOL(drm_ioctl);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 588be45abd7a..09d6e9e2e075 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -181,7 +181,7 @@ int drm_irq_uninstall(struct drm_device *dev)
* vblank/irq handling. KMS drivers must ensure that vblanks are all
* disabled when uninstalling the irq handler.
*/
- if (dev->num_crtcs) {
+ if (drm_dev_has_vblank(dev)) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
for (i = 0; i < dev->num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
diff --git a/drivers/gpu/drm/drm_managed.c b/drivers/gpu/drm/drm_managed.c
new file mode 100644
index 000000000000..1e1356560c2e
--- /dev/null
+++ b/drivers/gpu/drm/drm_managed.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel
+ *
+ * Based on drivers/base/devres.c
+ */
+
+#include <drm/drm_managed.h>
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_print.h>
+
+#include "drm_internal.h"
+
+/**
+ * DOC: managed resources
+ *
+ * Inspired by &struct device managed resources, but tied to the lifetime of
+ * &struct drm_device, which can outlive the underlying physical device, usually
+ * when userspace has some open files and other handles to resources still open.
+ *
+ * Release actions can be added with drmm_add_action(), memory allocations can
+ * be done directly with drmm_kmalloc() and the related functions. Everything
+ * will be released on the final drm_dev_put() in reverse order of how the
+ * release actions have been added and memory has been allocated since driver
+ * loading started with drm_dev_init().
+ *
+ * Note that release actions and managed memory can also be added and removed
+ * during the lifetime of the driver; all the functions are fully
+ * concurrency-safe. But it is recommended to use managed resources only for
+ * resources that change rarely, if ever, during the lifetime of the
+ * &drm_device instance.
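+ *
+ * A minimal usage sketch (the release function and its data are
+ * hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	static void example_release(struct drm_device *dev, void *ptr)
+ *	{
+ *		// ptr is the data passed to drmm_add_action()
+ *		example_hw_shutdown(ptr);
+ *	}
+ *
+ *	state = drmm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ *	if (!state)
+ *		return -ENOMEM;
+ *	ret = drmm_add_action(dev, example_release, state);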
+ */
+
+struct drmres_node {
+ struct list_head entry;
+ drmres_release_t release;
+ const char *name;
+ size_t size;
+};
+
+struct drmres {
+ struct drmres_node node;
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+ * buffer alignment as if it was allocated by plain kmalloc().
+ */
+ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
+};
+
+static void free_dr(struct drmres *dr)
+{
+ kfree_const(dr->node.name);
+ kfree(dr);
+}
+
+void drm_managed_release(struct drm_device *dev)
+{
+ struct drmres *dr, *tmp;
+
+ drm_dbg_drmres(dev, "drmres release begin\n");
+ list_for_each_entry_safe(dr, tmp, &dev->managed.resources, node.entry) {
+ drm_dbg_drmres(dev, "REL %p %s (%zu bytes)\n",
+ dr, dr->node.name, dr->node.size);
+
+ if (dr->node.release)
+ dr->node.release(dev, dr->node.size ? *(void **)&dr->data : NULL);
+
+ list_del(&dr->node.entry);
+ free_dr(dr);
+ }
+ drm_dbg_drmres(dev, "drmres release end\n");
+}
+
+/*
+ * Always inline so that kmalloc_track_caller tracks the actual interesting
+ * caller outside of drm_managed.c.
+ */
+static __always_inline struct drmres *alloc_dr(drmres_release_t release,
+ size_t size, gfp_t gfp, int nid)
+{
+ size_t tot_size;
+ struct drmres *dr;
+
+ /* We must catch any near-SIZE_MAX cases that could overflow. */
+ if (unlikely(check_add_overflow(sizeof(*dr), size, &tot_size)))
+ return NULL;
+
+ dr = kmalloc_node_track_caller(tot_size, gfp, nid);
+ if (unlikely(!dr))
+ return NULL;
+
+ memset(dr, 0, offsetof(struct drmres, data));
+
+ INIT_LIST_HEAD(&dr->node.entry);
+ dr->node.release = release;
+ dr->node.size = size;
+
+ return dr;
+}
+
+static void del_dr(struct drm_device *dev, struct drmres *dr)
+{
+ list_del_init(&dr->node.entry);
+
+ drm_dbg_drmres(dev, "DEL %p %s (%lu bytes)\n",
+ dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+static void add_dr(struct drm_device *dev, struct drmres *dr)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_add(&dr->node.entry, &dev->managed.resources);
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ drm_dbg_drmres(dev, "ADD %p %s (%lu bytes)\n",
+ dr, dr->node.name, (unsigned long) dr->node.size);
+}
+
+/**
+ * drmm_add_final_kfree - add release action for the final kfree()
+ * @dev: DRM device
+ * @container: pointer to the kmalloc allocation containing @dev
+ *
+ * Since the allocation containing the &struct drm_device must be allocated
+ * before it can be initialized with drm_dev_init(), there's no way to allocate
+ * that memory with drmm_kmalloc(). To side-step this chicken-and-egg problem,
+ * the pointer for this final kfree() must be specified by calling this
+ * function. It
+ * will be released in the final drm_dev_put() for @dev, after all other release
+ * actions installed through drmm_add_action() have been processed.
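+ *
+ * A sketch for a driver embedding &struct drm_device (names are
+ * hypothetical):
+ *
+ * .. code-block:: c
+ *
+ *	struct example_device {
+ *		struct drm_device drm;
+ *		// ... driver-private state ...
+ *	};
+ *
+ *	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ *	if (!edev)
+ *		return -ENOMEM;
+ *	ret = drm_dev_init(&edev->drm, &example_driver, parent);
+ *	if (ret) {
+ *		kfree(edev);
+ *		return ret;
+ *	}
+ *	drmm_add_final_kfree(&edev->drm, edev);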
+ */
+void drmm_add_final_kfree(struct drm_device *dev, void *container)
+{
+ WARN_ON(dev->managed.final_kfree);
+ WARN_ON(dev < (struct drm_device *) container);
+ WARN_ON(dev + 1 > (struct drm_device *) (container + ksize(container)));
+ dev->managed.final_kfree = container;
+}
+EXPORT_SYMBOL(drmm_add_final_kfree);
+
+int __drmm_add_action(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name)
+{
+ struct drmres *dr;
+ void **void_ptr;
+
+ dr = alloc_dr(action, data ? sizeof(void*) : 0,
+ GFP_KERNEL | __GFP_ZERO,
+ dev_to_node(dev->dev));
+ if (!dr) {
+ drm_dbg_drmres(dev, "failed to add action %s for %p\n",
+ name, data);
+ return -ENOMEM;
+ }
+
+ dr->node.name = kstrdup_const(name, GFP_KERNEL);
+ if (data) {
+ void_ptr = (void **)&dr->data;
+ *void_ptr = data;
+ }
+
+ add_dr(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(__drmm_add_action);
+
+int __drmm_add_action_or_reset(struct drm_device *dev,
+ drmres_release_t action,
+ void *data, const char *name)
+{
+ int ret;
+
+ ret = __drmm_add_action(dev, action, data, name);
+ if (ret)
+ action(dev, data);
+
+ return ret;
+}
+EXPORT_SYMBOL(__drmm_add_action_or_reset);
+
+/**
+ * drmm_kmalloc - &drm_device managed kmalloc()
+ * @dev: DRM device
+ * @size: size of the memory allocation
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kmalloc(). The allocated memory is
+ * automatically freed on the final drm_dev_put(). Memory can also be freed
+ * before the final drm_dev_put() by calling drmm_kfree().
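+ *
+ * A one-line usage sketch (hypothetical caller):
+ *
+ * .. code-block:: c
+ *
+ *	buf = drmm_kmalloc(dev, len, GFP_KERNEL); // no explicit kfree() needed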
+ */
+void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp)
+{
+ struct drmres *dr;
+
+ dr = alloc_dr(NULL, size, gfp, dev_to_node(dev->dev));
+ if (!dr) {
+ drm_dbg_drmres(dev, "failed to allocate %zu bytes, %u flags\n",
+ size, gfp);
+ return NULL;
+ }
+ dr->node.name = kstrdup_const("kmalloc", GFP_KERNEL);
+
+ add_dr(dev, dr);
+
+ return dr->data;
+}
+EXPORT_SYMBOL(drmm_kmalloc);
+
+/**
+ * drmm_kstrdup - &drm_device managed kstrdup()
+ * @dev: DRM device
+ * @s: 0-terminated string to be duplicated
+ * @gfp: GFP allocation flags
+ *
+ * This is a &drm_device managed version of kstrdup(). The allocated memory is
+ * automatically freed on the final drm_dev_put() and works exactly like a
+ * memory allocation obtained by drmm_kmalloc().
+ */
+char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp)
+{
+ size_t size;
+ char *buf;
+
+ if (!s)
+ return NULL;
+
+ size = strlen(s) + 1;
+ buf = drmm_kmalloc(dev, size, gfp);
+ if (buf)
+ memcpy(buf, s, size);
+ return buf;
+}
+EXPORT_SYMBOL_GPL(drmm_kstrdup);
+
+/**
+ * drmm_kfree - &drm_device managed kfree()
+ * @dev: DRM device
+ * @data: memory allocation to be freed
+ *
+ * This is a &drm_device managed version of kfree() which can be used to
+ * release memory allocated through drmm_kmalloc() or any of its related
+ * functions before the final drm_dev_put() of @dev.
+ */
+void drmm_kfree(struct drm_device *dev, void *data)
+{
+ struct drmres *dr_match = NULL, *dr;
+ unsigned long flags;
+
+ if (!data)
+ return;
+
+ spin_lock_irqsave(&dev->managed.lock, flags);
+ list_for_each_entry(dr, &dev->managed.resources, node.entry) {
+ if (dr->data == data) {
+ dr_match = dr;
+ del_dr(dev, dr_match);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->managed.lock, flags);
+
+ if (WARN_ON(!dr_match))
+ return;
+
+ free_dr(dr_match);
+}
+EXPORT_SYMBOL(drmm_kfree);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 558baf989f5a..fd8d672972a9 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -169,7 +169,8 @@ int mipi_dbi_command_buf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
EXPORT_SYMBOL(mipi_dbi_command_buf);
/* This should only be used by mipi_dbi_command() */
-int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, u8 *data, size_t len)
+int mipi_dbi_command_stackbuf(struct mipi_dbi *dbi, u8 cmd, const u8 *data,
+ size_t len)
{
u8 *buf;
int ret;
@@ -216,7 +217,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- drm_fb_swab16(dst, src, fb, clip);
+ drm_fb_swab(dst, src, fb, clip, !import_attach);
else
drm_fb_memcpy(dst, src, fb, clip);
break;
@@ -510,6 +511,10 @@ int mipi_dbi_dev_init_with_formats(struct mipi_dbi_dev *dbidev,
if (!dbidev->dbi.command)
return -EINVAL;
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
+
dbidev->tx_buf = devm_kmalloc(drm->dev, tx_buf_size, GFP_KERNEL);
if (!dbidev->tx_buf)
return -ENOMEM;
@@ -579,26 +584,6 @@ int mipi_dbi_dev_init(struct mipi_dbi_dev *dbidev,
EXPORT_SYMBOL(mipi_dbi_dev_init);
/**
- * mipi_dbi_release - DRM driver release helper
- * @drm: DRM device
- *
- * This function finalizes and frees &mipi_dbi.
- *
- * Drivers can use this as their &drm_driver->release callback.
- */
-void mipi_dbi_release(struct drm_device *drm)
-{
- struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(drm);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(dbidev);
-}
-EXPORT_SYMBOL(mipi_dbi_release);
-
-/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @dbi: MIPI DBI structure
*
@@ -1308,10 +1293,8 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
* controller or getting the read command values.
* Drivers can use this as their &drm_driver->debugfs_init callback.
*
- * Returns:
- * Zero on success, negative error code on failure.
*/
-int mipi_dbi_debugfs_init(struct drm_minor *minor)
+void mipi_dbi_debugfs_init(struct drm_minor *minor)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
@@ -1320,8 +1303,6 @@ int mipi_dbi_debugfs_init(struct drm_minor *minor)
mode |= S_IRUGO;
debugfs_create_file("command", mode, minor->debugfs_root, dbidev,
&mipi_dbi_debugfs_command_fops);
-
- return 0;
}
EXPORT_SYMBOL(mipi_dbi_debugfs_init);
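With drmm_mode_config_init() wired into mipi_dbi_dev_init_with_formats() and mipi_dbi_release() gone, a DBI panel driver's &drm_driver shrinks accordingly. A sketch under those assumptions (the foo names are made up); note that .debugfs_init now returns void and no .release callback is needed:

.. code-block:: c

   static struct drm_driver foo_dbi_driver = {
           .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
           /* no .release: mode config cleanup is drm_device managed now */
           .debugfs_init    = mipi_dbi_debugfs_init, /* void as of this patch */
           .name            = "foo-dbi",
           .desc            = "hypothetical MIPI DBI panel",
           .date            = "20200101",
           .major           = 1,
   };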
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 8981abe8b7c9..82d2888eb7fe 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -212,20 +212,6 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
&drm_mm_interval_tree_augment);
}
-#define RB_INSERT(root, member, expr) do { \
- struct rb_node **link = &root.rb_node, *rb = NULL; \
- u64 x = expr(node); \
- while (*link) { \
- rb = *link; \
- if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
- link = &rb->rb_left; \
- else \
- link = &rb->rb_right; \
- } \
- rb_link_node(&node->member, rb, link); \
- rb_insert_color(&node->member, &root); \
-} while (0)
-
#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))
@@ -255,16 +241,42 @@ static void insert_hole_size(struct rb_root_cached *root,
rb_insert_color_cached(&node->rb_hole_size, root, first);
}
+RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
+ struct drm_mm_node, rb_hole_addr,
+ u64, subtree_max_hole, HOLE_SIZE)
+
+static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
+{
+ struct rb_node **link = &root->rb_node, *rb_parent = NULL;
+ u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
+ struct drm_mm_node *parent;
+
+ while (*link) {
+ rb_parent = *link;
+ parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
+ if (parent->subtree_max_hole < subtree_max_hole)
+ parent->subtree_max_hole = subtree_max_hole;
+ if (start < HOLE_ADDR(parent))
+ link = &parent->rb_hole_addr.rb_left;
+ else
+ link = &parent->rb_hole_addr.rb_right;
+ }
+
+ rb_link_node(&node->rb_hole_addr, rb_parent, link);
+ rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
+}
+
static void add_hole(struct drm_mm_node *node)
{
struct drm_mm *mm = node->mm;
node->hole_size =
__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
+ node->subtree_max_hole = node->hole_size;
DRM_MM_BUG_ON(!drm_mm_hole_follows(node));
insert_hole_size(&mm->holes_size, node);
- RB_INSERT(mm->holes_addr, rb_hole_addr, HOLE_ADDR);
+ insert_hole_addr(&mm->holes_addr, node);
list_add(&node->hole_stack, &mm->hole_stack);
}
@@ -275,8 +287,10 @@ static void rm_hole(struct drm_mm_node *node)
list_del(&node->hole_stack);
rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
- rb_erase(&node->rb_hole_addr, &node->mm->holes_addr);
+ rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
+ &augment_callbacks);
node->hole_size = 0;
+ node->subtree_max_hole = 0;
DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}
@@ -361,9 +375,88 @@ first_hole(struct drm_mm *mm,
}
}
+/**
+ * next_hole_high_addr - returns next hole for a DRM_MM_INSERT_HIGH mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether the left subtree of @entry has a hole big
+ * enough to fit the requested size. If so, it will return the previous node of
+ * @entry, or else it will return the parent node of @entry.
+ *
+ * It will also skip the complete left subtree if the subtree_max_hole of that
+ * subtree is the same as the subtree_max_hole of @entry.
+ *
+ * Returns:
+ * Previous node of @entry if the left subtree of @entry can serve the request,
+ * or else the parent of @entry.
+ */
+static struct drm_mm_node *
+next_hole_high_addr(struct drm_mm_node *entry, u64 size)
+{
+ struct rb_node *rb_node, *left_rb_node, *parent_rb_node;
+ struct drm_mm_node *left_node;
+
+ if (!entry)
+ return NULL;
+
+ rb_node = &entry->rb_hole_addr;
+ if (rb_node->rb_left) {
+ left_rb_node = rb_node->rb_left;
+ parent_rb_node = rb_parent(rb_node);
+ left_node = rb_entry(left_rb_node,
+ struct drm_mm_node, rb_hole_addr);
+ if (left_node->subtree_max_hole < size &&
+ parent_rb_node && parent_rb_node->rb_left != rb_node)
+ return rb_hole_addr_to_node(parent_rb_node);
+ }
+
+ return rb_hole_addr_to_node(rb_prev(rb_node));
+}
+
+/**
+ * next_hole_low_addr - returns next hole for a DRM_MM_INSERT_LOW mode request
+ * @entry: previously selected drm_mm_node
+ * @size: size of the hole needed for the request
+ *
+ * This function will verify whether the right subtree of @entry has a hole big
+ * enough to fit the requested size. If so, it will return the next node of
+ * @entry, or else it will return the parent node of @entry.
+ *
+ * It will also skip the complete right subtree if the subtree_max_hole of that
+ * subtree is the same as the subtree_max_hole of @entry.
+ *
+ * Returns:
+ * Next node of @entry if the right subtree of @entry can serve the request,
+ * or else the parent of @entry.
+ */
+static struct drm_mm_node *
+next_hole_low_addr(struct drm_mm_node *entry, u64 size)
+{
+ struct rb_node *rb_node, *right_rb_node, *parent_rb_node;
+ struct drm_mm_node *right_node;
+
+ if (!entry)
+ return NULL;
+
+ rb_node = &entry->rb_hole_addr;
+ if (rb_node->rb_right) {
+ right_rb_node = rb_node->rb_right;
+ parent_rb_node = rb_parent(rb_node);
+ right_node = rb_entry(right_rb_node,
+ struct drm_mm_node, rb_hole_addr);
+ if (right_node->subtree_max_hole < size &&
+ parent_rb_node && parent_rb_node->rb_right != rb_node)
+ return rb_hole_addr_to_node(parent_rb_node);
+ }
+
+ return rb_hole_addr_to_node(rb_next(rb_node));
+}
+
static struct drm_mm_node *
next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
+ u64 size,
enum drm_mm_insert_mode mode)
{
switch (mode) {
@@ -372,10 +465,10 @@ next_hole(struct drm_mm *mm,
return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));
case DRM_MM_INSERT_LOW:
- return rb_hole_addr_to_node(rb_next(&node->rb_hole_addr));
+ return next_hole_low_addr(node, size);
case DRM_MM_INSERT_HIGH:
- return rb_hole_addr_to_node(rb_prev(&node->rb_hole_addr));
+ return next_hole_high_addr(node, size);
case DRM_MM_INSERT_EVICT:
node = list_next_entry(node, hole_stack);
@@ -489,7 +582,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
for (hole = first_hole(mm, range_start, range_end, size, mode);
hole;
- hole = once ? NULL : next_hole(mm, hole, mode)) {
+ hole = once ? NULL : next_hole(mm, hole, size, mode)) {
u64 hole_start = __drm_mm_hole_node_start(hole);
u64 hole_end = hole_start + hole->hole_size;
u64 adj_start, adj_end;
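The augmented rbtree above caches subtree_max_hole at every node, so the DRM_MM_INSERT_LOW/HIGH walks in next_hole_low_addr()/next_hole_high_addr() can prune whole subtrees whose largest hole is already known to be too small. Callers are unaffected; a minimal sketch of an address-biased allocation that benefits from the pruning (SZ_* constants from <linux/sizes.h>):

.. code-block:: c

   static int foo_alloc_topdown(void)
   {
           struct drm_mm mm;
           struct drm_mm_node node = {};
           int err;

           drm_mm_init(&mm, 0, SZ_1M);

           /* place 64 KiB as high as possible within [0, 1 MiB) */
           err = drm_mm_insert_node_in_range(&mm, &node, SZ_64K, 0, 0,
                                             0, SZ_1M, DRM_MM_INSERT_HIGH);
           if (!err)
                   drm_mm_remove_node(&node);

           drm_mm_takedown(&mm);
           return err;
   }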
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 08e6eff6a179..5761f838a057 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_file.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mode_config.h>
#include <drm/drm_print.h>
#include <linux/dma-resv.h>
@@ -373,8 +374,14 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return 0;
}
+static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
+{
+ drm_mode_config_cleanup(dev);
+}
+
/**
- * drm_mode_config_init - initialize DRM mode_configuration structure
+ * drmm_mode_config_init - managed DRM mode_configuration structure
+ * initialization
* @dev: DRM device
*
* Initialize @dev's mode_config structure, used for tracking the graphics
@@ -384,8 +391,12 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
* problem, since this should happen single threaded at init time. It is the
* driver's problem to ensure this guarantee.
*
+ * Cleanup is automatically handled through registering drm_mode_config_cleanup
+ * with drmm_add_action().
+ *
+ * Returns: 0 on success, negative error value on failure.
*/
-void drm_mode_config_init(struct drm_device *dev)
+int drmm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
@@ -443,8 +454,11 @@ void drm_mode_config_init(struct drm_device *dev)
drm_modeset_acquire_fini(&modeset_ctx);
dma_resv_fini(&resv);
}
+
+ return drmm_add_action_or_reset(dev, drm_mode_config_init_release,
+ NULL);
}
-EXPORT_SYMBOL(drm_mode_config_init);
+EXPORT_SYMBOL(drmm_mode_config_init);
/**
* drm_mode_config_cleanup - free up DRM mode_config info
@@ -456,6 +470,9 @@ EXPORT_SYMBOL(drm_mode_config_init);
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
+ *
+ * FIXME: With the managed drmm_mode_config_init() it is no longer necessary for
+ * drivers to explicitly call this function.
*/
void drm_mode_config_cleanup(struct drm_device *dev)
{
@@ -532,3 +549,90 @@ void drm_mode_config_cleanup(struct drm_device *dev)
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
+
+static u32 full_encoder_mask(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+ u32 encoder_mask = 0;
+
+ drm_for_each_encoder(encoder, dev)
+ encoder_mask |= drm_encoder_mask(encoder);
+
+ return encoder_mask;
+}
+
+/*
+ * For some reason we want the encoder itself included in
+ * possible_clones. Make life easy for drivers by allowing them
+ * to leave possible_clones unset if no cloning is possible.
+ */
+static void fixup_encoder_possible_clones(struct drm_encoder *encoder)
+{
+ if (encoder->possible_clones == 0)
+ encoder->possible_clones = drm_encoder_mask(encoder);
+}
+
+static void validate_encoder_possible_clones(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ u32 encoder_mask = full_encoder_mask(dev);
+ struct drm_encoder *other;
+
+ drm_for_each_encoder(other, dev) {
+ WARN(!!(encoder->possible_clones & drm_encoder_mask(other)) !=
+ !!(other->possible_clones & drm_encoder_mask(encoder)),
+ "possible_clones mismatch: "
+ "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x vs. "
+ "[ENCODER:%d:%s] mask=0x%x possible_clones=0x%x\n",
+ encoder->base.id, encoder->name,
+ drm_encoder_mask(encoder), encoder->possible_clones,
+ other->base.id, other->name,
+ drm_encoder_mask(other), other->possible_clones);
+ }
+
+ WARN((encoder->possible_clones & drm_encoder_mask(encoder)) == 0 ||
+ (encoder->possible_clones & ~encoder_mask) != 0,
+ "Bogus possible_clones: "
+ "[ENCODER:%d:%s] possible_clones=0x%x (full encoder mask=0x%x)\n",
+ encoder->base.id, encoder->name,
+ encoder->possible_clones, encoder_mask);
+}
+
+static u32 full_crtc_mask(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ u32 crtc_mask = 0;
+
+ drm_for_each_crtc(crtc, dev)
+ crtc_mask |= drm_crtc_mask(crtc);
+
+ return crtc_mask;
+}
+
+static void validate_encoder_possible_crtcs(struct drm_encoder *encoder)
+{
+ u32 crtc_mask = full_crtc_mask(encoder->dev);
+
+ WARN((encoder->possible_crtcs & crtc_mask) == 0 ||
+ (encoder->possible_crtcs & ~crtc_mask) != 0,
+ "Bogus possible_crtcs: "
+ "[ENCODER:%d:%s] possible_crtcs=0x%x (full crtc mask=0x%x)\n",
+ encoder->base.id, encoder->name,
+ encoder->possible_crtcs, crtc_mask);
+}
+
+void drm_mode_config_validate(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ drm_for_each_encoder(encoder, dev)
+ fixup_encoder_possible_clones(encoder);
+
+ drm_for_each_encoder(encoder, dev) {
+ validate_encoder_possible_clones(encoder);
+ validate_encoder_possible_crtcs(encoder);
+ }
+}
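For drivers the conversion is mechanical but not free: drmm_mode_config_init() can now fail, so the return value must be checked, while the matching drm_mode_config_cleanup() calls in the error and teardown paths disappear. A sketch (hypothetical foo driver):

.. code-block:: c

   static int foo_modeset_init(struct drm_device *drm)
   {
           int ret;

           ret = drmm_mode_config_init(drm); /* was: void drm_mode_config_init() */
           if (ret)
                   return ret;

           drm->mode_config.min_width = 0;
           drm->mode_config.min_height = 0;
           drm->mode_config.max_width = 4096;
           drm->mode_config.max_height = 4096;

           /* no drm_mode_config_cleanup() anywhere: it runs as a managed action */
           return 0;
   }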
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index 35c2719407a8..901b078abf40 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -402,12 +402,13 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
{
struct drm_mode_obj_get_properties *arg = data;
struct drm_mode_object *obj;
+ struct drm_modeset_acquire_ctx ctx;
int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
obj = drm_mode_object_find(dev, file_priv, arg->obj_id, arg->obj_type);
if (!obj) {
@@ -427,7 +428,7 @@ int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
out_unref:
drm_mode_object_put(obj);
out:
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
@@ -449,12 +450,13 @@ static int set_property_legacy(struct drm_mode_object *obj,
{
struct drm_device *dev = prop->dev;
struct drm_mode_object *ref;
+ struct drm_modeset_acquire_ctx ctx;
int ret = -EINVAL;
if (!drm_property_change_valid_get(prop, prop_value, &ref))
return -EINVAL;
- drm_modeset_lock_all(dev);
+ DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
switch (obj->type) {
case DRM_MODE_OBJECT_CONNECTOR:
ret = drm_connector_set_obj_prop(obj, prop, prop_value);
@@ -468,7 +470,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
break;
}
drm_property_change_valid_put(prop, ref);
- drm_modeset_unlock_all(dev);
+ DRM_MODESET_LOCK_ALL_END(ctx, ret);
return ret;
}
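The DRM_MODESET_LOCK_ALL_BEGIN/END pair used in both hunks replaces drm_modeset_lock_all() with an acquire context: BEGIN opens a loop that backs off and retries on -EDEADLK through @ctx, END drops every lock taken and propagates @ret. The shape of the pattern, as a sketch:

.. code-block:: c

   static int foo_touch_modeset_state(struct drm_device *dev)
   {
           struct drm_modeset_acquire_ctx ctx;
           int ret = 0;

           DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

           /* ... look up and modify mode objects under all locks ... */

           DRM_MODESET_LOCK_ALL_END(ctx, ret);
           return ret;
   }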
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d4d64518e11b..f2865f88bd54 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -748,32 +748,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_set_name);
/**
- * drm_mode_hsync - get the hsync of a mode
- * @mode: mode
- *
- * Returns:
- * @modes's hsync rate in kHz, rounded to the nearest integer. Calculates the
- * value first if it is not yet set.
- */
-int drm_mode_hsync(const struct drm_display_mode *mode)
-{
- unsigned int calc_val;
-
- if (mode->hsync)
- return mode->hsync;
-
- if (mode->htotal <= 0)
- return 0;
-
- calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
- calc_val += 500; /* round to 1000Hz */
- calc_val /= 1000; /* truncate to kHz */
-
- return calc_val;
-}
-EXPORT_SYMBOL(drm_mode_hsync);
-
-/**
* drm_mode_vrefresh - get the vrefresh of a mode
* @mode: mode
*
@@ -783,26 +757,22 @@ EXPORT_SYMBOL(drm_mode_hsync);
*/
int drm_mode_vrefresh(const struct drm_display_mode *mode)
{
- int refresh = 0;
+ unsigned int num, den;
- if (mode->vrefresh > 0)
- refresh = mode->vrefresh;
- else if (mode->htotal > 0 && mode->vtotal > 0) {
- unsigned int num, den;
+ if (mode->htotal == 0 || mode->vtotal == 0)
+ return 0;
- num = mode->clock * 1000;
- den = mode->htotal * mode->vtotal;
+ num = mode->clock * 1000;
+ den = mode->htotal * mode->vtotal;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- num *= 2;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- den *= 2;
- if (mode->vscan > 1)
- den *= mode->vscan;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ num *= 2;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ den *= 2;
+ if (mode->vscan > 1)
+ den *= mode->vscan;
- refresh = DIV_ROUND_CLOSEST(num, den);
- }
- return refresh;
+ return DIV_ROUND_CLOSEST(num, den);
}
EXPORT_SYMBOL(drm_mode_vrefresh);
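As a worked example of the simplified math: the CEA-861 1920x1080@60 mode has clock = 148500 kHz, htotal = 2200 and vtotal = 1125, so num = 148500 * 1000 = 148500000 and den = 2200 * 1125 = 2475000, giving DIV_ROUND_CLOSEST(148500000, 2475000) = 60 Hz exactly. Interlace, doublescan and vscan only scale num or den before the division.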
@@ -1334,7 +1304,7 @@ static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head
if (diff)
return diff;
- diff = b->vrefresh - a->vrefresh;
+ diff = drm_mode_vrefresh(b) - drm_mode_vrefresh(a);
if (diff)
return diff;
@@ -1929,13 +1899,6 @@ EXPORT_SYMBOL(drm_mode_create_from_cmdline_mode);
void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
const struct drm_display_mode *in)
{
- WARN(in->hdisplay > USHRT_MAX || in->hsync_start > USHRT_MAX ||
- in->hsync_end > USHRT_MAX || in->htotal > USHRT_MAX ||
- in->hskew > USHRT_MAX || in->vdisplay > USHRT_MAX ||
- in->vsync_start > USHRT_MAX || in->vsync_end > USHRT_MAX ||
- in->vtotal > USHRT_MAX || in->vscan > USHRT_MAX,
- "timing values too large for mode info\n");
-
out->clock = in->clock;
out->hdisplay = in->hdisplay;
out->hsync_start = in->hsync_start;
@@ -1947,7 +1910,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
+ out->vrefresh = drm_mode_vrefresh(in);
out->flags = in->flags;
out->type = in->type;
@@ -2007,7 +1970,6 @@ int drm_mode_convert_umode(struct drm_device *dev,
out->vsync_end = in->vsync_end;
out->vtotal = in->vtotal;
out->vscan = in->vscan;
- out->vrefresh = in->vrefresh;
out->flags = in->flags;
/*
* Old xf86-video-vmware (possibly others too) used to
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 81aa21561982..75e2b7053f35 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -30,12 +30,13 @@
#include <drm/drm.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_drv.h>
-#include <drm/drm_pci.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
#include "drm_legacy.h"
+#ifdef CONFIG_DRM_LEGACY
+
/**
* drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
* @dev: DRM device
@@ -93,6 +94,7 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
}
EXPORT_SYMBOL(drm_pci_free);
+#endif
static int drm_get_pci_domain(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index d6ad60ab0d38..4af173ced327 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -289,6 +289,8 @@ EXPORT_SYMBOL(drm_universal_plane_init);
int drm_plane_register_all(struct drm_device *dev)
{
+ unsigned int num_planes = 0;
+ unsigned int num_zpos = 0;
struct drm_plane *plane;
int ret = 0;
@@ -297,8 +299,15 @@ int drm_plane_register_all(struct drm_device *dev)
ret = plane->funcs->late_register(plane);
if (ret)
return ret;
+
+ if (plane->zpos_property)
+ num_zpos++;
+ num_planes++;
}
+ drm_WARN(dev, num_zpos && num_planes != num_zpos,
+ "Mixing planes with and without zpos property is invalid\n");
+
return 0;
}
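The new drm_WARN enforces that zpos is all-or-nothing: once one plane exposes the property, every plane must. A sketch of keeping that invariant at plane-init time (foo_* and the cursor rule are hypothetical):

.. code-block:: c

   static int foo_plane_init_zpos(struct drm_plane *plane, unsigned int idx,
                                  unsigned int num_planes, bool is_cursor)
   {
           /* every plane gets a zpos, the cursor's is fixed on top */
           if (is_cursor)
                   return drm_plane_create_zpos_immutable_property(plane, idx);

           return drm_plane_create_zpos_property(plane, idx, 0, num_planes - 1);
   }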
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 282774e469ac..bbfc713bfdc3 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -270,7 +270,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
struct drm_device *dev = obj->dev;
/* drop the reference on the export fd holds */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
drm_dev_put(dev);
}
@@ -329,7 +329,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, handle);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
if (ret)
goto out_put;
@@ -500,7 +500,7 @@ out_have_handle:
fail_put_dmabuf:
dma_buf_put(dmabuf);
out:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
out_unlock:
mutex_unlock(&file_priv->prime.lock);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 576b4b7dcd89..26e997f1524f 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -159,6 +159,8 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
continue;
}
+ /* Mark the matching mode as being preferred by the user */
+ mode->type |= DRM_MODE_TYPE_USERDEF;
return 0;
}
@@ -532,9 +534,6 @@ prune:
if (list_empty(&connector->modes))
return 0;
- list_for_each_entry(mode, &connector->modes, head)
- mode->vrefresh = drm_mode_vrefresh(mode);
-
drm_mode_sort(&connector->modes);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index ca520028b2cb..f4e6184d1877 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -43,15 +43,6 @@
#define DEBUG_SCATTER 0
-static inline void *drm_vmalloc_dma(unsigned long size)
-{
-#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
- return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
-#else
- return vmalloc_32(size);
-#endif
-}
-
static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
@@ -126,7 +117,7 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
return -ENOMEM;
}
- entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
+ entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
if (!entry->virtual) {
kfree(entry->busaddr);
kfree(entry->pagelist);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 939f0032aab1..f0336c804639 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -291,9 +291,6 @@ int drm_sysfs_connector_add(struct drm_connector *connector)
return PTR_ERR(connector->kdev);
}
- /* Let userspace know we have a new connector */
- drm_sysfs_hotplug_event(dev);
-
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index da7b0b0c1090..85e5f2db1608 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -40,6 +41,69 @@
/**
* DOC: vblank handling
*
+ * From the computer's perspective, every time the monitor displays
+ * a new frame, the scanout engine has "scanned out" the display image
+ * from top to bottom, one row of pixels at a time. The current row
+ * of pixels is referred to as the current scanline.
+ *
+ * In addition to the display's visible area, there are usually a couple of
+ * extra scanlines which aren't actually displayed on the screen.
+ * These extra scanlines don't contain image data and are occasionally used
+ * for features like audio and infoframes. The region made up of these
+ * scanlines is referred to as the vertical blanking region, or vblank for
+ * short.
+ *
+ * For historical reference, the vertical blanking period was designed to
+ * give the electron gun (on CRTs) enough time to move back to the top of
+ * the screen to start scanning out the next frame. Similarly, horizontal
+ * blanking periods were designed to give the electron gun enough time to
+ * move back to the other side of the screen to start scanning the next
+ * scanline.
+ *
+ * ::
+ *
+ *
+ *           physical →   ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ *           top of      |                                        |
+ *           display     |                                        |
+ *                       |          New frame                     |
+ *                       |                                        |
+ *                       |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|
+ *                       |~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~| ← Scanline,
+ *                       |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓|   updates the
+ *                       |                                        |   frame as it
+ *                       |                                        |   travels down
+ *                       |                                        |   ("scan out")
+ *                       |               Old frame                |
+ *                       |                                        |
+ *                       |                                        |
+ *                       |                                        |
+ *                       |                                        |   physical
+ *                       |                                        |   bottom of
+ *           vertical    |⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽| ← display
+ *           blanking    ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *           region   →  ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *                       ┆xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx┆
+ *           start of →   ⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽⎽
+ *           new frame
+ *
+ * "Physical top of display" is the reference point for the high-precision/
+ * corrected timestamp.
+ *
+ * On a lot of display hardware, programming needs to take effect during the
+ * vertical blanking period so that settings like gamma, the image buffer
+ * to be scanned out, etc. can safely be changed without showing
+ * any visual artifacts on the screen. In some unforgiving hardware, some of
+ * this programming has to both start and end in the same vblank. To help
+ * with the timing of the hardware programming, an interrupt is usually
+ * available to notify the driver when it can start updating the registers.
+ * In this context, the interrupt is called the vblank interrupt.
+ *
+ * The vblank interrupt may be fired at different points depending on the
+ * hardware. Some hardware implementations will fire the interrupt when the
+ * new frame starts, while other implementations will fire the interrupt at
+ * different points in time.
+ *
* Vertical blanking plays a major role in graphics rendering. To achieve
* tear-free display, users must synchronize page flips and/or rendering to
* vertical blanking. The DRM API offers ioctls to perform page flips
@@ -130,7 +194,7 @@ static u32 drm_max_vblank_count(struct drm_device *dev, unsigned int pipe)
*/
static u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe)
{
- WARN_ON_ONCE(drm_max_vblank_count(dev, pipe) != 0);
+ drm_WARN_ON_ONCE(dev, drm_max_vblank_count(dev, pipe) != 0);
return 0;
}
@@ -139,7 +203,7 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
- if (WARN_ON(!crtc))
+ if (drm_WARN_ON(dev, !crtc))
return 0;
if (crtc->funcs->get_vblank_counter)
@@ -247,15 +311,15 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* frame/field duration.
*/
- DRM_DEBUG_VBL("crtc %u: Calculating number of vblanks."
- " diff_ns = %lld, framedur_ns = %d)\n",
- pipe, (long long) diff_ns, framedur_ns);
+ drm_dbg_vbl(dev, "crtc %u: Calculating number of vblanks."
+ " diff_ns = %lld, framedur_ns = %d)\n",
+ pipe, (long long)diff_ns, framedur_ns);
diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
if (diff == 0 && in_vblank_irq)
- DRM_DEBUG_VBL("crtc %u: Redundant vblirq ignored\n",
- pipe);
+ drm_dbg_vbl(dev, "crtc %u: Redundant vblirq ignored\n",
+ pipe);
} else {
/* some kind of default for drivers w/o accurate vbl timestamping */
diff = in_vblank_irq ? 1 : 0;
@@ -271,18 +335,19 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
* random large forward jumps of the software vblank counter.
*/
if (diff > 1 && (vblank->inmodeset & 0x2)) {
- DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
- " due to pre-modeset.\n", pipe, diff);
+ drm_dbg_vbl(dev,
+ "clamping vblank bump to 1 on crtc %u: diffr=%u"
+ " due to pre-modeset.\n", pipe, diff);
diff = 1;
}
- DRM_DEBUG_VBL("updating vblank count on crtc %u:"
- " current=%llu, diff=%u, hw=%u hw_last=%u\n",
- pipe, atomic64_read(&vblank->count), diff,
- cur_vblank, vblank->last);
+ drm_dbg_vbl(dev, "updating vblank count on crtc %u:"
+ " current=%llu, diff=%u, hw=%u hw_last=%u\n",
+ pipe, (unsigned long long)atomic64_read(&vblank->count),
+ diff, cur_vblank, vblank->last);
if (diff == 0) {
- WARN_ON_ONCE(cur_vblank != vblank->last);
+ drm_WARN_ON_ONCE(dev, cur_vblank != vblank->last);
return;
}
@@ -303,7 +368,7 @@ static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe)
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
u64 count;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return 0;
count = atomic64_read(&vblank->count);
@@ -338,9 +403,9 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) &&
- !crtc->funcs->get_vblank_timestamp,
- "This function requires support for accurate vblank timestamps.");
+ drm_WARN_ONCE(dev, drm_debug_enabled(DRM_UT_VBL) &&
+ !crtc->funcs->get_vblank_timestamp,
+ "This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -358,7 +423,7 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
- if (WARN_ON(!crtc))
+ if (drm_WARN_ON(dev, !crtc))
return;
if (crtc->funcs->disable_vblank)
@@ -419,32 +484,24 @@ static void vblank_disable_fn(struct timer_list *t)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&vblank->refcount) == 0 && vblank->enabled) {
- DRM_DEBUG("disabling vblank on crtc %u\n", pipe);
+ drm_dbg_core(dev, "disabling vblank on crtc %u\n", pipe);
drm_vblank_disable_and_save(dev, pipe);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
-void drm_vblank_cleanup(struct drm_device *dev)
+static void drm_vblank_init_release(struct drm_device *dev, void *ptr)
{
unsigned int pipe;
- /* Bail if the driver didn't call drm_vblank_init() */
- if (dev->num_crtcs == 0)
- return;
-
for (pipe = 0; pipe < dev->num_crtcs; pipe++) {
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- WARN_ON(READ_ONCE(vblank->enabled) &&
- drm_core_check_feature(dev, DRIVER_MODESET));
+ drm_WARN_ON(dev, READ_ONCE(vblank->enabled) &&
+ drm_core_check_feature(dev, DRIVER_MODESET));
del_timer_sync(&vblank->disable_timer);
}
-
- kfree(dev->vblank);
-
- dev->num_crtcs = 0;
}
/**
@@ -453,25 +510,29 @@ void drm_vblank_cleanup(struct drm_device *dev)
* @num_crtcs: number of CRTCs supported by @dev
*
* This function initializes vblank support for @num_crtcs display pipelines.
- * Cleanup is handled by the DRM core, or through calling drm_dev_fini() for
- * drivers with a &drm_driver.release callback.
+ * Cleanup is handled automatically through a cleanup function added with
+ * drmm_add_action().
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
{
- int ret = -ENOMEM;
+ int ret;
unsigned int i;
spin_lock_init(&dev->vbl_lock);
spin_lock_init(&dev->vblank_time_lock);
+ dev->vblank = drmm_kcalloc(dev, num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
+ if (!dev->vblank)
+ return -ENOMEM;
+
dev->num_crtcs = num_crtcs;
- dev->vblank = kcalloc(num_crtcs, sizeof(*dev->vblank), GFP_KERNEL);
- if (!dev->vblank)
- goto err;
+ ret = drmm_add_action(dev, drm_vblank_init_release, NULL);
+ if (ret)
+ return ret;
for (i = 0; i < num_crtcs; i++) {
struct drm_vblank_crtc *vblank = &dev->vblank[i];
@@ -483,13 +544,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
seqlock_init(&vblank->seqlock);
}
- DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
-
return 0;
-
-err:
- dev->num_crtcs = 0;
- return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
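Since dev->vblank now comes from drmm_kcalloc() and the disable timers are torn down by the managed drm_vblank_init_release() action, drivers simply check the return value and never undo the call. A sketch (hypothetical foo driver):

.. code-block:: c

   static int foo_kms_init(struct drm_device *drm)
   {
           int ret;

           /* one vblank counter per CRTC; cleanup is drm_device managed */
           ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
           if (ret)
                   return ret;

           return 0;
   }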
@@ -550,10 +605,10 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
int linedur_ns = 0, framedur_ns = 0;
int dotclock = mode->crtc_clock;
- if (!dev->num_crtcs)
+ if (!drm_dev_has_vblank(dev))
return;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
/* Valid dotclock? */
@@ -573,19 +628,21 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
*/
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
framedur_ns /= 2;
- } else
- DRM_ERROR("crtc %u: Can't calculate constants, dotclock = 0!\n",
- crtc->base.id);
+ } else {
+ drm_err(dev, "crtc %u: Can't calculate constants, dotclock = 0!\n",
+ crtc->base.id);
+ }
vblank->linedur_ns = linedur_ns;
vblank->framedur_ns = framedur_ns;
vblank->hwmode = *mode;
- DRM_DEBUG("crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
- crtc->base.id, mode->crtc_htotal,
- mode->crtc_vtotal, mode->crtc_vdisplay);
- DRM_DEBUG("crtc %u: clock %d kHz framedur %d linedur %d\n",
- crtc->base.id, dotclock, framedur_ns, linedur_ns);
+ drm_dbg_core(dev,
+ "crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",
+ crtc->base.id, mode->crtc_htotal,
+ mode->crtc_vtotal, mode->crtc_vdisplay);
+ drm_dbg_core(dev, "crtc %u: clock %d kHz framedur %d linedur %d\n",
+ crtc->base.id, dotclock, framedur_ns, linedur_ns);
}
EXPORT_SYMBOL(drm_calc_timestamping_constants);
@@ -638,13 +695,13 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
int delta_ns, duration_ns;
if (pipe >= dev->num_crtcs) {
- DRM_ERROR("Invalid crtc %u\n", pipe);
+ drm_err(dev, "Invalid crtc %u\n", pipe);
return false;
}
/* Scanout position query not supported? Should not happen. */
if (!get_scanout_position) {
- DRM_ERROR("Called from CRTC w/o get_scanout_position()!?\n");
+ drm_err(dev, "Called from CRTC w/o get_scanout_position()!?\n");
return false;
}
@@ -657,8 +714,9 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
* Happens during initial modesetting of a crtc.
*/
if (mode->crtc_clock == 0) {
- DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
- WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
+ drm_dbg_core(dev, "crtc %u: Noop due to uninitialized mode.\n",
+ pipe);
+ drm_WARN_ON_ONCE(dev, drm_drv_uses_atomic_modeset(dev));
return false;
}
@@ -681,8 +739,9 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
/* Return as no-op if scanout query unsupported or failed. */
if (!vbl_status) {
- DRM_DEBUG("crtc %u : scanoutpos query failed.\n",
- pipe);
+ drm_dbg_core(dev,
+ "crtc %u : scanoutpos query failed.\n",
+ pipe);
return false;
}
@@ -696,8 +755,9 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
/* Noisy system timing? */
if (i == DRM_TIMESTAMP_MAXRETRIES) {
- DRM_DEBUG("crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
- pipe, duration_ns/1000, *max_error/1000, i);
+ drm_dbg_core(dev,
+ "crtc %u: Noisy timestamp %d us > %d us [%d reps].\n",
+ pipe, duration_ns / 1000, *max_error / 1000, i);
}
/* Return upper bound of timestamp precision error. */
@@ -721,11 +781,12 @@ drm_crtc_vblank_helper_get_vblank_timestamp_internal(
ts_etime = ktime_to_timespec64(etime);
ts_vblank_time = ktime_to_timespec64(*vblank_time);
- DRM_DEBUG_VBL("crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
- pipe, hpos, vpos,
- (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
- (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
- duration_ns / 1000, i);
+ drm_dbg_vbl(dev,
+ "crtc %u : v p(%d,%d)@ %lld.%06ld -> %lld.%06ld [e %d us, %d rep]\n",
+ pipe, hpos, vpos,
+ (u64)ts_etime.tv_sec, ts_etime.tv_nsec / 1000,
+ (u64)ts_vblank_time.tv_sec, ts_vblank_time.tv_nsec / 1000,
+ duration_ns / 1000, i);
return true;
}
@@ -869,7 +930,7 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
u64 vblank_count;
unsigned int seq;
- if (WARN_ON(pipe >= dev->num_crtcs)) {
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs)) {
*vblanktime = 0;
return 0;
}
@@ -1010,7 +1071,7 @@ void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
unsigned int pipe = drm_crtc_index(crtc);
ktime_t now;
- if (dev->num_crtcs > 0) {
+ if (drm_dev_has_vblank(dev)) {
seq = drm_vblank_count_and_time(dev, pipe, &now);
} else {
seq = 0;
@@ -1027,7 +1088,7 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
- if (WARN_ON(!crtc))
+ if (drm_WARN_ON(dev, !crtc))
return 0;
if (crtc->funcs->enable_vblank)
@@ -1057,7 +1118,8 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
* prevent double-accounting of same vblank interval.
*/
ret = __enable_vblank(dev, pipe);
- DRM_DEBUG("enabling vblank on crtc %u, ret: %d\n", pipe, ret);
+ drm_dbg_core(dev, "enabling vblank on crtc %u, ret: %d\n",
+ pipe, ret);
if (ret) {
atomic_dec(&vblank->refcount);
} else {
@@ -1082,10 +1144,10 @@ static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
unsigned long irqflags;
int ret = 0;
- if (!dev->num_crtcs)
+ if (!drm_dev_has_vblank(dev))
return -EINVAL;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return -EINVAL;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -1123,10 +1185,10 @@ static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
{
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
- if (WARN_ON(atomic_read(&vblank->refcount) == 0))
+ if (drm_WARN_ON(dev, atomic_read(&vblank->refcount) == 0))
return;
/* Last user schedules interrupt disable */
@@ -1171,11 +1233,12 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
int ret;
u64 last;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
ret = drm_vblank_get(dev, pipe);
- if (WARN(ret, "vblank not available on crtc %i, ret=%i\n", pipe, ret))
+ if (drm_WARN(dev, ret, "vblank not available on crtc %i, ret=%i\n",
+ pipe, ret))
return;
last = drm_vblank_count(dev, pipe);
@@ -1184,7 +1247,7 @@ void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe)
last != drm_vblank_count(dev, pipe),
msecs_to_jiffies(100));
- WARN(ret == 0, "vblank wait timed out on crtc %i\n", pipe);
+ drm_WARN(dev, ret == 0, "vblank wait timed out on crtc %i\n", pipe);
drm_vblank_put(dev, pipe);
}
@@ -1226,14 +1289,14 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
unsigned long irqflags;
u64 seq;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
- DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
- pipe, vblank->enabled, vblank->inmodeset);
+ drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
/* Avoid redundant vblank disables without previous
* drm_crtc_vblank_on(). */
@@ -1258,9 +1321,9 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc)
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != pipe)
continue;
- DRM_DEBUG("Sending premature vblank event on disable: "
- "wanted %llu, current %llu\n",
- e->sequence, seq);
+ drm_dbg_core(dev, "Sending premature vblank event on disable: "
+ "wanted %llu, current %llu\n",
+ e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
send_vblank_event(dev, e, seq, now);
@@ -1303,7 +1366,7 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc)
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- WARN_ON(!list_empty(&dev->vblank_event_list));
+ drm_WARN_ON(dev, !list_empty(&dev->vblank_event_list));
}
EXPORT_SYMBOL(drm_crtc_vblank_reset);
@@ -1331,8 +1394,8 @@ void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
unsigned int pipe = drm_crtc_index(crtc);
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
- WARN_ON(dev->max_vblank_count);
- WARN_ON(!READ_ONCE(vblank->inmodeset));
+ drm_WARN_ON(dev, dev->max_vblank_count);
+ drm_WARN_ON(dev, !READ_ONCE(vblank->inmodeset));
vblank->max_vblank_count = max_vblank_count;
}
@@ -1355,12 +1418,12 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
unsigned long irqflags;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
- pipe, vblank->enabled, vblank->inmodeset);
+ drm_dbg_vbl(dev, "crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
@@ -1375,7 +1438,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
* user wishes vblank interrupts to be enabled all the time.
*/
if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
- WARN_ON(drm_vblank_enable(dev, pipe));
+ drm_WARN_ON(dev, drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
@@ -1402,15 +1465,16 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
u32 cur_vblank, diff = 1;
int count = DRM_TIMESTAMP_MAXRETRIES;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
assert_spin_locked(&dev->vbl_lock);
assert_spin_locked(&dev->vblank_time_lock);
vblank = &dev->vblank[pipe];
- WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
- "Cannot compute missed vblanks without frame duration\n");
+ drm_WARN_ONCE(dev,
+ drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns,
+ "Cannot compute missed vblanks without frame duration\n");
framedur_ns = vblank->framedur_ns;
do {
@@ -1423,8 +1487,9 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns);
- DRM_DEBUG_VBL("missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
- diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
+ drm_dbg_vbl(dev,
+ "missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n",
+ diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
EXPORT_SYMBOL(drm_vblank_restore);
@@ -1451,10 +1516,10 @@ static void drm_legacy_vblank_pre_modeset(struct drm_device *dev,
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
/* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!dev->num_crtcs)
+ if (!drm_dev_has_vblank(dev))
return;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
/*
@@ -1478,10 +1543,10 @@ static void drm_legacy_vblank_post_modeset(struct drm_device *dev,
unsigned long irqflags;
/* vblank is not initialized (IRQ not installed ?), or has been freed */
- if (!dev->num_crtcs)
+ if (!drm_dev_has_vblank(dev))
return;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return;
if (vblank->inmodeset) {
@@ -1503,7 +1568,7 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
unsigned int pipe;
/* If drm_vblank_init() hasn't been called yet, just no-op */
- if (!dev->num_crtcs)
+ if (!drm_dev_has_vblank(dev))
return 0;
/* KMS drivers handle this internally */
@@ -1583,8 +1648,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
seq = drm_vblank_count_and_time(dev, pipe, &now);
- DRM_DEBUG("event on vblank count %llu, current %llu, crtc %u\n",
- req_seq, seq, pipe);
+ drm_dbg_core(dev, "event on vblank count %llu, current %llu, crtc %u\n",
+ req_seq, seq, pipe);
trace_drm_vblank_event_queued(file_priv, pipe, req_seq);
@@ -1675,10 +1740,11 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK)) {
- DRM_DEBUG("Unsupported type value 0x%x, supported mask 0x%x\n",
- vblwait->request.type,
- (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
- _DRM_VBLANK_HIGH_CRTC_MASK));
+ drm_dbg_core(dev,
+ "Unsupported type value 0x%x, supported mask 0x%x\n",
+ vblwait->request.type,
+ (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
+ _DRM_VBLANK_HIGH_CRTC_MASK));
return -EINVAL;
}
@@ -1721,7 +1787,9 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
ret = drm_vblank_get(dev, pipe);
if (ret) {
- DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ drm_dbg_core(dev,
+ "crtc %d failed to acquire vblank counter, %d\n",
+ pipe, ret);
return ret;
}
seq = drm_vblank_count(dev, pipe);
@@ -1757,8 +1825,8 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (req_seq != seq) {
int wait;
- DRM_DEBUG("waiting on vblank count %llu, crtc %u\n",
- req_seq, pipe);
+ drm_dbg_core(dev, "waiting on vblank count %llu, crtc %u\n",
+ req_seq, pipe);
wait = wait_event_interruptible_timeout(vblank->queue,
vblank_passed(drm_vblank_count(dev, pipe), req_seq) ||
!READ_ONCE(vblank->enabled),
@@ -1782,10 +1850,11 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (ret != -EINTR) {
drm_wait_vblank_reply(dev, pipe, &vblwait->reply);
- DRM_DEBUG("crtc %d returning %u to client\n",
- pipe, vblwait->reply.sequence);
+ drm_dbg_core(dev, "crtc %d returning %u to client\n",
+ pipe, vblwait->reply.sequence);
} else {
- DRM_DEBUG("crtc %d vblank wait interrupted by signal\n", pipe);
+ drm_dbg_core(dev, "crtc %d vblank wait interrupted by signal\n",
+ pipe);
}
done:
@@ -1811,8 +1880,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
if (!vblank_passed(seq, e->sequence))
continue;
- DRM_DEBUG("vblank event on %llu, current %llu\n",
- e->sequence, seq);
+ drm_dbg_core(dev, "vblank event on %llu, current %llu\n",
+ e->sequence, seq);
list_del(&e->base.link);
drm_vblank_put(dev, pipe);
@@ -1841,10 +1910,10 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
unsigned long irqflags;
bool disable_irq;
- if (WARN_ON_ONCE(!dev->num_crtcs))
+ if (drm_WARN_ON_ONCE(dev, !drm_dev_has_vblank(dev)))
return false;
- if (WARN_ON(pipe >= dev->num_crtcs))
+ if (drm_WARN_ON(dev, pipe >= dev->num_crtcs))
return false;
spin_lock_irqsave(&dev->event_lock, irqflags);
@@ -1951,7 +2020,9 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
if (!vblank_enabled) {
ret = drm_crtc_vblank_get(crtc);
if (ret) {
- DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ drm_dbg_core(dev,
+ "crtc %d failed to acquire vblank counter, %d\n",
+ pipe, ret);
return ret;
}
}
@@ -2017,7 +2088,9 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
ret = drm_crtc_vblank_get(crtc);
if (ret) {
- DRM_DEBUG("crtc %d failed to acquire vblank counter, %d\n", pipe, ret);
+ drm_dbg_core(dev,
+ "crtc %d failed to acquire vblank counter, %d\n",
+ pipe, ret);
goto err_free;
}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index aa88911bbc06..1a6369633789 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -37,6 +37,7 @@
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
#if defined(__ia64__)
#include <linux/efi.h>
@@ -44,14 +45,12 @@
#endif
#include <linux/mem_encrypt.h>
-#include <asm/pgtable.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
-#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include "drm_internal.h"
@@ -595,8 +594,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
}
+ fallthrough; /* to _DRM_FRAME_BUFFER... */
#endif
- /* fall through - to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
@@ -621,7 +620,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through - to _DRM_SHM */
+ fallthrough; /* to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
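The fallthrough pseudo-keyword (from <linux/compiler_attributes.h>) replaces the old comment convention; unlike a comment it expands to __attribute__((fallthrough)) where supported, so -Wimplicit-fallthrough can verify the intent. A minimal sketch of the idiom:

.. code-block:: c

   static int foo_flags_for(int type)
   {
           int flags = 0;

           switch (type) {
           case 1:
                   flags |= 0x1;
                   fallthrough;    /* type 1 implies everything type 2 needs */
           case 2:
                   flags |= 0x2;
                   break;
           }

           return flags;
   }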
diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c
deleted file mode 100644
index 2000d9b33fd5..000000000000
--- a/drivers/gpu/drm/drm_vram_helper_common.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM
- * buffer object that is backed by video RAM. It can be used for
- * framebuffer devices with dedicated memory. The video RAM is managed
- * by &struct drm_vram_mm (VRAM MM).
- *
- * With the GEM interface userspace applications create, manage and destroy
- * graphics buffers, such as an on-screen framebuffer. GEM does not provide
- * an implementation of these interfaces. It's up to the DRM driver to
- * provide an implementation that suits the hardware. If the hardware device
- * contains dedicated video memory, the DRM driver can use the VRAM helper
- * library. Each active buffer object is stored in video RAM. Active
- * buffer are used for drawing the current frame, typically something like
- * the frame's scanout buffer or the cursor image. If there's no more space
- * left in VRAM, inactive GEM objects can be moved to system memory.
- *
- * The easiest way to use the VRAM helper library is to call
- * drm_vram_helper_alloc_mm(). The function allocates and initializes an
- * instance of &struct drm_vram_mm in &struct drm_device.vram_mm . Use
- * &DRM_GEM_VRAM_DRIVER to initialize &struct drm_driver and
- * &DRM_VRAM_MM_FILE_OPERATIONS to initialize &struct file_operations;
- * as illustrated below.
- *
- * .. code-block:: c
- *
- * struct file_operations fops ={
- * .owner = THIS_MODULE,
- * DRM_VRAM_MM_FILE_OPERATION
- * };
- * struct drm_driver drv = {
- * .driver_feature = DRM_ ... ,
- * .fops = &fops,
- * DRM_GEM_VRAM_DRIVER
- * };
- *
- * int init_drm_driver()
- * {
- * struct drm_device *dev;
- * uint64_t vram_base;
- * unsigned long vram_size;
- * int ret;
- *
- * // setup device, vram base and size
- * // ...
- *
- * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
- * if (ret)
- * return ret;
- * return 0;
- * }
- *
- * This creates an instance of &struct drm_vram_mm, exports DRM userspace
- * interfaces for GEM buffer management and initializes file operations to
- * allow for accessing created GEM buffers. With this setup, the DRM driver
- * manages an area of video RAM with VRAM MM and provides GEM VRAM objects
- * to userspace.
- *
- * To clean up the VRAM memory management, call drm_vram_helper_release_mm()
- * in the driver's clean-up code.
- *
- * .. code-block:: c
- *
- * void fini_drm_driver()
- * {
- * struct drm_device *dev = ...;
- *
- * drm_vram_helper_release_mm(dev);
- * }
- *
- * For drawing or scanout operations, buffer object have to be pinned in video
- * RAM. Call drm_gem_vram_pin() with &DRM_GEM_VRAM_PL_FLAG_VRAM or
- * &DRM_GEM_VRAM_PL_FLAG_SYSTEM to pin a buffer object in video RAM or system
- * memory. Call drm_gem_vram_unpin() to release the pinned object afterwards.
- *
- * A buffer object that is pinned in video RAM has a fixed address within that
- * memory region. Call drm_gem_vram_offset() to retrieve this value. Typically
- * it's used to program the hardware's scanout engine for framebuffers, set
- * the cursor overlay's image for a mouse cursor, or use it as input to the
- * hardware's draing engine.
- *
- * To access a buffer object's memory from the DRM driver, call
- * drm_gem_vram_kmap(). It (optionally) maps the buffer into kernel address
- * space and returns the memory address. Use drm_gem_vram_kunmap() to
- * release the mapping.
- */
-
-MODULE_DESCRIPTION("DRM VRAM memory-management helpers");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index 43d9e3bb3a94..dccf4504f1bb 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -108,7 +108,6 @@ static const struct dma_fence_ops drm_writeback_fence_ops = {
.get_driver_name = drm_writeback_fence_get_driver_name,
.get_timeline_name = drm_writeback_fence_get_timeline_name,
.enable_signaling = drm_writeback_fence_enable_signaling,
- .wait = dma_fence_default_wait,
};
static int create_writeback_properties(struct drm_device *dev)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index a8685b2e1803..a9a3afaef9a1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -231,21 +231,11 @@ static struct drm_info_list etnaviv_debugfs_list[] = {
{"ring", show_each_gpu, 0, etnaviv_ring_show},
};
-static int etnaviv_debugfs_init(struct drm_minor *minor)
+static void etnaviv_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(etnaviv_debugfs_list,
- ARRAY_SIZE(etnaviv_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
- return ret;
- }
-
- return ret;
+ drm_debugfs_create_files(etnaviv_debugfs_list,
+ ARRAY_SIZE(etnaviv_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -299,7 +289,7 @@ static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_prep(obj, args->op, &args->timeout);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -320,7 +310,7 @@ static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = etnaviv_gem_cpu_fini(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -340,7 +330,7 @@ static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = etnaviv_gem_mmap_offset(obj, &args->offset);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -423,7 +413,7 @@ static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
ret = etnaviv_gem_wait_bo(gpu, obj, timeout);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -736,7 +726,7 @@ static void __exit etnaviv_exit(void)
module_exit(etnaviv_exit);
MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
-MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
+MODULE_AUTHOR("Russell King <rmk+kernel@armlinux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 648cf0207309..706af0304ca4 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -154,8 +154,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
file_size += sizeof(*iter.hdr) * n_obj;
/* Allocate the file in vmalloc memory, it's likely to be big */
- iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
- PAGE_KERNEL);
+ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN |
+ __GFP_NORETRY);
if (!iter.start) {
mutex_unlock(&gpu->mmu_context->lock);
dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index dc9ef302f517..f5e5bb8ba953 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -244,7 +244,7 @@ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
mapping->use -= 1;
mutex_unlock(&etnaviv_obj->lock);
- drm_gem_object_put_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put(&etnaviv_obj->base);
}
struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
@@ -633,7 +633,7 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
/* drop reference from allocate - handle holds it now */
fail:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
- might_lock_read(&current->mm->mmap_sem);
+ might_lock_read(&current->mm->mmap_lock);
if (userptr->mm != current->mm)
return -EPERM;
@@ -742,6 +742,6 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put(&etnaviv_obj->base);
return ret;
}
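The mmap_sem reference becomes mmap_lock as part of the tree-wide mmap locking API conversion; code outside mm/ is expected to go through the wrappers in <linux/mmap_lock.h> rather than name the lock directly. A sketch of the reader-side pattern:

.. code-block:: c

   #include <linux/mmap_lock.h>
   #include <linux/mm_types.h>

   static long foo_count_vmas(struct mm_struct *mm)
   {
           struct vm_area_struct *vma;
           long n = 0;

           mmap_read_lock(mm);     /* was: down_read(&mm->mmap_sem) */
           for (vma = mm->mmap; vma; vma = vma->vm_next)
                   n++;
           mmap_read_unlock(mm);   /* was: up_read(&mm->mmap_sem) */

           return n;
   }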
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f24dd21c2363..6d9e5c3c4dd5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -136,7 +136,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
return &etnaviv_obj->base;
fail:
- drm_gem_object_put_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put(&etnaviv_obj->base);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 3b0afa156d92..d05c35994579 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -238,8 +238,10 @@ static int submit_pin_objects(struct etnaviv_gem_submit *submit)
}
if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
- submit->bos[i].va != mapping->iova)
+ submit->bos[i].va != mapping->iova) {
+ etnaviv_gem_mapping_unreference(mapping);
return -EINVAL;
+ }
atomic_inc(&etnaviv_obj->gpu_active);
@@ -396,7 +398,7 @@ static void submit_cleanup(struct kref *kref)
/* if the GPU submit failed, objects might still be locked */
submit_unlock_object(submit, i);
- drm_gem_object_put_unlocked(&etnaviv_obj->base);
+ drm_gem_object_put(&etnaviv_obj->base);
}
wake_up_all(&submit->gpu->fence_event);
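[Editor's note] One hunk in this file is a behavior fix rather than a rename: when a softpin submit's requested VA does not match the mapping's IOVA, the error path previously returned -EINVAL while still holding the mapping reference taken a few lines earlier. A condensed sketch of the corrected pattern (excerpt-style, names follow the driver but this is not the full function):

    /* sketch: drop what you took before bailing out */
    mapping = etnaviv_gem_mapping_get(&etnaviv_obj->base, mmu_context,
                                      submit->bos[i].va);
    if (IS_ERR(mapping))
            return PTR_ERR(mapping);

    if ((submit->flags & ETNA_SUBMIT_SOFTPIN) &&
        submit->bos[i].va != mapping->iova) {
            etnaviv_gem_mapping_unreference(mapping); /* fixes the ref leak */
            return -EINVAL;
    }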
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index e6795bafcbb9..75f9db8f7bec 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -453,7 +453,7 @@ static const struct etnaviv_pm_domain *pm_domain(const struct etnaviv_gpu *gpu,
if (!(gpu->identity.features & meta->feature))
continue;
- if (meta->nr_domains < (index - offset)) {
+ if (index - offset >= meta->nr_domains) {
offset += meta->nr_domains;
continue;
}
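[Editor's note] The perfmon change inverts a bounds check that let an index exactly at the end of a domain block slip through. A worked example, assuming a block with nr_domains == 4 and offset == 0:

    /*
     * index = 4 (first domain past this block):
     *   old: meta->nr_domains < (index - offset)  ->  4 < 4  -> false,
     *        the block is NOT skipped and domains[4] is read out of bounds;
     *   new: index - offset >= meta->nr_domains   ->  4 >= 4 -> true,
     *        offset += nr_domains and the next block is tried instead.
     */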
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 5ee090691390..9ac51b6ab34b 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -25,6 +25,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_crtc.h"
@@ -135,10 +136,6 @@ static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = {
.disable = exynos_dp_nop,
};
-static const struct drm_encoder_funcs exynos_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
{
int ret;
@@ -167,8 +164,7 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs);
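[Editor's note] This is the first of many conversions in this series to drm_simple_encoder_init(), which covers encoders whose only callback was the standard cleanup. Roughly what each conversion deletes (a sketch of the call sites, not of the helper's implementation):

    /* before: a funcs struct that only wires up the default destroy */
    static const struct drm_encoder_funcs boilerplate_funcs = {
            .destroy = drm_encoder_cleanup,
    };
    drm_encoder_init(drm_dev, encoder, &boilerplate_funcs,
                     DRM_MODE_ENCODER_TMDS, NULL);

    /* after: one call, no funcs struct */
    drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);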
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 43fa0f26c052..7ba5354e7d94 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -14,6 +14,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -149,10 +150,6 @@ static const struct drm_encoder_helper_funcs exynos_dpi_encoder_helper_funcs = {
.disable = exynos_dpi_disable,
};
-static const struct drm_encoder_funcs exynos_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
enum {
FIMD_PORT_IN0,
FIMD_PORT_IN1,
@@ -201,8 +198,7 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
{
int ret;
- drm_encoder_init(dev, encoder, &exynos_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dpi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 57defeb44522..dbd80f1e4c78 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -76,7 +76,6 @@ static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
}
static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
- .fault = exynos_drm_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e080aa92338c..ee96a95fb6be 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -30,6 +30,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
@@ -211,7 +212,7 @@
#define OLD_SCLK_MIPI_CLK_NAME "pll_clk"
-static char *clk_names[5] = { "bus_clk", "sclk_mipi",
+static const char *const clk_names[5] = { "bus_clk", "sclk_mipi",
"phyclk_mipidphy0_bitclkdiv8", "phyclk_mipidphy0_rxclkesc0",
"sclk_rgb_vclk_to_dsim0" };
@@ -1523,10 +1524,6 @@ static const struct drm_encoder_helper_funcs exynos_dsi_encoder_helper_funcs = {
.disable = exynos_dsi_disable,
};
-static const struct drm_encoder_funcs exynos_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
@@ -1704,8 +1701,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
struct drm_bridge *in_bridge;
int ret;
- drm_encoder_init(drm_dev, encoder, &exynos_dsi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_dsi_encoder_helper_funcs);
@@ -1763,10 +1759,6 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->dev = dev;
dsi->driver_data = of_device_get_match_data(dev);
- ret = exynos_dsi_parse_dt(dsi);
- if (ret)
- return ret;
-
dsi->supplies[0].supply = "vddcore";
dsi->supplies[1].supply = "vddio";
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
@@ -1813,10 +1805,8 @@ static int exynos_dsi_probe(struct platform_device *pdev)
}
dsi->irq = platform_get_irq(pdev, 0);
- if (dsi->irq < 0) {
- dev_err(dev, "failed to request dsi irq resource\n");
+ if (dsi->irq < 0)
return dsi->irq;
- }
irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
@@ -1827,11 +1817,25 @@ static int exynos_dsi_probe(struct platform_device *pdev)
return ret;
}
+ ret = exynos_dsi_parse_dt(dsi);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, &dsi->encoder);
pm_runtime_enable(dev);
- return component_add(dev, &exynos_dsi_component_ops);
+ ret = component_add(dev, &exynos_dsi_component_ops);
+ if (ret)
+ goto err_disable_runtime;
+
+ return 0;
+
+err_disable_runtime:
+ pm_runtime_disable(dev);
+ of_node_put(dsi->in_bridge_node);
+
+ return ret;
}
static int exynos_dsi_remove(struct platform_device *pdev)
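[Editor's note] Beyond moving exynos_dsi_parse_dt() after the resources it depends on, the probe hunk adds the unwind that was missing when component_add() fails. A hedged sketch of the ordering rule it follows (excerpt-style):

    /* everything enabled before component_add() must be torn down if
     * component_add() fails, since the component bind callback may
     * already have relied on it */
    pm_runtime_enable(dev);

    ret = component_add(dev, &exynos_dsi_component_ops);
    if (ret) {
            pm_runtime_disable(dev);
            of_node_put(dsi->in_bridge_node);  /* ref taken in parse_dt */
            return ret;
    }
    return 0;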
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index e6ceaf36fb04..56a2b47e1af7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -76,7 +76,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
struct fb_info *fbi;
struct drm_framebuffer *fb = helper->fb;
unsigned int size = fb->width * fb->height * fb->format->cpp[0];
- unsigned int nr_pages;
unsigned long offset;
fbi = drm_fb_helper_alloc_fbi(helper);
@@ -90,16 +89,6 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
- nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
- exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
- if (!exynos_gem->kvaddr) {
- DRM_DEV_ERROR(to_dma_dev(helper->dev),
- "failed to map pages to kernel space.\n");
- return -EIO;
- }
-
offset = fbi->var.xoffset * fb->format->cpp[0];
offset += fbi->var.yoffset * fb->pitches[0];
@@ -133,18 +122,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
- /*
- * If physically contiguous memory allocation fails and if IOMMU is
- * supported then try to get buffer from non physically contiguous
- * memory area.
- */
- if (IS_ERR(exynos_gem) && is_drm_iommu_supported(dev)) {
- dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
- exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
- size);
- }
-
+ exynos_gem = exynos_drm_gem_create(dev, EXYNOS_BO_WC, size, true);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
@@ -229,12 +207,8 @@ err_init:
static void exynos_drm_fbdev_destroy(struct drm_device *dev,
struct drm_fb_helper *fb_helper)
{
- struct exynos_drm_fbdev *exynos_fbd = to_exynos_fbdev(fb_helper);
- struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
struct drm_framebuffer *fb;
- vunmap(exynos_gem->kvaddr);
-
/* release drm framebuffer and real buffer */
if (fb_helper->fb && fb_helper->fb->funcs) {
fb = fb_helper->fb;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d734d9d51762..efa476858db5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -17,28 +17,23 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
-static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
+static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem, bool kvmap)
{
struct drm_device *dev = exynos_gem->base.dev;
- unsigned long attr;
- unsigned int nr_pages;
- struct sg_table sgt;
- int ret = -ENOMEM;
+ unsigned long attr = 0;
if (exynos_gem->dma_addr) {
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
- exynos_gem->dma_attrs = 0;
-
/*
* if EXYNOS_BO_CONTIG, fully physically contiguous memory
* region will be allocated else physically contiguous
* as possible.
*/
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
- exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
+ attr |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
@@ -46,61 +41,29 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
*/
if (exynos_gem->flags & EXYNOS_BO_WC ||
!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
- attr = DMA_ATTR_WRITE_COMBINE;
+ attr |= DMA_ATTR_WRITE_COMBINE;
else
- attr = DMA_ATTR_NON_CONSISTENT;
-
- exynos_gem->dma_attrs |= attr;
- exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attr |= DMA_ATTR_NON_CONSISTENT;
- nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
- exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
- GFP_KERNEL | __GFP_ZERO);
- if (!exynos_gem->pages) {
- DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
- return -ENOMEM;
- }
+ /* FBDev emulation requires kernel mapping */
+ if (!kvmap)
+ attr |= DMA_ATTR_NO_KERNEL_MAPPING;
+ exynos_gem->dma_attrs = attr;
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
- goto err_free;
- }
-
- ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->size,
- exynos_gem->dma_attrs);
- if (ret < 0) {
- DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
- goto err_dma_free;
- }
-
- if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
- nr_pages)) {
- DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
- ret = -EINVAL;
- goto err_sgt_free;
+ return -ENOMEM;
}
- sg_free_table(&sgt);
+ if (kvmap)
+ exynos_gem->kvaddr = exynos_gem->cookie;
DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
-
return 0;
-
-err_sgt_free:
- sg_free_table(&sgt);
-err_dma_free:
- dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
- exynos_gem->dma_addr, exynos_gem->dma_attrs);
-err_free:
- kvfree(exynos_gem->pages);
-
- return ret;
}
static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
@@ -118,8 +81,6 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
exynos_gem->dma_attrs);
-
- kvfree(exynos_gem->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -139,7 +100,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return 0;
}
@@ -203,7 +164,8 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
- unsigned long size)
+ unsigned long size,
+ bool kvmap)
{
struct exynos_drm_gem *exynos_gem;
int ret;
@@ -237,7 +199,7 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem->flags = flags;
- ret = exynos_drm_alloc_buf(exynos_gem);
+ ret = exynos_drm_alloc_buf(exynos_gem, kvmap);
if (ret < 0) {
drm_gem_object_release(&exynos_gem->base);
kfree(exynos_gem);
@@ -254,7 +216,7 @@ int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
struct exynos_drm_gem *exynos_gem;
int ret;
- exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size);
+ exynos_gem = exynos_drm_gem_create(dev, args->flags, args->size, false);
if (IS_ERR(exynos_gem))
return PTR_ERR(exynos_gem);
@@ -333,7 +295,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return 0;
}
@@ -365,7 +327,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
else
flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
- exynos_gem = exynos_drm_gem_create(dev, flags, args->size);
+ exynos_gem = exynos_drm_gem_create(dev, flags, args->size, false);
if (IS_ERR(exynos_gem)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem);
@@ -381,26 +343,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
return 0;
}
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
- unsigned long pfn;
- pgoff_t page_offset;
-
- page_offset = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
-
- if (page_offset >= (exynos_gem->size >> PAGE_SHIFT)) {
- DRM_ERROR("invalid page offset\n");
- return VM_FAULT_SIGBUS;
- }
-
- pfn = page_to_pfn(exynos_gem->pages[page_offset]);
- return vmf_insert_mixed(vma, vmf->address,
- __pfn_to_pfn_t(pfn, PFN_DEV));
-}
-
static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma)
{
@@ -462,11 +404,24 @@ struct drm_gem_object *exynos_drm_gem_prime_import(struct drm_device *dev,
struct sg_table *exynos_drm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
- int npages;
+ struct drm_device *drm_dev = obj->dev;
+ struct sg_table *sgt;
+ int ret;
- npages = exynos_gem->size >> PAGE_SHIFT;
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
- return drm_prime_pages_to_sg(exynos_gem->pages, npages);
+ ret = dma_get_sgtable_attrs(to_dma_dev(drm_dev), sgt, exynos_gem->cookie,
+ exynos_gem->dma_addr, exynos_gem->size,
+ exynos_gem->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to get sgtable, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
}
struct drm_gem_object *
@@ -475,52 +430,47 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct exynos_drm_gem *exynos_gem;
- int npages;
- int ret;
-
- exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
- if (IS_ERR(exynos_gem)) {
- ret = PTR_ERR(exynos_gem);
- return ERR_PTR(ret);
- }
- exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+ if (sgt->nents < 1)
+ return ERR_PTR(-EINVAL);
- npages = exynos_gem->size >> PAGE_SHIFT;
- exynos_gem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!exynos_gem->pages) {
- ret = -ENOMEM;
- goto err;
+ /*
+ * Check if the provided buffer has been mapped as contiguous
+ * into DMA address space.
+ */
+ if (sgt->nents > 1) {
+ dma_addr_t next_addr = sg_dma_address(sgt->sgl);
+ struct scatterlist *s;
+ unsigned int i;
+
+ for_each_sg(sgt->sgl, s, sgt->nents, i) {
+ if (!sg_dma_len(s))
+ break;
+ if (sg_dma_address(s) != next_addr) {
+ DRM_ERROR("buffer chunks must be mapped contiguously\n");
+ return ERR_PTR(-EINVAL);
+ }
+ next_addr = sg_dma_address(s) + sg_dma_len(s);
+ }
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, exynos_gem->pages, NULL,
- npages);
- if (ret < 0)
- goto err_free_large;
-
- exynos_gem->sgt = sgt;
+ exynos_gem = exynos_drm_gem_init(dev, attach->dmabuf->size);
+ if (IS_ERR(exynos_gem))
+ return ERR_CAST(exynos_gem);
- if (sgt->nents == 1) {
- /* always physically continuous memory if sgt->nents is 1. */
- exynos_gem->flags |= EXYNOS_BO_CONTIG;
- } else {
- /*
- * this case could be CONTIG or NONCONTIG type but for now
- * sets NONCONTIG.
- * TODO. we have to find a way that exporter can notify
- * the type of its own buffer to importer.
- */
+ /*
+ * Buffer has been mapped as contiguous into DMA address space,
+ * but if there is IOMMU, it can be either CONTIG or NONCONTIG.
+ * We assume a simplified logic below:
+ */
+ if (is_drm_iommu_supported(dev))
exynos_gem->flags |= EXYNOS_BO_NONCONTIG;
- }
+ else
+ exynos_gem->flags |= EXYNOS_BO_CONTIG;
+ exynos_gem->dma_addr = sg_dma_address(sgt->sgl);
+ exynos_gem->sgt = sgt;
return &exynos_gem->base;
-
-err_free_large:
- kvfree(exynos_gem->pages);
-err:
- drm_gem_object_release(&exynos_gem->base);
- kfree(exynos_gem);
- return ERR_PTR(ret);
}
void *exynos_drm_gem_prime_vmap(struct drm_gem_object *obj)
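[Editor's note] The net effect of the exynos_drm_gem.c rework: the driver stops shadowing the buffer with a struct page array and leans on the DMA API directly. The dma_alloc_attrs() cookie is the kernel mapping when one was requested (fbdev), and the PRIME sg table is regenerated on demand with dma_get_sgtable_attrs() from the same (cookie, dma_addr, size, attrs) tuple. A condensed sketch under those assumptions; the struct and helper are hypothetical:

    #include <linux/dma-mapping.h>

    struct example_buf {                /* hypothetical condensed version */
            void *cookie;
            void *kvaddr;
            dma_addr_t dma_addr;
            unsigned long dma_attrs;
            size_t size;
    };

    static int example_alloc(struct device *dev, struct example_buf *b,
                             bool need_kernel_map)
    {
            b->dma_attrs = DMA_ATTR_WRITE_COMBINE;
            if (!need_kernel_map)       /* only fbdev needs the mapping */
                    b->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

            b->cookie = dma_alloc_attrs(dev, b->size, &b->dma_addr,
                                        GFP_KERNEL, b->dma_attrs);
            if (!b->cookie)
                    return -ENOMEM;

            /* with NO_KERNEL_MAPPING the cookie is opaque, not a vaddr */
            if (need_kernel_map)
                    b->kvaddr = b->cookie;
            return 0;
    }

Export then builds the scatter-gather table from the same tuple: dma_get_sgtable_attrs(dev, sgt, b->cookie, b->dma_addr, b->size, b->dma_attrs), which is why dma_attrs is kept on the object.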
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 42ec67bc262d..7445748288da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -21,20 +21,15 @@
* @base: a gem object.
* - a new handle to this gem object would be created
* by drm_gem_handle_create().
- * @buffer: a pointer to exynos_drm_gem_buffer object.
- * - contain the information to memory region allocated
- * by user request or at framebuffer creation.
- * continuous memory region allocated by user request
- * or at framebuffer creation.
* @flags: indicate memory type to allocated buffer and cache attribute.
* @size: size requested from user, in bytes and this size is aligned
* in page unit.
* @cookie: cookie returned by dma_alloc_attrs
- * @kvaddr: kernel virtual address to allocated memory region.
+ * @kvaddr: kernel virtual address to allocated memory region (for fbdev)
* @dma_addr: bus address(accessed by dma) to allocated memory region.
* - this address could be physical address without IOMMU and
* device address with IOMMU.
- * @pages: Array of backing pages.
+ * @dma_attrs: attributes passed to the DMA mapping framework
* @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
@@ -48,7 +43,6 @@ struct exynos_drm_gem {
void __iomem *kvaddr;
dma_addr_t dma_addr;
unsigned long dma_attrs;
- struct page **pages;
struct sg_table *sgt;
};
@@ -58,7 +52,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem);
/* create a new buffer with gem object */
struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
- unsigned long size);
+ unsigned long size,
+ bool kvmap);
/*
* request gem object creation and buffer allocation as the size
@@ -86,7 +81,7 @@ struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
*/
static inline void exynos_drm_gem_put(struct exynos_drm_gem *exynos_gem)
{
- drm_gem_object_put_unlocked(&exynos_gem->base);
+ drm_gem_object_put(&exynos_gem->base);
}
/* get buffer information to memory region allocated by gem. */
@@ -101,9 +96,6 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-vm_fault_t exynos_drm_gem_fault(struct vm_fault *vmf);
-
/* set vm_flags and we can change the vm attribute to other one at here. */
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index f41d75923557..a86abc173605 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -88,7 +88,7 @@
#define MIC_BS_SIZE_2D(x) ((x) & 0x3fff)
-static char *clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
+static const char *const clk_names[] = { "pclk_mic0", "sclk_rgb_vclk_to_mic0" };
#define NUM_CLKS ARRAY_SIZE(clk_names)
static DEFINE_MUTEX(mic_mutex);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index dafa87b82052..2d94afba031e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -293,10 +293,8 @@ static int rotator_probe(struct platform_device *pdev)
return PTR_ERR(rot->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, rotator_irq_handler, 0, dev_name(dev),
rot);
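[Editor's note] The dev_err() deletions here, and in the scaler hunk below, are safe because platform_get_irq() has printed its own error message for some kernel releases now, so per-driver messages only duplicate the log line:

    irq = platform_get_irq(pdev, 0);
    if (irq < 0)
            return irq;  /* platform_get_irq() already logged the failure */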
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 93c43c8d914e..ce1857138f89 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -502,10 +502,8 @@ static int scaler_probe(struct platform_device *pdev)
return PTR_ERR(scaler->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to get irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, NULL, scaler_irq_handler,
IRQF_ONESHOT, "drm_scaler", scaler);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index b320b3a21ad4..e5662bdcbbde 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include <drm/exynos_drm.h>
@@ -213,6 +214,12 @@ static ssize_t vidi_store_connection(struct device *dev,
static DEVICE_ATTR(connection, 0644, vidi_show_connection,
vidi_store_connection);
+static struct attribute *vidi_attrs[] = {
+ &dev_attr_connection.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vidi);
+
int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file_priv)
{
@@ -369,10 +376,6 @@ static const struct drm_encoder_helper_funcs exynos_vidi_encoder_helper_funcs =
.disable = exynos_vidi_disable,
};
-static const struct drm_encoder_funcs exynos_vidi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vidi_bind(struct device *dev, struct device *master, void *data)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -406,8 +409,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(ctx->crtc);
}
- drm_encoder_init(drm_dev, encoder, &exynos_vidi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_vidi_encoder_helper_funcs);
@@ -443,7 +445,6 @@ static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
struct device *dev = &pdev->dev;
- int ret;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -457,23 +458,7 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(dev, &dev_attr_connection);
- if (ret < 0) {
- DRM_DEV_ERROR(dev,
- "failed to create connection sysfs.\n");
- return ret;
- }
-
- ret = component_add(dev, &vidi_component_ops);
- if (ret)
- goto err_remove_file;
-
- return ret;
-
-err_remove_file:
- device_remove_file(dev, &dev_attr_connection);
-
- return ret;
+ return component_add(dev, &vidi_component_ops);
}
static int vidi_remove(struct platform_device *pdev)
@@ -498,5 +483,6 @@ struct platform_driver vidi_driver = {
.driver = {
.name = "exynos-drm-vidi",
.owner = THIS_MODULE,
+ .dev_groups = vidi_groups,
},
};
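[Editor's note] The vidi conversion trades open-coded sysfs management for driver-core ownership: ATTRIBUTE_GROUPS(vidi) generates the vidi_groups array, and pointing .dev_groups at it makes the core create the attributes before probe and remove them after remove, which is why the manual device_create_file()/device_remove_file() error handling disappears. Condensed from the hunks above:

    static struct attribute *vidi_attrs[] = {
            &dev_attr_connection.attr,
            NULL,
    };
    ATTRIBUTE_GROUPS(vidi);                 /* emits vidi_groups */

    struct platform_driver vidi_driver = {
            .probe  = vidi_probe,
            .remove = vidi_remove,
            .driver = {
                    .name       = "exynos-drm-vidi",
                    .dev_groups = vidi_groups, /* managed by the core */
            },
    };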
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 1a7c828fc41d..8c3f5b21eff4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -38,6 +38,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "exynos_drm_crtc.h"
#include "regs-hdmi.h"
@@ -920,7 +921,8 @@ static int hdmi_mode_valid(struct drm_connector *connector,
DRM_DEV_DEBUG_KMS(hdata->dev,
"xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ mode->hdisplay, mode->vdisplay,
+ drm_mode_vrefresh(mode),
(mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
false, mode->clock * 1000);
@@ -1019,7 +1021,7 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
DRM_DEV_DEBUG_KMS(dev->dev,
"Adjusted Mode: [%d]x[%d] [%d]Hz\n",
m->hdisplay, m->vdisplay,
- m->vrefresh);
+ drm_mode_vrefresh(m));
drm_mode_copy(adjusted_mode, m);
break;
@@ -1559,10 +1561,6 @@ static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs =
.disable = hdmi_disable,
};
-static const struct drm_encoder_funcs exynos_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void hdmi_audio_shutdown(struct device *dev, void *data)
{
struct hdmi_context *hdata = dev_get_drvdata(dev);
@@ -1843,8 +1841,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdata->phy_clk.enable = hdmiphy_clk_enable;
- drm_encoder_init(drm_dev, encoder, &exynos_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &exynos_hdmi_encoder_helper_funcs);
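[Editor's note] mode->vrefresh was removed from struct drm_display_mode in this window; drivers now derive the refresh rate from the timings via drm_mode_vrefresh(). To first order the arithmetic is the following (a sketch; the real helper handles a few more corner cases):

    #include <linux/kernel.h>

    /* refresh [Hz] ~= pixel clock [Hz] / (htotal * vtotal) */
    static int vrefresh_from_timings(const struct drm_display_mode *m)
    {
            unsigned int num = m->clock * 1000;   /* m->clock is in kHz */
            unsigned int den = m->htotal * m->vtotal;

            if (m->flags & DRM_MODE_FLAG_INTERLACE)
                    num *= 2;                     /* two fields per frame */
            if (m->flags & DRM_MODE_FLAG_DBLSCAN)
                    den *= 2;
            if (m->vscan > 1)
                    den *= m->vscan;

            return DIV_ROUND_CLOSEST(num, den);
    }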
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 21b726baedea..af192e5a16ef 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1046,7 +1046,7 @@ static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
u32 w = mode->hdisplay, h = mode->vdisplay;
DRM_DEV_DEBUG_KMS(ctx->dev, "xres=%d, yres=%d, refresh=%d, intl=%d\n",
- w, h, mode->vrefresh,
+ w, h, drm_mode_vrefresh(mode),
!!(mode->flags & DRM_MODE_FLAG_INTERLACE));
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
@@ -1244,9 +1244,11 @@ static int mixer_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
+ pm_runtime_enable(dev);
+
ret = component_add(&pdev->dev, &mixer_component_ops);
- if (!ret)
- pm_runtime_enable(dev);
+ if (ret)
+ pm_runtime_disable(dev);
return ret;
}
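[Editor's note] The mixer probe fix is about ordering: component_add() can bind the component immediately, and the bind callback may call pm_runtime_get_sync(), so runtime PM must already be enabled at that point; on failure the probe must disable it again itself. Condensed, with the reasoning as comments:

    pm_runtime_enable(dev);                 /* before bind can run */

    ret = component_add(&pdev->dev, &mixer_component_ops);
    if (ret)
            pm_runtime_disable(dev);        /* unwind: probe is failing */

    return ret;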
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index f15d2e7967a3..abbc1ddbf27f 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -141,16 +141,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
.irq_handler = fsl_dcu_drm_irq,
.irq_preinstall = fsl_dcu_irq_uninstall,
.irq_uninstall = fsl_dcu_irq_uninstall,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
- .dumb_create = drm_gem_cma_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &fsl_dcu_drm_fops,
.name = "fsl-dcu-drm",
.desc = "Freescale DCU DRM",
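[Editor's note] Nine hand-written callbacks collapse into one macro here. DRM_GEM_CMA_DRIVER_OPS (from <drm/drm_gem_cma_helper.h>) fills a struct drm_driver's GEM and PRIME hooks with the CMA helper implementations; the exact field list belongs to the helper, so treat this usage sketch as illustrative, and the fops/name fields as hypothetical:

    #include <drm/drm_gem_cma_helper.h>

    static struct drm_driver example_driver = {
            .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
            DRM_GEM_CMA_DRIVER_OPS,   /* dumb_create + PRIME plumbing
                                       * backed by CMA buffers */
            .fops = &example_fops,    /* hypothetical DEFINE_DRM_GEM_CMA_FOPS */
            .name = "example",
            .desc = "example CMA-backed driver",
    };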
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index cff344367f81..9b0c4736c21a 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -13,19 +13,11 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "fsl_dcu_drm_drv.h"
#include "fsl_tcon.h"
-static void fsl_dcu_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = fsl_dcu_drm_encoder_destroy,
-};
-
int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
struct drm_crtc *crtc)
{
@@ -38,8 +30,8 @@ int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
- ret = drm_encoder_init(fsl_dev->drm, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(fsl_dev->drm, encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 29c36d63b20e..88535f5aacc5 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -28,6 +28,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
@@ -237,15 +239,6 @@ static const struct drm_connector_helper_funcs
.best_encoder = gma_best_encoder,
};
-static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_crt_enc_funcs = {
- .destroy = cdv_intel_crt_enc_destroy,
-};
-
void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
@@ -271,8 +264,7 @@ void cdv_intel_crt_init(struct drm_device *dev,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
encoder = &gma_encoder->base;
- drm_encoder_init(dev, encoder,
- &cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
gma_connector_attach_encoder(gma_connector, gma_encoder);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 5772b2dce0d6..f41cbb753bb4 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -32,6 +32,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "gma_display.h"
#include "psb_drv.h"
@@ -1271,37 +1272,8 @@ cdv_intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZ
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
-
-#if 0
-static char *voltage_names[] = {
- "0.4V", "0.6V", "0.8V", "1.2V"
-};
-static char *pre_emph_names[] = {
- "0dB", "3.5dB", "6dB", "9.5dB"
-};
-static char *link_train_names[] = {
- "pattern 1", "pattern 2", "idle", "off"
-};
-#endif
-
#define CDV_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_LEVEL_3
-/*
-static uint8_t
-cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
-{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
- }
-}
-*/
+
static void
cdv_intel_get_adjust_train(struct gma_encoder *encoder)
{
@@ -1908,11 +1880,6 @@ cdv_intel_dp_destroy(struct drm_connector *connector)
kfree(connector);
}
-static void cdv_intel_dp_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static const struct drm_encoder_helper_funcs cdv_intel_dp_helper_funcs = {
.dpms = cdv_intel_dp_dpms,
.mode_fixup = cdv_intel_dp_mode_fixup,
@@ -1935,11 +1902,6 @@ static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_fun
.best_encoder = gma_best_encoder,
};
-static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
- .destroy = cdv_intel_dp_encoder_destroy,
-};
-
-
static void cdv_intel_dp_add_properties(struct drm_connector *connector)
{
cdv_intel_attach_force_audio_property(connector);
@@ -2016,8 +1978,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
encoder = &gma_encoder->base;
drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
- drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
@@ -2120,7 +2081,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
if (ret == 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
- cdv_intel_dp_encoder_destroy(encoder);
+ drm_encoder_cleanup(encoder);
cdv_intel_dp_destroy(connector);
goto err_priv;
} else {
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 1711a41acc16..0d12c6ffbc40 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -32,6 +32,7 @@
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_simple_kms_helper.h>
#include "cdv_device.h"
#include "psb_drv.h"
@@ -311,8 +312,7 @@ void cdv_hdmi_init(struct drm_device *dev,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_HDMI;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index ea0a5d9a0acc..eaaf4efec217 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -12,6 +12,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "cdv_device.h"
#include "intel_bios.h"
#include "power.h"
@@ -72,89 +74,6 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
return retval;
}
-#if 0
-/*
- * Set LVDS backlight level by I2C command
- */
-static int cdv_lvds_i2c_set_brightness(struct drm_device *dev,
- unsigned int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
- u8 out_buf[2];
- unsigned int blc_i2c_brightness;
-
- struct i2c_msg msgs[] = {
- {
- .addr = lvds_i2c_bus->slave_addr,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- blc_i2c_brightness = BRIGHTNESS_MASK & ((unsigned int)level *
- BRIGHTNESS_MASK /
- BRIGHTNESS_MAX_LEVEL);
-
- if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
- blc_i2c_brightness = BRIGHTNESS_MASK - blc_i2c_brightness;
-
- out_buf[0] = dev_priv->lvds_bl->brightnesscmd;
- out_buf[1] = (u8)blc_i2c_brightness;
-
- if (i2c_transfer(&lvds_i2c_bus->adapter, msgs, 1) == 1)
- return 0;
-
- DRM_ERROR("I2C transfer error\n");
- return -1;
-}
-
-
-static int cdv_lvds_pwm_set_brightness(struct drm_device *dev, int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- u32 max_pwm_blc;
- u32 blc_pwm_duty_cycle;
-
- max_pwm_blc = cdv_intel_lvds_get_max_backlight(dev);
-
- /*BLC_PWM_CTL Should be initiated while backlight device init*/
- BUG_ON((max_pwm_blc & PSB_BLC_MAX_PWM_REG_FREQ) == 0);
-
- blc_pwm_duty_cycle = level * max_pwm_blc / BRIGHTNESS_MAX_LEVEL;
-
- if (dev_priv->lvds_bl->pol == BLC_POLARITY_INVERSE)
- blc_pwm_duty_cycle = max_pwm_blc - blc_pwm_duty_cycle;
-
- blc_pwm_duty_cycle &= PSB_BACKLIGHT_PWM_POLARITY_BIT_CLEAR;
- REG_WRITE(BLC_PWM_CTL,
- (max_pwm_blc << PSB_BACKLIGHT_PWM_CTL_SHIFT) |
- (blc_pwm_duty_cycle));
-
- return 0;
-}
-
-/*
- * Set LVDS backlight level either by I2C or PWM
- */
-void cdv_intel_lvds_set_brightness(struct drm_device *dev, int level)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->lvds_bl) {
- DRM_ERROR("NO LVDS Backlight Info\n");
- return;
- }
-
- if (dev_priv->lvds_bl->type == BLC_I2C_TYPE)
- cdv_lvds_i2c_set_brightness(dev, level);
- else
- cdv_lvds_pwm_set_brightness(dev, level);
-}
-#endif
-
/**
* Sets the backlight level.
*
@@ -499,16 +418,6 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
.destroy = cdv_intel_lvds_destroy,
};
-
-static void cdv_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
- .destroy = cdv_intel_lvds_enc_destroy,
-};
-
/*
* Enumerate the child dev array parsed from VBT to check whether
* the LVDS is present.
@@ -616,10 +525,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
&cdv_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder,
- &cdv_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
-
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1d8f67e4795a..da02d7e8a8f5 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -491,7 +491,7 @@ static int psb_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(fb);
if (fb->obj[0])
- drm_gem_object_put_unlocked(fb->obj[0]);
+ drm_gem_object_put(fb->obj[0]);
kfree(fb);
return 0;
@@ -577,31 +577,31 @@ static void psb_setup_outputs(struct drm_device *dev)
break;
case INTEL_OUTPUT_SDVO:
crtc_mask = dev_priv->ops->sdvo_mask;
- clone_mask = (1 << INTEL_OUTPUT_SDVO);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_LVDS:
- crtc_mask = dev_priv->ops->lvds_mask;
- clone_mask = (1 << INTEL_OUTPUT_LVDS);
+ crtc_mask = dev_priv->ops->lvds_mask;
+ clone_mask = 0;
break;
case INTEL_OUTPUT_MIPI:
crtc_mask = (1 << 0);
- clone_mask = (1 << INTEL_OUTPUT_MIPI);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_MIPI2:
crtc_mask = (1 << 2);
- clone_mask = (1 << INTEL_OUTPUT_MIPI2);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_HDMI:
- crtc_mask = dev_priv->ops->hdmi_mask;
+ crtc_mask = dev_priv->ops->hdmi_mask;
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
case INTEL_OUTPUT_DISPLAYPORT:
crtc_mask = (1 << 0) | (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_DISPLAYPORT);
+ clone_mask = 0;
break;
case INTEL_OUTPUT_EDP:
crtc_mask = (1 << 1);
- clone_mask = (1 << INTEL_OUTPUT_EDP);
+ clone_mask = 0;
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 83ee86f70b89..f9c4b1d76f56 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -82,7 +82,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
return ret;
}
/* We have the initial and handle reference but need only one now */
- drm_gem_object_put_unlocked(&r->gem);
+ drm_gem_object_put(&r->gem);
*handlep = handle;
return 0;
}
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index 17f136985d21..3df6d6e850f5 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -351,7 +351,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
gt = container_of(gma_crtc->cursor_obj,
struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put(gma_crtc->cursor_obj);
gma_crtc->cursor_obj = NULL;
}
return 0;
@@ -427,7 +427,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
if (gma_crtc->cursor_obj) {
gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
psb_gtt_unpin(gt);
- drm_gem_object_put_unlocked(gma_crtc->cursor_obj);
+ drm_gem_object_put(gma_crtc->cursor_obj);
}
gma_crtc->cursor_obj = obj;
@@ -435,7 +435,7 @@ unlock:
return ret;
unref_cursor:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
index d4c65f268922..c976a9dd9240 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c
@@ -27,6 +27,8 @@
#include <linux/delay.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_pkg_sender.h"
#include "mdfld_output.h"
@@ -993,10 +995,7 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/*create drm encoder object*/
connector = &dsi_connector->base.base;
encoder = &dpi_output->base.base.base;
- drm_encoder_init(dev,
- encoder,
- p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(encoder,
p_funcs->encoder_helper_funcs);
@@ -1006,10 +1005,10 @@ struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
/*set possible crtcs and clones*/
if (dsi_connector->pipe) {
encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = (1 << 1);
+ encoder->possible_clones = 0;
} else {
encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = (1 << 0);
+ encoder->possible_clones = 0;
}
dsi_connector->base.encoder = &dpi_output->base.base;
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 4fff110c4921..aae2d358364c 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -658,16 +658,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
dev_dbg(dev->dev, "pipe = 0x%x\n", pipe);
-#if 0
- if (pipe == 1) {
- if (!gma_power_begin(dev, true))
- return 0;
- android_hdmi_crtc_mode_set(crtc, mode, adjusted_mode,
- x, y, old_fb);
- goto mrst_crtc_mode_set_exit;
- }
-#endif
-
ret = check_fb(crtc->primary->fb);
if (ret)
return ret;
@@ -918,14 +908,6 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
}
dpll = 0;
-#if 0 /* FIXME revisit later */
- if (ksel == KSEL_CRYSTAL_19 || ksel == KSEL_BYPASS_19 ||
- ksel == KSEL_BYPASS_25)
- dpll &= ~MDFLD_INPUT_REF_SEL;
- else if (ksel == KSEL_BYPASS_83_100)
- dpll |= MDFLD_INPUT_REF_SEL;
-#endif /* FIXME revisit later */
-
if (is_hdmi)
dpll |= MDFLD_VCO_SEL;
@@ -935,20 +917,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 2)) << 17;
-#if 0 /* 1080p30 & 720p */
- dpll = 0x00050000;
- fp = 0x000001be;
-#endif
-#if 0 /* 480p */
- dpll = 0x02010000;
- fp = 0x000000d2;
-#endif
} else {
-#if 0 /*DBI_TPO_480x864*/
- dpll = 0x00020000;
- fp = 0x00000156;
-#endif /* DBI_TPO_480x864 */ /* get from spec. */
-
dpll = 0x00800000;
fp = 0x000000c1;
}
diff --git a/drivers/gpu/drm/gma500/mdfld_output.h b/drivers/gpu/drm/gma500/mdfld_output.h
index ab2b27c0f037..17a944d70add 100644
--- a/drivers/gpu/drm/gma500/mdfld_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_output.h
@@ -51,7 +51,6 @@ struct panel_info {
};
struct panel_funcs {
- const struct drm_encoder_funcs *encoder_funcs;
const struct drm_encoder_helper_funcs *encoder_helper_funcs;
struct drm_display_mode * (*get_config_mode)(struct drm_device *);
int (*get_panel_info)(struct drm_device *, int, struct panel_info *);
diff --git a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
index 49c92debb7b2..25e897b98f86 100644
--- a/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tmd_vid.c
@@ -188,13 +188,7 @@ static const struct drm_encoder_helper_funcs
.commit = mdfld_dsi_dpi_commit,
};
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tmd_vid_funcs = {
- .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
.get_config_mode = &tmd_vid_get_config_mode,
.get_panel_info = tmd_vid_get_panel_info,
diff --git a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
index a9420bf9a419..11845978fb0a 100644
--- a/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
+++ b/drivers/gpu/drm/gma500/mdfld_tpo_vid.c
@@ -76,13 +76,7 @@ static const struct drm_encoder_helper_funcs
.commit = mdfld_dsi_dpi_commit,
};
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tpo_vid_funcs = {
- .encoder_funcs = &mdfld_tpo_dpi_encoder_funcs,
.encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs,
.get_config_mode = &tpo_vid_get_config_mode,
.get_panel_info = tpo_vid_get_panel_info,
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f4370232767d..a097a59a9eae 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -27,6 +27,7 @@
#include <linux/delay.h>
#include <drm/drm.h>
+#include <drm/drm_simple_kms_helper.h>
#include "psb_drv.h"
#include "psb_intel_drv.h"
@@ -620,15 +621,6 @@ static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
.destroy = oaktrail_hdmi_destroy,
};
-static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
- .destroy = oaktrail_hdmi_enc_destroy,
-};
-
void oaktrail_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
@@ -651,9 +643,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
&oaktrail_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, encoder,
- &oaktrail_hdmi_enc_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_TMDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
@@ -673,11 +663,6 @@ failed_connector:
kfree(gma_encoder);
}
-static const struct pci_device_id hdmi_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080d) },
- { 0 }
-};
-
void oaktrail_hdmi_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 582e09597500..2828360153d1 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -13,6 +13,8 @@
#include <asm/intel-mid.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
@@ -311,8 +313,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 2411eb9827b8..34b4aae9a15e 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -507,7 +507,7 @@ static struct drm_driver driver = {
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .gem_free_object = psb_gem_free_object,
+ .gem_free_object_unlocked = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 16c6136f778b..3dd5718c3e31 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -56,25 +56,6 @@
#define INTEL_OUTPUT_DISPLAYPORT 9
#define INTEL_OUTPUT_EDP 10
-#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
-#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
-
-static inline void
-psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
- int multiplier)
-{
- mode->clock *= multiplier;
- mode->private_flags |= multiplier;
-}
-
-static inline int
-psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
-{
- return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
- >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
-}
-
-
/*
* Hold information usually put on the device driver privates here,
* since it needs to be shared across multiple of devices drivers privates.
@@ -252,7 +233,6 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t value);
extern void psb_intel_lvds_destroy(struct drm_connector *connector);
-extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
/* intel_gmbus.c */
extern void gma_intel_i2c_reset(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index afaebab7bc17..063c66bb946d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -11,6 +11,8 @@
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_simple_kms_helper.h>
+
#include "intel_bios.h"
#include "power.h"
#include "psb_drv.h"
@@ -621,18 +623,6 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
.destroy = psb_intel_lvds_destroy,
};
-
-static void psb_intel_lvds_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
- .destroy = psb_intel_lvds_enc_destroy,
-};
-
-
-
/**
* psb_intel_lvds_init - setup LVDS connectors on this device
* @dev: drm device
@@ -683,9 +673,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, encoder,
- &psb_intel_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
gma_connector_attach_encoder(gma_connector, gma_encoder);
gma_encoder->type = INTEL_OUTPUT_LVDS;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 264d7ad004b4..06e44f47e73e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -132,6 +132,8 @@ struct psb_intel_sdvo {
/* DDC bus used by this SDVO encoder */
uint8_t ddc_bus;
+ u8 pixel_multiplier;
+
/* Input timings for adjusted_mode */
struct psb_intel_sdvo_dtd input_dtd;
@@ -864,36 +866,6 @@ static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sd
DRM_INFO("HDMI is not supported yet");
return false;
-#if 0
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
- uint8_t set_buf_index[2] = { 1, 0 };
- uint64_t *data = (uint64_t *)&avi_if;
- unsigned i;
-
- intel_dip_infoframe_csum(&avi_if);
-
- if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_INDEX,
- set_buf_index, 2))
- return false;
-
- for (i = 0; i < sizeof(avi_if); i += 8) {
- if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_DATA,
- data, 8))
- return false;
- data++;
- }
-
- return psb_intel_sdvo_set_value(psb_intel_sdvo,
- SDVO_CMD_SET_HBUF_TXRATE,
- &tx_rate, 1);
-#endif
}
static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
@@ -958,7 +930,6 @@ static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
- int multiplier;
/* We need to construct preferred input timings based on our
* output timings. To do that, we have to set the output
@@ -985,8 +956,9 @@ static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
/* Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
*/
- multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
- psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+ psb_intel_sdvo->pixel_multiplier =
+ psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ adjusted_mode->clock *= psb_intel_sdvo->pixel_multiplier;
return true;
}
@@ -1002,7 +974,6 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
u32 sdvox;
struct psb_intel_sdvo_in_out_map in_out;
struct psb_intel_sdvo_dtd input_dtd;
- int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
int rate;
int need_aux = IS_MRST(dev) ? 1 : 0;
@@ -1060,7 +1031,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
(void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
- switch (pixel_multiplier) {
+ switch (psb_intel_sdvo->pixel_multiplier) {
default:
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1227,75 +1198,6 @@ static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdv
return true;
}
-/* No use! */
-#if 0
-struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct psb_intel_sdvo *iout = NULL;
- struct psb_intel_sdvo *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- iout = to_psb_intel_sdvo(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
-
- sdvo = iout->dev_priv;
-
- if (sdvo->sdvo_reg == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->sdvo_reg == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
-}
-
-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_sdvo *psb_intel_sdvo;
- DRM_DEBUG_KMS("\n");
-
- if (!connector)
- return 0;
-
- psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
- return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- &response, 2) && response[0];
-}
-
-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
- if (on) {
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
- status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
- }
-
- psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
- psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
-}
-#endif
-
static bool
psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
{
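[Editor's note] The SDVO rework exists because mode->private_flags was going away in the core: instead of smuggling the pixel multiplier through the mode, the encoder computes it in .mode_fixup, stores it in its own state, and reads it back in .mode_set. A hedged sketch of the state flow; the struct is condensed and example_get_pixel_multiplier() is hypothetical:

    struct example_sdvo {               /* condensed; the real one is larger */
            struct drm_encoder base;
            u8 pixel_multiplier;
    };

    static bool example_mode_fixup(struct example_sdvo *sdvo,
                                   struct drm_display_mode *adjusted_mode)
    {
            sdvo->pixel_multiplier =
                    example_get_pixel_multiplier(adjusted_mode);
            /* the CRTC must program the multiplied dot clock */
            adjusted_mode->clock *= sdvo->pixel_multiplier;
            return true;
    }

    static void example_mode_set(struct example_sdvo *sdvo)
    {
            switch (sdvo->pixel_multiplier) {  /* read back, no mode bits */
            case 1: /* SDVO_CLOCK_RATE_MULT_1X */ break;
            case 2: /* SDVO_CLOCK_RATE_MULT_2X */ break;
            case 4: /* SDVO_CLOCK_RATE_MULT_4X */ break;
            }
    }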
diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
index 9e8224456ea2..e5bdd99ad453 100644
--- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
+++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c
@@ -747,11 +747,11 @@ static int cmi_lcd_hack_create_device(void)
return -EINVAL;
}
- client = i2c_new_device(adapter, &info);
- if (!client) {
- pr_err("%s: i2c_new_device() failed\n", __func__);
+ client = i2c_new_client_device(adapter, &info);
+ if (IS_ERR(client)) {
+ pr_err("%s: creating I2C device failed\n", __func__);
i2c_put_adapter(adapter);
- return -EINVAL;
+ return PTR_ERR(client);
}
return 0;
@@ -765,12 +765,7 @@ static const struct drm_encoder_helper_funcs tc35876x_encoder_helper_funcs = {
.commit = mdfld_dsi_dpi_commit,
};
-static const struct drm_encoder_funcs tc35876x_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
const struct panel_funcs mdfld_tc35876x_funcs = {
- .encoder_funcs = &tc35876x_encoder_funcs,
.encoder_helper_funcs = &tc35876x_encoder_helper_funcs,
.get_config_mode = tc35876x_get_config_mode,
.get_panel_info = tc35876x_get_panel_info,
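[Editor's note] i2c_new_device() was removed in favor of i2c_new_client_device(), which changes the error convention from NULL to ERR_PTR so the real errno can propagate. The caller-side difference, condensed from the hunk above:

    client = i2c_new_client_device(adapter, &info);
    if (IS_ERR(client)) {            /* never NULL; ERR_PTR on failure */
            i2c_put_adapter(adapter);
            return PTR_ERR(client);  /* forward the real error code */
    }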
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 55b46a7150a5..cc70e836522f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -94,6 +94,10 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (state->fb->pitches[0] % 128 != 0) {
+ DRM_DEBUG_ATOMIC("stride is not 128-byte aligned\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -119,11 +123,8 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
reg = state->fb->width * (state->fb->format->cpp[0]);
- /* now line_pad is 16 */
- reg = PADDING(16, reg);
- line_l = state->fb->width * state->fb->format->cpp[0];
- line_l = PADDING(16, line_l);
+ line_l = state->fb->pitches[0];
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
priv->mmio + HIBMC_CRT_FB_WIDTH);
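
Both hunks above hang the stride handling off fb->pitches[0]: atomic check now rejects framebuffers whose pitch misses the hardware alignment, and atomic update programs the pitch directly instead of recomputing padding. A sketch of the check, assuming the 128-byte scanout alignment this driver enforces (hypothetical helper name):

#include <linux/errno.h>
#include <drm/drm_framebuffer.h>

#define EXAMPLE_PITCH_ALIGN 128 /* bytes; assumed hw scanout requirement */

static int example_check_pitch(const struct drm_framebuffer *fb)
{
        /* pitches[0] is the byte stride of the first plane */
        if (fb->pitches[0] % EXAMPLE_PITCH_ALIGN != 0)
                return -EINVAL;

        return 0;
}
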
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 222356a4f9a8..a6fd0c29e5b8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -94,7 +94,7 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
- priv->dev->mode_config.preferred_depth = 24;
+ priv->dev->mode_config.preferred_depth = 32;
priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -307,11 +307,7 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- ret = drm_fbdev_generic_setup(dev, 16);
- if (ret) {
- DRM_ERROR("failed to initialize fbdev: %d\n", ret);
- goto err;
- }
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
return 0;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 99397ac3b363..322bd542e89d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -50,7 +50,7 @@ void hibmc_mm_fini(struct hibmc_drm_private *hibmc)
int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- return drm_gem_vram_fill_create_dumb(file, dev, 0, 16, args);
+ return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args);
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index f31068d74b18..00e87c290796 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -20,11 +20,11 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
-#include <drm/drm_encoder_slave.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dw_dsi_reg.h"
@@ -696,10 +696,6 @@ static const struct drm_encoder_helper_funcs dw_encoder_helper_funcs = {
.disable = dsi_encoder_disable
};
-static const struct drm_encoder_funcs dw_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int dw_drm_encoder_init(struct device *dev,
struct drm_device *drm_dev,
struct drm_encoder *encoder)
@@ -713,8 +709,7 @@ static int dw_drm_encoder_init(struct device *dev,
}
encoder->possible_crtcs = crtc_mask;
- ret = drm_encoder_init(drm_dev, encoder, &dw_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("failed to init dsi encoder\n");
return ret;
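
This is the common drm_simple_encoder_init() conversion: for encoders whose funcs table contained nothing but .destroy = drm_encoder_cleanup, the helper supplies that table itself. A sketch of the converted init path, with a hypothetical example_ prefix:

#include <drm/drm_encoder.h>
#include <drm/drm_simple_kms_helper.h>

static int example_encoder_init(struct drm_device *drm,
                                struct drm_encoder *encoder,
                                u32 crtc_mask)
{
        encoder->possible_crtcs = crtc_mask;

        /* No driver-private drm_encoder_funcs table needed any more. */
        return drm_simple_encoder_init(drm, encoder,
                                       DRM_MODE_ENCODER_DSI);
}
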
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 86000127d4ee..e1108c1735ad 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -921,17 +921,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ade_fops);
static struct drm_driver ade_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ade_fops,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create_internal,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
-
+ DRM_GEM_CMA_DRIVER_OPS,
.name = "kirin",
.desc = "Hisilicon Kirin620 SoC DRM Driver",
.date = "20150718",
@@ -940,7 +930,6 @@ static struct drm_driver ade_driver = {
};
struct kirin_drm_data ade_driver_data = {
- .register_connects = false,
.num_planes = ADE_CH_NUM,
.prim_plane = ADE_CH1,
.channel_formats = channel_formats,
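
DRM_GEM_CMA_DRIVER_OPS bundles the GEM object, dumb-buffer and PRIME callbacks for CMA-backed drivers into one macro — exactly the nine lines hand-listed and removed above. A sketch of a driver struct using it (hypothetical names):

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

DEFINE_DRM_GEM_CMA_FOPS(example_fops);

static struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops = &example_fops,
        DRM_GEM_CMA_DRIVER_OPS, /* expands to the CMA GEM/PRIME hooks */
        .name  = "example",
        .desc  = "Hypothetical CMA-backed DRM driver",
        .date  = "20200625",
        .major = 1,
        .minor = 0,
};
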
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index d3145ae877d7..4349da3e2379 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -219,40 +219,6 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev)
return 0;
}
-static int kirin_drm_connectors_register(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct drm_connector *failed_connector;
- struct drm_connector_list_iter conn_iter;
- int ret;
-
- mutex_lock(&dev->mode_config.mutex);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- ret = drm_connector_register(connector);
- if (ret) {
- failed_connector = connector;
- goto err;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
-
-err:
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (failed_connector == connector)
- break;
- drm_connector_unregister(connector);
- }
- drm_connector_list_iter_end(&conn_iter);
- mutex_unlock(&dev->mode_config.mutex);
-
- return ret;
-}
-
static int kirin_drm_bind(struct device *dev)
{
struct kirin_drm_data *driver_data;
@@ -279,17 +245,8 @@ static int kirin_drm_bind(struct device *dev)
drm_fbdev_generic_setup(drm_dev, 32);
- /* connectors should be registered after drm device register */
- if (driver_data->register_connects) {
- ret = kirin_drm_connectors_register(drm_dev);
- if (ret)
- goto err_drm_dev_unregister;
- }
-
return 0;
-err_drm_dev_unregister:
- drm_dev_unregister(drm_dev);
err_kms_cleanup:
kirin_drm_kms_cleanup(drm_dev);
err_drm_dev_put:
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
index 4d5c05a24065..dee8ec2f7f2e 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h
@@ -37,7 +37,6 @@ struct kirin_drm_data {
u32 channel_formats_cnt;
int config_max_width;
int config_max_height;
- bool register_connects;
u32 num_planes;
u32 prim_plane;
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index bb5f67f10edb..6afe6d0ee630 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -121,7 +121,6 @@ const struct ch7006_tv_norm_info ch7006_tv_norms[] = {
.vscan = 0, \
.flags = DRM_MODE_FLAG_##hsynp##HSYNC | \
DRM_MODE_FLAG_##vsynp##VSYNC, \
- .vrefresh = 0, \
}, \
.enc_hdisp = e_hd, \
.enc_vdisp = e_vd, \
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
index a839f78a4c8a..741886b54419 100644
--- a/drivers/gpu/drm/i2c/sil164_drv.c
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -393,7 +393,7 @@ sil164_detect_slave(struct i2c_client *client)
return NULL;
}
- return i2c_new_device(adap, &info);
+ return i2c_new_client_device(adap, &info);
}
static int
@@ -402,6 +402,7 @@ sil164_encoder_init(struct i2c_client *client,
struct drm_encoder_slave *encoder)
{
struct sil164_priv *priv;
+ struct i2c_client *slave_client;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -410,7 +411,9 @@ sil164_encoder_init(struct i2c_client *client,
encoder->slave_priv = priv;
encoder->slave_funcs = &sil164_encoder_funcs;
- priv->duallink_slave = sil164_detect_slave(client);
+ slave_client = sil164_detect_slave(client);
+ if (!IS_ERR(slave_client))
+ priv->duallink_slave = slave_client;
return 0;
}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index c3332209f27a..9517f522dcb9 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -19,6 +19,7 @@
#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/i2c/tda998x.h>
#include <media/cec-notifier.h>
@@ -1132,7 +1133,8 @@ static void tda998x_audio_shutdown(struct device *dev, void *data)
mutex_unlock(&priv->audio_mutex);
}
-int tda998x_audio_digital_mute(struct device *dev, void *data, bool enable)
+static int tda998x_audio_digital_mute(struct device *dev, void *data,
+ bool enable)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -1949,9 +1951,9 @@ static int tda998x_create(struct device *dev)
cec_info.platform_data = &priv->cec_glue;
cec_info.irq = client->irq;
- priv->cec = i2c_new_device(client->adapter, &cec_info);
- if (!priv->cec) {
- ret = -ENODEV;
+ priv->cec = i2c_new_client_device(client->adapter, &cec_info);
+ if (IS_ERR(priv->cec)) {
+ ret = PTR_ERR(priv->cec);
goto fail;
}
@@ -1997,15 +1999,6 @@ err_irq:
/* DRM encoder functions */
-static void tda998x_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs tda998x_encoder_funcs = {
- .destroy = tda998x_encoder_destroy,
-};
-
static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
@@ -2023,8 +2016,8 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
priv->encoder.possible_crtcs = crtcs;
- ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm, &priv->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret)
goto err_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 77681356505b..a11bb675f9b3 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -8899,8 +8899,6 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
mode->clock = pipe_config->hw.adjusted_mode.crtc_clock;
- mode->hsync = drm_mode_hsync(mode);
- mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index cfe2517e0088..d1cb48b3f462 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -632,15 +632,9 @@ static void intel_dp_info(struct seq_file *m,
}
static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
+ struct intel_connector *intel_connector)
{
- struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(intel_encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
+ bool has_audio = intel_connector->port->has_audio;
seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
}
@@ -1105,10 +1099,10 @@ static void drrs_status_per_crtc(struct seq_file *m,
seq_puts(m, "\n\t\t");
if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = panel->fixed_mode->vrefresh;
+ vrefresh = drm_mode_vrefresh(panel->fixed_mode);
} else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = panel->downclock_mode->vrefresh;
+ vrefresh = drm_mode_vrefresh(panel->downclock_mode);
} else {
seq_printf(m, "DRRS_State: Unknown(%d)\n",
drrs->refresh_rate_type);
@@ -1984,7 +1978,7 @@ static const struct {
{"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
};
-int intel_display_debugfs_register(struct drm_i915_private *i915)
+void intel_display_debugfs_register(struct drm_i915_private *i915)
{
struct drm_minor *minor = i915->drm.primary;
int i;
@@ -1997,9 +1991,9 @@ int intel_display_debugfs_register(struct drm_i915_private *i915)
intel_display_debugfs_files[i].fops);
}
- return drm_debugfs_create_files(intel_display_debugfs_list,
- ARRAY_SIZE(intel_display_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
}
static int i915_panel_show(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index a3bea1ce04c2..c922c1745bfe 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -10,10 +10,10 @@ struct drm_connector;
struct drm_i915_private;
#ifdef CONFIG_DEBUG_FS
-int intel_display_debugfs_register(struct drm_i915_private *i915);
+void intel_display_debugfs_register(struct drm_i915_private *i915);
int intel_connector_debugfs_add(struct drm_connector *connector);
#else
-static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 76a49eac7305..4b0aaa3081c9 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -438,7 +438,7 @@ struct intel_connector {
state of connector->polled in case hotplug storm detection changes it */
u8 polled;
- void *port; /* store this opaque as its illegal to dereference it */
+ struct drm_dp_mst_port *port;
struct intel_dp *mst_port;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 7765a8b95b9d..3df5d901dd9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -7656,7 +7656,7 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
return;
}
- if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
+ if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
refresh_rate)
index = DRRS_LOW_RR;
@@ -7769,7 +7769,7 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp,
if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv, old_crtc_state,
- intel_dp->attached_connector->panel.fixed_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
dev_priv->drrs.dp = NULL;
mutex_unlock(&dev_priv->drrs.mutex);
@@ -7802,7 +7802,7 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- intel_dp->attached_connector->panel.downclock_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
}
unlock:
@@ -7822,6 +7822,7 @@ unlock:
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits)
{
+ struct intel_dp *intel_dp;
struct drm_crtc *crtc;
enum pipe pipe;
@@ -7831,12 +7832,14 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
cancel_delayed_work(&dev_priv->drrs.work);
mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
+
+ intel_dp = dev_priv->drrs.dp;
+ if (!intel_dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
- crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -7845,7 +7848,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
/* invalidate means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
mutex_unlock(&dev_priv->drrs.mutex);
}
@@ -7865,6 +7868,7 @@ void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits)
{
+ struct intel_dp *intel_dp;
struct drm_crtc *crtc;
enum pipe pipe;
@@ -7874,12 +7878,14 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
cancel_delayed_work(&dev_priv->drrs.work);
mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
+
+ intel_dp = dev_priv->drrs.dp;
+ if (!intel_dp) {
mutex_unlock(&dev_priv->drrs.mutex);
return;
}
- crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
pipe = to_intel_crtc(crtc)->pipe;
frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
@@ -7888,7 +7894,7 @@ void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
/* flush means busy screen hence upclock */
if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
+ drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
/*
* flush also means no more activity hence schedule downclock, if all
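
The vrefresh changes above replace reads of the cached mode->vrefresh field with drm_mode_vrefresh(), which derives the rate from the mode timings on demand. Ignoring the interlace/doublescan/vscan adjustments the real helper applies, the arithmetic is roughly the sketch below (hypothetical helper name):

#include <linux/kernel.h>
#include <drm/drm_modes.h>

static int example_vrefresh_approx(const struct drm_display_mode *mode)
{
        /* mode->clock is the pixel clock in kHz */
        if (!mode->htotal || !mode->vtotal)
                return 0;

        return DIV_ROUND_CLOSEST(mode->clock * 1000,
                                 mode->htotal * mode->vtotal);
}
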
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index f7df7a5b7c13..8273f2e07427 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -33,6 +33,7 @@
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_display_types.h"
+#include "intel_hotplug.h"
#include "intel_dp.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
@@ -113,9 +114,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = false;
if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
- pipe_config->has_audio =
- drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- connector->port);
+ pipe_config->has_audio = connector->port->has_audio;
else
pipe_config->has_audio =
intel_conn_state->force_audio == HDMI_AUDIO_ON;
@@ -795,8 +794,17 @@ err:
return NULL;
}
+static void
+intel_dp_mst_poll_hpd_irq(struct drm_dp_mst_topology_mgr *mgr)
+{
+ struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
+
+ intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
+}
+
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
+ .poll_hpd_irq = intel_dp_mst_poll_hpd_irq,
};
static struct intel_dp_mst_encoder *
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index d794dd5f170c..2e94c1413c02 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -356,6 +356,24 @@ static void i915_digport_work_func(struct work_struct *work)
}
}
+/**
+ * intel_hpd_trigger_irq - trigger an hpd irq event for a port
+ * @dig_port: digital port
+ *
+ * Trigger an HPD interrupt event for the given port, emulating a short pulse
+ * generated by the sink, and schedule the dig port work to handle it.
+ */
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ i915->hotplug.short_port_mask |= BIT(dig_port->base.port);
+ spin_unlock_irq(&i915->irq_lock);
+
+ queue_work(i915->hotplug.dp_wq, &i915->hotplug.dig_port_work);
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 777b0743257e..a704d7c94d16 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -10,6 +10,7 @@
struct drm_i915_private;
struct intel_connector;
+struct intel_digital_port;
struct intel_encoder;
enum port;
@@ -18,6 +19,7 @@ enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
+void intel_hpd_trigger_irq(struct intel_digital_port *dig_port);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index 48093f19ec22..777032d9697b 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1038,9 +1038,6 @@ intel_tv_mode_to_mode(struct drm_display_mode *mode,
/* TV has its own notion of sync and other mode flags, so clear them. */
mode->flags = 0;
- mode->vrefresh = 0;
- mode->vrefresh = drm_mode_vrefresh(mode);
-
snprintf(mode->name, sizeof(mode->name),
"%dx%d%c (%s)",
mode->hdisplay, mode->vdisplay,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 4d88faeb4d4c..5c13809dc3c8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1921,11 +1921,6 @@ get_engines(struct i915_gem_context *ctx,
}
user = u64_to_user_ptr(args->value);
- if (!access_ok(user, size)) {
- err = -EFAULT;
- goto err_free;
- }
-
if (put_user(0, &user->extensions)) {
err = -EFAULT;
goto err_free;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 23db79b806db..c38ab51e82f0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2782,7 +2782,8 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
* And this range already got effectively checked earlier
* when we did the "copy_from_user()" above.
*/
- if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
+ if (!user_write_access_begin(user_exec_list,
+ count * sizeof(*user_exec_list)))
goto end;
for (i = 0; i < args->buffer_count; i++) {
@@ -2796,7 +2797,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
end_user);
}
end_user:
- user_access_end();
+ user_write_access_end();
end:;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 9d306dc9849d..fe27c5b344e3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -93,7 +93,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- if (down_write_killable(&mm->mmap_sem)) {
+ if (mmap_write_lock_killable(mm)) {
addr = -EINTR;
goto err;
}
@@ -103,7 +103,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
addr = -ENOMEM;
- up_write(&mm->mmap_sem);
+ mmap_write_unlock(mm);
if (IS_ERR_VALUE(addr))
goto err;
}
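
This hunk is part of the mmap locking API conversion called out in the merge: open-coded down/up on mm->mmap_sem becomes mmap_write_lock_killable()/mmap_write_unlock(), hiding the lock's implementation from callers. A minimal sketch (hypothetical helper):

#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

static int example_with_mmap_write_lock(struct mm_struct *mm)
{
        if (mmap_write_lock_killable(mm))
                return -EINTR; /* interrupted by a fatal signal */

        /* ... modify the address space ... */

        mmap_write_unlock(mm);
        return 0;
}
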
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index f4277afb89eb..28147aab47b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -10,8 +10,6 @@
#include <drm/drm.h> /* for drm_legacy.h! */
#include <drm/drm_cache.h>
-#include <drm/drm_legacy.h> /* for drm_pci.h! */
-#include <drm/drm_pci.h>
#include "gt/intel_gt.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 2adc0ea429fb..9c53eb883400 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -200,10 +200,10 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
if (IS_ERR(mn))
err = PTR_ERR(mn);
- down_write(&mm->mm->mmap_sem);
+ mmap_write_lock(mm->mm);
mutex_lock(&mm->i915->mm_lock);
if (mm->mn == NULL && !err) {
- /* Protected by mmap_sem (write-lock) */
+ /* Protected by mmap_lock (write-lock) */
err = __mmu_notifier_register(&mn->mn, mm->mm);
if (!err) {
/* Protected by mm_lock */
@@ -217,7 +217,7 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
err = 0;
}
mutex_unlock(&mm->i915->mm_lock);
- up_write(&mm->mm->mmap_sem);
+ mmap_write_unlock(mm->mm);
if (mn && !IS_ERR(mn))
kfree(mn);
@@ -468,10 +468,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
if (mmget_not_zero(mm)) {
while (pinned < npages) {
if (!locked) {
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
locked = 1;
}
- ret = get_user_pages_remote
+ ret = pin_user_pages_remote
(work->task, mm,
obj->userptr.ptr + pinned * PAGE_SIZE,
npages - pinned,
@@ -483,7 +483,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
pinned += ret;
}
if (locked)
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
}
}
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
mutex_unlock(&obj->mm.lock);
- release_pages(pvec, pinned);
+ unpin_user_pages(pvec, pinned);
kvfree(pvec);
i915_gem_object_put(obj);
@@ -522,8 +522,8 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
/* Spawn a worker so that we can acquire the
* user pages without holding our mutex. Access
- * to the user pages requires mmap_sem, and we have
- * a strict lock ordering of mmap_sem, struct_mutex -
+ * to the user pages requires mmap_lock, and we have
+ * a strict lock ordering of mmap_lock, struct_mutex -
* we already hold struct_mutex here and so cannot
* call gup without encountering a lock inversion.
*
@@ -564,6 +564,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
struct sg_table *pages;
bool active;
int pinned;
+ unsigned int gup_flags = 0;
/* If userspace should engineer that these pages are replaced in
* the vma between us binding this page into the GTT and completion
@@ -598,11 +599,22 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
GFP_KERNEL |
__GFP_NORETRY |
__GFP_NOWARN);
- if (pvec) /* defer to worker if malloc fails */
- pinned = __get_user_pages_fast(obj->userptr.ptr,
- num_pages,
- !i915_gem_object_is_readonly(obj),
- pvec);
+ /*
+ * Using pin_user_pages_fast_only() with a read-only
+ * access is questionable. A read-only page may be
+ * COW-broken, and then this might end up giving
+ * the wrong side of the COW.
+ *
+ * We may or may not care.
+ */
+ if (pvec) {
+ /* defer to worker if malloc fails */
+ if (!i915_gem_object_is_readonly(obj))
+ gup_flags |= FOLL_WRITE;
+ pinned = pin_user_pages_fast_only(obj->userptr.ptr,
+ num_pages, gup_flags,
+ pvec);
+ }
}
active = false;
@@ -620,7 +632,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
__i915_gem_userptr_set_active(obj, true);
if (IS_ERR(pages))
- release_pages(pvec, pinned);
+ unpin_user_pages(pvec, pinned);
kvfree(pvec);
return PTR_ERR_OR_ZERO(pages);
@@ -675,7 +687,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
}
mark_page_accessed(page);
- put_page(page);
+ unpin_user_page(page);
}
obj->mm.dirty = false;
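
The userptr hunks above migrate from get_user_pages/put_page to the pin_user_pages*() API: pages pinned for DMA-style access must now be released with unpin_user_page()/unpin_user_pages(). A sketch of the pairing (hypothetical caller, write access assumed):

#include <linux/mm.h>

static int example_pin_then_unpin(unsigned long uaddr,
                                  struct page **pages, int npages)
{
        int pinned;

        pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
        if (pinned < 0)
                return pinned;

        /* ... access the pinned pages ... */

        unpin_user_pages(pages, pinned); /* not put_page()! */
        return 0;
}
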
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
index 9272bef57092..debaf7b18ab5 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_dmabuf.c
@@ -66,7 +66,7 @@ static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
{
struct mock_dmabuf *mock = to_mock(dma_buf);
- return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
+ return vm_map_ram(mock->pages, mock->npages, 0);
}
static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 776a73a19503..7ba16ddfe75f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -210,14 +210,41 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
- vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
- LCPLL_PLL_ENABLE |
- LCPLL_PLL_LOCK;
- vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
-
+ /*
+ * Only one pipe is enabled in the current vGPU display and PIPE_A
+ * is tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A and
+ * TRANSCODER_A can be enabled. PORT_x depends on the input of
+ * setup_virtual_dp_monitor; since we can bind DPLL0 to any PORT_x,
+ * we fix it to DPLL0 here.
+ * Set up DPLL0: DP link clk 1620 MHz, non-SSC, DP mode.
+ */
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) =
+ DPLL_CTRL1_OVERRIDE(DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, DPLL_CTRL1) |=
+ DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, DPLL_ID_SKL_DPLL0);
+ vgpu_vreg_t(vgpu, LCPLL1_CTL) =
+ LCPLL_PLL_ENABLE | LCPLL_PLL_LOCK;
+ vgpu_vreg_t(vgpu, DPLL_STATUS) = DPLL_LOCK(DPLL_ID_SKL_DPLL0);
+ /*
+ * Golden M/N are calculated based on:
+ * 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
+ * DP link clk 1620 MHz and non-constant_n.
+ * TODO: calculate DP link symbol clk and stream clk m/n.
+ */
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
+ vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
+ vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
+ vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
+ vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_B);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_B);
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -238,6 +265,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_C);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_C);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
@@ -258,6 +291,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) &=
+ ~DPLL_CTRL2_DDI_CLK_OFF(PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_CLK_SEL(DPLL_ID_SKL_DPLL0, PORT_D);
+ vgpu_vreg_t(vgpu, DPLL_CTRL2) |=
+ DPLL_CTRL2_DDI_SEL_OVERRIDE(PORT_D);
vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 074c4efb58eb..ad8a9df49f29 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -31,7 +31,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/mm.h>
-#include <linux/mmu_context.h>
+#include <linux/kthread.h>
#include <linux/sched/mm.h>
#include <linux/types.h>
#include <linux/list.h>
@@ -131,6 +131,7 @@ struct kvmgt_vdev {
struct work_struct release_work;
atomic_t released;
struct vfio_device *vfio_device;
+ struct vfio_group *vfio_group;
};
static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
@@ -151,6 +152,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
int total_pages;
int npage;
int ret;
@@ -160,7 +162,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+ ret = vfio_group_unpin_pages(vdev->vfio_group, &cur_gfn, 1);
drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -169,6 +171,7 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size, struct page **page)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long base_pfn = 0;
int total_pages;
int npage;
@@ -183,8 +186,8 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
- IOMMU_READ | IOMMU_WRITE, &pfn);
+ ret = vfio_group_pin_pages(vdev->vfio_group, &cur_gfn, 1,
+ IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
cur_gfn, ret);
@@ -792,6 +795,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
+ struct vfio_group *vfio_group;
vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
@@ -814,6 +818,14 @@ static int intel_vgpu_open(struct mdev_device *mdev)
goto undo_iommu;
}
+ vfio_group = vfio_group_get_external_user_from_dev(mdev_dev(mdev));
+ if (IS_ERR_OR_NULL(vfio_group)) {
+ ret = !vfio_group ? -EFAULT : PTR_ERR(vfio_group);
+ gvt_vgpu_err("vfio_group_get_external_user_from_dev failed\n");
+ goto undo_register;
+ }
+ vdev->vfio_group = vfio_group;
+
/* Take a module reference as mdev core doesn't take
* a reference for vendor driver.
*/
@@ -830,6 +842,10 @@ static int intel_vgpu_open(struct mdev_device *mdev)
return ret;
undo_group:
+ vfio_group_put_external_user(vdev->vfio_group);
+ vdev->vfio_group = NULL;
+
+undo_register:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
&vdev->group_notifier);
@@ -884,6 +900,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
kvmgt_guest_exit(info);
intel_vgpu_release_msi_eventfd_ctx(vgpu);
+ vfio_group_put_external_user(vdev->vfio_group);
vdev->kvm = NULL;
vgpu->handle = 0;
@@ -2035,33 +2052,14 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
void *buf, unsigned long len, bool write)
{
struct kvmgt_guest_info *info;
- struct kvm *kvm;
- int idx, ret;
- bool kthread = current->mm == NULL;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
- kvm = info->kvm;
-
- if (kthread) {
- if (!mmget_not_zero(kvm->mm))
- return -EFAULT;
- use_mm(kvm->mm);
- }
-
- idx = srcu_read_lock(&kvm->srcu);
- ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
- kvm_read_guest(kvm, gpa, buf, len);
- srcu_read_unlock(&kvm->srcu, idx);
-
- if (kthread) {
- unuse_mm(kvm->mm);
- mmput(kvm->mm);
- }
- return ret;
+ return vfio_dma_rw(kvmgt_vdev(info->vgpu)->vfio_group,
+ gpa, buf, len, write);
}
static int kvmgt_read_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f5dc52a80fe5..3c3b9842bbbd 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -416,7 +416,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
struct i915_page_directory * const pd =
i915_pd_entry(ppgtt->pd, i);
-
+ /* Skip for now: the current i915 ppgtt allocation won't allocate
+ the top-level pdp for non-4-level tables, which doesn't affect
+ the shadow ppgtt. */
+ if (!pd)
+ break;
px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
}
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 1d5ff88078bd..7d361623ff67 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -124,7 +124,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
*/
low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
+ num_types = ARRAY_SIZE(vgpu_types);
gvt->types = kcalloc(num_types, sizeof(struct intel_vgpu_type),
GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 242f59910c19..8594a8ef08ce 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1898,7 +1898,7 @@ static const struct i915_debugfs_files {
#endif
};
-int i915_debugfs_register(struct drm_i915_private *dev_priv)
+void i915_debugfs_register(struct drm_i915_private *dev_priv)
{
struct drm_minor *minor = dev_priv->drm.primary;
int i;
@@ -1915,7 +1915,7 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
i915_debugfs_files[i].fops);
}
- return drm_debugfs_create_files(i915_debugfs_list,
- I915_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(i915_debugfs_list,
+ I915_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index 6da39c76ab5e..1de2736f1248 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -12,10 +12,10 @@ struct drm_i915_private;
struct seq_file;
#ifdef CONFIG_DEBUG_FS
-int i915_debugfs_register(struct drm_i915_private *dev_priv);
+void i915_debugfs_register(struct drm_i915_private *dev_priv);
void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
#else
-static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
+static inline void i915_debugfs_register(struct drm_i915_private *dev_priv) {}
static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index da991d1967a2..67102dc26fce 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,6 +43,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "display/intel_acpi.h"
@@ -907,17 +908,11 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
(struct intel_device_info *)ent->driver_data;
struct intel_device_info *device_info;
struct drm_i915_private *i915;
- int err;
- i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
- if (!i915)
- return ERR_PTR(-ENOMEM);
-
- err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
- if (err) {
- kfree(i915);
- return ERR_PTR(err);
- }
+ i915 = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct drm_i915_private, drm);
+ if (IS_ERR(i915))
+ return i915;
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -935,17 +930,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return i915;
}
-static void i915_driver_destroy(struct drm_i915_private *i915)
-{
- struct pci_dev *pdev = i915->drm.pdev;
-
- drm_dev_fini(&i915->drm);
- kfree(i915);
-
- /* And make sure we never chase our dangling pointer from pci_dev */
- pci_set_drvdata(pdev, NULL);
-}
-
/**
* i915_driver_probe - setup chip and create an initial config
* @pdev: PCI device
@@ -1027,6 +1011,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i915_welcome_messages(i915);
+ i915->do_release = true;
+
return 0;
out_cleanup_irq:
@@ -1046,7 +1032,6 @@ out_pci_disable:
pci_disable_device(pdev);
out_fini:
i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
- i915_driver_destroy(i915);
return ret;
}
@@ -1086,6 +1071,9 @@ static void i915_driver_release(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ if (!dev_priv->do_release)
+ return;
+
disable_rpm_wakeref_asserts(rpm);
i915_gem_driver_release(dev_priv);
@@ -1099,7 +1087,6 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
- i915_driver_destroy(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
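
i915_driver_create() now uses the managed allocator: devm_drm_dev_alloc() allocates the private struct with its embedded drm_device and ties teardown to the parent device, which is why the hand-rolled i915_driver_destroy() disappears (the do_release flag added in the next hunks papers over release actions not yet converted to drmm_ helpers). A sketch of the allocation pattern with a hypothetical driver struct:

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

struct example_private {
        struct drm_device drm; /* must match the member named below */
        /* ... driver state ... */
};

static struct example_private *
example_create(struct device *parent, struct drm_driver *driver)
{
        /* Returns the containing struct or an ERR_PTR(). */
        return devm_drm_dev_alloc(parent, driver,
                                  struct example_private, drm);
}
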
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2697960f15a9..9aad3ec979bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -827,6 +827,9 @@ struct i915_selftest_stash {
struct drm_i915_private {
struct drm_device drm;
+ /* FIXME: Device release actions should all be moved to drmm_ */
+ bool do_release;
+
/* i915 device parameters */
struct i915_params params;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 8e45ca3d2ede..55b97c3a3dde 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -47,20 +47,16 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct drm_i915_getparam32 req32;
- drm_i915_getparam_t __user *request;
+ struct drm_i915_getparam req;
if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
- request = compat_alloc_user_space(sizeof(*request));
- if (!access_ok(request, sizeof(*request)) ||
- __put_user(req32.param, &request->param) ||
- __put_user((void __user *)(unsigned long)req32.value,
- &request->value))
- return -EFAULT;
+ req.param = req32.param;
+ req.value = compat_ptr(req32.value);
- return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
- (unsigned long)request);
+ return drm_ioctl_kernel(file, i915_getparam_ioctl, &req,
+ DRM_RENDER_ALLOW);
}
static drm_ioctl_compat_t *i915_compat_ioctls[] = {
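
The compat handler above stops staging a 64-bit request on the userspace stack with compat_alloc_user_space(); it converts the 32-bit layout in kernel memory and dispatches through drm_ioctl_kernel(). The pointer-width fix is compat_ptr(), sketched here with hypothetical structs:

#include <linux/compat.h>

struct example_req32 {
        s32 param;
        u32 value; /* 32-bit user pointer, as seen from compat space */
};

struct example_req {
        int param;
        int __user *value; /* native user pointer */
};

static void example_convert(const struct example_req32 *req32,
                            struct example_req *req)
{
        req->param = req32->param;
        req->value = compat_ptr(req32->value); /* widen the pointer */
}
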
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index b6376b25ef63..43039dc8c607 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -25,7 +25,6 @@
#include <linux/mm.h>
#include <linux/io-mapping.h>
-#include <asm/pgtable.h>
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 498d8c982540..e5fdf17cd9cd 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -987,8 +987,6 @@ static void i915_pci_remove(struct pci_dev *pdev)
i915_driver_remove(i915);
pci_set_drvdata(pdev, NULL);
-
- drm_dev_put(&i915->drm);
}
/* is device_id present in comma separated list of ids */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index f35712d04ba4..25329b7600c9 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3415,10 +3415,10 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
/* Similar to perf's kernel.perf_paranoid_cpu sysctl option
* we check a dev.i915.perf_stream_paranoid sysctl option
* to determine if it's ok to access system wide OA counters
- * without CAP_SYS_ADMIN privileges.
+ * without CAP_PERFMON or CAP_SYS_ADMIN privileges.
*/
if (privileged_op &&
- i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to open i915 perf stream\n");
ret = -EACCES;
goto err_ctx;
@@ -3612,9 +3612,8 @@ static int read_properties_unlocked(struct i915_perf *perf,
} else
oa_freq_hz = 0;
- if (oa_freq_hz > i915_oa_max_sample_rate &&
- !capable(CAP_SYS_ADMIN)) {
- DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without root privileges\n",
+ if (oa_freq_hz > i915_oa_max_sample_rate && !perfmon_capable()) {
+ DRM_DEBUG("OA exponent would exceed the max sampling frequency (sysctl dev.i915.oa_max_sample_rate) %uHz without CAP_PERFMON or CAP_SYS_ADMIN privileges\n",
i915_oa_max_sample_rate);
return -EACCES;
}
@@ -3677,7 +3676,7 @@ static int read_properties_unlocked(struct i915_perf *perf,
* buffered data written by the GPU besides periodic OA metrics.
*
* Note we copy the properties from userspace outside of the i915 perf
- * mutex to avoid an awkward lockdep with mmap_sem.
+ * mutex to avoid an awkward lockdep with mmap_lock.
*
* Most of the implementation details are handled by
* i915_perf_open_ioctl_locked() after taking the &perf->lock
@@ -3897,9 +3896,6 @@ static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf,
if (!n_regs)
return NULL;
- if (!access_ok(regs, n_regs * sizeof(u32) * 2))
- return ERR_PTR(-EFAULT);
-
/* No is_valid function means we're not allowing any register to be programmed. */
GEM_BUG_ON(!is_valid);
if (!is_valid)
@@ -4000,7 +3996,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to add i915 OA config\n");
return -EACCES;
}
@@ -4147,7 +4143,7 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
return -ENOTSUPP;
}
- if (i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) {
+ if (i915_perf_stream_paranoid && !perfmon_capable()) {
DRM_DEBUG("Insufficient privileges to remove i915 OA config\n");
return -EACCES;
}
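
The perf hunks above relax three privilege gates from CAP_SYS_ADMIN to perfmon_capable(), which accepts either the new CAP_PERFMON or, for backward compatibility, CAP_SYS_ADMIN. A sketch of the gate (hypothetical):

#include <linux/capability.h>
#include <linux/errno.h>

static int example_perf_gate(bool paranoid)
{
        /* true for CAP_PERFMON, and for CAP_SYS_ADMIN as a fallback */
        if (paranoid && !perfmon_capable())
                return -EACCES;

        return 0;
}
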
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index 9a8fdd3ac6bd..c1ebda9b5627 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -25,10 +25,6 @@ static int copy_query_item(void *query_hdr, size_t query_sz,
query_sz))
return -EFAULT;
- if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
- total_length))
- return -EFAULT;
-
return 0;
}
@@ -72,20 +68,20 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
topo.eu_offset = slice_length + subslice_length;
topo.eu_stride = sseu->eu_stride;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr),
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr),
&topo, sizeof(topo)))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr + sizeof(topo)),
&sseu->slice_mask, slice_length))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
sizeof(topo) + slice_length),
sseu->subslice_mask, subslice_length))
return -EFAULT;
- if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr +
+ if (copy_to_user(u64_to_user_ptr(query_item->data_ptr +
sizeof(topo) +
slice_length + subslice_length),
sseu->eu_mask, eu_length))
@@ -130,14 +126,14 @@ query_engine_info(struct drm_i915_private *i915,
info.engine.engine_instance = engine->uabi_instance;
info.capabilities = engine->uabi_capabilities;
- if (__copy_to_user(info_ptr, &info, sizeof(info)))
+ if (copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
query.num_engines++;
info_ptr++;
}
- if (__copy_to_user(query_ptr, &query, sizeof(query)))
+ if (copy_to_user(query_ptr, &query, sizeof(query)))
return -EFAULT;
return len;
@@ -157,10 +153,6 @@ static int can_copy_perf_config_registers_or_number(u32 user_n_regs,
if (user_n_regs < kernel_n_regs)
return -EINVAL;
- if (!access_ok(u64_to_user_ptr(user_regs_ptr),
- 2 * sizeof(u32) * kernel_n_regs))
- return -EFAULT;
-
return 0;
}
@@ -169,6 +161,7 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
u64 user_regs_ptr,
u32 *user_n_regs)
{
+ u32 __user *p = u64_to_user_ptr(user_regs_ptr);
u32 r;
if (*user_n_regs == 0) {
@@ -178,25 +171,19 @@ static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel
*user_n_regs = kernel_n_regs;
- for (r = 0; r < kernel_n_regs; r++) {
- u32 __user *user_reg_ptr =
- u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2);
- u32 __user *user_val_ptr =
- u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 +
- sizeof(u32));
- int ret;
-
- ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
- user_reg_ptr);
- if (ret)
- return -EFAULT;
+ if (!user_write_access_begin(p, 2 * sizeof(u32) * kernel_n_regs))
+ return -EFAULT;
- ret = __put_user(kernel_regs[r].value, user_val_ptr);
- if (ret)
- return -EFAULT;
+ for (r = 0; r < kernel_n_regs; r++, p += 2) {
+ unsafe_put_user(i915_mmio_reg_offset(kernel_regs[r].addr),
+ p, Efault);
+ unsafe_put_user(kernel_regs[r].value, p + 1, Efault);
}
-
+ user_write_access_end();
return 0;
+Efault:
+ user_write_access_end();
+ return -EFAULT;
}
static int query_perf_config_data(struct drm_i915_private *i915,
@@ -232,10 +219,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
return -EINVAL;
}
- if (!access_ok(user_query_config_ptr, total_size))
- return -EFAULT;
-
- if (__get_user(flags, &user_query_config_ptr->flags))
+ if (get_user(flags, &user_query_config_ptr->flags))
return -EFAULT;
if (flags != 0)
@@ -248,7 +232,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid));
memset(&uuid, 0, sizeof(uuid));
- if (__copy_from_user(uuid, user_query_config_ptr->uuid,
+ if (copy_from_user(uuid, user_query_config_ptr->uuid,
sizeof(user_query_config_ptr->uuid)))
return -EFAULT;
@@ -262,7 +246,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
}
rcu_read_unlock();
} else {
- if (__get_user(config_id, &user_query_config_ptr->config))
+ if (get_user(config_id, &user_query_config_ptr->config))
return -EFAULT;
oa_config = i915_perf_get_oa_config(perf, config_id);
@@ -270,8 +254,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
if (!oa_config)
return -ENOENT;
- if (__copy_from_user(&user_config, user_config_ptr,
- sizeof(user_config))) {
+ if (copy_from_user(&user_config, user_config_ptr, sizeof(user_config))) {
ret = -EFAULT;
goto out;
}
@@ -317,8 +300,7 @@ static int query_perf_config_data(struct drm_i915_private *i915,
memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid));
- if (__copy_to_user(user_config_ptr, &user_config,
- sizeof(user_config))) {
+ if (copy_to_user(user_config_ptr, &user_config, sizeof(user_config))) {
ret = -EFAULT;
goto out;
}
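
The copy_perf_config_registers_or_number() rewrite above is the canonical unsafe_put_user() loop: one user_write_access_begin() brackets the whole copy, each unsafe_put_user() branches to a local Efault label on a fault, and both exit paths end user access. A sketch of the shape (hypothetical copy-out of value pairs):

#include <linux/uaccess.h>

static int example_copy_pairs_to_user(u32 __user *p,
                                      const u32 *vals, unsigned int n)
{
        unsigned int i;

        if (!user_write_access_begin(p, 2 * sizeof(u32) * n))
                return -EFAULT;

        for (i = 0; i < n; i++, p += 2) {
                unsafe_put_user(vals[2 * i], p, Efault);
                unsafe_put_user(vals[2 * i + 1], p + 1, Efault);
        }

        user_write_access_end();
        return 0;

Efault:
        user_write_access_end();
        return -EFAULT;
}
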
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 19e1fed198c3..f09120cac89a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,7 +186,7 @@ typedef struct {
#define INVALID_MMIO_REG _MMIO(0)
-static inline u32 i915_mmio_reg_offset(i915_reg_t reg)
+static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg)
{
return reg.reg;
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 31ccd0559c55..153ca9e65382 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -559,7 +559,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
* because the HDA driver may require us to enable the audio power
* domain during system suspend.
*/
- dev_pm_set_driver_flags(kdev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
pm_runtime_mark_last_busy(kdev);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 47fde54150f4..9b105b811f1f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -25,6 +25,8 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_managed.h>
+
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
#include "gt/mock_engine.h"
@@ -55,6 +57,9 @@ static void mock_device_release(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
+ if (!i915->do_release)
+ goto out;
+
mock_device_flush(i915);
intel_gt_driver_remove(&i915->gt);
@@ -71,8 +76,9 @@ static void mock_device_release(struct drm_device *dev)
drm_mode_config_cleanup(&i915->drm);
- drm_dev_fini(&i915->drm);
+out:
put_device(&i915->drm.pdev->dev);
+ i915->drm.pdev = NULL;
}
static struct drm_driver mock_driver = {
@@ -114,9 +120,14 @@ struct drm_i915_private *mock_gem_device(void)
struct pci_dev *pdev;
int err;
- pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+ pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
- goto err;
+ return NULL;
+ i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
+ if (!i915) {
+ kfree(pdev);
+ return NULL;
+ }
device_initialize(&pdev->dev);
pdev->class = PCI_BASE_CLASS_DISPLAY << 16;
@@ -129,7 +140,6 @@ struct drm_i915_private *mock_gem_device(void)
pdev->dev.archdata.iommu = (void *)-1;
#endif
- i915 = (struct drm_i915_private *)(pdev + 1);
pci_set_drvdata(pdev, i915);
dev_pm_domain_set(&pdev->dev, &pm_domain);
@@ -141,9 +151,13 @@ struct drm_i915_private *mock_gem_device(void)
err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
if (err) {
pr_err("Failed to initialise mock GEM device: err=%d\n", err);
- goto put_device;
+ put_device(&pdev->dev);
+ kfree(i915);
+
+ return NULL;
}
i915->drm.pdev = pdev;
+ drmm_add_final_kfree(&i915->drm, i915);
intel_runtime_pm_init_early(&i915->runtime_pm);
@@ -188,6 +202,8 @@ struct drm_i915_private *mock_gem_device(void)
__clear_bit(I915_WEDGED, &i915->gt.reset.flags);
intel_engines_driver_register(i915);
+ i915->do_release = true;
+
return i915;
err_context:
@@ -198,9 +214,7 @@ err_drv:
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
drm_mode_config_cleanup(&i915->drm);
- drm_dev_fini(&i915->drm);
-put_device:
- put_device(&pdev->dev);
-err:
+ drm_dev_put(&i915->drm);
+
return NULL;
}
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index f22cfbf9353e..ba4ca17fd4d8 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -18,6 +18,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -143,10 +144,6 @@ static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs =
.atomic_check = dw_hdmi_imx_atomic_check,
};
-static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_mode_status
imx6q_hdmi_mode_valid(struct drm_connector *con,
const struct drm_display_mode *mode)
@@ -236,8 +233,7 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master,
return ret;
drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index da87c70e413b..36037b2e6564 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -42,12 +42,6 @@ void imx_drm_connector_destroy(struct drm_connector *connector)
}
EXPORT_SYMBOL_GPL(imx_drm_connector_destroy);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-EXPORT_SYMBOL_GPL(imx_drm_encoder_destroy);
-
static int imx_drm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state)
{
@@ -139,8 +133,8 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
encoder->possible_crtcs = crtc_mask;
- /* FIXME: this is the mask of outputs which can clone this output. */
- encoder->possible_clones = ~0;
+ /* FIXME: cloning support not clear, disable it all for now */
+ encoder->possible_clones = 0;
return 0;
}
@@ -152,17 +146,7 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
static struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.ioctls = imx_drm_ioctls,
.num_ioctls = ARRAY_SIZE(imx_drm_ioctls),
.fops = &imx_drm_driver_fops,
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index ab9c6f706eb3..c3e1a3f14d30 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -38,7 +38,6 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
struct drm_encoder *encoder, struct device_node *np);
void imx_drm_connector_destroy(struct drm_connector *connector);
-void imx_drm_encoder_destroy(struct drm_encoder *encoder);
int ipu_planes_assign_pre(struct drm_device *dev,
struct drm_atomic_state *state);
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 4da22a94790c..66ea68e8da87 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -26,6 +26,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -393,10 +394,6 @@ static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs =
.best_encoder = imx_ldb_connector_best_encoder,
};
-static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
.atomic_mode_set = imx_ldb_encoder_atomic_mode_set,
.enable = imx_ldb_encoder_enable,
@@ -441,8 +438,7 @@ static int imx_ldb_register(struct drm_device *drm,
}
drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS);
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 5bbfaa2cd0f4..ee63782c77e9 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -21,6 +21,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -348,10 +349,6 @@ static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs =
.mode_valid = imx_tve_connector_mode_valid,
};
-static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
.mode_set = imx_tve_encoder_mode_set,
.enable = imx_tve_encoder_enable,
@@ -479,8 +476,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
return ret;
drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs);
- drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs,
- encoder_type, NULL);
+ drm_simple_encoder_init(drm, &tve->encoder, encoder_type);
drm_connector_helper_add(&tve->connector,
&imx_tve_connector_helper_funcs);
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 08fafa4bf8c2..ac916c84a631 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -18,6 +18,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "imx-drm.h"
@@ -256,10 +257,6 @@ static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
.best_encoder = imx_pd_connector_best_encoder,
};
-static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
- .destroy = imx_drm_encoder_destroy,
-};
-
static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
.enable = imx_pd_bridge_enable,
.disable = imx_pd_bridge_disable,
@@ -288,8 +285,7 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE);
imxpd->bridge.funcs = &imx_pd_bridge_funcs;
drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 9dfe7cb530e1..16f0740df507 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -23,11 +23,13 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_plane.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#define JZ_REG_LCD_CFG 0x00
@@ -328,8 +330,8 @@ static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
if (!drm_atomic_crtc_needs_modeset(state))
return 0;
- if (state->mode.hdisplay > priv->soc_info->max_height ||
- state->mode.vdisplay > priv->soc_info->max_width)
+ if (state->mode.hdisplay > priv->soc_info->max_width ||
+ state->mode.vdisplay > priv->soc_info->max_height)
return -EINVAL;
rate = clk_round_rate(priv->pix_clk,
@@ -474,7 +476,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder,
static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
{
- struct ingenic_drm *priv = arg;
+ struct ingenic_drm *priv = drm_device_get_priv(arg);
unsigned int state;
regmap_read(priv->map, JZ_REG_LCD_STATE, &state);
@@ -488,15 +490,6 @@ static irqreturn_t ingenic_drm_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void ingenic_drm_release(struct drm_device *drm)
-{
- struct ingenic_drm *priv = drm_device_get_priv(drm);
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(priv);
-}
-
static int ingenic_drm_enable_vblank(struct drm_crtc *crtc)
{
struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
@@ -526,21 +519,9 @@ static struct drm_driver ingenic_drm_driver_data = {
.patchlevel = 0,
.fops = &ingenic_drm_fops,
-
- .dumb_create = drm_gem_cma_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.irq_handler = ingenic_drm_irq_handler,
- .release = ingenic_drm_release,
};
static const struct drm_plane_funcs ingenic_drm_primary_plane_funcs = {
@@ -592,10 +573,6 @@ static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static const struct drm_encoder_funcs ingenic_drm_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void ingenic_drm_free_dma_hwdesc(void *d)
{
struct ingenic_drm *priv = d;
@@ -623,24 +600,21 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_dev_alloc(dev, &ingenic_drm_driver_data,
+ struct ingenic_drm, drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
priv->soc_info = soc_info;
priv->dev = dev;
drm = &priv->drm;
- drm->dev_private = priv;
platform_set_drvdata(pdev, priv);
- ret = devm_drm_dev_init(dev, drm, &ingenic_drm_driver_data);
- if (ret) {
- kfree(priv);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
return ret;
- }
- drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = soc_info->max_width;
@@ -661,10 +635,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed to get platform irq");
+ if (irq < 0)
return irq;
- }
if (soc_info->needs_dev_clk) {
priv->lcd_clk = devm_clk_get(dev, "lcd");
@@ -730,8 +702,8 @@ static int ingenic_drm_probe(struct platform_device *pdev)
drm_encoder_helper_add(&priv->encoder,
&ingenic_drm_encoder_helper_funcs);
- ret = drm_encoder_init(drm, &priv->encoder, &ingenic_drm_encoder_funcs,
- DRM_MODE_ENCODER_DPI, NULL);
+ ret = drm_simple_encoder_init(drm, &priv->encoder,
+ DRM_MODE_ENCODER_DPI);
if (ret) {
dev_err(dev, "Failed to init encoder: %i", ret);
return ret;
@@ -791,9 +763,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
goto err_devclk_disable;
}
- ret = drm_fbdev_generic_setup(drm, 32);
- if (ret)
- dev_warn(dev, "Unable to start fbdev emulation: %i", ret);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
@@ -843,6 +813,7 @@ static const struct of_device_id ingenic_drm_of_match[] = {
{ .compatible = "ingenic,jz4770-lcd", .data = &jz4770_soc_info },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, ingenic_drm_of_match);
static struct platform_driver ingenic_drm_driver = {
.driver = {
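The ingenic changes above move to managed DRM allocation: devm_drm_dev_alloc() embeds the drm_device in the driver struct and frees everything on the final drm_dev_put(), and drmm_mode_config_init() queues the mode-config cleanup, which is why ingenic_drm_release() and the manual kfree()/drm_mode_config_cleanup() calls disappear. A minimal probe sketch of the pattern (the foo_* names are illustrative):

	struct foo_device {
		struct drm_device drm;
		/* driver-private fields */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_device *priv;
		int ret;

		priv = devm_drm_dev_alloc(&pdev->dev, &foo_drm_driver,
					  struct foo_device, drm);
		if (IS_ERR(priv))
			return PTR_ERR(priv);

		ret = drmm_mode_config_init(&priv->drm);
		if (ret)
			return ret;

		/* no explicit cleanup needed on the error paths above */
		return drm_dev_register(&priv->drm, 0);
	}
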
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
index d589f09d04d9..fa1d4f5df31e 100644
--- a/drivers/gpu/drm/lima/Kconfig
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -10,5 +10,7 @@ config DRM_LIMA
depends on OF
select DRM_SCHED
select DRM_GEM_SHMEM_HELPER
+ select PM_DEVFREQ
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
index a85444b0a1d4..ca2097b8e1ad 100644
--- a/drivers/gpu/drm/lima/Makefile
+++ b/drivers/gpu/drm/lima/Makefile
@@ -14,6 +14,8 @@ lima-y := \
lima_sched.o \
lima_ctx.o \
lima_dlbu.o \
- lima_bcast.o
+ lima_bcast.o \
+ lima_trace.o \
+ lima_devfreq.o
obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
index 288398027bfa..fbc43f243c54 100644
--- a/drivers/gpu/drm/lima/lima_bcast.c
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -26,18 +26,33 @@ void lima_bcast_enable(struct lima_device *dev, int num_pp)
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
}
+static int lima_bcast_hw_init(struct lima_ip *ip)
+{
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, ip->data.mask << 16);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, ip->data.mask);
+ return 0;
+}
+
+int lima_bcast_resume(struct lima_ip *ip)
+{
+ return lima_bcast_hw_init(ip);
+}
+
+void lima_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_bcast_init(struct lima_ip *ip)
{
- int i, mask = 0;
+ int i;
for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
if (ip->dev->ip[i].present)
- mask |= 1 << (i - lima_ip_pp0);
+ ip->data.mask |= 1 << (i - lima_ip_pp0);
}
- bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
- bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
- return 0;
+ return lima_bcast_hw_init(ip);
}
void lima_bcast_fini(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
index c47e58563d0a..465ee587bceb 100644
--- a/drivers/gpu/drm/lima/lima_bcast.h
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_bcast_resume(struct lima_ip *ip);
+void lima_bcast_suspend(struct lima_ip *ip);
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
index 22fff6caa961..891d5cd5019a 100644
--- a/drivers/gpu/drm/lima/lima_ctx.c
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -27,6 +27,9 @@ int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
if (err < 0)
goto err_out0;
+ ctx->pid = task_pid_nr(current);
+ get_task_comm(ctx->pname, current);
+
return 0;
err_out0:
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
index 6154e5c9bfe4..74e2be09090f 100644
--- a/drivers/gpu/drm/lima/lima_ctx.h
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -5,6 +5,7 @@
#define __LIMA_CTX_H__
#include <linux/xarray.h>
+#include <linux/sched.h>
#include "lima_device.h"
@@ -13,6 +14,10 @@ struct lima_ctx {
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
atomic_t guilty;
+
+ /* debug info */
+ char pname[TASK_COMM_LEN];
+ pid_t pid;
};
struct lima_ctx_mgr {
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
new file mode 100644
index 000000000000..bbe02817721b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ *
+ * Based on panfrost_devfreq.c:
+ * Copyright 2019 Collabora ltd.
+ */
+#include <linux/clk.h>
+#include <linux/devfreq.h>
+#include <linux/devfreq_cooling.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/property.h>
+
+#include "lima_device.h"
+#include "lima_devfreq.h"
+
+static void lima_devfreq_update_utilization(struct lima_devfreq *devfreq)
+{
+ ktime_t now, last;
+
+ now = ktime_get();
+ last = devfreq->time_last_update;
+
+ if (devfreq->busy_count > 0)
+ devfreq->busy_time += ktime_sub(now, last);
+ else
+ devfreq->idle_time += ktime_sub(now, last);
+
+ devfreq->time_last_update = now;
+}
+
+static int lima_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct dev_pm_opp *opp;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+ dev_pm_opp_put(opp);
+
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void lima_devfreq_reset(struct lima_devfreq *devfreq)
+{
+ devfreq->busy_time = 0;
+ devfreq->idle_time = 0;
+ devfreq->time_last_update = ktime_get();
+}
+
+static int lima_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_devfreq *devfreq = &ldev->devfreq;
+ unsigned long irqflags;
+
+ status->current_frequency = clk_get_rate(ldev->clk_gpu);
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ status->total_time = ktime_to_ns(ktime_add(devfreq->busy_time,
+ devfreq->idle_time));
+ status->busy_time = ktime_to_ns(devfreq->busy_time);
+
+ lima_devfreq_reset(devfreq);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+ dev_dbg(ldev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
+ status->busy_time, status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile lima_devfreq_profile = {
+ .polling_ms = 50, /* ~3 frames */
+ .target = lima_devfreq_target,
+ .get_dev_status = lima_devfreq_get_dev_status,
+};
+
+void lima_devfreq_fini(struct lima_device *ldev)
+{
+ struct lima_devfreq *devfreq = &ldev->devfreq;
+
+ if (devfreq->cooling) {
+ devfreq_cooling_unregister(devfreq->cooling);
+ devfreq->cooling = NULL;
+ }
+
+ if (devfreq->devfreq) {
+ devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
+ devfreq->devfreq = NULL;
+ }
+
+ if (devfreq->opp_of_table_added) {
+ dev_pm_opp_of_remove_table(ldev->dev);
+ devfreq->opp_of_table_added = false;
+ }
+
+ if (devfreq->regulators_opp_table) {
+ dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
+ devfreq->regulators_opp_table = NULL;
+ }
+
+ if (devfreq->clkname_opp_table) {
+ dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
+ devfreq->clkname_opp_table = NULL;
+ }
+}
+
+int lima_devfreq_init(struct lima_device *ldev)
+{
+ struct thermal_cooling_device *cooling;
+ struct device *dev = ldev->dev;
+ struct opp_table *opp_table;
+ struct devfreq *devfreq;
+ struct lima_devfreq *ldevfreq = &ldev->devfreq;
+ struct dev_pm_opp *opp;
+ unsigned long cur_freq;
+ int ret;
+
+ if (!device_property_present(dev, "operating-points-v2"))
+ /* Optional, continue without devfreq */
+ return 0;
+
+ spin_lock_init(&ldevfreq->lock);
+
+ opp_table = dev_pm_opp_set_clkname(dev, "core");
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+ goto err_fini;
+ }
+
+ ldevfreq->clkname_opp_table = opp_table;
+
+ opp_table = dev_pm_opp_set_regulators(dev,
+ (const char *[]){ "mali" },
+ 1);
+ if (IS_ERR(opp_table)) {
+ ret = PTR_ERR(opp_table);
+
+ /* Continue if the optional regulator is missing */
+ if (ret != -ENODEV)
+ goto err_fini;
+ } else {
+ ldevfreq->regulators_opp_table = opp_table;
+ }
+
+ ret = dev_pm_opp_of_add_table(dev);
+ if (ret)
+ goto err_fini;
+ ldevfreq->opp_of_table_added = true;
+
+ lima_devfreq_reset(ldevfreq);
+
+ cur_freq = clk_get_rate(ldev->clk_gpu);
+
+ opp = devfreq_recommended_opp(dev, &cur_freq, 0);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto err_fini;
+ }
+
+ lima_devfreq_profile.initial_freq = cur_freq;
+ dev_pm_opp_put(opp);
+
+ devfreq = devm_devfreq_add_device(dev, &lima_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL);
+ if (IS_ERR(devfreq)) {
+ dev_err(dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(devfreq);
+ goto err_fini;
+ }
+
+ ldevfreq->devfreq = devfreq;
+
+ cooling = of_devfreq_cooling_register(dev->of_node, devfreq);
+ if (IS_ERR(cooling))
+ dev_info(dev, "Failed to register cooling device\n");
+ else
+ ldevfreq->cooling = cooling;
+
+ return 0;
+
+err_fini:
+ lima_devfreq_fini(ldev);
+ return ret;
+}
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ devfreq->busy_count++;
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_update_utilization(devfreq);
+
+ WARN_ON(--devfreq->busy_count < 0);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+}
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq)
+{
+ unsigned long irqflags;
+
+ if (!devfreq->devfreq)
+ return 0;
+
+ spin_lock_irqsave(&devfreq->lock, irqflags);
+
+ lima_devfreq_reset(devfreq);
+
+ spin_unlock_irqrestore(&devfreq->lock, irqflags);
+
+ return devfreq_resume_device(devfreq->devfreq);
+}
+
+int lima_devfreq_suspend(struct lima_devfreq *devfreq)
+{
+ if (!devfreq->devfreq)
+ return 0;
+
+ return devfreq_suspend_device(devfreq->devfreq);
+}
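lima_devfreq_get_dev_status() hands the governor the accumulated busy/idle times and resets the counters each polling interval. The simple_ondemand governor turns that into a load percentage and moves the frequency around fixed thresholds; roughly (a sketch with the default thresholds hard-coded, not the exact code from drivers/devfreq/governor_simpleondemand.c):

	static unsigned long simple_ondemand_pick(struct devfreq_dev_status *stat,
						  unsigned long cur_freq,
						  unsigned long max_freq)
	{
		unsigned long load;

		if (!stat->total_time)
			return max_freq;		/* no samples yet: run fast */

		load = stat->busy_time * 100 / stat->total_time;
		if (load > 90)				/* default upthreshold */
			return max_freq;
		if (load < 90 - 5)			/* upthreshold - downdifferential */
			return cur_freq * load / 90;	/* scale down toward the load */
		return cur_freq;			/* inside the hysteresis band */
	}
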
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
new file mode 100644
index 000000000000..5eed2975a375
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_devfreq.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2020 Martin Blumenstingl <martin.blumenstingl@googlemail.com> */
+
+#ifndef __LIMA_DEVFREQ_H__
+#define __LIMA_DEVFREQ_H__
+
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+
+struct devfreq;
+struct opp_table;
+struct thermal_cooling_device;
+
+struct lima_device;
+
+struct lima_devfreq {
+ struct devfreq *devfreq;
+ struct opp_table *clkname_opp_table;
+ struct opp_table *regulators_opp_table;
+ struct thermal_cooling_device *cooling;
+ bool opp_of_table_added;
+
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ int busy_count;
+ /*
+ * Protect busy_time, idle_time, time_last_update and busy_count
+ * because these can be updated concurrently, for example by the GP
+ * and PP interrupts.
+ */
+ spinlock_t lock;
+};
+
+int lima_devfreq_init(struct lima_device *ldev);
+void lima_devfreq_fini(struct lima_device *ldev);
+
+void lima_devfreq_record_busy(struct lima_devfreq *devfreq);
+void lima_devfreq_record_idle(struct lima_devfreq *devfreq);
+
+int lima_devfreq_resume(struct lima_devfreq *devfreq);
+int lima_devfreq_suspend(struct lima_devfreq *devfreq);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
index 19829b543024..65fdca366e41 100644
--- a/drivers/gpu/drm/lima/lima_device.c
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -25,6 +25,8 @@ struct lima_ip_desc {
int (*init)(struct lima_ip *ip);
void (*fini)(struct lima_ip *ip);
+ int (*resume)(struct lima_ip *ip);
+ void (*suspend)(struct lima_ip *ip);
};
#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
@@ -41,6 +43,8 @@ struct lima_ip_desc {
}, \
.init = lima_##func##_init, \
.fini = lima_##func##_fini, \
+ .resume = lima_##func##_resume, \
+ .suspend = lima_##func##_suspend, \
}
static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
@@ -77,26 +81,10 @@ const char *lima_ip_name(struct lima_ip *ip)
return lima_ip_desc[ip->id].name;
}
-static int lima_clk_init(struct lima_device *dev)
+static int lima_clk_enable(struct lima_device *dev)
{
int err;
- dev->clk_bus = devm_clk_get(dev->dev, "bus");
- if (IS_ERR(dev->clk_bus)) {
- err = PTR_ERR(dev->clk_bus);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get bus clk failed %d\n", err);
- return err;
- }
-
- dev->clk_gpu = devm_clk_get(dev->dev, "core");
- if (IS_ERR(dev->clk_gpu)) {
- err = PTR_ERR(dev->clk_gpu);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get core clk failed %d\n", err);
- return err;
- }
-
err = clk_prepare_enable(dev->clk_bus);
if (err)
return err;
@@ -105,15 +93,7 @@ static int lima_clk_init(struct lima_device *dev)
if (err)
goto error_out0;
- dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
-
- if (IS_ERR(dev->reset)) {
- err = PTR_ERR(dev->reset);
- if (err != -EPROBE_DEFER)
- dev_err(dev->dev, "get reset controller failed %d\n",
- err);
- goto error_out1;
- } else if (dev->reset != NULL) {
+ if (dev->reset) {
err = reset_control_deassert(dev->reset);
if (err) {
dev_err(dev->dev,
@@ -131,14 +111,76 @@ error_out0:
return err;
}
-static void lima_clk_fini(struct lima_device *dev)
+static void lima_clk_disable(struct lima_device *dev)
{
- if (dev->reset != NULL)
+ if (dev->reset)
reset_control_assert(dev->reset);
clk_disable_unprepare(dev->clk_gpu);
clk_disable_unprepare(dev->clk_bus);
}
+static int lima_clk_init(struct lima_device *dev)
+{
+ int err;
+
+ dev->clk_bus = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->clk_bus)) {
+ err = PTR_ERR(dev->clk_bus);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get bus clk failed %d\n", err);
+ dev->clk_bus = NULL;
+ return err;
+ }
+
+ dev->clk_gpu = devm_clk_get(dev->dev, "core");
+ if (IS_ERR(dev->clk_gpu)) {
+ err = PTR_ERR(dev->clk_gpu);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get core clk failed %d\n", err);
+ dev->clk_gpu = NULL;
+ return err;
+ }
+
+ dev->reset = devm_reset_control_array_get_optional_shared(dev->dev);
+ if (IS_ERR(dev->reset)) {
+ err = PTR_ERR(dev->reset);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev->dev, "get reset controller failed %d\n",
+ err);
+ dev->reset = NULL;
+ return err;
+ }
+
+ return lima_clk_enable(dev);
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+ lima_clk_disable(dev);
+}
+
+static int lima_regulator_enable(struct lima_device *dev)
+{
+ int ret;
+
+ if (!dev->regulator)
+ return 0;
+
+ ret = regulator_enable(dev->regulator);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lima_regulator_disable(struct lima_device *dev)
+{
+ if (dev->regulator)
+ regulator_disable(dev->regulator);
+}
+
static int lima_regulator_init(struct lima_device *dev)
{
int ret;
@@ -154,25 +196,20 @@ static int lima_regulator_init(struct lima_device *dev)
return ret;
}
- ret = regulator_enable(dev->regulator);
- if (ret < 0) {
- dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return lima_regulator_enable(dev);
}
static void lima_regulator_fini(struct lima_device *dev)
{
- if (dev->regulator)
- regulator_disable(dev->regulator);
+ lima_regulator_disable(dev);
}
static int lima_init_ip(struct lima_device *dev, int index)
{
+ struct platform_device *pdev = to_platform_device(dev->dev);
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = dev->ip + index;
+ const char *irq_name = desc->irq_name;
int offset = desc->offset[dev->id];
bool must = desc->must_have[dev->id];
int err;
@@ -183,8 +220,9 @@ static int lima_init_ip(struct lima_device *dev, int index)
ip->dev = dev;
ip->id = index;
ip->iomem = dev->iomem + offset;
- if (desc->irq_name) {
- err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+ if (irq_name) {
+ err = must ? platform_get_irq_byname(pdev, irq_name) :
+ platform_get_irq_byname_optional(pdev, irq_name);
if (err < 0)
goto out;
ip->irq = err;
@@ -209,11 +247,34 @@ static void lima_fini_ip(struct lima_device *ldev, int index)
desc->fini(ip);
}
+static int lima_resume_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+ int ret = 0;
+
+ if (ip->present)
+ ret = desc->resume(ip);
+
+ return ret;
+}
+
+static void lima_suspend_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+
+ if (ip->present)
+ desc->suspend(ip);
+}
+
static int lima_init_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
int err;
+ pipe->ldev = dev;
+
err = lima_sched_pipe_init(pipe, "gp");
if (err)
return err;
@@ -244,6 +305,8 @@ static int lima_init_pp_pipe(struct lima_device *dev)
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
int err, i;
+ pipe->ldev = dev;
+
err = lima_sched_pipe_init(pipe, "pp");
if (err)
return err;
@@ -290,8 +353,8 @@ static void lima_fini_pp_pipe(struct lima_device *dev)
int lima_device_init(struct lima_device *ldev)
{
+ struct platform_device *pdev = to_platform_device(ldev->dev);
int err, i;
- struct resource *res;
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
@@ -322,8 +385,7 @@ int lima_device_init(struct lima_device *ldev)
} else
ldev->va_end = LIMA_VA_RESERVE_END;
- res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
- ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+ ldev->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ldev->iomem)) {
dev_err(ldev->dev, "fail to ioremap iomem\n");
err = PTR_ERR(ldev->iomem);
@@ -344,6 +406,12 @@ int lima_device_init(struct lima_device *ldev)
if (err)
goto err_out5;
+ ldev->dump.magic = LIMA_DUMP_MAGIC;
+ ldev->dump.version_major = LIMA_DUMP_MAJOR;
+ ldev->dump.version_minor = LIMA_DUMP_MINOR;
+ INIT_LIST_HEAD(&ldev->error_task_list);
+ mutex_init(&ldev->error_task_list_lock);
+
dev_info(ldev->dev, "bus rate = %lu\n", clk_get_rate(ldev->clk_bus));
dev_info(ldev->dev, "mod rate = %lu", clk_get_rate(ldev->clk_gpu));
@@ -370,6 +438,13 @@ err_out0:
void lima_device_fini(struct lima_device *ldev)
{
int i;
+ struct lima_sched_error_task *et, *tmp;
+
+ list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+ list_del(&et->list);
+ kvfree(et);
+ }
+ mutex_destroy(&ldev->error_task_list_lock);
lima_fini_pp_pipe(ldev);
lima_fini_gp_pipe(ldev);
@@ -387,3 +462,72 @@ void lima_device_fini(struct lima_device *ldev)
lima_clk_fini(ldev);
}
+
+int lima_device_resume(struct device *dev)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ int i, err;
+
+ err = lima_clk_enable(ldev);
+ if (err) {
+ dev_err(dev, "resume clk fail %d\n", err);
+ return err;
+ }
+
+ err = lima_regulator_enable(ldev);
+ if (err) {
+ dev_err(dev, "resume regulator fail %d\n", err);
+ goto err_out0;
+ }
+
+ for (i = 0; i < lima_ip_num; i++) {
+ err = lima_resume_ip(ldev, i);
+ if (err) {
+ dev_err(dev, "resume ip %d fail\n", i);
+ goto err_out1;
+ }
+ }
+
+ err = lima_devfreq_resume(&ldev->devfreq);
+ if (err) {
+ dev_err(dev, "devfreq resume fail\n");
+ goto err_out1;
+ }
+
+ return 0;
+
+err_out1:
+ while (--i >= 0)
+ lima_suspend_ip(ldev, i);
+ lima_regulator_disable(ldev);
+err_out0:
+ lima_clk_disable(ldev);
+ return err;
+}
+
+int lima_device_suspend(struct device *dev)
+{
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ int i, err;
+
+ /* check any task running */
+ for (i = 0; i < lima_pipe_num; i++) {
+ if (atomic_read(&ldev->pipe[i].base.hw_rq_count))
+ return -EBUSY;
+ }
+
+ err = lima_devfreq_suspend(&ldev->devfreq);
+ if (err) {
+ dev_err(dev, "devfreq suspend fail\n");
+ return err;
+ }
+
+ for (i = lima_ip_num - 1; i >= 0; i--)
+ lima_suspend_ip(ldev, i);
+
+ lima_regulator_disable(ldev);
+
+ lima_clk_disable(ldev);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
index 31158d86271c..41b9d7b4bcc7 100644
--- a/drivers/gpu/drm/lima/lima_device.h
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -6,8 +6,12 @@
#include <drm/drm_device.h>
#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include "lima_sched.h"
+#include "lima_dump.h"
+#include "lima_devfreq.h"
enum lima_gpu_id {
lima_gpu_mali400 = 0,
@@ -60,6 +64,8 @@ struct lima_ip {
bool async_reset;
/* l2 cache */
spinlock_t lock;
+ /* pmu/bcast */
+ u32 mask;
} data;
};
@@ -72,7 +78,6 @@ enum lima_pipe_id {
struct lima_device {
struct device *dev;
struct drm_device *ddev;
- struct platform_device *pdev;
enum lima_gpu_id id;
u32 gp_version;
@@ -94,6 +99,13 @@ struct lima_device {
u32 *dlbu_cpu;
dma_addr_t dlbu_dma;
+
+ struct lima_devfreq devfreq;
+
+ /* debug info */
+ struct lima_dump_head dump;
+ struct list_head error_task_list;
+ struct mutex error_task_list_lock;
};
static inline struct lima_device *
@@ -128,4 +140,7 @@ static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
return 0;
}
+int lima_device_suspend(struct device *dev);
+int lima_device_resume(struct device *dev);
+
#endif
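With resume/suspend added to lima_ip_desc, the device-level PM code stays table-driven: lima_device_resume() walks the IPs forward and unwinds in reverse on failure. The same idiom in isolation (illustrative types and names):

	struct ip_ops {
		int  (*resume)(int idx);
		void (*suspend)(int idx);
	};

	static int resume_all(const struct ip_ops *ops, int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = ops[i].resume(i);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		while (--i >= 0)
			ops[i].suspend(i);	/* undo only what already resumed */
		return err;
	}
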
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
index 8399ceffb94b..c1d5ea35daa7 100644
--- a/drivers/gpu/drm/lima/lima_dlbu.c
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -42,7 +42,7 @@ void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
}
-int lima_dlbu_init(struct lima_ip *ip)
+static int lima_dlbu_hw_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -52,6 +52,21 @@ int lima_dlbu_init(struct lima_ip *ip)
return 0;
}
+int lima_dlbu_resume(struct lima_ip *ip)
+{
+ return lima_dlbu_hw_init(ip);
+}
+
+void lima_dlbu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+ return lima_dlbu_hw_init(ip);
+}
+
void lima_dlbu_fini(struct lima_ip *ip)
{
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
index 16f877984466..be71daaaee89 100644
--- a/drivers/gpu/drm/lima/lima_dlbu.h
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -12,6 +12,8 @@ void lima_dlbu_disable(struct lima_device *dev);
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+int lima_dlbu_resume(struct lima_ip *ip);
+void lima_dlbu_suspend(struct lima_ip *ip);
int lima_dlbu_init(struct lima_ip *ip);
void lima_dlbu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 2daac64d8955..a831565af813 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -5,17 +5,20 @@
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/lima_drm.h>
+#include "lima_device.h"
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
uint lima_heap_init_nr_pages = 8;
+uint lima_max_error_tasks;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
@@ -23,6 +26,9 @@ module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
MODULE_PARM_DESC(heap_init_nr_pages, "heap buffer init number of pages");
module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+MODULE_PARM_DESC(max_error_tasks, "max number of error tasks to save");
+module_param_named(max_error_tasks, lima_max_error_tasks, uint, 0644);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -272,6 +278,93 @@ static struct drm_driver lima_drm_driver = {
.gem_prime_mmap = drm_gem_prime_mmap,
};
+struct lima_block_reader {
+ void *dst;
+ size_t base;
+ size_t count;
+ size_t off;
+ ssize_t read;
+};
+
+static bool lima_read_block(struct lima_block_reader *reader,
+ void *src, size_t src_size)
+{
+ size_t max_off = reader->base + src_size;
+
+ if (reader->off < max_off) {
+ size_t size = min_t(size_t, max_off - reader->off,
+ reader->count);
+
+ memcpy(reader->dst, src + (reader->off - reader->base), size);
+
+ reader->dst += size;
+ reader->off += size;
+ reader->read += size;
+ reader->count -= size;
+ }
+
+ reader->base = max_off;
+
+ return !!reader->count;
+}
+
+static ssize_t lima_error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_sched_error_task *et;
+ struct lima_block_reader reader = {
+ .dst = buf,
+ .count = count,
+ .off = off,
+ };
+
+ mutex_lock(&ldev->error_task_list_lock);
+
+ if (lima_read_block(&reader, &ldev->dump, sizeof(ldev->dump))) {
+ list_for_each_entry(et, &ldev->error_task_list, list) {
+ if (!lima_read_block(&reader, et->data, et->size))
+ break;
+ }
+ }
+
+ mutex_unlock(&ldev->error_task_list_lock);
+ return reader.read;
+}
+
+static ssize_t lima_error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct lima_device *ldev = dev_get_drvdata(dev);
+ struct lima_sched_error_task *et, *tmp;
+
+ mutex_lock(&ldev->error_task_list_lock);
+
+ list_for_each_entry_safe(et, tmp, &ldev->error_task_list, list) {
+ list_del(&et->list);
+ kvfree(et);
+ }
+
+ ldev->dump.size = 0;
+ ldev->dump.num_tasks = 0;
+
+ mutex_unlock(&ldev->error_task_list_lock);
+
+ return count;
+}
+
+static const struct bin_attribute lima_error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = 0600,
+ .size = 0,
+ .read = lima_error_state_read,
+ .write = lima_error_state_write,
+};
+
static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
@@ -288,7 +381,6 @@ static int lima_pdev_probe(struct platform_device *pdev)
goto err_out0;
}
- ldev->pdev = pdev;
ldev->dev = &pdev->dev;
ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
@@ -306,16 +398,34 @@ static int lima_pdev_probe(struct platform_device *pdev)
if (err)
goto err_out1;
+ err = lima_devfreq_init(ldev);
+ if (err) {
+ dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+ goto err_out2;
+ }
+
+ pm_runtime_set_active(ldev->dev);
+ pm_runtime_mark_last_busy(ldev->dev);
+ pm_runtime_set_autosuspend_delay(ldev->dev, 200);
+ pm_runtime_use_autosuspend(ldev->dev);
+ pm_runtime_enable(ldev->dev);
+
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
- goto err_out2;
+ goto err_out3;
+
+ if (sysfs_create_bin_file(&ldev->dev->kobj, &lima_error_state_attr))
+ dev_warn(ldev->dev, "fail to create error state sysfs\n");
return 0;
+err_out3:
+ pm_runtime_disable(ldev->dev);
+ lima_devfreq_fini(ldev);
err_out2:
lima_device_fini(ldev);
err_out1:
@@ -330,8 +440,17 @@ static int lima_pdev_remove(struct platform_device *pdev)
struct lima_device *ldev = platform_get_drvdata(pdev);
struct drm_device *ddev = ldev->ddev;
+ sysfs_remove_bin_file(&ldev->dev->kobj, &lima_error_state_attr);
+
drm_dev_unregister(ddev);
+
+ /* stop autosuspend to make sure device is in active state */
+ pm_runtime_set_autosuspend_delay(ldev->dev, -1);
+ pm_runtime_disable(ldev->dev);
+
+ lima_devfreq_fini(ldev);
lima_device_fini(ldev);
+
drm_dev_put(ddev);
lima_sched_slab_fini();
return 0;
@@ -344,26 +463,22 @@ static const struct of_device_id dt_match[] = {
};
MODULE_DEVICE_TABLE(of, dt_match);
+static const struct dev_pm_ops lima_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(lima_device_suspend, lima_device_resume, NULL)
+};
+
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
.remove = lima_pdev_remove,
.driver = {
.name = "lima",
+ .pm = &lima_pm_ops,
.of_match_table = dt_match,
},
};
-static int __init lima_init(void)
-{
- return platform_driver_register(&lima_platform_driver);
-}
-module_init(lima_init);
-
-static void __exit lima_exit(void)
-{
- platform_driver_unregister(&lima_platform_driver);
-}
-module_exit(lima_exit);
+module_platform_driver(lima_platform_driver);
MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
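The new "error" bin attribute streams the dump head followed by each saved task, and lima_error_state_write() clears the list regardless of what is written. A hypothetical userspace reader; the sysfs path below is an assumption, since the node hangs off the GPU's platform device and its name is board-specific:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/sys/devices/platform/gpu/error", O_RDWR);

		if (fd < 0)
			return 1;

		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);	/* stream the dump out */

		write(fd, "c", 1);			/* any write clears the saved tasks */
		close(fd);
		return 0;
	}
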
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index f492ecc6a5d9..fdbd4077c768 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -10,6 +10,7 @@
extern int lima_sched_timeout_ms;
extern uint lima_heap_init_nr_pages;
+extern uint lima_max_error_tasks;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_dump.h b/drivers/gpu/drm/lima/lima_dump.h
new file mode 100644
index 000000000000..ca243d99c51b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dump.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DUMP_H__
+#define __LIMA_DUMP_H__
+
+#include <linux/types.h>
+
+/**
+ * dump file format for all the information to start a lima task
+ *
+ * top level format
+ * | magic code "LIMA" | format version | num tasks | data size |
+ * | reserved | reserved | reserved | reserved |
+ * | task 1 ID | task 1 size | num chunks | reserved | task 1 data |
+ * | task 2 ID | task 2 size | num chunks | reserved | task 2 data |
+ * ...
+ *
+ * task data format
+ * | chunk 1 ID | chunk 1 size | reserved | reserved | chunk 1 data |
+ * | chunk 2 ID | chunk 2 size | reserved | reserved | chunk 2 data |
+ * ...
+ *
+ */
+
+#define LIMA_DUMP_MAJOR 1
+#define LIMA_DUMP_MINOR 0
+
+#define LIMA_DUMP_MAGIC 0x414d494c
+
+struct lima_dump_head {
+ __u32 magic;
+ __u16 version_major;
+ __u16 version_minor;
+ __u32 num_tasks;
+ __u32 size;
+ __u32 reserved[4];
+};
+
+#define LIMA_DUMP_TASK_GP 0
+#define LIMA_DUMP_TASK_PP 1
+#define LIMA_DUMP_TASK_NUM 2
+
+struct lima_dump_task {
+ __u32 id;
+ __u32 size;
+ __u32 num_chunks;
+ __u32 reserved;
+};
+
+#define LIMA_DUMP_CHUNK_FRAME 0
+#define LIMA_DUMP_CHUNK_BUFFER 1
+#define LIMA_DUMP_CHUNK_PROCESS_NAME 2
+#define LIMA_DUMP_CHUNK_PROCESS_ID 3
+#define LIMA_DUMP_CHUNK_NUM 4
+
+struct lima_dump_chunk {
+ __u32 id;
+ __u32 size;
+ __u32 reserved[2];
+};
+
+struct lima_dump_chunk_buffer {
+ __u32 id;
+ __u32 size;
+ __u32 va;
+ __u32 reserved;
+};
+
+struct lima_dump_chunk_pid {
+ __u32 id;
+ __u32 size;
+ __u32 pid;
+ __u32 reserved;
+};
+
+#endif
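A hypothetical userspace walker for the layout documented above, assuming the whole dump was already read into memory; chunk payloads immediately follow their headers, so a header can land on an unaligned offset for some payload sizes, which this sketch does not defend against:

	#include <stdint.h>
	#include <stdio.h>
	#include "lima_dump.h"	/* the header above; __u32 comes from <linux/types.h> */

	static void walk_dump(const uint8_t *buf, size_t len)
	{
		const struct lima_dump_head *head = (const void *)buf;
		const uint8_t *p = buf + sizeof(*head);
		uint32_t i, j;

		if (len < sizeof(*head) || head->magic != LIMA_DUMP_MAGIC)
			return;

		for (i = 0; i < head->num_tasks; i++) {
			const struct lima_dump_task *task = (const void *)p;
			const uint8_t *c = p + sizeof(*task);

			printf("task %u: %u chunks, %u data bytes\n",
			       task->id, task->num_chunks, task->size);

			for (j = 0; j < task->num_chunks; j++) {
				const struct lima_dump_chunk *chunk = (const void *)c;

				printf("  chunk %u: %u bytes\n", chunk->id, chunk->size);
				c += sizeof(*chunk) + chunk->size;
			}
			p += sizeof(*task) + task->size;
		}
	}
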
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 5404e0d668db..155f2b4b4030 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -134,7 +134,7 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
out:
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return err;
}
@@ -243,7 +243,7 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
*offset = drm_vma_node_offset_addr(&obj->vma_node);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return 0;
}
@@ -323,7 +323,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
*/
err = lima_vm_bo_add(vm, bo, false);
if (err) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
goto err_out0;
}
@@ -368,7 +368,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
- drm_gem_object_put_unlocked(&bos[i]->base.base);
+ drm_gem_object_put(&bos[i]->base.base);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
@@ -389,7 +389,7 @@ err_out0:
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
- drm_gem_object_put_unlocked(&bos[i]->base.base);
+ drm_gem_object_put(&bos[i]->base.base);
}
if (out_sync)
drm_syncobj_put(out_sync);
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index d8841c870d90..8dd501b7a3d0 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -274,6 +274,23 @@ static void lima_gp_print_version(struct lima_ip *ip)
static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;
+static int lima_gp_hw_init(struct lima_ip *ip)
+{
+ ip->data.async_reset = false;
+ lima_gp_soft_reset_async(ip);
+ return lima_gp_soft_reset_async_wait(ip);
+}
+
+int lima_gp_resume(struct lima_ip *ip)
+{
+ return lima_gp_hw_init(ip);
+}
+
+void lima_gp_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_gp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -281,9 +298,7 @@ int lima_gp_init(struct lima_ip *ip)
lima_gp_print_version(ip);
- ip->data.async_reset = false;
- lima_gp_soft_reset_async(ip);
- err = lima_gp_soft_reset_async_wait(ip);
+ err = lima_gp_hw_init(ip);
if (err)
return err;
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
index 516e5c1babbb..02ec9af78a51 100644
--- a/drivers/gpu/drm/lima/lima_gp.h
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -7,6 +7,8 @@
struct lima_ip;
struct lima_device;
+int lima_gp_resume(struct lima_ip *ip);
+void lima_gp_suspend(struct lima_ip *ip);
int lima_gp_init(struct lima_ip *ip);
void lima_gp_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
index 6873a7af5a5c..c4080a02957b 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.c
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -38,9 +38,35 @@ int lima_l2_cache_flush(struct lima_ip *ip)
return ret;
}
+static int lima_l2_cache_hw_init(struct lima_ip *ip)
+{
+ int err;
+
+ err = lima_l2_cache_flush(ip);
+ if (err)
+ return err;
+
+ l2_cache_write(LIMA_L2_CACHE_ENABLE,
+ LIMA_L2_CACHE_ENABLE_ACCESS |
+ LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+ l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+ return 0;
+}
+
+int lima_l2_cache_resume(struct lima_ip *ip)
+{
+ return lima_l2_cache_hw_init(ip);
+}
+
+void lima_l2_cache_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_l2_cache_init(struct lima_ip *ip)
{
- int i, err;
+ int i;
u32 size;
struct lima_device *dev = ip->dev;
@@ -63,15 +89,7 @@ int lima_l2_cache_init(struct lima_ip *ip)
1 << (size & 0xff),
1 << ((size >> 24) & 0xff));
- err = lima_l2_cache_flush(ip);
- if (err)
- return err;
-
- l2_cache_write(LIMA_L2_CACHE_ENABLE,
- LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
- l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
-
- return 0;
+ return lima_l2_cache_hw_init(ip);
}
void lima_l2_cache_fini(struct lima_ip *ip)
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
index c63fb676ff14..1aeeefd53fb9 100644
--- a/drivers/gpu/drm/lima/lima_l2_cache.h
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_l2_cache_resume(struct lima_ip *ip);
+void lima_l2_cache_suspend(struct lima_ip *ip);
int lima_l2_cache_init(struct lima_ip *ip);
void lima_l2_cache_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index f79d2af427e7..a1ae6c252dc2 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -59,12 +59,44 @@ static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-int lima_mmu_init(struct lima_ip *ip)
+static int lima_mmu_hw_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+ err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ if (err)
+ return err;
+
+ mmu_write(LIMA_MMU_INT_MASK,
+ LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+int lima_mmu_resume(struct lima_ip *ip)
+{
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return 0;
+
+ return lima_mmu_hw_init(ip);
+}
+
+void lima_mmu_suspend(struct lima_ip *ip)
+{
+
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
if (ip->id == lima_ip_ppmmu_bcast)
return 0;
@@ -74,12 +106,6 @@ int lima_mmu_init(struct lima_ip *ip)
return -EIO;
}
- mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
- err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
- LIMA_MMU_DTE_ADDR, v, v == 0);
- if (err)
- return err;
-
err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
@@ -87,11 +113,7 @@ int lima_mmu_init(struct lima_ip *ip)
return err;
}
- mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
- mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
- return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
- LIMA_MMU_STATUS, v,
- v & LIMA_MMU_STATUS_PAGING_ENABLED);
+ return lima_mmu_hw_init(ip);
}
void lima_mmu_fini(struct lima_ip *ip)
@@ -113,8 +135,7 @@ void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
LIMA_MMU_STATUS, v,
v & LIMA_MMU_STATUS_STALL_ACTIVE);
- if (vm)
- mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+ mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
/* flush the TLB */
mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 4f8ccbebcba1..f0c97ac75ea0 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -7,6 +7,8 @@
struct lima_ip;
struct lima_vm;
+int lima_mmu_resume(struct lima_ip *ip);
+void lima_mmu_suspend(struct lima_ip *ip);
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
index 571f6d661581..e397e1146e96 100644
--- a/drivers/gpu/drm/lima/lima_pmu.c
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -21,7 +21,7 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
v, v & LIMA_PMU_INT_CMD_MASK,
100, 100000);
if (err) {
- dev_err(dev->dev, "timeout wait pmd cmd\n");
+ dev_err(dev->dev, "timeout wait pmu cmd\n");
return err;
}
@@ -29,7 +29,41 @@ static int lima_pmu_wait_cmd(struct lima_ip *ip)
return 0;
}
-int lima_pmu_init(struct lima_ip *ip)
+static u32 lima_pmu_get_ip_mask(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ u32 ret = 0;
+ int i;
+
+ ret |= LIMA_PMU_POWER_GP0_MASK;
+
+ if (dev->id == lima_gpu_mali400) {
+ ret |= LIMA_PMU_POWER_L2_MASK;
+ for (i = 0; i < 4; i++) {
+ if (dev->ip[lima_ip_pp0 + i].present)
+ ret |= LIMA_PMU_POWER_PP_MASK(i);
+ }
+ } else {
+ if (dev->ip[lima_ip_pp0].present)
+ ret |= LIMA450_PMU_POWER_PP0_MASK;
+ for (i = lima_ip_pp1; i <= lima_ip_pp3; i++) {
+ if (dev->ip[i].present) {
+ ret |= LIMA450_PMU_POWER_PP13_MASK;
+ break;
+ }
+ }
+ for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+ if (dev->ip[i].present) {
+ ret |= LIMA450_PMU_POWER_PP47_MASK;
+ break;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int lima_pmu_hw_init(struct lima_ip *ip)
{
int err;
u32 stat;
@@ -54,7 +88,44 @@ int lima_pmu_init(struct lima_ip *ip)
return 0;
}
-void lima_pmu_fini(struct lima_ip *ip)
+static void lima_pmu_hw_fini(struct lima_ip *ip)
{
+ u32 stat;
+
+ if (!ip->data.mask)
+ ip->data.mask = lima_pmu_get_ip_mask(ip);
+ stat = ~pmu_read(LIMA_PMU_STATUS) & ip->data.mask;
+ if (stat) {
+ pmu_write(LIMA_PMU_POWER_DOWN, stat);
+
+ /* Don't wait for interrupt on Mali400 if all domains are
+ * powered off because the HW won't generate an interrupt
+ * in this case.
+ */
+ if (ip->dev->id == lima_gpu_mali400)
+ pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+ else
+ lima_pmu_wait_cmd(ip);
+ }
+}
+
+int lima_pmu_resume(struct lima_ip *ip)
+{
+ return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_suspend(struct lima_ip *ip)
+{
+ lima_pmu_hw_fini(ip);
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+ return lima_pmu_hw_init(ip);
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+ lima_pmu_hw_fini(ip);
}
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
index a2a18775eb07..652dc7af3047 100644
--- a/drivers/gpu/drm/lima/lima_pmu.h
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -6,6 +6,8 @@
struct lima_ip;
+int lima_pmu_resume(struct lima_ip *ip);
+void lima_pmu_suspend(struct lima_ip *ip);
int lima_pmu_init(struct lima_ip *ip);
void lima_pmu_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index 8fef224b93c8..33f01383409c 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -223,6 +223,23 @@ static void lima_pp_print_version(struct lima_ip *ip)
lima_ip_name(ip), name, major, minor);
}
+static int lima_pp_hw_init(struct lima_ip *ip)
+{
+ ip->data.async_reset = false;
+ lima_pp_soft_reset_async(ip);
+ return lima_pp_soft_reset_async_wait(ip);
+}
+
+int lima_pp_resume(struct lima_ip *ip)
+{
+ return lima_pp_hw_init(ip);
+}
+
+void lima_pp_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_pp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
@@ -230,9 +247,7 @@ int lima_pp_init(struct lima_ip *ip)
lima_pp_print_version(ip);
- ip->data.async_reset = false;
- lima_pp_soft_reset_async(ip);
- err = lima_pp_soft_reset_async_wait(ip);
+ err = lima_pp_hw_init(ip);
if (err)
return err;
@@ -254,6 +269,16 @@ void lima_pp_fini(struct lima_ip *ip)
}
+int lima_pp_bcast_resume(struct lima_ip *ip)
+{
+ return 0;
+}
+
+void lima_pp_bcast_suspend(struct lima_ip *ip)
+{
+
+}
+
int lima_pp_bcast_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
index bf60c77b2633..16ec96de15a9 100644
--- a/drivers/gpu/drm/lima/lima_pp.h
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -7,9 +7,13 @@
struct lima_ip;
struct lima_device;
+int lima_pp_resume(struct lima_ip *ip);
+void lima_pp_suspend(struct lima_ip *ip);
int lima_pp_init(struct lima_ip *ip);
void lima_pp_fini(struct lima_ip *ip);
+int lima_pp_bcast_resume(struct lima_ip *ip);
+void lima_pp_bcast_suspend(struct lima_ip *ip);
int lima_pp_bcast_init(struct lima_ip *ip);
void lima_pp_bcast_fini(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index 3886999b4533..64ced6d0e6cf 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -3,14 +3,17 @@
#include <linux/kthread.h>
#include <linux/slab.h>
-#include <linux/xarray.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
+#include "lima_trace.h"
struct lima_fence {
struct dma_fence base;
@@ -148,7 +151,7 @@ void lima_sched_task_fini(struct lima_sched_task *task)
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
- drm_gem_object_put_unlocked(&task->bos[i]->base.base);
+ drm_gem_object_put(&task->bos[i]->base.base);
kfree(task->bos);
}
@@ -176,6 +179,7 @@ struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *conte
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
+ trace_lima_task_submit(task);
drm_sched_entity_push_job(&task->base, &context->base);
return fence;
}
@@ -191,14 +195,36 @@ static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
return NULL;
}
+static int lima_pm_busy(struct lima_device *ldev)
+{
+ int ret;
+
+ /* resume GPU if it has been suspended by runtime PM */
+ ret = pm_runtime_get_sync(ldev->dev);
+ if (ret < 0)
+ return ret;
+
+ lima_devfreq_record_busy(&ldev->devfreq);
+ return 0;
+}
+
+static void lima_pm_idle(struct lima_device *ldev)
+{
+ lima_devfreq_record_idle(&ldev->devfreq);
+
+ /* GPU can do auto runtime suspend */
+ pm_runtime_mark_last_busy(ldev->dev);
+ pm_runtime_put_autosuspend(ldev->dev);
+}
+
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_device *ldev = pipe->ldev;
struct lima_fence *fence;
struct dma_fence *ret;
- struct lima_vm *vm = NULL, *last_vm = NULL;
- int i;
+ int i, err;
/* after GPU reset */
if (job->s_fence->finished.error < 0)
@@ -207,6 +233,13 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
fence = lima_fence_create(pipe);
if (!fence)
return NULL;
+
+ err = lima_pm_busy(ldev);
+ if (err < 0) {
+ dma_fence_put(&fence->base);
+ return NULL;
+ }
+
task->fence = &fence->base;
/* for caller usage of the fence, otherwise irq handler
@@ -234,21 +267,17 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
for (i = 0; i < pipe->num_l2_cache; i++)
lima_l2_cache_flush(pipe->l2_cache[i]);
- if (task->vm != pipe->current_vm) {
- vm = lima_vm_get(task->vm);
- last_vm = pipe->current_vm;
- pipe->current_vm = task->vm;
- }
+ lima_vm_put(pipe->current_vm);
+ pipe->current_vm = lima_vm_get(task->vm);
if (pipe->bcast_mmu)
- lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+ lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
else {
for (i = 0; i < pipe->num_mmu; i++)
- lima_mmu_switch_vm(pipe->mmu[i], vm);
+ lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
}
- if (last_vm)
- lima_vm_put(last_vm);
+ trace_lima_task_run(task);
pipe->error = false;
pipe->task_run(pipe, task);
@@ -256,10 +285,139 @@ static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
return task->fence;
}
+static void lima_sched_build_error_task_list(struct lima_sched_task *task)
+{
+ struct lima_sched_error_task *et;
+ struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
+ struct lima_ip *ip = pipe->processor[0];
+ int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_context *sched_ctx =
+ container_of(task->base.entity,
+ struct lima_sched_context, base);
+ struct lima_ctx *ctx =
+ container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
+ struct lima_dump_task *dt;
+ struct lima_dump_chunk *chunk;
+ struct lima_dump_chunk_pid *pid_chunk;
+ struct lima_dump_chunk_buffer *buffer_chunk;
+ u32 size, task_size, mem_size;
+ int i;
+
+ mutex_lock(&dev->error_task_list_lock);
+
+ if (dev->dump.num_tasks >= lima_max_error_tasks) {
+ dev_info(dev->dev, "fail to save task state from %s pid %d: "
+ "error task list is full\n", ctx->pname, ctx->pid);
+ goto out;
+ }
+
+ /* frame chunk */
+ size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
+ /* process name chunk */
+ size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
+ /* pid chunk */
+ size += sizeof(struct lima_dump_chunk);
+ /* buffer chunks */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ size += sizeof(struct lima_dump_chunk);
+ size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
+ }
+
+ task_size = size + sizeof(struct lima_dump_task);
+ mem_size = task_size + sizeof(*et);
+ et = kvmalloc(mem_size, GFP_KERNEL);
+ if (!et) {
+ dev_err(dev->dev, "fail to alloc task dump buffer of size %x\n",
+ mem_size);
+ goto out;
+ }
+
+ et->data = et + 1;
+ et->size = task_size;
+
+ dt = et->data;
+ memset(dt, 0, sizeof(*dt));
+ dt->id = pipe_id;
+ dt->size = size;
+
+ chunk = (struct lima_dump_chunk *)(dt + 1);
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->id = LIMA_DUMP_CHUNK_FRAME;
+ chunk->size = pipe->frame_size;
+ memcpy(chunk + 1, task->frame, pipe->frame_size);
+ dt->num_chunks++;
+
+ chunk = (void *)(chunk + 1) + chunk->size;
+ memset(chunk, 0, sizeof(*chunk));
+ chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
+ chunk->size = sizeof(ctx->pname);
+ memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
+ dt->num_chunks++;
+
+ pid_chunk = (void *)(chunk + 1) + chunk->size;
+ memset(pid_chunk, 0, sizeof(*pid_chunk));
+ pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
+ pid_chunk->pid = ctx->pid;
+ dt->num_chunks++;
+
+ buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+ void *data;
+
+ memset(buffer_chunk, 0, sizeof(*buffer_chunk));
+ buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
+ buffer_chunk->va = lima_vm_get_va(task->vm, bo);
+
+ if (bo->heap_size) {
+ buffer_chunk->size = bo->heap_size;
+
+ data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ if (!data) {
+ kvfree(et);
+ goto out;
+ }
+
+ memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+ vunmap(data);
+ } else {
+ buffer_chunk->size = lima_bo_size(bo);
+
+ data = drm_gem_shmem_vmap(&bo->base.base);
+ if (IS_ERR_OR_NULL(data)) {
+ kvfree(et);
+ goto out;
+ }
+
+ memcpy(buffer_chunk + 1, data, buffer_chunk->size);
+
+ drm_gem_shmem_vunmap(&bo->base.base, data);
+ }
+
+ buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
+ dt->num_chunks++;
+ }
+
+ list_add(&et->list, &dev->error_task_list);
+ dev->dump.size += et->size;
+ dev->dump.num_tasks++;
+
+ dev_info(dev->dev, "save error task state success\n");
+
+out:
+ mutex_unlock(&dev->error_task_list_lock);
+}
+
static void lima_sched_timedout_job(struct drm_sched_job *job)
{
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
+ struct lima_device *ldev = pipe->ldev;
if (!pipe->error)
DRM_ERROR("lima job timeout\n");
@@ -268,6 +426,8 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
drm_sched_increase_karma(&task->base);
+ lima_sched_build_error_task_list(task);
+
pipe->task_error(pipe);
if (pipe->bcast_mmu)
@@ -279,12 +439,12 @@ static void lima_sched_timedout_job(struct drm_sched_job *job)
lima_mmu_page_fault_resume(pipe->mmu[i]);
}
- if (pipe->current_vm)
- lima_vm_put(pipe->current_vm);
-
+ lima_vm_put(pipe->current_vm);
pipe->current_vm = NULL;
pipe->current_task = NULL;
+ lima_pm_idle(ldev);
+
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
}
@@ -355,6 +515,7 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
struct lima_sched_task *task = pipe->current_task;
+ struct lima_device *ldev = pipe->ldev;
if (pipe->error) {
if (task && task->recoverable)
@@ -364,5 +525,7 @@ void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
} else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
+
+ lima_pm_idle(ldev);
}
}
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index d64393fb50a9..90f03c48ef4a 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -5,9 +5,18 @@
#define __LIMA_SCHED_H__
#include <drm/gpu_scheduler.h>
+#include <linux/list.h>
+#include <linux/xarray.h>
+struct lima_device;
struct lima_vm;
+struct lima_sched_error_task {
+ struct list_head list;
+ void *data;
+ u32 size;
+};
+
struct lima_sched_task {
struct drm_sched_job base;
@@ -44,6 +53,8 @@ struct lima_sched_pipe {
u32 fence_seqno;
spinlock_t fence_lock;
+ struct lima_device *ldev;
+
struct lima_sched_task *current_task;
struct lima_vm *current_vm;
diff --git a/drivers/gpu/drm/lima/lima_trace.c b/drivers/gpu/drm/lima/lima_trace.c
new file mode 100644
index 000000000000..ea1c7289bebc
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_trace.c
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#include "lima_sched.h"
+
+#define CREATE_TRACE_POINTS
+#include "lima_trace.h"
diff --git a/drivers/gpu/drm/lima/lima_trace.h b/drivers/gpu/drm/lima/lima_trace.h
new file mode 100644
index 000000000000..3a430e93d384
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_trace.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2020 Qiang Yu <yuq825@gmail.com> */
+
+#if !defined(_LIMA_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _LIMA_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lima
+#define TRACE_INCLUDE_FILE lima_trace
+
+DECLARE_EVENT_CLASS(lima_task,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task),
+ TP_STRUCT__entry(
+ __field(uint64_t, task_id)
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ __string(pipe, task->base.sched->name)
+ ),
+
+ TP_fast_assign(
+ __entry->task_id = task->base.id;
+ __entry->context = task->base.s_fence->finished.context;
+ __entry->seqno = task->base.s_fence->finished.seqno;
+ __assign_str(pipe, task->base.sched->name)
+ ),
+
+ TP_printk("task=%llu, context=%u seqno=%u pipe=%s",
+ __entry->task_id, __entry->context, __entry->seqno,
+ __get_str(pipe))
+);
+
+DEFINE_EVENT(lima_task, lima_task_submit,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task)
+);
+
+DEFINE_EVENT(lima_task, lima_task_run,
+ TP_PROTO(struct lima_sched_task *task),
+ TP_ARGS(task)
+);
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/lima
+#include <trace/define_trace.h>
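
The DEFINE_EVENT() instances above expand into trace_lima_task_submit() and trace_lima_task_run() inline hooks for the scheduler code to call. A sketch of the call sites (hypothetical wrapper; the real callers presumably live in lima_sched.c, outside this hunk):

#include "lima_trace.h"

static void lima_sched_emit_events(struct lima_sched_task *task)
{
	trace_lima_task_submit(task);	/* job queued to the scheduler */
	trace_lima_task_run(task);	/* job picked up by the hardware */
}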
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index 22aeec77d84d..3a7c74822d8b 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -54,7 +54,8 @@ static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
static inline void lima_vm_put(struct lima_vm *vm)
{
- kref_put(&vm->refcount, lima_vm_release);
+ if (vm)
+ kref_put(&vm->refcount, lima_vm_release);
}
void lima_vm_print(struct lima_vm *vm);
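
Accepting NULL in lima_vm_put() mirrors the kfree() convention and is what lets the timeout handler above drop its conditional; callers can now put-and-clear unconditionally. The resulting idiom, in brief:

	/* before: every caller had to test */
	if (pipe->current_vm)
		lima_vm_put(pipe->current_vm);

	/* after: the check lives in one place */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;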
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index e59907e68854..04e1d38d41f7 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -948,7 +948,7 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event;
drm_crtc_vblank_off(crtc);
@@ -1020,7 +1020,7 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct drm_pending_vblank_event *event = crtc->state->event;
struct drm_plane *plane = &pipe->plane;
struct drm_plane_state *pstate = plane->state;
@@ -1078,7 +1078,7 @@ static int mcde_display_enable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
u32 val;
/* Enable all VBLANK IRQs */
@@ -1097,7 +1097,7 @@ static void mcde_display_disable_vblank(struct drm_simple_display_pipe *pipe)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
/* Disable all VBLANK IRQs */
writel(0, mcde->regs + MCDE_IMSCPP);
@@ -1117,7 +1117,7 @@ static struct drm_simple_display_pipe_funcs mcde_display_funcs = {
int mcde_display_init(struct drm_device *drm)
{
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
int ret;
static const u32 formats[] = {
DRM_FORMAT_ARGB8888,
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index 80edd6628979..679c2c4e6d9d 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -34,6 +34,8 @@ struct mcde {
struct regulator *vana;
};
+#define to_mcde(dev) container_of(dev, struct mcde, drm)
+
bool mcde_dsi_irq(struct mipi_dsi_device *mdsi);
void mcde_dsi_te_request(struct mipi_dsi_device *mdsi);
extern struct platform_driver mcde_dsi_driver;
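
to_mcde() assumes struct mcde embeds its drm_device as the drm member, so container_of() can recover the driver state without going through drm->dev_private. A minimal sketch of the upcast under that layout (function name hypothetical):

static inline struct mcde *example_to_mcde(struct drm_device *drm)
{
	/* pointer arithmetic on the embedded member; no extra field needed */
	return container_of(drm, struct mcde, drm);
}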
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index f28cb7a576ba..d300be5ee463 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -72,6 +72,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_panel.h>
@@ -163,7 +164,7 @@ static irqreturn_t mcde_irq(int irq, void *data)
static int mcde_modeset_init(struct drm_device *drm)
{
struct drm_mode_config *mode_config;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
int ret;
if (!mcde->bridge) {
@@ -183,13 +184,13 @@ static int mcde_modeset_init(struct drm_device *drm)
ret = drm_vblank_init(drm, 1);
if (ret) {
dev_err(drm->dev, "failed to init vblank\n");
- goto out_config;
+ return ret;
}
ret = mcde_display_init(drm);
if (ret) {
dev_err(drm->dev, "failed to init display\n");
- goto out_config;
+ return ret;
}
/*
@@ -203,7 +204,7 @@ static int mcde_modeset_init(struct drm_device *drm)
mcde->bridge);
if (ret) {
dev_err(drm->dev, "failed to attach display output bridge\n");
- goto out_config;
+ return ret;
}
drm_mode_config_reset(drm);
@@ -211,19 +212,6 @@ static int mcde_modeset_init(struct drm_device *drm)
drm_fbdev_generic_setup(drm, 32);
return 0;
-
-out_config:
- drm_mode_config_cleanup(drm);
- return ret;
-}
-
-static void mcde_release(struct drm_device *drm)
-{
- struct mcde *mcde = drm->dev_private;
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(mcde);
}
DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
@@ -231,7 +219,6 @@ DEFINE_DRM_GEM_CMA_FOPS(drm_fops);
static struct drm_driver mcde_drm_driver = {
.driver_features =
DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .release = mcde_release,
.lastclose = drm_fb_helper_lastclose,
.ioctls = NULL,
.fops = &drm_fops,
@@ -241,17 +228,7 @@ static struct drm_driver mcde_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- .dumb_create = drm_gem_cma_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
};
static int mcde_drm_bind(struct device *dev)
@@ -259,7 +236,9 @@ static int mcde_drm_bind(struct device *dev)
struct drm_device *drm = dev_get_drvdata(dev);
int ret;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
ret = component_bind_all(drm->dev, drm);
if (ret) {
@@ -318,35 +297,27 @@ static int mcde_probe(struct platform_device *pdev)
int ret;
int i;
- mcde = kzalloc(sizeof(*mcde), GFP_KERNEL);
- if (!mcde)
- return -ENOMEM;
- mcde->dev = dev;
-
- ret = drm_dev_init(&mcde->drm, &mcde_drm_driver, dev);
- if (ret) {
- kfree(mcde);
- return ret;
- }
+ mcde = devm_drm_dev_alloc(dev, &mcde_drm_driver, struct mcde, drm);
+ if (IS_ERR(mcde))
+ return PTR_ERR(mcde);
drm = &mcde->drm;
- drm->dev_private = mcde;
+ mcde->dev = dev;
platform_set_drvdata(pdev, drm);
/* Enable continuous updates: this is what Linux' framebuffer expects */
mcde->oneshot_mode = false;
- drm->dev_private = mcde;
/* First obtain and turn on the main power */
mcde->epod = devm_regulator_get(dev, "epod");
if (IS_ERR(mcde->epod)) {
ret = PTR_ERR(mcde->epod);
dev_err(dev, "can't get EPOD regulator\n");
- goto dev_unref;
+ return ret;
}
ret = regulator_enable(mcde->epod);
if (ret) {
dev_err(dev, "can't enable EPOD regulator\n");
- goto dev_unref;
+ return ret;
}
mcde->vana = devm_regulator_get(dev, "vana");
if (IS_ERR(mcde->vana)) {
@@ -497,8 +468,6 @@ regulator_off:
regulator_disable(mcde->vana);
regulator_epod_off:
regulator_disable(mcde->epod);
-dev_unref:
- drm_dev_put(drm);
return ret;
}
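
devm_drm_dev_alloc() allocates the containing structure with the drm_device embedded and ties its lifetime to the parent device, which is why the dev_unref label and the explicit drm_dev_put() calls disappear. A minimal probe skeleton under the drm_managed API used here, with error handling elided:

static int example_probe(struct platform_device *pdev)
{
	struct mcde *mcde;

	mcde = devm_drm_dev_alloc(&pdev->dev, &mcde_drm_driver,
				  struct mcde, drm);
	if (IS_ERR(mcde))
		return PTR_ERR(mcde);

	platform_set_drvdata(pdev, &mcde->drm);
	return 0;
}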
@@ -506,13 +475,12 @@ dev_unref:
static int mcde_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
component_master_del(&pdev->dev, &mcde_drm_comp_ops);
clk_disable_unprepare(mcde->mcde_clk);
regulator_disable(mcde->vana);
regulator_disable(mcde->epod);
- drm_dev_put(drm);
return 0;
}
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 7af5ebb0c436..981923caa7e6 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -537,8 +537,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
* porches and sync.
*/
/* (ps/s) / (pixels/s) = ps/pixels */
- pclk = DIV_ROUND_UP_ULL(1000000000000,
- (mode->vrefresh * mode->htotal * mode->vtotal));
+ pclk = DIV_ROUND_UP_ULL(1000000000000, mode->clock * 1000);
dev_dbg(d->dev, "picoseconds between two pixels: %llu\n",
pclk);
@@ -568,7 +567,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
bpl *= d->mdsi->lanes;
dev_dbg(d->dev,
"calculated bytes per line: %llu @ %d Hz with HS %lu Hz\n",
- bpl, mode->vrefresh, d->mdsi->hs_rate);
+ bpl, drm_mode_vrefresh(mode), d->mdsi->hs_rate);
/*
* 6 is header + checksum, header = 4 bytes, checksum = 2 bytes
@@ -644,7 +643,7 @@ static void mcde_dsi_setup_video_mode(struct mcde_dsi *d,
dev_err(d->dev, "video block does not fit on line!\n");
dev_err(d->dev,
"calculated bytes per line: %llu @ %d Hz\n",
- bpl, mode->vrefresh);
+ bpl, drm_mode_vrefresh(mode));
dev_err(d->dev,
"bytes per line (blkline_pck) %u bytes\n",
blkline_pck);
@@ -1020,7 +1019,7 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
struct mcde_dsi *d = dev_get_drvdata(dev);
struct device_node *child;
struct drm_panel *panel = NULL;
@@ -1073,10 +1072,9 @@ static int mcde_dsi_bind(struct device *dev, struct device *master,
panel = NULL;
bridge = of_drm_find_bridge(child);
- if (IS_ERR(bridge)) {
- dev_err(dev, "failed to find bridge (%ld)\n",
- PTR_ERR(bridge));
- return PTR_ERR(bridge);
+ if (!bridge) {
+ dev_err(dev, "failed to find bridge\n");
+ return -EINVAL;
}
}
}
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index fa5ffc4fe823..c420f5a3d33b 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -11,6 +11,7 @@ config DRM_MEDIATEK
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
+ select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c
index 6fb0d6983a4a..3ae9c810845b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_color.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c
@@ -119,7 +119,10 @@ static int mtk_disp_color_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_color_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 891d80c73e04..28651bc579bc 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -386,7 +386,10 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_ovl_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0cb848d64206..e04319fedf46 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -294,7 +294,10 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id,
&mtk_disp_rdma_funcs);
if (ret) {
- dev_err(dev, "Failed to initialize component: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to initialize component: %d\n",
+ ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 4f0ce4cd5b8c..d4f0fb7ad312 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -10,7 +10,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_gpio.h>
#include <linux/of_graph.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/types.h>
@@ -20,6 +22,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mtk_dpi_regs.h"
#include "mtk_drm_ddp_comp.h"
@@ -74,6 +77,9 @@ struct mtk_dpi {
enum mtk_dpi_out_yc_map yc_map;
enum mtk_dpi_out_bit_num bit_num;
enum mtk_dpi_out_channel_swap channel_swap;
+ struct pinctrl *pinctrl;
+ struct pinctrl_state *pins_gpio;
+ struct pinctrl_state *pins_dpi;
int refcount;
};
@@ -379,6 +385,9 @@ static void mtk_dpi_power_off(struct mtk_dpi *dpi)
if (--dpi->refcount != 0)
return;
+ if (dpi->pinctrl && dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
mtk_dpi_disable(dpi);
clk_disable_unprepare(dpi->pixel_clk);
clk_disable_unprepare(dpi->engine_clk);
@@ -403,6 +412,9 @@ static int mtk_dpi_power_on(struct mtk_dpi *dpi)
goto err_pixel;
}
+ if (dpi->pinctrl && dpi->pins_dpi)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_dpi);
+
mtk_dpi_enable(dpi);
return 0;
@@ -509,15 +521,6 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
return 0;
}
-static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
- .destroy = mtk_dpi_encoder_destroy,
-};
-
static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -596,8 +599,8 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_encoder_init(drm_dev, &dpi->encoder, &mtk_dpi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, &dpi->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Failed to initialize decoder: %d\n", ret);
goto err_unregister;
@@ -705,6 +708,26 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->dev = dev;
dpi->conf = (struct mtk_dpi_conf *)of_device_get_match_data(dev);
+ dpi->pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(dpi->pinctrl)) {
+ dpi->pinctrl = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl!\n");
+ }
+ if (dpi->pinctrl) {
+ dpi->pins_gpio = pinctrl_lookup_state(dpi->pinctrl, "sleep");
+ if (IS_ERR(dpi->pins_gpio)) {
+ dpi->pins_gpio = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl idle!\n");
+ }
+ if (dpi->pins_gpio)
+ pinctrl_select_state(dpi->pinctrl, dpi->pins_gpio);
+
+ dpi->pins_dpi = pinctrl_lookup_state(dpi->pinctrl, "default");
+ if (IS_ERR(dpi->pins_dpi)) {
+ dpi->pins_dpi = NULL;
+ dev_dbg(&pdev->dev, "Cannot find pinctrl active!\n");
+ }
+ }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dpi->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(dpi->regs)) {
@@ -716,21 +739,27 @@ static int mtk_dpi_probe(struct platform_device *pdev)
dpi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dpi->engine_clk)) {
ret = PTR_ERR(dpi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
return ret;
}
dpi->pixel_clk = devm_clk_get(dev, "pixel");
if (IS_ERR(dpi->pixel_clk)) {
ret = PTR_ERR(dpi->pixel_clk);
- dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get pixel clock: %d\n", ret);
+
return ret;
}
dpi->tvd_clk = devm_clk_get(dev, "pll");
if (IS_ERR(dpi->tvd_clk)) {
ret = PTR_ERR(dpi->tvd_clk);
- dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get tvdpll clock: %d\n", ret);
+
return ret;
}
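
Filtering out -EPROBE_DEFER keeps the log quiet when a clock provider simply has not bound yet, since probing will be retried. Later kernels (v5.9 onward) consolidate this pattern into dev_err_probe(), so the same check could roughly be written as:

	dpi->engine_clk = devm_clk_get(dev, "engine");
	if (IS_ERR(dpi->engine_clk))
		return dev_err_probe(dev, PTR_ERR(dpi->engine_clk),
				     "Failed to get engine clock\n");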
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index fe85e487e477..e56e47aa707b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/soc/mediatek/mtk-cmdq.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>
@@ -28,7 +29,7 @@
* @enabled: records whether crtc_enable succeeded
* @planes: array of 4 drm_plane structures, one for each overlay plane
* @pending_planes: whether any plane has pending changes to be applied
- * @config_regs: memory mapped mmsys configuration register space
+ * @mmsys_dev: pointer to the mmsys device for configuration registers
* @mutex: handle to one of the ten disp_mutex streams
* @ddp_comp_nr: number of components in ddp_comp
* @ddp_comp: array of pointers the mtk_ddp_comp structures used by this crtc
@@ -50,7 +51,7 @@ struct mtk_drm_crtc {
u32 cmdq_event;
#endif
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct mtk_disp_mutex *mutex;
unsigned int ddp_comp_nr;
struct mtk_ddp_comp **ddp_comp;
@@ -164,7 +165,7 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
state->pending_width = crtc->mode.hdisplay;
state->pending_height = crtc->mode.vdisplay;
- state->pending_vrefresh = crtc->mode.vrefresh;
+ state->pending_vrefresh = drm_mode_vrefresh(&crtc->mode);
wmb(); /* Make sure the above parameters are set before update */
state->pending_config = true;
}
@@ -263,7 +264,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
width = crtc->state->adjusted_mode.hdisplay;
height = crtc->state->adjusted_mode.vdisplay;
- vrefresh = crtc->state->adjusted_mode.vrefresh;
+ vrefresh = drm_mode_vrefresh(&crtc->state->adjusted_mode);
drm_for_each_encoder(encoder, crtc->dev) {
if (encoder->crtc != crtc)
@@ -300,9 +301,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_add_comp_to_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_add_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -360,9 +361,9 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
mtk_crtc->ddp_comp[i]->id);
mtk_disp_mutex_disable(mtk_crtc->mutex);
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
- mtk_ddp_remove_comp_from_path(mtk_crtc->config_regs,
- mtk_crtc->ddp_comp[i]->id,
- mtk_crtc->ddp_comp[i + 1]->id);
+ mtk_mmsys_ddp_disconnect(mtk_crtc->mmsys_dev,
+ mtk_crtc->ddp_comp[i]->id,
+ mtk_crtc->ddp_comp[i + 1]->id);
mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
mtk_crtc->ddp_comp[i]->id);
}
@@ -766,7 +767,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!mtk_crtc)
return -ENOMEM;
- mtk_crtc->config_regs = priv->config_regs;
+ mtk_crtc->mmsys_dev = priv->mmsys_dev;
mtk_crtc->ddp_comp_nr = path_len;
mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr,
sizeof(*mtk_crtc->ddp_comp),
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 13035c906035..014c1bbe1df2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -13,26 +13,6 @@
#include "mtk_drm_ddp.h"
#include "mtk_drm_ddp_comp.h"
-#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
-#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
-#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
-#define DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN 0x04c
-#define DISP_REG_CONFIG_DISP_UFOE_MOUT_EN 0x050
-#define DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0x084
-#define DISP_REG_CONFIG_DISP_COLOR1_SEL_IN 0x088
-#define DISP_REG_CONFIG_DSIE_SEL_IN 0x0a4
-#define DISP_REG_CONFIG_DSIO_SEL_IN 0x0a8
-#define DISP_REG_CONFIG_DPI_SEL_IN 0x0ac
-#define DISP_REG_CONFIG_DISP_RDMA2_SOUT 0x0b8
-#define DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN 0x0c4
-#define DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN 0x0c8
-#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
-
-#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
-#define DISP_REG_CONFIG_OUT_SEL 0x04c
-#define DISP_REG_CONFIG_DSI_SEL 0x050
-#define DISP_REG_CONFIG_DPI_SEL 0x064
-
#define MT2701_DISP_MUTEX0_MOD0 0x2c
#define MT2701_DISP_MUTEX0_SOF0 0x30
@@ -94,48 +74,6 @@
#define MUTEX_SOF_DSI2 5
#define MUTEX_SOF_DSI3 6
-#define OVL0_MOUT_EN_COLOR0 0x1
-#define OD_MOUT_EN_RDMA0 0x1
-#define OD1_MOUT_EN_RDMA1 BIT(16)
-#define UFOE_MOUT_EN_DSI0 0x1
-#define COLOR0_SEL_IN_OVL0 0x1
-#define OVL1_MOUT_EN_COLOR1 0x1
-#define GAMMA_MOUT_EN_RDMA1 0x1
-#define RDMA0_SOUT_DPI0 0x2
-#define RDMA0_SOUT_DPI1 0x3
-#define RDMA0_SOUT_DSI1 0x1
-#define RDMA0_SOUT_DSI2 0x4
-#define RDMA0_SOUT_DSI3 0x5
-#define RDMA1_SOUT_DPI0 0x2
-#define RDMA1_SOUT_DPI1 0x3
-#define RDMA1_SOUT_DSI1 0x1
-#define RDMA1_SOUT_DSI2 0x4
-#define RDMA1_SOUT_DSI3 0x5
-#define RDMA2_SOUT_DPI0 0x2
-#define RDMA2_SOUT_DPI1 0x3
-#define RDMA2_SOUT_DSI1 0x1
-#define RDMA2_SOUT_DSI2 0x4
-#define RDMA2_SOUT_DSI3 0x5
-#define DPI0_SEL_IN_RDMA1 0x1
-#define DPI0_SEL_IN_RDMA2 0x3
-#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
-#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
-#define DSI0_SEL_IN_RDMA1 0x1
-#define DSI0_SEL_IN_RDMA2 0x4
-#define DSI1_SEL_IN_RDMA1 0x1
-#define DSI1_SEL_IN_RDMA2 0x4
-#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI2_SEL_IN_RDMA2 (0x4 << 16)
-#define DSI3_SEL_IN_RDMA1 (0x1 << 16)
-#define DSI3_SEL_IN_RDMA2 (0x4 << 16)
-#define COLOR1_SEL_IN_OVL1 0x1
-
-#define OVL_MOUT_EN_RDMA 0x1
-#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
-#define BLS_TO_DPI_RDMA1_TO_DSI 0x2
-#define DSI_SEL_IN_BLS 0x0
-#define DPI_SEL_IN_BLS 0x0
-#define DSI_SEL_IN_RDMA 0x1
struct mtk_disp_mutex {
int id;
@@ -246,200 +184,6 @@ static const struct mtk_ddp_data mt8173_ddp_driver_data = {
.mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0,
};
-static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
- value = OVL0_MOUT_EN_COLOR0;
- } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
- value = OVL_MOUT_EN_RDMA;
- } else if (cur == DDP_COMPONENT_OD0 && next == DDP_COMPONENT_RDMA0) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD_MOUT_EN_RDMA0;
- } else if (cur == DDP_COMPONENT_UFOE && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DISP_UFOE_MOUT_EN;
- value = UFOE_MOUT_EN_DSI0;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_OVL1_MOUT_EN;
- value = OVL1_MOUT_EN_COLOR1;
- } else if (cur == DDP_COMPONENT_GAMMA && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_GAMMA_MOUT_EN;
- value = GAMMA_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_OD1 && next == DDP_COMPONENT_RDMA1) {
- *addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
- value = OD1_MOUT_EN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
- value = RDMA0_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DSI3;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA1_SOUT_EN;
- value = RDMA1_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI0;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DPI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DISP_RDMA2_SOUT;
- value = RDMA2_SOUT_DSI3;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next,
- unsigned int *addr)
-{
- unsigned int value;
-
- if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
- *addr = DISP_REG_CONFIG_DISP_COLOR0_SEL_IN;
- value = COLOR0_SEL_IN_OVL0;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI3_SEL_IN_RDMA1;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI0) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
- *addr = DISP_REG_CONFIG_DPI_SEL_IN;
- value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI0_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
- *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
- value = DSI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI2_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI3) {
- *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
- value = DSI3_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
- *addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
- value = COLOR1_SEL_IN_OVL1;
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- *addr = DISP_REG_CONFIG_DSI_SEL;
- value = DSI_SEL_IN_BLS;
- } else {
- value = 0;
- }
-
- return value;
-}
-
-static void mtk_ddp_sout_sel(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
- writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DPI0) {
- writel_relaxed(BLS_TO_DPI_RDMA1_TO_DSI,
- config_regs + DISP_REG_CONFIG_OUT_SEL);
- writel_relaxed(DSI_SEL_IN_RDMA,
- config_regs + DISP_REG_CONFIG_DSI_SEL);
- writel_relaxed(DPI_SEL_IN_BLS,
- config_regs + DISP_REG_CONFIG_DPI_SEL);
- }
-}
-
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- mtk_ddp_sout_sel(config_regs, cur, next);
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) | value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next)
-{
- unsigned int addr, value, reg;
-
- value = mtk_ddp_mout_en(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-
- value = mtk_ddp_sel_in(cur, next, &addr);
- if (value) {
- reg = readl_relaxed(config_regs + addr) & ~value;
- writel_relaxed(reg, config_regs + addr);
- }
-}
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id)
{
struct mtk_ddp *ddp = dev_get_drvdata(dev);
@@ -628,7 +372,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
if (!ddp->data->no_clk) {
ddp->clk = devm_clk_get(dev, NULL);
if (IS_ERR(ddp->clk)) {
- dev_err(dev, "Failed to get clock\n");
+ if (PTR_ERR(ddp->clk) != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clock\n");
return PTR_ERR(ddp->clk);
}
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 827be424a148..6b691a57be4a 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -12,13 +12,6 @@ struct regmap;
struct device;
struct mtk_disp_mutex;
-void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-void mtk_ddp_remove_comp_from_path(void __iomem *config_regs,
- enum mtk_ddp_comp_id cur,
- enum mtk_ddp_comp_id next);
-
struct mtk_disp_mutex *mtk_disp_mutex_get(struct device *dev, unsigned int id);
int mtk_disp_mutex_prepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 0563c6813333..6bd369434d9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -10,6 +10,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/dma-mapping.h>
#include <drm/drm_atomic.h>
@@ -162,7 +163,9 @@ static int mtk_drm_kms_init(struct drm_device *drm)
}
private->mutex_dev = &pdev->dev;
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ return ret;
drm->mode_config.min_width = 64;
drm->mode_config.min_height = 64;
@@ -179,7 +182,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
ret = component_bind_all(drm->dev, drm);
if (ret)
- goto err_config_cleanup;
+ return ret;
/*
* We currently support two fixed data streams, each optional,
@@ -255,8 +258,6 @@ err_unset_dma_parms:
dma_dev->dma_parms = NULL;
err_component_unbind:
component_unbind_all(drm->dev, drm);
-err_config_cleanup:
- drm_mode_config_cleanup(drm);
return ret;
}
@@ -272,7 +273,6 @@ static void mtk_drm_kms_deinit(struct drm_device *drm)
private->dma_dev->dma_parms = NULL;
component_unbind_all(drm->dev, drm);
- drm_mode_config_cleanup(drm);
}
static const struct file_operations mtk_drm_fops = {
@@ -348,9 +348,7 @@ static int mtk_drm_bind(struct device *dev)
if (ret < 0)
goto err_deinit;
- ret = drm_fbdev_generic_setup(drm, 32);
- if (ret)
- DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
+ drm_fbdev_generic_setup(drm, 32);
return 0;
@@ -421,11 +419,22 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
{ }
};
+static const struct of_device_id mtk_drm_of_ids[] = {
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt2712-mmsys",
+ .data = &mt2712_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
+ { }
+};
+
static int mtk_drm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct device_node *phandle = dev->parent->of_node;
+ const struct of_device_id *of_id;
struct mtk_drm_private *private;
- struct resource *mem;
struct device_node *node;
struct component_match *match = NULL;
int ret;
@@ -436,18 +445,20 @@ static int mtk_drm_probe(struct platform_device *pdev)
return -ENOMEM;
private->data = of_device_get_match_data(dev);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- private->config_regs = devm_ioremap_resource(dev, mem);
- if (IS_ERR(private->config_regs)) {
- ret = PTR_ERR(private->config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
- return ret;
+ private->mmsys_dev = dev->parent;
+ if (!private->mmsys_dev) {
+ dev_err(dev, "Failed to get MMSYS device\n");
+ return -ENODEV;
}
+ of_id = of_match_node(mtk_drm_of_ids, phandle);
+ if (!of_id)
+ return -ENODEV;
+
+ private->data = of_id->data;
+
/* Iterate over sibling DISP function blocks */
- for_each_child_of_node(dev->of_node->parent, node) {
+ for_each_child_of_node(phandle->parent, node) {
const struct of_device_id *of_id;
enum mtk_ddp_comp_type comp_type;
int comp_id;
@@ -581,22 +592,11 @@ static int mtk_drm_sys_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
mtk_drm_sys_resume);
-static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt2701-mmsys",
- .data = &mt2701_mmsys_driver_data},
- { .compatible = "mediatek,mt2712-mmsys",
- .data = &mt2712_mmsys_driver_data},
- { .compatible = "mediatek,mt8173-mmsys",
- .data = &mt8173_mmsys_driver_data},
- { }
-};
-
static struct platform_driver mtk_drm_platform_driver = {
.probe = mtk_drm_probe,
.remove = mtk_drm_remove,
.driver = {
.name = "mediatek-drm",
- .of_match_table = mtk_drm_of_ids,
.pm = &mtk_drm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index 17bc99b9f5d4..b5be63e53176 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -39,7 +39,7 @@ struct mtk_drm_private {
struct device_node *mutex_node;
struct device *mutex_dev;
- void __iomem *config_regs;
+ struct device *mmsys_dev;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
const struct mtk_mmsys_driver_data *data;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
index b04a3c2b111e..6190cc3b7b0d 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c
@@ -117,7 +117,7 @@ int mtk_drm_gem_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(&mtk_gem->base);
+ drm_gem_object_put(&mtk_gem->base);
return 0;
@@ -224,6 +224,9 @@ struct drm_gem_object *mtk_gem_prime_import_sg_table(struct drm_device *dev,
expected = sg_dma_address(sg->sgl);
for_each_sg(sg->sgl, s, sg->nents, i) {
+ if (!sg_dma_len(s))
+ break;
+
if (sg_dma_address(s) != expected) {
DRM_ERROR("sg_table is not contiguous");
ret = -EINVAL;
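
The new sg_dma_len() check matters because dma_map_sg() may coalesce entries: the mapped list can be shorter than the original, and trailing entries then report a DMA length of zero. A sketch of the contiguity walk with that in mind (helper name hypothetical):

static bool example_sgt_contiguous(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;

	for_each_sg(sgt->sgl, s, sgt->nents, i) {
		if (!sg_dma_len(s))
			break;		/* past the mapped entries */
		if (sg_dma_address(s) != expected)
			return false;	/* hole in the IOVA range */
		expected = sg_dma_address(s) + sg_dma_len(s);
	}
	return true;
}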
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 0ede69830a9d..270bf22c98fe 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mtk_drm_ddp_comp.h"
@@ -787,15 +788,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
dsi->enabled = false;
}
-static void mtk_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs mtk_dsi_encoder_funcs = {
- .destroy = mtk_dsi_encoder_destroy,
-};
-
static bool mtk_dsi_encoder_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -888,8 +880,8 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
{
int ret;
- ret = drm_encoder_init(drm, &dsi->encoder, &mtk_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to encoder init to drm\n");
return ret;
@@ -1194,14 +1186,18 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->engine_clk = devm_clk_get(dev, "engine");
if (IS_ERR(dsi->engine_clk)) {
ret = PTR_ERR(dsi->engine_clk);
- dev_err(dev, "Failed to get engine clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get engine clock: %d\n", ret);
goto err_unregister_host;
}
dsi->digital_clk = devm_clk_get(dev, "digital");
if (IS_ERR(dsi->digital_clk)) {
ret = PTR_ERR(dsi->digital_clk);
- dev_err(dev, "Failed to get digital clock: %d\n", ret);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get digital clock: %d\n", ret);
goto err_unregister_host;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index ff43a3d80410..55a4d095606f 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -311,14 +311,10 @@ static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
u8 checksum;
int ctrl_frame_en = 0;
- frame_type = *buffer;
- buffer += 1;
- frame_ver = *buffer;
- buffer += 1;
- frame_len = *buffer;
- buffer += 1;
- checksum = *buffer;
- buffer += 1;
+ frame_type = *buffer++;
+ frame_ver = *buffer++;
+ frame_len = *buffer++;
+ checksum = *buffer++;
frame_data = buffer;
dev_dbg(hdmi->dev,
@@ -982,7 +978,7 @@ static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
struct drm_display_mode *mode)
{
struct hdmi_avi_infoframe frame;
- u8 buffer[17];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
ssize_t err;
err = drm_hdmi_avi_infoframe_from_display_mode(&frame,
@@ -1008,7 +1004,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
const char *product)
{
struct hdmi_spd_infoframe frame;
- u8 buffer[29];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_spd_infoframe_init(&frame, vendor, product);
@@ -1031,7 +1027,7 @@ static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
{
struct hdmi_audio_infoframe frame;
- u8 buffer[14];
+ u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
ssize_t err;
err = hdmi_audio_infoframe_init(&frame);
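
Deriving the buffer sizes from HDMI_INFOFRAME_HEADER_SIZE plus the per-type payload constant documents what the magic 17/29/14 actually were and matches what the pack helpers write. The usage pattern, in brief:

	struct hdmi_avi_infoframe frame;
	u8 buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
	ssize_t len;

	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return len;	/* frame invalid or buffer too small */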
@@ -1258,7 +1254,7 @@ static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
struct drm_bridge *next_bridge;
dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode),
!!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
next_bridge = drm_bridge_get_next_bridge(&hdmi->bridge);
@@ -1474,7 +1470,9 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
ret = mtk_hdmi_get_all_clk(hdmi, np);
if (ret) {
- dev_err(dev, "Failed to get clocks: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get clocks: %d\n", ret);
+
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index e4d34484ecc8..8cee2591e728 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -88,6 +88,44 @@ static const struct phy_ops mtk_mipi_tx_ops = {
.owner = THIS_MODULE,
};
+static void mtk_mipi_tx_get_calibration_datal(struct mtk_mipi_tx *mipi_tx)
+{
+ struct nvmem_cell *cell;
+ size_t len;
+ u32 *buf;
+
+ cell = nvmem_cell_get(mipi_tx->dev, "calibration-data");
+ if (IS_ERR(cell)) {
+ dev_info(mipi_tx->dev, "can't get nvmem_cell_get, ignore it\n");
+ return;
+ }
+ buf = (u32 *)nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+
+ if (IS_ERR(buf)) {
+ dev_info(mipi_tx->dev, "can't get data, ignore it\n");
+ return;
+ }
+
+ if (len < 3 * sizeof(u32)) {
+ dev_info(mipi_tx->dev, "invalid calibration data\n");
+ kfree(buf);
+ return;
+ }
+
+ mipi_tx->rt_code[0] = ((buf[0] >> 6 & 0x1f) << 5) |
+ (buf[0] >> 11 & 0x1f);
+ mipi_tx->rt_code[1] = ((buf[1] >> 27 & 0x1f) << 5) |
+ (buf[0] >> 1 & 0x1f);
+ mipi_tx->rt_code[2] = ((buf[1] >> 17 & 0x1f) << 5) |
+ (buf[1] >> 22 & 0x1f);
+ mipi_tx->rt_code[3] = ((buf[1] >> 7 & 0x1f) << 5) |
+ (buf[1] >> 12 & 0x1f);
+ mipi_tx->rt_code[4] = ((buf[2] >> 27 & 0x1f) << 5) |
+ (buf[1] >> 2 & 0x1f);
+ kfree(buf);
+}
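
Each rt_code packs two 5-bit calibration fields, one into bits [9:5] and one into bits [4:0]; a hypothetical helper that makes the shifting explicit:

static u32 example_pack_rt_code(u32 hi_word, unsigned int hi_shift,
				u32 lo_word, unsigned int lo_shift)
{
	return ((hi_word >> hi_shift & 0x1f) << 5) |
	       (lo_word >> lo_shift & 0x1f);
}

/* e.g. rt_code[0] above is example_pack_rt_code(buf[0], 6, buf[0], 11) */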
+
static int mtk_mipi_tx_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -125,6 +163,20 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return ret;
}
+ ret = of_property_read_u32(dev->of_node, "drive-strength-microamp",
+ &mipi_tx->mipitx_drive);
+ /* Default to 4600 uA (LDO ref sel code 0x8) when the property is absent */
+ if (ret < 0)
+ mipi_tx->mipitx_drive = 4600;
+
+ /* Clamp mipitx_drive to the valid 3000..6000 uA range */
+ if (mipi_tx->mipitx_drive > 6000 || mipi_tx->mipitx_drive < 3000) {
+ dev_warn(dev, "drive-strength-microamp is invalid %d, not in 3000 ~ 6000\n",
+ mipi_tx->mipitx_drive);
+ mipi_tx->mipitx_drive = clamp_val(mipi_tx->mipitx_drive, 3000,
+ 6000);
+ }
+
ref_clk_name = __clk_get_name(ref_clk);
ret = of_property_read_string(dev->of_node, "clock-output-names",
@@ -160,6 +212,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
mipi_tx->dev = dev;
+ mtk_mipi_tx_get_calibration_datal(mipi_tx);
+
return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
mipi_tx->pll);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
index 413f35d86219..c76f07c3fdeb 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h
@@ -12,9 +12,11 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
+#include <linux/slab.h>
struct mtk_mipitx_data {
const u32 mppll_preserve;
@@ -27,6 +29,8 @@ struct mtk_mipi_tx {
struct device *dev;
void __iomem *regs;
u32 data_rate;
+ u32 mipitx_drive;
+ u32 rt_code[5];
const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
struct clk *pll;
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
index 91f08a351fd0..9f3e55aeebb2 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c
@@ -17,6 +17,9 @@
#define RG_DSI_BG_CORE_EN BIT(7)
#define RG_DSI_PAD_TIEL_SEL BIT(8)
+#define MIPITX_VOLTAGE_SEL 0x0010
+#define RG_DSI_HSTX_LDO_REF_SEL (0xf << 6)
+
#define MIPITX_PLL_PWR 0x0028
#define MIPITX_PLL_CON0 0x002c
#define MIPITX_PLL_CON1 0x0030
@@ -25,6 +28,7 @@
#define MIPITX_PLL_CON4 0x003c
#define RG_DSI_PLL_IBIAS (3 << 10)
+#define MIPITX_D2P_RTCODE 0x0100
#define MIPITX_D2_SW_CTL_EN 0x0144
#define MIPITX_D0_SW_CTL_EN 0x0244
#define MIPITX_CK_CKMODE_EN 0x0328
@@ -105,6 +109,24 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
.recalc_rate = mtk_mipi_tx_pll_recalc_rate,
};
+static void mtk_mipi_tx_config_calibration_data(struct mtk_mipi_tx *mipi_tx)
+{
+ int i, j;
+
+ for (i = 0; i < 5; i++) {
+ if ((mipi_tx->rt_code[i] & 0x1f) == 0)
+ mipi_tx->rt_code[i] |= 0x10;
+
+ if ((mipi_tx->rt_code[i] >> 5 & 0x1f) == 0)
+ mipi_tx->rt_code[i] |= 0x10 << 5;
+
+ for (j = 0; j < 10; j++)
+ mtk_mipi_tx_update_bits(mipi_tx,
+ MIPITX_D2P_RTCODE * (i + 1) + j * 4,
+ 1, mipi_tx->rt_code[i] >> j & 1);
+ }
+}
+
static void mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
@@ -123,6 +145,12 @@ static void mtk_mipi_tx_power_on_signal(struct phy *phy)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN);
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_VOLTAGE_SEL,
+ RG_DSI_HSTX_LDO_REF_SEL,
+ (mipi_tx->mipitx_drive - 3000) / 200 << 6);
+
+ mtk_mipi_tx_config_calibration_data(mipi_tx);
+
mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN);
}
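
The drive-strength write maps the clamped microamp value onto the 4-bit RG_DSI_HSTX_LDO_REF_SEL field: 3000..6000 uA in 200 uA steps gives codes 0..15, shifted to bits [9:6]. Worked example:

	/* 4600 uA (the default above): (4600 - 3000) / 200 = 8 = 0x8 */
	u32 sel = (mipi_tx->mipitx_drive - 3000) / 200;
	u32 val = sel << 6;	/* aligned with RG_DSI_HSTX_LDO_REF_SEL */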
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index b5f5eb7b4bb9..8b9c8dd788c4 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -11,6 +11,7 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/sys_soc.h>
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
@@ -95,19 +96,8 @@ static struct drm_driver meson_driver = {
/* IRQ */
.irq_handler = meson_irq,
- /* PRIME Ops */
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
-
- /* GEM Ops */
- .dumb_create = meson_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
+ /* CMA Ops */
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(meson_dumb_create),
/* Misc */
.fops = &fops,
@@ -183,6 +173,24 @@ static void meson_remove_framebuffers(void)
kfree(ap);
}
+struct meson_drm_soc_attr {
+ struct meson_drm_soc_limits limits;
+ const struct soc_device_attribute *attrs;
+};
+
+static const struct meson_drm_soc_attr meson_drm_soc_attrs[] = {
+ /* S805X/S805Y HDMI PLL won't lock for HDMI PHY freq > 1.65GHz */
+ {
+ .limits = {
+ .max_hdmi_phy_freq = 1650000,
+ },
+ .attrs = (const struct soc_device_attribute []) {
+ { .soc_id = "GXL (S805*)", },
+ { /* sentinel */ },
+ }
+ },
+};
+
static int meson_drv_bind_master(struct device *dev, bool has_components)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -191,7 +199,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
struct drm_device *drm;
struct resource *res;
void __iomem *regs;
- int ret;
+ int ret, i;
/* Checks if an output connector is available */
if (!meson_vpu_has_available_connectors(dev)) {
@@ -281,10 +289,20 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
if (ret)
goto free_drm;
+ /* Assign limits per soc revision/package */
+ for (i = 0 ; i < ARRAY_SIZE(meson_drm_soc_attrs) ; ++i) {
+ if (soc_device_match(meson_drm_soc_attrs[i].attrs)) {
+ priv->limits = &meson_drm_soc_attrs[i].limits;
+ break;
+ }
+ }
+
/* Remove early framebuffers (ie. simplefb) */
meson_remove_framebuffers();
- drm_mode_config_init(drm);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
+ goto free_drm;
drm->mode_config.max_width = 3840;
drm->mode_config.max_height = 2160;
drm->mode_config.funcs = &meson_mode_config_funcs;
@@ -379,7 +397,6 @@ static void meson_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_irq_uninstall(drm);
drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
}
@@ -412,9 +429,7 @@ static int __maybe_unused meson_drv_pm_resume(struct device *dev)
if (priv->afbcd.ops)
priv->afbcd.ops->init(priv);
- drm_mode_config_helper_resume(priv->drm);
-
- return 0;
+ return drm_mode_config_helper_resume(priv->drm);
}
static int compare_of(struct device *dev, void *data)
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 04fdf3826643..5b23704a80d6 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -30,6 +30,10 @@ struct meson_drm_match_data {
struct meson_afbcd_ops *afbcd_ops;
};
+struct meson_drm_soc_limits {
+ unsigned int max_hdmi_phy_freq;
+};
+
struct meson_drm {
struct device *dev;
enum vpu_compatible compat;
@@ -48,6 +52,8 @@ struct meson_drm {
struct drm_plane *primary_plane;
struct drm_plane *overlay_plane;
+ const struct meson_drm_soc_limits *limits;
+
/* Components Data */
struct {
bool osd1_enabled;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e8c94915a4fc..24a12c453095 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -695,7 +695,7 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
__func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
+ return meson_vclk_vic_supported_freq(priv, phy_freq, vclk_freq);
}
/* Encoder */
@@ -1034,10 +1034,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(dw_plat_data->regm);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Failed to get hdmi top irq\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(dev, irq, dw_hdmi_top_irq,
dw_hdmi_top_thread_irq, IRQF_SHARED,
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d5cbc47835bf..35338ed18209 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -223,7 +223,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[0] |= OSD_BLK_MODE_16 |
OSD_COLOR_MATRIX_16_RGB565;
break;
- };
+ }
}
switch (fb->format->format) {
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index fdf26dac9fa8..0eb86943a358 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -725,6 +725,13 @@ meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq)
/* In DMT mode, path after PLL is always /10 */
freq *= 10;
+ /* Check against soc revision/package limits */
+ if (priv->limits) {
+ if (priv->limits->max_hdmi_phy_freq &&
+ freq > priv->limits->max_hdmi_phy_freq)
+ return MODE_CLOCK_HIGH;
+ }
+
if (meson_hdmi_pll_find_params(priv, freq, &m, &frac, &od))
return MODE_OK;
@@ -762,7 +769,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq,
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
unsigned int vclk_freq)
{
int i;
@@ -770,6 +777,13 @@ meson_vclk_vic_supported_freq(unsigned int phy_freq,
DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
phy_freq, vclk_freq);
+ /* Check against soc revision/package limits */
+ if (priv->limits) {
+ if (priv->limits->max_hdmi_phy_freq &&
+ phy_freq > priv->limits->max_hdmi_phy_freq)
+ return MODE_CLOCK_HIGH;
+ }
+
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index aed0ab2efa71..60617aaf18dd 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,7 +25,8 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
+meson_vclk_vic_supported_freq(struct meson_drm *priv, unsigned int phy_freq,
+ unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
unsigned int phy_freq, unsigned int vclk_freq,
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 541f9eb2a135..f1747fde1fe0 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -48,7 +48,6 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
DRM_MODE("720x576i", DRM_MODE_TYPE_DRIVER, 13500,
720, 732, 795, 864, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 50,
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
},
},
@@ -58,7 +57,6 @@ struct meson_cvbs_mode meson_cvbs_modes[MESON_CVBS_MODES_COUNT] = {
DRM_MODE("720x480i", DRM_MODE_TYPE_DRIVER, 13500,
720, 739, 801, 858, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_INTERLACE),
- .vrefresh = 60,
.picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3,
},
},
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index d60aa4b9ccd4..93be766715c9 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -2,10 +2,8 @@
config DRM_MGAG200
tristate "Kernel modesetting driver for MGA G200 server engines"
depends on DRM && PCI && MMU
+ select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
- select DRM_VRAM_HELPER
- select DRM_TTM
- select DRM_TTM_HELPER
help
This is a KMS driver for the MGA G200 server chips, it
does not support the original MGA G200 or any of the desktop
diff --git a/drivers/gpu/drm/mgag200/Makefile b/drivers/gpu/drm/mgag200/Makefile
index 04b281bcf655..42fedef53882 100644
--- a/drivers/gpu/drm/mgag200/Makefile
+++ b/drivers/gpu/drm/mgag200/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-mgag200-y := mgag200_main.o mgag200_mode.o mgag200_cursor.o \
- mgag200_drv.o mgag200_i2c.o mgag200_ttm.o
+mgag200-y := mgag200_drv.o mgag200_i2c.o mgag200_mm.o mgag200_mode.o
obj-$(CONFIG_DRM_MGAG200) += mgag200.o
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
deleted file mode 100644
index d491edd317ff..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2013 Matrox Graphics
- *
- * Author: Christopher Harvey <charvey@matrox.com>
- */
-
-#include <linux/pci.h>
-
-#include "mgag200_drv.h"
-
-static bool warn_transparent = true;
-static bool warn_palette = true;
-
-static int mgag200_cursor_update(struct mga_device *mdev, void *dst, void *src,
- unsigned int width, unsigned int height)
-{
- struct drm_device *dev = mdev->dev;
- unsigned int i, row, col;
- uint32_t colour_set[16];
- uint32_t *next_space = &colour_set[0];
- uint32_t *palette_iter;
- uint32_t this_colour;
- bool found = false;
- int colour_count = 0;
- u8 reg_index;
- u8 this_row[48];
-
- memset(&colour_set[0], 0, sizeof(uint32_t)*16);
- /* width*height*4 = 16384 */
- for (i = 0; i < 16384; i += 4) {
- this_colour = ioread32(src + i);
- /* No transparency */
- if (this_colour>>24 != 0xff &&
- this_colour>>24 != 0x0) {
- if (warn_transparent) {
- dev_info(&dev->pdev->dev, "Video card doesn't support cursors with partial transparency.\n");
- dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
- warn_transparent = false; /* Only tell the user once. */
- }
- return -EINVAL;
- }
- /* Don't need to store transparent pixels as colours */
- if (this_colour>>24 == 0x0)
- continue;
- found = false;
- for (palette_iter = &colour_set[0]; palette_iter != next_space; palette_iter++) {
- if (*palette_iter == this_colour) {
- found = true;
- break;
- }
- }
- if (found)
- continue;
- /* We only support 4bit paletted cursors */
- if (colour_count >= 16) {
- if (warn_palette) {
- dev_info(&dev->pdev->dev, "Video card only supports cursors with up to 16 colours.\n");
- dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n");
- warn_palette = false; /* Only tell the user once. */
- }
- return -EINVAL;
- }
- *next_space = this_colour;
- next_space++;
- colour_count++;
- }
-
- /* Program colours from cursor icon into palette */
- for (i = 0; i < colour_count; i++) {
- if (i <= 2)
- reg_index = 0x8 + i*0x4;
- else
- reg_index = 0x60 + i*0x3;
- WREG_DAC(reg_index, colour_set[i] & 0xff);
- WREG_DAC(reg_index+1, colour_set[i]>>8 & 0xff);
- WREG_DAC(reg_index+2, colour_set[i]>>16 & 0xff);
- BUG_ON((colour_set[i]>>24 & 0xff) != 0xff);
- }
-
- /* now write colour indices into hardware cursor buffer */
- for (row = 0; row < 64; row++) {
- memset(&this_row[0], 0, 48);
- for (col = 0; col < 64; col++) {
- this_colour = ioread32(src + 4*(col + 64*row));
- /* write transparent pixels */
- if (this_colour>>24 == 0x0) {
- this_row[47 - col/8] |= 0x80>>(col%8);
- continue;
- }
-
- /* write colour index here */
- for (i = 0; i < colour_count; i++) {
- if (colour_set[i] == this_colour) {
- if (col % 2)
- this_row[col/2] |= i<<4;
- else
- this_row[col/2] |= i;
- break;
- }
- }
- }
- memcpy_toio(dst + row*48, &this_row[0], 48);
- }
-
- return 0;
-}
-
-static void mgag200_cursor_set_base(struct mga_device *mdev, u64 address)
-{
- u8 addrl = (address >> 10) & 0xff;
- u8 addrh = (address >> 18) & 0x3f;
-
- /* Program gpu address of cursor buffer */
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, addrl);
- WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, addrh);
-}
-
-static int mgag200_show_cursor(struct mga_device *mdev, void *src,
- unsigned int width, unsigned int height)
-{
- struct drm_device *dev = mdev->dev;
- struct drm_gem_vram_object *gbo;
- void *dst;
- s64 off;
- int ret;
-
- gbo = mdev->cursor.gbo[mdev->cursor.next_index];
- if (!gbo) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -ENOTSUPP; /* Didn't allocate space for cursors */
- }
- dst = drm_gem_vram_vmap(gbo);
- if (IS_ERR(dst)) {
- ret = PTR_ERR(dst);
- dev_err(&dev->pdev->dev,
- "failed to map cursor updates: %d\n", ret);
- return ret;
- }
- off = drm_gem_vram_offset(gbo);
- if (off < 0) {
- ret = (int)off;
- dev_err(&dev->pdev->dev,
- "failed to get cursor scanout address: %d\n", ret);
- goto err_drm_gem_vram_vunmap;
- }
-
- ret = mgag200_cursor_update(mdev, dst, src, width, height);
- if (ret)
- goto err_drm_gem_vram_vunmap;
- mgag200_cursor_set_base(mdev, off);
-
- /* Adjust cursor control register to turn on the cursor */
- WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palletized cursor mode */
-
- drm_gem_vram_vunmap(gbo, dst);
-
- ++mdev->cursor.next_index;
- mdev->cursor.next_index %= ARRAY_SIZE(mdev->cursor.gbo);
-
- return 0;
-
-err_drm_gem_vram_vunmap:
- drm_gem_vram_vunmap(gbo, dst);
- return ret;
-}
-
-/*
- * Hide the cursor off screen. We can't disable the cursor hardware because
- * it takes too long to re-activate and causes momentary corruption.
- */
-static void mgag200_hide_cursor(struct mga_device *mdev)
-{
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
-}
-
-static void mgag200_move_cursor(struct mga_device *mdev, int x, int y)
-{
- if (WARN_ON(x <= 0))
- return;
- if (WARN_ON(y <= 0))
- return;
- if (WARN_ON(x & ~0xffff))
- return;
- if (WARN_ON(y & ~0xffff))
- return;
-
- WREG8(MGA_CURPOSXL, x & 0xff);
- WREG8(MGA_CURPOSXH, (x>>8) & 0xff);
-
- WREG8(MGA_CURPOSYL, y & 0xff);
- WREG8(MGA_CURPOSYH, (y>>8) & 0xff);
-}
-
-int mgag200_cursor_init(struct mga_device *mdev)
-{
- struct drm_device *dev = mdev->dev;
- size_t ncursors = ARRAY_SIZE(mdev->cursor.gbo);
- size_t size;
- int ret;
- size_t i;
- struct drm_gem_vram_object *gbo;
-
- size = roundup(64 * 48, PAGE_SIZE);
- if (size * ncursors > mdev->vram_fb_available)
- return -ENOMEM;
-
- for (i = 0; i < ncursors; ++i) {
- gbo = drm_gem_vram_create(dev, size, 0);
- if (IS_ERR(gbo)) {
- ret = PTR_ERR(gbo);
- goto err_drm_gem_vram_put;
- }
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
- DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
- if (ret) {
- drm_gem_vram_put(gbo);
- goto err_drm_gem_vram_put;
- }
-
- mdev->cursor.gbo[i] = gbo;
- }
-
- /*
- * At the high end of video memory, we reserve space for
- * buffer objects. The cursor plane uses this memory to store
- * a double-buffered image of the current cursor. Hence, it's
- * not available for framebuffers.
- */
- mdev->vram_fb_available -= ncursors * size;
-
- return 0;
-
-err_drm_gem_vram_put:
- while (i) {
- --i;
- gbo = mdev->cursor.gbo[i];
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- mdev->cursor.gbo[i] = NULL;
- }
- return ret;
-}
-
-void mgag200_cursor_fini(struct mga_device *mdev)
-{
- size_t i;
- struct drm_gem_vram_object *gbo;
-
- for (i = 0; i < ARRAY_SIZE(mdev->cursor.gbo); ++i) {
- gbo = mdev->cursor.gbo[i];
- drm_gem_vram_unpin(gbo);
- drm_gem_vram_put(gbo);
- }
-}
-
-int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = (struct mga_device *)dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_gem_vram_object *gbo = NULL;
- int ret;
- u8 *src;
-
- if (!handle || !file_priv) {
- mgag200_hide_cursor(mdev);
- return 0;
- }
-
- if (width != 64 || height != 64) {
- WREG8(MGA_CURPOSXL, 0);
- WREG8(MGA_CURPOSXH, 0);
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(file_priv, handle);
- if (!obj)
- return -ENOENT;
- gbo = drm_gem_vram_of_gem(obj);
- src = drm_gem_vram_vmap(gbo);
- if (IS_ERR(src)) {
- ret = PTR_ERR(src);
- dev_err(&dev->pdev->dev,
- "failed to map user buffer updates\n");
- goto err_drm_gem_object_put_unlocked;
- }
-
- ret = mgag200_show_cursor(mdev, src, width, height);
- if (ret)
- goto err_drm_gem_vram_vunmap;
-
- /* Now update internal buffer pointers */
- drm_gem_vram_vunmap(gbo, src);
- drm_gem_object_put_unlocked(obj);
-
- return 0;
-err_drm_gem_vram_vunmap:
- drm_gem_vram_vunmap(gbo, src);
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(obj);
- return ret;
-}
-
-int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private;
-
- /* Our origin is at (64,64) */
- x += 64;
- y += 64;
-
- mgag200_move_cursor(mdev, x, y);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 7a5bad2f57d7..e19660f4a637 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -17,23 +17,105 @@
#include "mgag200_drv.h"
-/*
- * This is the generic driver code. This binds the driver to the drm core,
- * which then performs further device association and calls our graphics init
- * functions
- */
int mgag200_modeset = -1;
-
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, mgag200_modeset, int, 0400);
-int mgag200_hw_bug_no_startadd = -1;
-MODULE_PARM_DESC(modeset, "HW does not interpret scanout-buffer start address correctly");
-module_param_named(hw_bug_no_startadd, mgag200_hw_bug_no_startadd, int, 0400);
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
+
+static struct drm_driver mgag200_driver = {
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &mgag200_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+/*
+ * DRM device
+ */
+
+static int mgag200_device_init(struct mga_device *mdev, unsigned long flags)
+{
+ struct drm_device *dev = &mdev->base;
+ int ret, option;
+
+ mdev->flags = mgag200_flags_from_driver_data(flags);
+ mdev->type = mgag200_type_from_driver_data(flags);
-static struct drm_driver driver;
+ pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
+ mdev->has_sdram = !(option & (1 << 14));
-static const struct pci_device_id pciidlist[] = {
+ /* BAR 0 is the framebuffer, BAR 1 contains registers */
+ mdev->rmmio_base = pci_resource_start(dev->pdev, 1);
+ mdev->rmmio_size = pci_resource_len(dev->pdev, 1);
+
+ if (!devm_request_mem_region(dev->dev, mdev->rmmio_base,
+ mdev->rmmio_size, "mgadrmfb_mmio")) {
+ drm_err(dev, "can't reserve mmio registers\n");
+ return -ENOMEM;
+ }
+
+ mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
+ if (mdev->rmmio == NULL)
+ return -ENOMEM;
+
+ /* stash G200 SE model number for later use */
+ if (IS_G200_SE(mdev)) {
+ mdev->unique_rev_id = RREG32(0x1e24);
+ drm_dbg(dev, "G200 SE unique revision id is 0x%x\n",
+ mdev->unique_rev_id);
+ }
+
+ ret = mgag200_mm_init(mdev);
+ if (ret)
+ return ret;
+
+ ret = mgag200_modeset_init(mdev);
+ if (ret) {
+ drm_err(dev, "Fatal error during modeset init: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mga_device *
+mgag200_device_create(struct pci_dev *pdev, unsigned long flags)
+{
+ struct drm_device *dev;
+ struct mga_device *mdev;
+ int ret;
+
+ mdev = devm_drm_dev_alloc(&pdev->dev, &mgag200_driver,
+ struct mga_device, base);
+ if (IS_ERR(mdev))
+ return mdev;
+ dev = &mdev->base;
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+
+ ret = mgag200_device_init(mdev, flags);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return mdev;
+}
+
+/*
+ * PCI driver
+ */
+
+static const struct pci_device_id mgag200_pciidlist[] = {
{ PCI_VENDOR_ID_MATROX, 0x522, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
G200_SE_A | MGAG200_FLAG_HW_BUG_NO_STARTADD},
{ PCI_VENDOR_ID_MATROX, 0x524, PCI_ANY_ID, PCI_ANY_ID, 0, 0, G200_SE_B },
@@ -46,117 +128,47 @@ static const struct pci_device_id pciidlist[] = {
{0,}
};
-MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_DEVICE_TABLE(pci, mgag200_pciidlist);
-
-static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int
+mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct mga_device *mdev;
struct drm_device *dev;
int ret;
drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
-
- dev->pdev = pdev;
- pci_set_drvdata(pdev, dev);
-
- ret = mgag200_driver_load(dev, ent->driver_data);
- if (ret)
- goto err_drm_dev_put;
+ mdev = mgag200_device_create(pdev, ent->driver_data);
+ if (IS_ERR(mdev))
+ return PTR_ERR(mdev);
+ dev = &mdev->base;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_mgag200_driver_unload;
+ return ret;
- return 0;
+ drm_fbdev_generic_setup(dev, 0);
-err_mgag200_driver_unload:
- mgag200_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
+ return 0;
}
-static void mga_pci_remove(struct pci_dev *pdev)
+static void mgag200_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- mgag200_driver_unload(dev);
- drm_dev_put(dev);
-}
-
-DEFINE_DRM_GEM_FOPS(mgag200_driver_fops);
-
-static bool mgag200_pin_bo_at_0(const struct mga_device *mdev)
-{
- if (mgag200_hw_bug_no_startadd > 0) {
- DRM_WARN_ONCE("Option hw_bug_no_startradd is enabled. Please "
- "report the output of 'lspci -vvnn' to "
- "<dri-devel@lists.freedesktop.org> if this "
- "option is required to make mgag200 work "
- "correctly on your system.\n");
- return true;
- } else if (!mgag200_hw_bug_no_startadd) {
- return false;
- }
- return mdev->flags & MGAG200_FLAG_HW_BUG_NO_STARTADD;
}
-int mgag200_driver_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct mga_device *mdev = dev->dev_private;
- unsigned long pg_align;
-
- if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized"))
- return -EINVAL;
-
- pg_align = 0ul;
-
- /*
- * Aligning scanout buffers to the size of the video ram forces
- * placement at offset 0. Works around a bug where HW does not
- * respect 'startadd' field.
- */
- if (mgag200_pin_bo_at_0(mdev))
- pg_align = PFN_UP(mdev->mc.vram_size);
-
- return drm_gem_vram_fill_create_dumb(file, dev, pg_align, 0, args);
-}
-
-static struct drm_driver driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET,
- .fops = &mgag200_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
- .debugfs_init = drm_vram_mm_debugfs_init,
- .dumb_create = mgag200_driver_dumb_create,
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
- .gem_prime_mmap = drm_gem_prime_mmap,
-};
-
static struct pci_driver mgag200_pci_driver = {
.name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = mga_pci_probe,
- .remove = mga_pci_remove,
+ .id_table = mgag200_pciidlist,
+ .probe = mgag200_pci_probe,
+ .remove = mgag200_pci_remove,
};
static int __init mgag200_init(void)
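
The rewritten probe path leans on managed helpers (pcim_*, devm_*, drmm_*), which is why the explicit error-unwind labels disappear. A condensed sketch of the pattern, with placeholder names (my_device, my_driver) rather than the driver's exact code:

	#include <linux/pci.h>

	#include <drm/drm_drv.h>
	#include <drm/drm_fb_helper.h>

	static struct drm_driver my_driver; /* assumed to be filled in elsewhere */

	struct my_device {
		struct drm_device base;	/* embedded; freed by devres */
	};

	static int my_pci_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
	{
		struct my_device *md;
		int ret;

		ret = pcim_enable_device(pdev);	/* auto-disabled on detach */
		if (ret)
			return ret;

		md = devm_drm_dev_alloc(&pdev->dev, &my_driver,
					struct my_device, base);
		if (IS_ERR(md))
			return PTR_ERR(md);

		ret = drm_dev_register(&md->base, 0);
		if (ret)
			return ret;	/* devres unwinds the allocations above */

		drm_fbdev_generic_setup(&md->base, 0);
		return 0;
	}
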
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 9691252d6233..270c2f9a6766 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -18,7 +18,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
-#include <drm/drm_gem_vram_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mgag200_reg.h"
@@ -32,8 +33,6 @@
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
-#define MGAG200FB_CONN_LIMIT 1
-
#define RREG8(reg) ioread8(((void __iomem *)mdev->rmmio) + (reg))
#define WREG8(reg, v) iowrite8(v, ((void __iomem *)mdev->rmmio) + (reg))
#define RREG32(reg) ioread32(((void __iomem *)mdev->rmmio) + (reg))
@@ -49,6 +48,12 @@
WREG8(ATTR_DATA, v); \
} while (0) \
+#define RREG_SEQ(reg, v) \
+ do { \
+ WREG8(MGAREG_SEQ_INDEX, reg); \
+ v = RREG8(MGAREG_SEQ_DATA); \
+ } while (0) \
+
#define WREG_SEQ(reg, v) \
do { \
WREG8(MGAREG_SEQ_INDEX, reg); \
@@ -61,6 +66,11 @@
WREG8(MGAREG_CRTC_DATA, v); \
} while (0) \
+#define RREG_ECRT(reg, v) \
+ do { \
+ WREG8(MGAREG_CRTCEXT_INDEX, reg); \
+ v = RREG8(MGAREG_CRTCEXT_DATA); \
+ } while (0) \
#define WREG_ECRT(reg, v) \
do { \
@@ -92,23 +102,8 @@
#define MGAG200_MAX_FB_HEIGHT 4096
#define MGAG200_MAX_FB_WIDTH 4096
-#define MATROX_DPMS_CLEARED (-1)
-
-#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
-struct mga_crtc {
- struct drm_crtc base;
- u8 lut_r[256], lut_g[256], lut_b[256];
- int last_dpms;
- bool enabled;
-};
-
-struct mga_mode_info {
- bool mode_config_initialized;
- struct mga_crtc *crtc;
-};
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -121,11 +116,6 @@ struct mga_connector {
struct mga_i2c_chan *i2c;
};
-struct mga_cursor {
- struct drm_gem_vram_object *gbo[2];
- unsigned int next_index;
-};
-
struct mga_mc {
resource_size_t vram_size;
resource_size_t vram_base;
@@ -152,7 +142,7 @@ enum mga_type {
#define IS_G200_SE(mdev) (mdev->type == G200_SE_A || mdev->type == G200_SE_B)
struct mga_device {
- struct drm_device *dev;
+ struct drm_device base;
unsigned long flags;
resource_size_t rmmio_base;
@@ -160,17 +150,12 @@ struct mga_device {
void __iomem *rmmio;
struct mga_mc mc;
- struct mga_mode_info mode_info;
-
- struct mga_cursor cursor;
- size_t vram_fb_available;
+ void __iomem *vram;
+ size_t vram_fb_available;
- bool suspended;
- int num_crtc;
enum mga_type type;
int has_sdram;
- struct drm_display_mode mode;
int bpp_shifts[4];
@@ -179,9 +164,15 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
- struct drm_encoder encoder;
+ struct mga_connector connector;
+ struct drm_simple_display_pipe display_pipe;
};
+static inline struct mga_device *to_mga_device(struct drm_device *dev)
+{
+ return container_of(dev, struct mga_device, base);
+}
+
static inline enum mga_type
mgag200_type_from_driver_data(kernel_ulong_t driver_data)
{
@@ -196,24 +187,12 @@ mgag200_flags_from_driver_data(kernel_ulong_t driver_data)
/* mgag200_mode.c */
int mgag200_modeset_init(struct mga_device *mdev);
-void mgag200_modeset_fini(struct mga_device *mdev);
-
- /* mgag200_main.c */
-int mgag200_driver_load(struct drm_device *dev, unsigned long flags);
-void mgag200_driver_unload(struct drm_device *dev);
/* mgag200_i2c.c */
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
+ /* mgag200_mm.c */
int mgag200_mm_init(struct mga_device *mdev);
-void mgag200_mm_fini(struct mga_device *mdev);
-int mgag200_mmap(struct file *filp, struct vm_area_struct *vma);
-
-int mgag200_cursor_init(struct mga_device *mdev);
-void mgag200_cursor_fini(struct mga_device *mdev);
-int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
- uint32_t handle, uint32_t width, uint32_t height);
-int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
#endif /* __MGAG200_DRV_H__ */
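
The new RREG_SEQ and RREG_ECRT macros make read-modify-write access to the indexed VGA registers possible, which the mode-setting rewrite further down relies on. A usage sketch (assumes an in-scope mdev, as all register helpers in this driver do):

	static void enable_mga_mode(struct mga_device *mdev)
	{
		u8 crtcext3;

		RREG_ECRT(0x03, crtcext3);	/* latch the current value */
		crtcext3 |= BIT(7);		/* set the "MGA mode" bit */
		WREG_ECRT(0x03, crtcext3);	/* write it back */
	}
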
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 9f4635916d32..09731e614e46 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -61,34 +61,34 @@ static inline void mga_i2c_set(struct mga_device *mdev, int mask, int state)
static void mga_gpio_setsda(void *data, int state)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
mga_i2c_set(mdev, i2c->data, state);
}
static void mga_gpio_setscl(void *data, int state)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
mga_i2c_set(mdev, i2c->clock, state);
}
static int mga_gpio_getsda(void *data)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
return (mga_i2c_read_gpio(mdev) & i2c->data) ? 1 : 0;
}
static int mga_gpio_getscl(void *data)
{
struct mga_i2c_chan *i2c = data;
- struct mga_device *mdev = i2c->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(i2c->dev);
return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
}
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev)
{
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
struct mga_i2c_chan *i2c;
int ret;
int data, clock;
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
deleted file mode 100644
index e278b6a547bd..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ /dev/null
@@ -1,211 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2010 Matt Turner.
- * Copyright 2012 Red Hat
- *
- * Authors: Matthew Garrett
- * Matt Turner
- * Dave Airlie
- */
-
-#include <linux/pci.h>
-
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "mgag200_drv.h"
-
-static const struct drm_mode_config_funcs mga_mode_funcs = {
- .fb_create = drm_gem_fb_create
-};
-
-static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
-{
- int offset;
- int orig;
- int test1, test2;
- int orig1, orig2;
- unsigned int vram_size;
-
- /* Probe */
- orig = ioread16(mem);
- iowrite16(0, mem);
-
- vram_size = mdev->mc.vram_window;
-
- if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000)) {
- vram_size = vram_size - 0x400000;
- }
-
- for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
- orig1 = ioread8(mem + offset);
- orig2 = ioread8(mem + offset + 0x100);
-
- iowrite16(0xaa55, mem + offset);
- iowrite16(0xaa55, mem + offset + 0x100);
-
- test1 = ioread16(mem + offset);
- test2 = ioread16(mem);
-
- iowrite16(orig1, mem + offset);
- iowrite16(orig2, mem + offset + 0x100);
-
- if (test1 != 0xaa55) {
- break;
- }
-
- if (test2) {
- break;
- }
- }
-
- iowrite16(orig, mem);
- return offset - 65536;
-}
-
-/* Map the framebuffer from the card and configure the core */
-static int mga_vram_init(struct mga_device *mdev)
-{
- void __iomem *mem;
-
- /* BAR 0 is VRAM */
- mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
- mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
-
- if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
- "mgadrmfb_vram")) {
- DRM_ERROR("can't reserve VRAM\n");
- return -ENXIO;
- }
-
- mem = pci_iomap(mdev->dev->pdev, 0, 0);
- if (!mem)
- return -ENOMEM;
-
- mdev->mc.vram_size = mga_probe_vram(mdev, mem);
-
- pci_iounmap(mdev->dev->pdev, mem);
-
- return 0;
-}
-
-static int mgag200_device_init(struct drm_device *dev,
- uint32_t flags)
-{
- struct mga_device *mdev = dev->dev_private;
- int ret, option;
-
- mdev->flags = mgag200_flags_from_driver_data(flags);
- mdev->type = mgag200_type_from_driver_data(flags);
-
- /* Hardcode the number of CRTCs to 1 */
- mdev->num_crtc = 1;
-
- pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option);
- mdev->has_sdram = !(option & (1 << 14));
-
- /* BAR 0 is the framebuffer, BAR 1 contains registers */
- mdev->rmmio_base = pci_resource_start(mdev->dev->pdev, 1);
- mdev->rmmio_size = pci_resource_len(mdev->dev->pdev, 1);
-
- if (!devm_request_mem_region(mdev->dev->dev, mdev->rmmio_base, mdev->rmmio_size,
- "mgadrmfb_mmio")) {
- DRM_ERROR("can't reserve mmio registers\n");
- return -ENOMEM;
- }
-
- mdev->rmmio = pcim_iomap(dev->pdev, 1, 0);
- if (mdev->rmmio == NULL)
- return -ENOMEM;
-
- /* stash G200 SE model number for later use */
- if (IS_G200_SE(mdev)) {
- mdev->unique_rev_id = RREG32(0x1e24);
- DRM_DEBUG("G200 SE unique revision id is 0x%x\n",
- mdev->unique_rev_id);
- }
-
- ret = mga_vram_init(mdev);
- if (ret)
- return ret;
-
- mdev->bpp_shifts[0] = 0;
- mdev->bpp_shifts[1] = 1;
- mdev->bpp_shifts[2] = 0;
- mdev->bpp_shifts[3] = 2;
- return 0;
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-
-int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct mga_device *mdev;
- int r;
-
- mdev = devm_kzalloc(dev->dev, sizeof(struct mga_device), GFP_KERNEL);
- if (mdev == NULL)
- return -ENOMEM;
- dev->dev_private = (void *)mdev;
- mdev->dev = dev;
-
- r = mgag200_device_init(dev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
- return r;
- }
- r = mgag200_mm_init(mdev);
- if (r)
- goto err_mm;
-
- drm_mode_config_init(dev);
- dev->mode_config.funcs = (void *)&mga_mode_funcs;
- if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
- dev->mode_config.preferred_depth = 16;
- else
- dev->mode_config.preferred_depth = 32;
- dev->mode_config.prefer_shadow = 1;
-
- r = mgag200_modeset_init(mdev);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto err_modeset;
- }
-
- r = mgag200_cursor_init(mdev);
- if (r)
- dev_warn(&dev->pdev->dev,
- "Could not initialize cursors. Not doing hardware cursors.\n");
-
- r = drm_fbdev_generic_setup(mdev->dev, 0);
- if (r)
- goto err_modeset;
-
- return 0;
-
-err_modeset:
- drm_mode_config_cleanup(dev);
- mgag200_cursor_fini(mdev);
- mgag200_mm_fini(mdev);
-err_mm:
- dev->dev_private = NULL;
-
- return r;
-}
-
-void mgag200_driver_unload(struct drm_device *dev)
-{
- struct mga_device *mdev = dev->dev_private;
-
- if (mdev == NULL)
- return;
- mgag200_modeset_fini(mdev);
- drm_mode_config_cleanup(dev);
- mgag200_cursor_fini(mdev);
- mgag200_mm_fini(mdev);
- dev->dev_private = NULL;
-}
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
new file mode 100644
index 000000000000..7b69392bcb89
--- /dev/null
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors: Dave Airlie <airlied@redhat.com>
+ */
+
+#include <linux/pci.h>
+
+#include <drm/drm_managed.h>
+
+#include "mgag200_drv.h"
+
+static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
+ size_t size)
+{
+ int offset;
+ int orig;
+ int test1, test2;
+ int orig1, orig2;
+ size_t vram_size;
+
+ /* Probe */
+ orig = ioread16(mem);
+ iowrite16(0, mem);
+
+ vram_size = size;
+
+ if ((mdev->type == G200_EW3) && (vram_size >= 0x1000000))
+ vram_size = vram_size - 0x400000;
+
+ for (offset = 0x100000; offset < vram_size; offset += 0x4000) {
+ orig1 = ioread8(mem + offset);
+ orig2 = ioread8(mem + offset + 0x100);
+
+ iowrite16(0xaa55, mem + offset);
+ iowrite16(0xaa55, mem + offset + 0x100);
+
+ test1 = ioread16(mem + offset);
+ test2 = ioread16(mem);
+
+ iowrite16(orig1, mem + offset);
+ iowrite16(orig2, mem + offset + 0x100);
+
+ if (test1 != 0xaa55)
+ break;
+
+ if (test2)
+ break;
+ }
+
+ iowrite16(orig, mem);
+
+ return offset - 65536;
+}
+
+static void mgag200_mm_release(struct drm_device *dev, void *ptr)
+{
+ struct mga_device *mdev = to_mga_device(dev);
+
+ mdev->vram_fb_available = 0;
+ iounmap(mdev->vram);
+ arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+ arch_phys_wc_del(mdev->fb_mtrr);
+ mdev->fb_mtrr = 0;
+}
+
+int mgag200_mm_init(struct mga_device *mdev)
+{
+ struct drm_device *dev = &mdev->base;
+ resource_size_t start, len;
+ int ret;
+
+ /* BAR 0 is VRAM */
+ start = pci_resource_start(dev->pdev, 0);
+ len = pci_resource_len(dev->pdev, 0);
+
+ if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) {
+ drm_err(dev, "can't reserve VRAM\n");
+ return -ENXIO;
+ }
+
+ arch_io_reserve_memtype_wc(start, len);
+
+ mdev->fb_mtrr = arch_phys_wc_add(start, len);
+
+ mdev->vram = ioremap(start, len);
+ if (!mdev->vram) {
+ ret = -ENOMEM;
+ goto err_arch_phys_wc_del;
+ }
+
+ mdev->mc.vram_size = mgag200_probe_vram(mdev, mdev->vram, len);
+ mdev->mc.vram_base = start;
+ mdev->mc.vram_window = len;
+
+ mdev->vram_fb_available = mdev->mc.vram_size;
+
+ return drmm_add_action_or_reset(dev, mgag200_mm_release, NULL);
+
+err_arch_phys_wc_del:
+ arch_phys_wc_del(mdev->fb_mtrr);
+ arch_io_free_memtype_wc(start, len);
+ return ret;
+}
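
mgag200_mm_init() ties its teardown to the DRM device's lifetime through drmm_add_action_or_reset(), which is why no _fini() counterpart survives the rewrite. The general shape of that pattern (sketch; my_init and my_release are placeholders):

	#include <drm/drm_managed.h>

	static void my_release(struct drm_device *dev, void *ptr)
	{
		/* undo whatever my_init() acquired; runs when the last
		 * reference on dev is dropped */
	}

	static int my_init(struct drm_device *dev)
	{
		/* ... acquire resources ... */

		/* on registration failure, my_release() runs immediately */
		return drmm_add_action_or_reset(dev, my_release, NULL);
	}

The probe loop above it writes an 0xaa55 test pattern every 16 KiB and re-reads offset 0 (test2) to catch cards whose address space aliases back to the start of VRAM; the reported size backs off 64 KiB from the first offset that fails either check.
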
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d90e83959fca..f16bd278ab7e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -11,9 +11,15 @@
#include <linux/delay.h>
#include <linux/pci.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -28,14 +34,19 @@
static void mga_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
- struct drm_framebuffer *fb = crtc->primary->fb;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_framebuffer *fb;
u16 *r_ptr, *g_ptr, *b_ptr;
int i;
if (!crtc->enabled)
return;
+ if (!mdev->display_pipe.plane.state)
+ return;
+
+ fb = mdev->display_pipe.plane.state->fb;
+
r_ptr = crtc->gamma_store;
g_ptr = r_ptr + crtc->gamma_size;
b_ptr = g_ptr + crtc->gamma_size;
@@ -703,6 +714,8 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
{
+ u8 misc;
+
switch(mdev->type) {
case G200_SE_A:
case G200_SE_B:
@@ -723,12 +736,18 @@ static int mga_crtc_set_plls(struct mga_device *mdev, long clock)
return mga_g200er_set_plls(mdev, clock);
break;
}
+
+ misc = RREG8(MGA_MISC_IN);
+ misc &= ~MGAREG_MISC_CLK_SEL_MASK;
+ misc |= MGAREG_MISC_CLK_SEL_MGA_MSK;
+ WREG8(MGA_MISC_OUT, misc);
+
return 0;
}
static void mga_g200wb_prepare(struct drm_crtc *crtc)
{
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
u8 tmp;
int iter_max;
@@ -783,7 +802,7 @@ static void mga_g200wb_prepare(struct drm_crtc *crtc)
static void mga_g200wb_commit(struct drm_crtc *crtc)
{
u8 tmp;
- struct mga_device *mdev = crtc->dev->dev_private;
+ struct mga_device *mdev = to_mga_device(crtc->dev);
/* 1- The first step is to ensure that the vrsten and hrsten are set */
WREG8(MGAREG_CRTCEXT_INDEX, 1);
@@ -818,102 +837,91 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
}
/*
- This is how the framebuffer base address is stored in g200 cards:
- * Assume @offset is the gpu_addr variable of the framebuffer object
- * Then addr is the number of _pixels_ (not bytes) from the start of
- VRAM to the first pixel we want to display. (divided by 2 for 32bit
- framebuffers)
- * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
- addr<20> -> CRTCEXT0<6>
- addr<19-16> -> CRTCEXT0<3-0>
- addr<15-8> -> CRTCC<7-0>
- addr<7-0> -> CRTCD<7-0>
- CRTCEXT0 has to be programmed last to trigger an update and make the
- new addr variable take effect.
+ * This is how the framebuffer base address is stored in g200 cards:
+ * * Assume @offset is the gpu_addr variable of the framebuffer object
+ * * Then addr is the number of _pixels_ (not bytes) from the start of
+ * VRAM to the first pixel we want to display. (divided by 2 for 32bit
+ * framebuffers)
+ * * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers
+ * addr<20> -> CRTCEXT0<6>
+ * addr<19-16> -> CRTCEXT0<3-0>
+ * addr<15-8> -> CRTCC<7-0>
+ * addr<7-0> -> CRTCD<7-0>
+ *
+ * CRTCEXT0 has to be programmed last to trigger an update and make the
+ * new addr variable take effect.
*/
-static void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
+static void mgag200_set_startadd(struct mga_device *mdev,
+ unsigned long offset)
{
- struct mga_device *mdev = crtc->dev->dev_private;
- u32 addr;
- int count;
- u8 crtcext0;
-
- while (RREG8(0x1fda) & 0x08);
- while (!(RREG8(0x1fda) & 0x08));
-
- count = RREG8(MGAREG_VCOUNT) + 2;
- while (RREG8(MGAREG_VCOUNT) < count);
-
- WREG8(MGAREG_CRTCEXT_INDEX, 0);
- crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
- crtcext0 &= 0xB0;
- addr = offset / 8;
- /* Can't store addresses any higher than that...
- but we also don't have more than 16MB of memory, so it should be fine. */
- WARN_ON(addr > 0x1fffff);
- crtcext0 |= (!!(addr & (1<<20)))<<6;
- WREG_CRT(0x0d, (u8)(addr & 0xff));
- WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
- WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
-}
+ struct drm_device *dev = &mdev->base;
+ u32 startadd;
+ u8 crtcc, crtcd, crtcext0;
-static int mga_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct drm_gem_vram_object *gbo;
- int ret;
- s64 gpu_addr;
-
- if (!atomic && fb) {
- gbo = drm_gem_vram_of_gem(fb->obj[0]);
- drm_gem_vram_unpin(gbo);
- }
+ startadd = offset / 8;
- gbo = drm_gem_vram_of_gem(crtc->primary->fb->obj[0]);
-
- ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM);
- if (ret)
- return ret;
- gpu_addr = drm_gem_vram_offset(gbo);
- if (gpu_addr < 0) {
- ret = (int)gpu_addr;
- goto err_drm_gem_vram_unpin;
- }
+ /*
+ * Can't store addresses any higher than that, but we also
+ * don't have more than 16 MiB of memory, so it should be fine.
+ */
+ drm_WARN_ON(dev, startadd > 0x1fffff);
- mga_set_start_address(crtc, (u32)gpu_addr);
+ RREG_ECRT(0x00, crtcext0);
- return 0;
+ crtcc = (startadd >> 8) & 0xff;
+ crtcd = startadd & 0xff;
+ crtcext0 &= 0xb0;
+ crtcext0 |= ((startadd >> 14) & BIT(6)) |
+ ((startadd >> 16) & 0x0f);
-err_drm_gem_vram_unpin:
- drm_gem_vram_unpin(gbo);
- return ret;
+ WREG_CRT(0x0c, crtcc);
+ WREG_CRT(0x0d, crtcd);
+ WREG_ECRT(0x00, crtcext0);
}
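/*
 * Worked example (illustrative): a framebuffer scanning out at byte
 * offset 0x150000 gives startadd = 0x150000 / 8 = 0x2a000; then
 * CRTCD = 0x00, CRTCC = 0xa0, CRTCEXT0<3:0> = 0x2 and CRTCEXT0<6> = 0,
 * matching the shifts above (bit 20 via (startadd >> 14) & BIT(6),
 * bits 19:16 via (startadd >> 16) & 0x0f).
 */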
-static int mga_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void mgag200_set_pci_regs(struct mga_device *mdev)
{
- return mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+ uint32_t option = 0, option2 = 0;
+ struct drm_device *dev = &mdev->base;
+
+ switch (mdev->type) {
+ case G200_SE_A:
+ case G200_SE_B:
+ if (mdev->has_sdram)
+ option = 0x40049120;
+ else
+ option = 0x4004d120;
+ option2 = 0x00008000;
+ break;
+ case G200_WB:
+ case G200_EW3:
+ option = 0x41049120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EV:
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_EH:
+ case G200_EH3:
+ option = 0x00000120;
+ option2 = 0x0000b000;
+ break;
+ case G200_ER:
+ break;
+ }
+
+ if (option)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
+
+ if (option2)
+ pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
}
-static int mga_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
+static void mgag200_set_dac_regs(struct mga_device *mdev)
{
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
- int hdisplay, hsyncstart, hsyncend, htotal;
- int vdisplay, vsyncstart, vsyncend, vtotal;
- int pitch;
- int option = 0, option2 = 0;
- int i;
- unsigned char misc = 0;
- unsigned char ext_vga[6];
- u8 bppshift;
-
- static unsigned char dacvalue[] = {
+ size_t i;
+ u8 dacvalue[] = {
/* 0x00: */ 0, 0, 0, 0, 0, 0, 0x00, 0,
/* 0x08: */ 0, 0, 0, 0, 0, 0, 0, 0,
/* 0x10: */ 0, 0, 0, 0, 0, 0, 0, 0,
@@ -926,8 +934,6 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
/* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0
};
- bppshift = mdev->bpp_shifts[fb->format->cpp[0] - 1];
-
switch (mdev->type) {
case G200_SE_A:
case G200_SE_B:
@@ -936,61 +942,26 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_DAC_EN |
MGA1064_MISC_CTL_VGA8 |
MGA1064_MISC_CTL_DAC_RAM_CS;
- if (mdev->has_sdram)
- option = 0x40049120;
- else
- option = 0x4004d120;
- option2 = 0x00008000;
break;
case G200_WB:
case G200_EW3:
dacvalue[MGA1064_VREF_CTL] = 0x07;
- option = 0x41049120;
- option2 = 0x0000b000;
break;
case G200_EV:
dacvalue[MGA1064_PIX_CLK_CTL] = MGA1064_PIX_CLK_CTL_SEL_PLL;
dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
MGA1064_MISC_CTL_DAC_RAM_CS;
- option = 0x00000120;
- option2 = 0x0000b000;
break;
case G200_EH:
case G200_EH3:
dacvalue[MGA1064_MISC_CTL] = MGA1064_MISC_CTL_VGA8 |
MGA1064_MISC_CTL_DAC_RAM_CS;
- option = 0x00000120;
- option2 = 0x0000b000;
break;
case G200_ER:
break;
}
- switch (fb->format->cpp[0] * 8) {
- case 8:
- dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits;
- break;
- case 16:
- if (fb->format->depth == 15)
- dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits;
- else
- dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits;
- break;
- case 24:
- dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_24bits;
- break;
- case 32:
- dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_32_24bits;
- break;
- }
-
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- misc |= 0x40;
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- misc |= 0x80;
-
-
- for (i = 0; i < sizeof(dacvalue); i++) {
+ for (i = 0; i < ARRAY_SIZE(dacvalue); i++) {
if ((i <= 0x17) ||
(i == 0x1b) ||
(i == 0x1c) ||
@@ -1013,21 +984,53 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (mdev->type == G200_ER)
WREG_DAC(0x90, 0);
+}
- if (option)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
- if (option2)
- pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2);
+static void mgag200_init_regs(struct mga_device *mdev)
+{
+ u8 crtcext3, crtcext4, misc;
- WREG_SEQ(2, 0xf);
- WREG_SEQ(3, 0);
- WREG_SEQ(4, 0xe);
+ mgag200_set_pci_regs(mdev);
+ mgag200_set_dac_regs(mdev);
- pitch = fb->pitches[0] / fb->format->cpp[0];
- if (fb->format->cpp[0] * 8 == 24)
- pitch = (pitch * 3) >> (4 - bppshift);
- else
- pitch = pitch >> (4 - bppshift);
+ WREG_SEQ(2, 0x0f);
+ WREG_SEQ(3, 0x00);
+ WREG_SEQ(4, 0x0e);
+
+ WREG_CRT(10, 0);
+ WREG_CRT(11, 0);
+ WREG_CRT(12, 0);
+ WREG_CRT(13, 0);
+ WREG_CRT(14, 0);
+ WREG_CRT(15, 0);
+
+ RREG_ECRT(0x03, crtcext3);
+
+ crtcext3 |= BIT(7); /* enable MGA mode */
+ crtcext4 = 0x00;
+
+ WREG_ECRT(0x03, crtcext3);
+ WREG_ECRT(0x04, crtcext4);
+
+ if (mdev->type == G200_ER)
+ WREG_ECRT(0x24, 0x5);
+
+ if (mdev->type == G200_EW3)
+ WREG_ECRT(0x34, 0x5);
+
+ misc = RREG8(MGA_MISC_IN);
+ misc |= MGAREG_MISC_IOADSEL |
+ MGAREG_MISC_RAMMAPEN |
+ MGAREG_MISC_HIGH_PG_SEL;
+ WREG8(MGA_MISC_OUT, misc);
+}
+
+static void mgag200_set_mode_regs(struct mga_device *mdev,
+ const struct drm_display_mode *mode)
+{
+ unsigned int hdisplay, hsyncstart, hsyncend, htotal;
+ unsigned int vdisplay, vsyncstart, vsyncend, vtotal;
+ u8 misc, crtcext1, crtcext2, crtcext5;
hdisplay = mode->hdisplay / 8 - 1;
hsyncstart = mode->hsync_start / 8 - 1;
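/*
 * The horizontal values are programmed in character clocks (8 pixels)
 * minus one; e.g. hdisplay 1280 becomes 1280 / 8 - 1 = 159.
 */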
@@ -1043,15 +1046,32 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
vsyncend = mode->vsync_end - 1;
vtotal = mode->vtotal - 2;
- WREG_GFX(0, 0);
- WREG_GFX(1, 0);
- WREG_GFX(2, 0);
- WREG_GFX(3, 0);
- WREG_GFX(4, 0);
- WREG_GFX(5, 0x40);
- WREG_GFX(6, 0x5);
- WREG_GFX(7, 0xf);
- WREG_GFX(8, 0xf);
+ misc = RREG8(MGA_MISC_IN);
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ misc |= MGAREG_MISC_HSYNCPOL;
+ else
+ misc &= ~MGAREG_MISC_HSYNCPOL;
+
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ misc |= MGAREG_MISC_VSYNCPOL;
+ else
+ misc &= ~MGAREG_MISC_VSYNCPOL;
+
+ crtcext1 = (((htotal - 4) & 0x100) >> 8) |
+ ((hdisplay & 0x100) >> 7) |
+ ((hsyncstart & 0x100) >> 6) |
+ (htotal & 0x40);
+ if (mdev->type == G200_WB || mdev->type == G200_EW3)
+ crtcext1 |= BIT(7) | /* vrsten */
+ BIT(3); /* hrsten */
+
+ crtcext2 = ((vtotal & 0xc00) >> 10) |
+ ((vdisplay & 0x400) >> 8) |
+ ((vdisplay & 0xc00) >> 7) |
+ ((vsyncstart & 0xc00) >> 5) |
+ ((vdisplay & 0x400) >> 3);
+ crtcext5 = 0x00;
WREG_CRT(0, htotal - 4);
WREG_CRT(1, hdisplay);
@@ -1065,205 +1085,214 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
((vsyncstart & 0x100) >> 6) |
((vdisplay & 0x100) >> 5) |
((vdisplay & 0x100) >> 4) | /* linecomp */
- ((vtotal & 0x200) >> 4)|
+ ((vtotal & 0x200) >> 4) |
((vdisplay & 0x200) >> 3) |
((vsyncstart & 0x200) >> 2));
WREG_CRT(9, ((vdisplay & 0x200) >> 4) |
((vdisplay & 0x200) >> 3));
- WREG_CRT(10, 0);
- WREG_CRT(11, 0);
- WREG_CRT(12, 0);
- WREG_CRT(13, 0);
- WREG_CRT(14, 0);
- WREG_CRT(15, 0);
WREG_CRT(16, vsyncstart & 0xFF);
WREG_CRT(17, (vsyncend & 0x0F) | 0x20);
WREG_CRT(18, vdisplay & 0xFF);
- WREG_CRT(19, pitch & 0xFF);
WREG_CRT(20, 0);
WREG_CRT(21, vdisplay & 0xFF);
WREG_CRT(22, (vtotal + 1) & 0xFF);
WREG_CRT(23, 0xc3);
WREG_CRT(24, vdisplay & 0xFF);
- ext_vga[0] = 0;
- ext_vga[5] = 0;
-
- /* TODO interlace */
-
- ext_vga[0] |= (pitch & 0x300) >> 4;
- ext_vga[1] = (((htotal - 4) & 0x100) >> 8) |
- ((hdisplay & 0x100) >> 7) |
- ((hsyncstart & 0x100) >> 6) |
- (htotal & 0x40);
- ext_vga[2] = ((vtotal & 0xc00) >> 10) |
- ((vdisplay & 0x400) >> 8) |
- ((vdisplay & 0xc00) >> 7) |
- ((vsyncstart & 0xc00) >> 5) |
- ((vdisplay & 0x400) >> 3);
- if (fb->format->cpp[0] * 8 == 24)
- ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80;
- else
- ext_vga[3] = ((1 << bppshift) - 1) | 0x80;
- ext_vga[4] = 0;
- if (mdev->type == G200_WB || mdev->type == G200_EW3)
- ext_vga[1] |= 0x88;
+ WREG_ECRT(0x01, crtcext1);
+ WREG_ECRT(0x02, crtcext2);
+ WREG_ECRT(0x05, crtcext5);
- /* Set pixel clocks */
- misc = 0x2d;
WREG8(MGA_MISC_OUT, misc);
mga_crtc_set_plls(mdev, mode->clock);
+}
- for (i = 0; i < 6; i++) {
- WREG_ECRT(i, ext_vga[i]);
- }
+static u8 mgag200_get_bpp_shift(struct mga_device *mdev,
+ const struct drm_format_info *format)
+{
+ return mdev->bpp_shifts[format->cpp[0] - 1];
+}
- if (mdev->type == G200_ER)
- WREG_ECRT(0x24, 0x5);
+/*
+ * Calculates the HW offset value from the framebuffer's pitch. The
+ * offset is a multiple of the pixel size and depends on the display
+ * format.
+ */
+static u32 mgag200_calculate_offset(struct mga_device *mdev,
+ const struct drm_framebuffer *fb)
+{
+ u32 offset = fb->pitches[0] / fb->format->cpp[0];
+ u8 bppshift = mgag200_get_bpp_shift(mdev, fb->format);
- if (mdev->type == G200_EW3)
- WREG_ECRT(0x34, 0x5);
+ if (fb->format->cpp[0] * 8 == 24)
+ offset = (offset * 3) >> (4 - bppshift);
+ else
+ offset = offset >> (4 - bppshift);
- if (mdev->type == G200_EV) {
- WREG_ECRT(6, 0);
- }
+ return offset;
+}
- WREG_ECRT(0, ext_vga[0]);
- /* Enable mga pixel clock */
- misc = 0x2d;
+static void mgag200_set_offset(struct mga_device *mdev,
+ const struct drm_framebuffer *fb)
+{
+ u8 crtc13, crtcext0;
+ u32 offset = mgag200_calculate_offset(mdev, fb);
- WREG8(MGA_MISC_OUT, misc);
+ RREG_ECRT(0, crtcext0);
+
+ crtc13 = offset & 0xff;
+
+ crtcext0 &= ~MGAREG_CRTCEXT0_OFFSET_MASK;
+ crtcext0 |= (offset >> 4) & MGAREG_CRTCEXT0_OFFSET_MASK;
- if (adjusted_mode)
- memcpy(&mdev->mode, mode, sizeof(struct drm_display_mode));
+ WREG_CRT(0x13, crtc13);
+ WREG_ECRT(0x00, crtcext0);
+}
- mga_crtc_do_set_base(crtc, old_fb, x, y, 0);
+static void mgag200_set_format_regs(struct mga_device *mdev,
+ const struct drm_framebuffer *fb)
+{
+ struct drm_device *dev = &mdev->base;
+ const struct drm_format_info *format = fb->format;
+ unsigned int bpp, bppshift, scale;
+ u8 crtcext3, xmulctrl;
- /* reset tagfifo */
- if (mdev->type == G200_ER) {
- u32 mem_ctl = RREG32(MGAREG_MEMCTL);
- u8 seq1;
+ bpp = format->cpp[0] * 8;
- /* screen off */
- WREG8(MGAREG_SEQ_INDEX, 0x01);
- seq1 = RREG8(MGAREG_SEQ_DATA) | 0x20;
- WREG8(MGAREG_SEQ_DATA, seq1);
+ bppshift = mgag200_get_bpp_shift(mdev, format);
+ switch (bpp) {
+ case 24:
+ scale = ((1 << bppshift) * 3) - 1;
+ break;
+ default:
+ scale = (1 << bppshift) - 1;
+ break;
+ }
- WREG32(MGAREG_MEMCTL, mem_ctl | 0x00200000);
- udelay(1000);
- WREG32(MGAREG_MEMCTL, mem_ctl & ~0x00200000);
+ RREG_ECRT(3, crtcext3);
- WREG8(MGAREG_SEQ_DATA, seq1 & ~0x20);
+ switch (bpp) {
+ case 8:
+ xmulctrl = MGA1064_MUL_CTL_8bits;
+ break;
+ case 16:
+ if (format->depth == 15)
+ xmulctrl = MGA1064_MUL_CTL_15bits;
+ else
+ xmulctrl = MGA1064_MUL_CTL_16bits;
+ break;
+ case 24:
+ xmulctrl = MGA1064_MUL_CTL_24bits;
+ break;
+ case 32:
+ xmulctrl = MGA1064_MUL_CTL_32_24bits;
+ break;
+ default:
+ /* BUG: We should have caught this problem already. */
+ drm_WARN_ON(dev, "invalid format depth\n");
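/*
 * drm_WARN_ON() evaluates its second argument as a condition; the
 * string literal is always non-zero, so reaching this default case
 * always emits the warning before bailing out.
 */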
+ return;
}
+ crtcext3 &= ~GENMASK(2, 0);
+ crtcext3 |= scale;
- if (IS_G200_SE(mdev)) {
- if (mdev->unique_rev_id >= 0x04) {
- WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
- WREG8(MGAREG_CRTCEXT_DATA, 0);
- } else if (mdev->unique_rev_id >= 0x02) {
- u8 hi_pri_lvl;
- u32 bpp;
- u32 mb;
-
- if (fb->format->cpp[0] * 8 > 16)
- bpp = 32;
- else if (fb->format->cpp[0] * 8 > 8)
- bpp = 16;
- else
- bpp = 8;
-
- mb = (mode->clock * bpp) / 1000;
- if (mb > 3100)
- hi_pri_lvl = 0;
- else if (mb > 2600)
- hi_pri_lvl = 1;
- else if (mb > 1900)
- hi_pri_lvl = 2;
- else if (mb > 1160)
- hi_pri_lvl = 3;
- else if (mb > 440)
- hi_pri_lvl = 4;
- else
- hi_pri_lvl = 5;
+ WREG_DAC(MGA1064_MUL_CTL, xmulctrl);
- WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
- WREG8(MGAREG_CRTCEXT_DATA, hi_pri_lvl);
- } else {
- WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
- if (mdev->unique_rev_id >= 0x01)
- WREG8(MGAREG_CRTCEXT_DATA, 0x03);
- else
- WREG8(MGAREG_CRTCEXT_DATA, 0x04);
- }
- }
- return 0;
+ WREG_GFX(0, 0x00);
+ WREG_GFX(1, 0x00);
+ WREG_GFX(2, 0x00);
+ WREG_GFX(3, 0x00);
+ WREG_GFX(4, 0x00);
+ WREG_GFX(5, 0x40);
+ WREG_GFX(6, 0x05);
+ WREG_GFX(7, 0x0f);
+ WREG_GFX(8, 0x0f);
+
+ WREG_ECRT(3, crtcext3);
}
-#if 0 /* code from mjg to attempt D3 on crtc dpms off - revisit later */
-static int mga_suspend(struct drm_crtc *crtc)
+static void mgag200_g200er_reset_tagfifo(struct mga_device *mdev)
{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
- int option;
+ static uint32_t RESET_FLAG = 0x00200000; /* undocumented magic value */
+ u8 seq1;
+ u32 memctl;
- if (mdev->suspended)
- return 0;
+ /* screen off */
+ RREG_SEQ(0x01, seq1);
+ seq1 |= MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
- WREG_SEQ(1, 0x20);
- WREG_ECRT(1, 0x30);
- /* Disable the pixel clock */
- WREG_DAC(0x1a, 0x05);
- /* Power down the DAC */
- WREG_DAC(0x1e, 0x18);
- /* Power down the pixel PLL */
- WREG_DAC(0x1a, 0x0d);
+ memctl = RREG32(MGAREG_MEMCTL);
- /* Disable PLLs and clocks */
- pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
- option &= ~(0x1F8024);
- pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
- pci_set_power_state(pdev, PCI_D3hot);
- pci_disable_device(pdev);
+ memctl |= RESET_FLAG;
+ WREG32(MGAREG_MEMCTL, memctl);
- mdev->suspended = true;
+ udelay(1000);
- return 0;
+ memctl &= ~RESET_FLAG;
+ WREG32(MGAREG_MEMCTL, memctl);
+
+ /* screen on */
+ RREG_SEQ(0x01, seq1);
+ seq1 &= ~MGAREG_SEQ1_SCROFF;
+ WREG_SEQ(0x01, seq1);
}
-static int mga_resume(struct drm_crtc *crtc)
+static void mgag200_g200se_set_hiprilvl(struct mga_device *mdev,
+ const struct drm_display_mode *mode,
+ const struct drm_framebuffer *fb)
{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
- struct pci_dev *pdev = dev->pdev;
- int option;
-
- if (!mdev->suspended)
- return 0;
-
- pci_set_power_state(pdev, PCI_D0);
- pci_enable_device(pdev);
+ unsigned int hiprilvl;
+ u8 crtcext6;
+
+ if (mdev->unique_rev_id >= 0x04) {
+ hiprilvl = 0;
+ } else if (mdev->unique_rev_id >= 0x02) {
+ unsigned int bpp;
+ unsigned long mb;
+
+ if (fb->format->cpp[0] * 8 > 16)
+ bpp = 32;
+ else if (fb->format->cpp[0] * 8 > 8)
+ bpp = 16;
+ else
+ bpp = 8;
+
+ mb = (mode->clock * bpp) / 1000;
+ if (mb > 3100)
+ hiprilvl = 0;
+ else if (mb > 2600)
+ hiprilvl = 1;
+ else if (mb > 1900)
+ hiprilvl = 2;
+ else if (mb > 1160)
+ hiprilvl = 3;
+ else if (mb > 440)
+ hiprilvl = 4;
+ else
+ hiprilvl = 5;
- /* Disable sysclk */
- pci_read_config_dword(pdev, PCI_MGA_OPTION, &option);
- option &= ~(0x4);
- pci_write_config_dword(pdev, PCI_MGA_OPTION, option);
+ } else if (mdev->unique_rev_id >= 0x01) {
+ hiprilvl = 3;
+ } else {
+ hiprilvl = 4;
+ }
- mdev->suspended = false;
+ crtcext6 = hiprilvl; /* implicitly sets maxhipri to 0 */
- return 0;
+ WREG_ECRT(0x06, crtcext6);
}
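/*
 * Worked example (illustrative): 1920x1080@60 has a ~148500 kHz pixel
 * clock, so at 32 bpp mb = 148500 * 32 / 1000 = 4752 > 3100 and
 * hiprilvl ends up as 0 in the computed branch above.
 */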
-#endif
+static void mgag200_g200ev_set_hiprilvl(struct mga_device *mdev)
+{
+ WREG_ECRT(0x06, 0x00);
+}
static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 seq1 = 0, crtcext1 = 0;
switch (mode) {
@@ -1286,11 +1315,6 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
break;
}
-#if 0
- if (mode == DRM_MODE_DPMS_OFF) {
- mga_suspend(crtc);
- }
-#endif
WREG8(MGAREG_SEQ_INDEX, 0x01);
seq1 |= RREG8(MGAREG_SEQ_DATA) & ~0x20;
mga_wait_vsync(mdev);
@@ -1300,13 +1324,6 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
WREG8(MGAREG_CRTCEXT_INDEX, 0x01);
crtcext1 |= RREG8(MGAREG_CRTCEXT_DATA) & ~0x30;
WREG8(MGAREG_CRTCEXT_DATA, crtcext1);
-
-#if 0
- if (mode == DRM_MODE_DPMS_ON && mdev->suspended == true) {
- mga_resume(crtc);
- drm_helper_resume_force_mode(dev);
- }
-#endif
}
/*
@@ -1317,7 +1334,7 @@ static void mga_crtc_dpms(struct drm_crtc *crtc, int mode)
static void mga_crtc_prepare(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 tmp;
/* mga_resume(crtc);*/
@@ -1353,8 +1370,7 @@ static void mga_crtc_prepare(struct drm_crtc *crtc)
static void mga_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct mga_device *mdev = dev->dev_private;
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ struct mga_device *mdev = to_mga_device(dev);
u8 tmp;
if (mdev->type == G200_WB || mdev->type == G200_EW3)
@@ -1373,82 +1389,12 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
WREG_SEQ(0x1, tmp);
WREG_SEQ(0, 3);
}
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
+ * Connector
*/
-static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- mga_crtc_load_lut(crtc);
-
- return 0;
-}
-
-/* Simple cleanup function */
-static void mga_crtc_destroy(struct drm_crtc *crtc)
-{
- struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(mga_crtc);
-}
-
-static void mga_crtc_disable(struct drm_crtc *crtc)
-{
- DRM_DEBUG_KMS("\n");
- mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
- if (crtc->primary->fb) {
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct drm_gem_vram_object *gbo =
- drm_gem_vram_of_gem(fb->obj[0]);
- drm_gem_vram_unpin(gbo);
- }
- crtc->primary->fb = NULL;
-}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs mga_crtc_funcs = {
- .cursor_set = mgag200_crtc_cursor_set,
- .cursor_move = mgag200_crtc_cursor_move,
- .gamma_set = mga_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = mga_crtc_destroy,
-};
-
-static const struct drm_crtc_helper_funcs mga_helper_funcs = {
- .disable = mga_crtc_disable,
- .dpms = mga_crtc_dpms,
- .mode_set = mga_crtc_mode_set,
- .mode_set_base = mga_crtc_mode_set_base,
- .prepare = mga_crtc_prepare,
- .commit = mga_crtc_commit,
-};
-
-/* CRTC setup */
-static void mga_crtc_init(struct mga_device *mdev)
-{
- struct mga_crtc *mga_crtc;
-
- mga_crtc = kzalloc(sizeof(struct mga_crtc) +
- (MGAG200FB_CONN_LIMIT * sizeof(struct drm_connector *)),
- GFP_KERNEL);
-
- if (mga_crtc == NULL)
- return;
-
- drm_crtc_init(mdev->dev, &mga_crtc->base, &mga_crtc_funcs);
-
- drm_mode_crtc_set_gamma_size(&mga_crtc->base, MGAG200_LUT_SIZE);
- mdev->mode_info.crtc = mga_crtc;
-
- drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
-}
static int mga_vga_get_modes(struct drm_connector *connector)
{
@@ -1495,7 +1441,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct mga_device *mdev = (struct mga_device*)dev->dev_private;
+ struct mga_device *mdev = to_mga_device(dev);
int bpp = 32;
if (IS_G200_SE(mdev)) {
@@ -1574,84 +1520,258 @@ static void mga_connector_destroy(struct drm_connector *connector)
struct mga_connector *mga_connector = to_mga_connector(connector);
mgag200_i2c_destroy(mga_connector->i2c);
drm_connector_cleanup(connector);
- kfree(connector);
}
static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = {
- .get_modes = mga_vga_get_modes,
+ .get_modes = mga_vga_get_modes,
.mode_valid = mga_vga_mode_valid,
};
static const struct drm_connector_funcs mga_vga_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mga_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = mga_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static struct drm_connector *mga_vga_init(struct drm_device *dev)
+static int mgag200_vga_connector_init(struct mga_device *mdev)
{
- struct drm_connector *connector;
- struct mga_connector *mga_connector;
+ struct drm_device *dev = &mdev->base;
+ struct mga_connector *mconnector = &mdev->connector;
+ struct drm_connector *connector = &mconnector->base;
+ struct mga_i2c_chan *i2c;
+ int ret;
- mga_connector = kzalloc(sizeof(struct mga_connector), GFP_KERNEL);
- if (!mga_connector)
- return NULL;
+ i2c = mgag200_i2c_create(dev);
+ if (!i2c)
+ drm_warn(dev, "failed to add DDC bus\n");
- connector = &mga_connector->base;
- mga_connector->i2c = mgag200_i2c_create(dev);
- if (!mga_connector->i2c)
- DRM_ERROR("failed to add ddc bus\n");
+ ret = drm_connector_init_with_ddc(dev, connector,
+ &mga_vga_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA,
+ &i2c->adapter);
+ if (ret)
+ goto err_mgag200_i2c_destroy;
+ drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
- drm_connector_init_with_ddc(dev, connector,
- &mga_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &mga_connector->i2c->adapter);
+ mconnector->i2c = i2c;
- drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+ return 0;
+
+err_mgag200_i2c_destroy:
+ mgag200_i2c_destroy(i2c);
+ return ret;
+}
+
+/*
+ * Simple Display Pipe
+ */
+
+static enum drm_mode_status
+mgag200_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static void
+mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ struct drm_device *dev = &mdev->base;
+ void *vmap;
+
+ vmap = drm_gem_shmem_vmap(fb->obj[0]);
+ if (drm_WARN_ON(dev, !vmap))
+ return; /* BUG: SHMEM BO should always be vmapped */
+
+ drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip);
+
+ drm_gem_shmem_vunmap(fb->obj[0], vmap);
+
+ /* Always scanout image at VRAM offset 0 */
+ mgag200_set_startadd(mdev, (u32)0);
+ mgag200_set_offset(mdev, fb);
+}
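/*
 * With SHMEM-backed GEM objects the framebuffer lives in system
 * memory: each damage clip is vmapped and copied into the VRAM
 * aperture, and scanout always starts at VRAM offset 0 since there is
 * no per-BO VRAM placement any more.
 */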
+
+static void
+mgag200_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+
+ mga_crtc_prepare(crtc);
+
+ mgag200_set_format_regs(mdev, fb);
+ mgag200_set_mode_regs(mdev, adjusted_mode);
- drm_connector_register(connector);
+ if (mdev->type == G200_ER)
+ mgag200_g200er_reset_tagfifo(mdev);
+
+ if (IS_G200_SE(mdev))
+ mgag200_g200se_set_hiprilvl(mdev, adjusted_mode, fb);
+ else if (mdev->type == G200_EV)
+ mgag200_g200ev_set_hiprilvl(mdev);
+
+ mga_crtc_commit(crtc);
- return connector;
+ mgag200_handle_damage(mdev, fb, &fullscreen);
}
+static void
+mgag200_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static int
+mgag200_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_plane *plane = plane_state->plane;
+ struct drm_framebuffer *new_fb = plane_state->fb;
+ struct drm_framebuffer *fb = NULL;
+
+ if (!new_fb)
+ return 0;
+
+ if (plane->state)
+ fb = plane->state->fb;
+
+ if (!fb || (fb->format != new_fb->format))
+ crtc_state->mode_changed = true; /* update PLL settings */
+
+ return 0;
+}
+
+static void
+mgag200_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_device *dev = plane->dev;
+ struct mga_device *mdev = to_mga_device(dev);
+ struct drm_plane_state *state = plane->state;
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_rect damage;
+
+ if (!fb)
+ return;
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &damage))
+ mgag200_handle_damage(mdev, fb, &damage);
+}
+
+static const struct drm_simple_display_pipe_funcs
+mgag200_simple_display_pipe_funcs = {
+ .mode_valid = mgag200_simple_display_pipe_mode_valid,
+ .enable = mgag200_simple_display_pipe_enable,
+ .disable = mgag200_simple_display_pipe_disable,
+ .check = mgag200_simple_display_pipe_check,
+ .update = mgag200_simple_display_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const uint32_t mgag200_simple_display_pipe_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+};
+
+static const uint64_t mgag200_simple_display_pipe_fmtmods[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/*
+ * Mode config
+ */
+
+static const struct drm_mode_config_funcs mgag200_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static unsigned int mgag200_preferred_depth(struct mga_device *mdev)
+{
+ if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024))
+ return 16;
+ else
+ return 32;
+}
int mgag200_modeset_init(struct mga_device *mdev)
{
- struct drm_encoder *encoder = &mdev->encoder;
- struct drm_connector *connector;
+ struct drm_device *dev = &mdev->base;
+ struct drm_connector *connector = &mdev->connector.base;
+ struct drm_simple_display_pipe *pipe = &mdev->display_pipe;
+ size_t format_count = ARRAY_SIZE(mgag200_simple_display_pipe_formats);
int ret;
- mdev->mode_info.mode_config_initialized = true;
+ mdev->bpp_shifts[0] = 0;
+ mdev->bpp_shifts[1] = 1;
+ mdev->bpp_shifts[2] = 0;
+ mdev->bpp_shifts[3] = 2;
+
+ mgag200_init_regs(mdev);
+
+ ret = drmm_mode_config_init(dev);
+ if (ret) {
+ drm_err(dev, "drmm_mode_config_init() failed, error %d\n",
+ ret);
+ return ret;
+ }
+
+ dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
+ dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
- mdev->dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
- mdev->dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
+ dev->mode_config.preferred_depth = mgag200_preferred_depth(mdev);
- mdev->dev->mode_config.fb_base = mdev->mc.vram_base;
+ dev->mode_config.fb_base = mdev->mc.vram_base;
- mga_crtc_init(mdev);
+ dev->mode_config.funcs = &mgag200_mode_config_funcs;
- ret = drm_simple_encoder_init(mdev->dev, encoder,
- DRM_MODE_ENCODER_DAC);
+ ret = mgag200_vga_connector_init(mdev);
if (ret) {
- drm_err(mdev->dev,
- "drm_simple_encoder_init() failed, error %d\n",
+ drm_err(dev,
+ "mgag200_vga_connector_init() failed, error %d\n",
ret);
return ret;
}
- encoder->possible_crtcs = 0x1;
- connector = mga_vga_init(mdev->dev);
- if (!connector) {
- DRM_ERROR("mga_vga_init failed\n");
- return -1;
+ ret = drm_simple_display_pipe_init(dev, pipe,
+ &mgag200_simple_display_pipe_funcs,
+ mgag200_simple_display_pipe_formats,
+ format_count,
+ mgag200_simple_display_pipe_fmtmods,
+ connector);
+ if (ret) {
+ drm_err(dev,
+ "drm_simple_display_pipe_init() failed, error %d\n",
+ ret);
+ return ret;
}
- drm_connector_attach_encoder(connector, encoder);
+ /* FIXME: legacy gamma tables; convert to CRTC state */
+ drm_mode_crtc_set_gamma_size(&pipe->crtc, MGAG200_LUT_SIZE);
- return 0;
-}
-
-void mgag200_modeset_fini(struct mga_device *mdev)
-{
+ drm_mode_config_reset(dev);
+ return 0;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_reg.h b/drivers/gpu/drm/mgag200/mgag200_reg.h
index c096a9d6bcbc..29f7194faadc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_reg.h
+++ b/drivers/gpu/drm/mgag200/mgag200_reg.h
@@ -16,10 +16,11 @@
* MGA1064SG Mystique register file
*/
-
#ifndef _MGA_REG_H_
#define _MGA_REG_H_
+#include <linux/bits.h>
+
#define MGAREG_DWGCTL 0x1c00
#define MGAREG_MACCESS 0x1c04
/* the following is a Mystique-only register */
@@ -221,21 +222,29 @@
#define MGAREG_MISC_IOADSEL (0x1 << 0)
#define MGAREG_MISC_RAMMAPEN (0x1 << 1)
+#define MGAREG_MISC_CLK_SEL_MASK GENMASK(3, 2)
#define MGAREG_MISC_CLK_SEL_VGA25 (0x0 << 2)
#define MGAREG_MISC_CLK_SEL_VGA28 (0x1 << 2)
#define MGAREG_MISC_CLK_SEL_MGA_PIX (0x2 << 2)
#define MGAREG_MISC_CLK_SEL_MGA_MSK (0x3 << 2)
#define MGAREG_MISC_VIDEO_DIS (0x1 << 4)
#define MGAREG_MISC_HIGH_PG_SEL (0x1 << 5)
+#define MGAREG_MISC_HSYNCPOL BIT(6)
+#define MGAREG_MISC_VSYNCPOL BIT(7)
/* MMIO VGA registers */
#define MGAREG_SEQ_INDEX 0x1fc4
#define MGAREG_SEQ_DATA 0x1fc5
+
+#define MGAREG_SEQ1_SCROFF BIT(5)
+
#define MGAREG_CRTC_INDEX 0x1fd4
#define MGAREG_CRTC_DATA 0x1fd5
#define MGAREG_CRTCEXT_INDEX 0x1fde
#define MGAREG_CRTCEXT_DATA 0x1fdf
+#define MGAREG_CRTCEXT0_OFFSET_MASK GENMASK(5, 4)
+
/* Cursor X and Y position */
#define MGA_CURPOSXL 0x3c0c
#define MGA_CURPOSXH 0x3c0d
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
deleted file mode 100644
index e89657630ea7..000000000000
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- */
-/*
- * Authors: Dave Airlie <airlied@redhat.com>
- */
-
-#include <linux/pci.h>
-
-#include "mgag200_drv.h"
-
-int mgag200_mm_init(struct mga_device *mdev)
-{
- struct drm_vram_mm *vmm;
- int ret;
- struct drm_device *dev = mdev->dev;
-
- vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0),
- mdev->mc.vram_size);
- if (IS_ERR(vmm)) {
- ret = PTR_ERR(vmm);
- DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
- return ret;
- }
-
- arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
-
- mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
-
- mdev->vram_fb_available = mdev->mc.vram_size;
-
- return 0;
-}
-
-void mgag200_mm_fini(struct mga_device *mdev)
-{
- struct drm_device *dev = mdev->dev;
-
- mdev->vram_fb_available = 0;
-
- drm_vram_helper_release_mm(dev);
-
- arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0));
- arch_phys_wc_del(mdev->fb_mtrr);
- mdev->fb_mtrr = 0;
-}
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1579cf0d828f..42f8aae28b31 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -65,6 +65,7 @@ msm-y := \
disp/dpu1/dpu_hw_lm.o \
disp/dpu1/dpu_hw_pingpong.o \
disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_dspp.o \
disp/dpu1/dpu_hw_top.o \
disp/dpu1/dpu_hw_util.o \
disp/dpu1/dpu_hw_vbif.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 1f83bc18d500..60f6472a3e58 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -401,6 +401,21 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu)
return state;
}
+static struct msm_gem_address_space *
+a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct msm_mmu *mmu = msm_gpummu_new(&pdev->dev, gpu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ SZ_16M + 0xfff * SZ_64K);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
/* Register offset defines for A2XX - copy of A3XX */
static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -429,6 +444,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a2xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = a2xx_create_address_space,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b67f88872726..0a5ea9f56cb8 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -441,6 +441,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a3xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 253d8d85daad..b9b26b2bf9c5 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -66,19 +66,22 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
}
}
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
- 0x00000922);
- }
+ /* No CCU for A405 */
+ if (!adreno_is_a405(adreno_gpu)) {
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
+ 0x00000922);
+ }
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
- 0x00000000);
- }
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
+ 0x00000000);
+ }
- for (i = 0; i < 4; i++) {
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
- 0x00000001);
+ for (i = 0; i < 4; i++) {
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
+ 0x00000001);
+ }
}
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
@@ -137,7 +140,9 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a420(adreno_gpu)) {
+ if (adreno_is_a405(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -440,6 +445,52 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
+static const unsigned int a405_registers[] = {
+ /* RBBM */
+ 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
+ 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
+ 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
+ /* CP */
+ 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
+ 0x0578, 0x058F,
+ /* VSC */
+ 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
+ /* GRAS */
+ 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
+ /* RB */
+ 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
+ /* PC */
+ 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
+ /* VFD */
+ 0x0E40, 0x0E4A,
+ /* VPC */
+ 0x0E60, 0x0E61, 0x0E63, 0x0E68,
+ /* UCHE */
+ 0x0E80, 0x0E84, 0x0E88, 0x0E95,
+ /* GRAS CTX 0 */
+ 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
+ /* PC CTX 0 */
+ 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
+ /* VFD CTX 0 */
+ 0x2200, 0x2204, 0x2208, 0x22A9,
+ /* GRAS CTX 1 */
+ 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
+ /* PC CTX 1 */
+ 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
+ /* VFD CTX 1 */
+ 0x2600, 0x2604, 0x2608, 0x26A9,
+ /* VBIF version 0x20050000 */
+ 0x3000, 0x3007, 0x302C, 0x302C, 0x3030, 0x3030, 0x3034, 0x3036,
+ 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040, 0x3049, 0x3049,
+ 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068, 0x306C, 0x306D,
+ 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094, 0x3098, 0x3098,
+ 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8, 0x30D0, 0x30D0,
+ 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100, 0x3108, 0x3108,
+ 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120, 0x3124, 0x3125,
+ 0x3129, 0x3129, 0x340C, 0x340C, 0x3410, 0x3410,
+ ~0 /* sentinel */
+};
+
static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
@@ -532,6 +583,7 @@ static const struct adreno_gpu_funcs funcs = {
#endif
.gpu_state_get = a4xx_gpu_state_get,
.gpu_state_put = adreno_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a4xx_get_timestamp,
};
@@ -563,13 +615,14 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
gpu->perfcntrs = NULL;
gpu->num_perfcntrs = 0;
- adreno_gpu->registers = a4xx_registers;
- adreno_gpu->reg_offsets = a4xx_register_offsets;
-
ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
+ adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
+ a4xx_registers;
+ adreno_gpu->reg_offsets = a4xx_register_offsets;
+
/* if needed, allocate gmem: */
if (adreno_is_a4xx(adreno_gpu)) {
ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index 075ecce4b5e0..68eddac7771c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -124,13 +124,13 @@ reset_set(void *data, u64 val)
if (a5xx_gpu->pm4_bo) {
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_put(a5xx_gpu->pm4_bo);
+ drm_gem_object_put_locked(a5xx_gpu->pm4_bo);
a5xx_gpu->pm4_bo = NULL;
}
if (a5xx_gpu->pfp_bo) {
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_put(a5xx_gpu->pfp_bo);
+ drm_gem_object_put_locked(a5xx_gpu->pfp_bo);
a5xx_gpu->pfp_bo = NULL;
}
@@ -148,27 +148,19 @@ reset_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
{
struct drm_device *dev;
- int ret;
if (!minor)
- return 0;
+ return;
dev = minor->dev;
- ret = drm_debugfs_create_files(a5xx_debugfs_list,
- ARRAY_SIZE(a5xx_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install a5xx_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(a5xx_debugfs_list,
+ ARRAY_SIZE(a5xx_debugfs_list),
+ minor->debugfs_root, minor);
debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
&reset_fops);
-
- return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 724024a2243a..0e1933ea12f2 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -804,17 +804,17 @@ static void a5xx_destroy(struct msm_gpu *gpu)
if (a5xx_gpu->pm4_bo) {
msm_gem_unpin_iova(a5xx_gpu->pm4_bo, gpu->aspace);
- drm_gem_object_put_unlocked(a5xx_gpu->pm4_bo);
+ drm_gem_object_put(a5xx_gpu->pm4_bo);
}
if (a5xx_gpu->pfp_bo) {
msm_gem_unpin_iova(a5xx_gpu->pfp_bo, gpu->aspace);
- drm_gem_object_put_unlocked(a5xx_gpu->pfp_bo);
+ drm_gem_object_put(a5xx_gpu->pfp_bo);
}
if (a5xx_gpu->gpmu_bo) {
msm_gem_unpin_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
- drm_gem_object_put_unlocked(a5xx_gpu->gpmu_bo);
+ drm_gem_object_put(a5xx_gpu->gpmu_bo);
}
adreno_gpu_cleanup(adreno_gpu);
@@ -1404,6 +1404,10 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
{
u64 busy_cycles, busy_time;
+ /* Only read the gpu busy if the hardware is already active */
+ if (pm_runtime_get_if_in_use(&gpu->pdev->dev) == 0)
+ return 0;
+
busy_cycles = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
@@ -1412,6 +1416,8 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
gpu->devfreq.busy_cycles = busy_cycles;
+ pm_runtime_put(&gpu->pdev->dev);
+
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
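
The pm_runtime_get_if_in_use() guard above (the same pattern appears in a6xx_gpu_busy() further down) keeps devfreq's busy polling from waking idle hardware: if the device is not already active, report zero rather than powering it up. The pattern in isolation, with a hypothetical hw_read_counter() standing in for the real register read:

    static u64 read_counter_if_active(struct device *dev)
    {
            u64 val;

            /* returns 0 when the device is suspended; no reference taken */
            if (pm_runtime_get_if_in_use(dev) == 0)
                    return 0;

            val = hw_read_counter();        /* hypothetical register read */

            pm_runtime_put(dev);
            return val;
    }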
@@ -1439,6 +1445,7 @@ static const struct adreno_gpu_funcs funcs = {
.gpu_busy = a5xx_gpu_busy,
.gpu_state_get = a5xx_gpu_state_get,
.gpu_state_put = a5xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index 833468ce6b6d..54868d4e3958 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -41,7 +41,7 @@ struct a5xx_gpu {
#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
#ifdef CONFIG_DEBUG_FS
-int a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
+void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
/*
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
index ed78fee2a262..47840b73cdda 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -1047,6 +1047,8 @@ enum a6xx_tex_type {
#define REG_A6XX_CP_MISC_CNTL 0x00000840
+#define REG_A6XX_CP_APRIV_CNTL 0x00000844
+
#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
@@ -1764,6 +1766,8 @@ static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+#define REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL 0x00000011
+
#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
@@ -2418,6 +2422,16 @@ static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0 0x0000b608
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1 0x0000b609
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2 0x0000b60a
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3 0x0000b60b
+
+#define REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4 0x0000b60c
+
#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index c4e71abbdd53..096be97ce9f9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -2,14 +2,16 @@
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
-#include <linux/dma-mapping.h>
#include <linux/interconnect.h>
#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
{
@@ -127,8 +129,6 @@ static void __a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
if (ret)
dev_err(gmu->dev, "GMU set GPU frequency error: %d\n", ret);
- gmu->freq = gmu->gpu_freqs[index];
-
/*
* Eventually we will want to scale the path vote with the frequency but
* for now leave it at max so that the performance is nominal.
@@ -151,8 +151,21 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, unsigned long freq)
break;
gmu->current_perf_index = perf_index;
+ gmu->freq = gmu->gpu_freqs[perf_index];
+
+ /*
+ * This can get called from devfreq while the hardware is idle. Don't
+ * bring up the power if it isn't already active
+ */
+ if (pm_runtime_get_if_in_use(gmu->dev) == 0)
+ return;
- __a6xx_gmu_set_freq(gmu, perf_index);
+ if (gmu->legacy)
+ __a6xx_gmu_set_freq(gmu, perf_index);
+ else
+ a6xx_hfi_set_freq(gmu, perf_index);
+
+ pm_runtime_put(gmu->dev);
}
unsigned long a6xx_gmu_get_freq(struct msm_gpu *gpu)
@@ -196,6 +209,12 @@ static int a6xx_gmu_start(struct a6xx_gmu *gmu)
u32 val;
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+
+ /*
+ * Set the log wptr index.
+ * Note: downstream saves the value at power-off and restores it here.
+ */
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP, 0);
+
gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
@@ -232,8 +251,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
switch (state) {
case GMU_OOB_GPU_SET:
- request = GMU_OOB_GPU_SET_REQUEST;
- ack = GMU_OOB_GPU_SET_ACK;
+ if (gmu->legacy) {
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ } else {
+ request = GMU_OOB_GPU_SET_REQUEST_NEW;
+ ack = GMU_OOB_GPU_SET_ACK_NEW;
+ }
name = "GPU_SET";
break;
case GMU_OOB_BOOT_SLUMBER:
@@ -272,6 +296,13 @@ int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
/* Clear a pending OOB state in the GMU */
void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
{
+ if (!gmu->legacy) {
+ WARN_ON(state != GMU_OOB_GPU_SET);
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR_NEW);
+ return;
+ }
+
switch (state) {
case GMU_OOB_GPU_SET:
gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
@@ -294,6 +325,9 @@ static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
int ret;
u32 val;
+ if (!gmu->legacy)
+ return 0;
+
gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
@@ -313,6 +347,9 @@ static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
u32 val;
int ret;
+ if (!gmu->legacy)
+ return;
+
/* Make sure retention is on */
gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
@@ -356,6 +393,11 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
a6xx_sptprac_disable(gmu);
+ if (!gmu->legacy) {
+ ret = a6xx_hfi_send_prep_slumber(gmu);
+ goto out;
+ }
+
/* Tell the GMU to get ready to slumber */
gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
@@ -371,6 +413,7 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
}
+out:
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -392,7 +435,7 @@ static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
return ret;
}
- ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
!val, 100, 10000);
if (ret) {
@@ -418,7 +461,7 @@ static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
- ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ ret = gmu_poll_timeout_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
val, val & (1 << 16), 100, 10000);
if (ret)
DRM_DEV_ERROR(gmu->dev, "Unable to power off the GPU RSC\n");
@@ -441,32 +484,48 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
void __iomem *seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
+ uint32_t pdc_address_offset;
if (!pdcptr || !seqptr)
goto err;
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a640(adreno_gpu))
+ pdc_address_offset = 0x30090;
+ else if (adreno_is_a650(adreno_gpu))
+ pdc_address_offset = 0x300a0;
+ else
+ pdc_address_offset = 0x30080;
+
/* Disable SDE clock gating */
- gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+ gmu_write_rscc(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
/* Setup RSC PDC handshake for sleep and wakeup */
- gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
- gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
- gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
- gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
/* Load RSC sequencer uCode for sleep and wakeup */
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
- gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ if (adreno_is_a650(adreno_gpu)) {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xeaaae5a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xe1a1ebab);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e0a581);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xecac82e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020edad);
+ } else {
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write_rscc(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+ }
/* Load PDC sequencer uCode for power up and power down sequence */
pdc_write(seqptr, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
@@ -487,10 +546,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
- if (adreno_is_a618(adreno_gpu))
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30090);
- else
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
@@ -502,17 +558,12 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
- if (adreno_is_a618(adreno_gpu))
+ if (adreno_is_a618(adreno_gpu) || adreno_is_a650(adreno_gpu))
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x2);
else
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
-
-
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
- if (adreno_is_a618(adreno_gpu))
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30090);
- else
- pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, pdc_address_offset);
pdc_write(pdcptr, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
/* Setup GPU PDC */
@@ -542,6 +593,8 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
{
/* Disable GMU WB/RB buffer */
gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_ICACHE_CONFIG, 0x1);
+ gmu_write(gmu, REG_A6XX_GMU_DCACHE_CONFIG, 0x1);
gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
@@ -571,14 +624,95 @@ static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
}
+struct block_header {
+ u32 addr;
+ u32 size;
+ u32 type;
+ u32 value;
+ u32 data[];
+};
+
+/* this should be a general kernel helper */
+static int in_range(u32 addr, u32 start, u32 size)
+{
+ return addr >= start && addr < start + size;
+}
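
in_range() treats the interval as half-open, [start, start + size); with SZ_16K == 0x4000 the boundaries work out as:

    /* the base address is inside, base + size is the first address outside */
    WARN_ON(!in_range(0x04000, 0x04000, SZ_16K));   /* true: first byte */
    WARN_ON(in_range(0x08000, 0x04000, SZ_16K));    /* false: one past the end */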
+
+static bool fw_block_mem(struct a6xx_gmu_bo *bo, const struct block_header *blk)
+{
+ if (!in_range(blk->addr, bo->iova, bo->size))
+ return false;
+
+ memcpy(bo->virt + blk->addr - bo->iova, blk->data, blk->size);
+ return true;
+}
+
+static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ const struct firmware *fw_image = adreno_gpu->fw[ADRENO_FW_GMU];
+ const struct block_header *blk;
+ u32 reg_offset;
+
+ u32 itcm_base = 0x00000000;
+ u32 dtcm_base = 0x00040000;
+
+ if (adreno_is_a650(adreno_gpu))
+ dtcm_base = 0x10004000;
+
+ if (gmu->legacy) {
+ /* Sanity check the size of the firmware that was loaded */
+ if (fw_image->size > 0x8000) {
+ DRM_DEV_ERROR(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ gmu_write_bulk(gmu, REG_A6XX_GMU_CM3_ITCM_START,
+ (u32*) fw_image->data, fw_image->size);
+ return 0;
+ }
+
+ for (blk = (const struct block_header *) fw_image->data;
+ (const u8*) blk < fw_image->data + fw_image->size;
+ blk = (const struct block_header *) &blk->data[blk->size >> 2]) {
+ if (blk->size == 0)
+ continue;
+
+ if (in_range(blk->addr, itcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - itcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_ITCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (in_range(blk->addr, dtcm_base, SZ_16K)) {
+ reg_offset = (blk->addr - dtcm_base) >> 2;
+ gmu_write_bulk(gmu,
+ REG_A6XX_GMU_CM3_DTCM_START + reg_offset,
+ blk->data, blk->size);
+ } else if (!fw_block_mem(&gmu->icache, blk) &&
+ !fw_block_mem(&gmu->dcache, blk) &&
+ !fw_block_mem(&gmu->dummy, blk)) {
+ DRM_DEV_ERROR(gmu->dev,
+ "failed to match fw block (addr=%.8x size=%d data[0]=%.8x)\n",
+ blk->addr, blk->size, blk->data[0]);
+ }
+ }
+
+ return 0;
+}
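
The non-legacy GMU firmware image is a stream of block_header records, each followed by blk->size bytes of dword-aligned payload; the loop above steps from one header to the next through blk->data[blk->size >> 2]. A minimal sketch of the same walk that only dumps the headers, assuming a well-formed image:

    static void dump_fw_blocks(const u8 *data, size_t size)
    {
            const struct block_header *blk = (const struct block_header *)data;

            while ((const u8 *)blk < data + size) {
                    pr_info("fw block addr=%#x size=%u type=%u\n",
                            blk->addr, blk->size, blk->type);
                    /* the next header sits right after this block's payload */
                    blk = (const struct block_header *)&blk->data[blk->size >> 2];
            }
    }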
+
static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
{
static bool rpmh_init;
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- int i, ret;
+ int ret;
u32 chipid;
- u32 *image;
+
+ if (adreno_is_a650(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF, 1);
if (state == GMU_WARM_BOOT) {
ret = a6xx_rpmh_start(gmu);
@@ -589,13 +723,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
"GMU firmware is not loaded\n"))
return -ENOENT;
- /* Sanity check the size of the firmware that was loaded */
- if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
- DRM_DEV_ERROR(gmu->dev,
- "GMU firmware is bigger than the available region\n");
- return -EINVAL;
- }
-
/* Turn on register retention */
gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
@@ -609,18 +736,16 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
return ret;
}
- image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
-
- for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
- gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
- image[i]);
+ ret = a6xx_gmu_fw_load(gmu);
+ if (ret)
+ return ret;
}
gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
/* Write the iova of the HFI table */
- gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -633,6 +758,9 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
+ gmu_write(gmu, REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG,
+ gmu->log.iova | (gmu->log.size / SZ_4K - 1));
+
/* Set up the lowest idle level on the GMU */
a6xx_gmu_power_config(gmu);
@@ -640,9 +768,11 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (ret)
return ret;
- ret = a6xx_gmu_gfx_rail_on(gmu);
- if (ret)
- return ret;
+ if (gmu->legacy) {
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+ }
/* Enable SPTP_PC if the CPU is responsible for it */
if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
@@ -683,13 +813,13 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
u32 val;
/* Make sure there are no outstanding RPMh votes */
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
(val & 1), 100, 10000);
- gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ gmu_poll_timeout_rscc(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
}
@@ -744,6 +874,13 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
GMU_WARM_BOOT : GMU_COLD_BOOT;
+ /*
+ * The warm boot path does not work on newer GPUs,
+ * presumably because the icache/dcache regions must be restored
+ */
+ if (!gmu->legacy)
+ status = GMU_COLD_BOOT;
+
ret = a6xx_gmu_fw_start(gmu, status);
if (ret)
goto out;
@@ -761,7 +898,10 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
enable_irq(gmu->hfi_irq);
/* Set the GPU to the current freq */
- __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+ if (gmu->legacy)
+ __a6xx_gmu_set_freq(gmu, gmu->current_perf_index);
+ else
+ a6xx_hfi_set_freq(gmu, gmu->current_perf_index);
/*
* "enable" the GX power domain which won't actually do anything but it
@@ -919,34 +1059,75 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
return 0;
}
-static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
{
- if (IS_ERR_OR_NULL(bo))
- return;
-
- dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
- kfree(bo);
+ msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->icache.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->dcache.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->dummy.obj, gmu->aspace, false);
+ msm_gem_kernel_put(gmu->log.obj, gmu->aspace, false);
+
+ gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+ msm_gem_address_space_put(gmu->aspace);
}
-static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
- size_t size)
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+ size_t size, u64 iova)
{
- struct a6xx_gmu_bo *bo;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct drm_device *dev = a6xx_gpu->base.base.dev;
+ uint32_t flags = MSM_BO_WC;
+ u64 range_start, range_end;
+ int ret;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
+ size = PAGE_ALIGN(size);
+ if (!iova) {
+ /* no fixed address - use GMU's uncached range */
+ range_start = 0x60000000 + PAGE_SIZE; /* skip dummy page */
+ range_end = 0x80000000;
+ } else {
+ /* range for fixed address */
+ range_start = iova;
+ range_end = iova + size;
+ /* use IOMMU_PRIV for icache/dcache */
+ flags |= MSM_BO_MAP_PRIV;
+ }
- bo->size = PAGE_ALIGN(size);
+ bo->obj = msm_gem_new(dev, size, flags);
+ if (IS_ERR(bo->obj))
+ return PTR_ERR(bo->obj);
- bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
+ ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+ range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+ if (ret) {
+ drm_gem_object_put(bo->obj);
+ return ret;
+ }
+
+ bo->virt = msm_gem_get_vaddr(bo->obj);
+ bo->size = size;
+
+ return 0;
+}
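
a6xx_gmu_memory_alloc() serves two allocation styles: iova == 0 lets the BO land anywhere in the GMU's uncached window above the dummy page, while a non-zero iova pins the BO at exactly that address and maps it with MSM_BO_MAP_PRIV. Both forms, as the init path below uses them:

    /* dynamic placement: the HFI queues can live anywhere in the window */
    ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);

    /* fixed placement: the dummy page must sit at 0x60000000 */
    ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);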
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
- if (!bo->virt) {
- kfree(bo);
- return ERR_PTR(-ENOMEM);
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return -ENODEV;
+
+ mmu = msm_iommu_new(gmu->dev, domain);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ if (IS_ERR(gmu->aspace)) {
+ iommu_domain_free(domain);
+ return PTR_ERR(gmu->aspace);
}
- return bo;
+ return 0;
}
/* Return the 'arc-level' for the given frequency */
@@ -1011,8 +1192,8 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
if (j == pri_count) {
DRM_DEV_ERROR(dev,
- "Level %u not found in in the RPMh list\n",
- level);
+ "Level %u not found in the RPMh list\n",
+ level);
DRM_DEV_ERROR(dev, "Available levels:\n");
for (j = 0; j < pri_count; j++)
DRM_DEV_ERROR(dev, " %u\n", pri[j]);
@@ -1190,6 +1371,7 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = to_platform_device(gmu->dev);
if (!gmu->initialized)
return;
@@ -1202,9 +1384,12 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
}
iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
gmu->mmio = NULL;
+ gmu->rscc = NULL;
- a6xx_gmu_memory_free(gmu, gmu->hfi);
+ a6xx_gmu_memory_free(gmu);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
@@ -1217,6 +1402,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
struct platform_device *pdev = of_find_device_by_node(node);
int ret;
@@ -1226,15 +1412,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->dev = &pdev->dev;
- /* Pass force_dma false to require the DT to set the dma region */
- ret = of_dma_configure(gmu->dev, node, false);
- if (ret)
- return ret;
-
- /* Set the mask after the of_dma_configure() */
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
- if (ret)
- return ret;
+ of_dma_configure(gmu->dev, node, true);
/* For now, don't do anything fancy until we get our feet under us */
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1246,20 +1424,64 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (ret)
goto err_put_device;
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ goto err_put_device;
+
+ /* Allocate memory for the GMU dummy page */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, SZ_4K, 0x60000000);
+ if (ret)
+ goto err_memory;
+
+ if (adreno_is_a650(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_16M - SZ_16K, 0x04000);
+ if (ret)
+ goto err_memory;
+ } else if (adreno_is_a640(adreno_gpu)) {
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
+ SZ_256K - SZ_16K, 0x04000);
+ if (ret)
+ goto err_memory;
+
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
+ SZ_256K - SZ_16K, 0x44000);
+ if (ret)
+ goto err_memory;
+ } else {
+ /* HFI v1, has sptprac */
+ gmu->legacy = true;
+
+ /* Allocate memory for the GMU debug region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ if (ret)
+ goto err_memory;
+ }
+
/* Allocate memory for the HFI queues */
- gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
- if (IS_ERR(gmu->hfi))
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ if (ret)
goto err_memory;
- /* Allocate memory for the GMU debug region */
- gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
- if (IS_ERR(gmu->debug))
+ /* Allocate memory for the GMU log region */
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ if (ret)
goto err_memory;
/* Map the GMU registers */
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
- if (IS_ERR(gmu->mmio))
+ if (IS_ERR(gmu->mmio)) {
+ ret = PTR_ERR(gmu->mmio);
goto err_memory;
+ }
+
+ if (adreno_is_a650(adreno_gpu)) {
+ gmu->rscc = a6xx_gmu_get_mmio(pdev, "rscc");
+ if (IS_ERR(gmu->rscc))
+ goto err_mmio;
+ } else {
+ gmu->rscc = gmu->mmio + 0x23000;
+ }
/* Get the HFI and GMU interrupts */
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
@@ -1286,13 +1508,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
err_mmio:
iounmap(gmu->mmio);
+ if (platform_get_resource_byname(pdev, IORESOURCE_MEM, "rscc"))
+ iounmap(gmu->rscc);
free_irq(gmu->gmu_irq, gmu);
free_irq(gmu->hfi_irq, gmu);
-err_memory:
- a6xx_gmu_memory_free(gmu, gmu->hfi);
ret = -ENODEV;
+err_memory:
+ a6xx_gmu_memory_free(gmu);
err_put_device:
/* Drop reference taken in of_find_device_by_node */
put_device(gmu->dev);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index 4af65a36d5ca..47df4745db50 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -10,9 +10,10 @@
#include "a6xx_hfi.h"
struct a6xx_gmu_bo {
+ struct drm_gem_object *obj;
void *virt;
size_t size;
- dma_addr_t iova;
+ u64 iova;
};
/*
@@ -43,7 +44,10 @@ struct a6xx_gmu_bo {
struct a6xx_gmu {
struct device *dev;
+ struct msm_gem_address_space *aspace;
+
void * __iomem mmio;
+ void * __iomem rscc;
int hfi_irq;
int gmu_irq;
@@ -52,8 +56,12 @@ struct a6xx_gmu {
int idle_level;
- struct a6xx_gmu_bo *hfi;
- struct a6xx_gmu_bo *debug;
+ struct a6xx_gmu_bo hfi;
+ struct a6xx_gmu_bo debug;
+ struct a6xx_gmu_bo icache;
+ struct a6xx_gmu_bo dcache;
+ struct a6xx_gmu_bo dummy;
+ struct a6xx_gmu_bo log;
int nr_clocks;
struct clk_bulk_data *clocks;
@@ -76,6 +84,7 @@ struct a6xx_gmu {
bool initialized;
bool hung;
+ bool legacy; /* a618 or a630 */
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
@@ -88,6 +97,13 @@ static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
return msm_writel(value, gmu->mmio + (offset << 2));
}
+static inline void
+gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)
+{
+ memcpy_toio(gmu->mmio + (offset << 2), data, size);
+ wmb();
+}
+
static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
{
u32 val = gmu_read(gmu, reg);
@@ -111,6 +127,15 @@ static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 lo, u32 hi)
readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
interval, timeout)
+static inline void gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+ return msm_writel(value, gmu->rscc + (offset << 2));
+}
+
+#define gmu_poll_timeout_rscc(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->rscc + ((addr) << 2), val, cond, \
+ interval, timeout)
+
/*
* These are the available OOB (out of band requests) to the GMU where "out of
* band" means that the CPU talks to the GMU directly and not through HFI.
@@ -156,10 +181,16 @@ enum a6xx_gmu_oob_state {
#define GMU_OOB_GPU_SET_ACK 24
#define GMU_OOB_GPU_SET_CLEAR 24
+#define GMU_OOB_GPU_SET_REQUEST_NEW 30
+#define GMU_OOB_GPU_SET_ACK_NEW 31
+#define GMU_OOB_GPU_SET_CLEAR_NEW 31
+
void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu);
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index);
bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu);
bool a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
index 1cc1c135236b..176ae94d9fe6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -101,6 +101,10 @@ static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+#define REG_A6XX_GMU_ICACHE_CONFIG 0x00004c00
+
+#define REG_A6XX_GMU_DCACHE_CONFIG 0x00004c01
+
#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
@@ -199,6 +203,12 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+#define REG_A6XX_GPU_GMU_CX_GMU_CX_FAL_INTF 0x000050f0
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x00005100
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x00005101
+
#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
@@ -330,8 +340,6 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
-#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
-
#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
@@ -344,39 +352,41 @@ static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
-#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00000004
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00000008
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00000009
-#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x0000000a
-#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x0000000b
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x0000000d
-#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x0000000e
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00000082
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00000083
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00000089
-#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x0000008c
-#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00000100
-#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00000101
-#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00000180
-#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00000346
-#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000003ee
-#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00000496
-#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000053e
#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 68af24150de5..68314dcfce18 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -414,7 +414,17 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
a6xx_set_hwcg(gpu, true);
/* VBIF/GBIF start*/
- gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE0, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE1, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE2, 0x00071620);
+ gpu_write(gpu, REG_A6XX_GBIF_QSB_SIDE3, 0x00071620);
+ gpu_write(gpu, REG_A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3);
+ } else {
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+ }
+
if (adreno_is_a630(adreno_gpu))
gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
@@ -429,25 +439,35 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
- /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
- REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+ if (!adreno_is_a650(adreno_gpu)) {
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
- gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
- REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
- 0x00100000 + adreno_gpu->gmem - 1);
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+ }
gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
- gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ if (adreno_is_a640(adreno_gpu) || adreno_is_a650(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
+ else
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
/* Setting the mem pool size */
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
/* Setting the primFifo thresholds default values */
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+ if (adreno_is_a650(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300000);
+ else if (adreno_is_a640(adreno_gpu))
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200000);
+ else
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
@@ -471,6 +491,19 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+ /* Set weights for bicubic filtering */
+ if (adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_0, 0);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_1,
+ 0x3fe05ff4);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_2,
+ 0x3fa0ebee);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_3,
+ 0x3f5193ed);
+ gpu_write(gpu, REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE_4,
+ 0x3f0243f0);
+ }
+
/* Protect registers from the CP */
gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
@@ -508,6 +541,11 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
A6XX_PROTECT_RDONLY(0x980, 0x4));
gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+ if (adreno_is_a650(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_CP_APRIV_CNTL,
+ (1 << 6) | (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1));
+ }
+
/* Enable interrupts */
gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
@@ -566,8 +604,10 @@ out:
*/
a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
- /* Take the GMU out of its special boot mode */
- a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ if (a6xx_gpu->gmu.legacy) {
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+ }
return ret;
}
@@ -795,7 +835,7 @@ static void a6xx_destroy(struct msm_gpu *gpu)
if (a6xx_gpu->sqe_bo) {
msm_gem_unpin_iova(a6xx_gpu->sqe_bo, gpu->aspace);
- drm_gem_object_put_unlocked(a6xx_gpu->sqe_bo);
+ drm_gem_object_put(a6xx_gpu->sqe_bo);
}
a6xx_gmu_remove(a6xx_gpu);
@@ -810,6 +850,11 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
u64 busy_cycles, busy_time;
+
+ /* Only read the gpu busy if the hardware is already active */
+ if (pm_runtime_get_if_in_use(a6xx_gpu->gmu.dev) == 0)
+ return 0;
+
busy_cycles = gmu_read64(&a6xx_gpu->gmu,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H);
@@ -819,6 +864,8 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
gpu->devfreq.busy_cycles = busy_cycles;
+ pm_runtime_put(a6xx_gpu->gmu.dev);
+
if (WARN_ON(busy_time > ~0LU))
return ~0LU;
@@ -846,6 +893,7 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
+ .create_address_space = adreno_iommu_create_address_space,
#endif
},
.get_timestamp = a6xx_get_timestamp,
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index e450e0b97211..9921e632f1ca 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -17,10 +17,14 @@ static const char * const a6xx_hfi_msg_id[] = {
HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
HFI_MSG_ID(HFI_H2F_MSG_TEST),
+ HFI_MSG_ID(HFI_H2F_MSG_START),
+ HFI_MSG_ID(HFI_H2F_MSG_CORE_FW_START),
+ HFI_MSG_ID(HFI_H2F_MSG_GX_BW_PERF_VOTE),
+ HFI_MSG_ID(HFI_H2F_MSG_PREPARE_SLUMBER),
};
-static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
- u32 dwords)
+static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
{
struct a6xx_hfi_queue_header *header = queue->header;
u32 i, hdr, index = header->read_index;
@@ -48,6 +52,9 @@ static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
index = (index + 1) % header->size;
}
+ if (!gmu->legacy)
+ index = ALIGN(index, 4) % header->size;
+
header->read_index = index;
return HFI_HEADER_SIZE(hdr);
}
@@ -73,6 +80,12 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
index = (index + 1) % header->size;
}
+ /* Cookie-fill any unused dwords at the end of the write buffer */
+ if (!gmu->legacy) {
+ for (; index % 4; index = (index + 1) % header->size)
+ queue->data[index] = 0xfafafafa;
+ }
+
header->write_index = index;
spin_unlock(&queue->lock);
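
On non-legacy firmware both ends keep messages 4-dword aligned: the writer above pads the tail with the 0xfafafafa cookie and the reader realigns with ALIGN(index, 4). Worked example: a 7-dword message written at index 0 leaves index == 7; the loop stores the cookie into dword 7 only, so the next message starts at dword 8, which is exactly where the reader's ALIGN(7, 4) lands.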
@@ -106,7 +119,7 @@ static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
struct a6xx_hfi_msg_response resp;
/* Get the next packet */
- ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ ret = a6xx_hfi_queue_read(gmu, queue, (u32 *) &resp,
sizeof(resp) >> 2);
/* If the queue is empty our response never made it */
@@ -176,8 +189,8 @@ static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
{
struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
- msg.dbg_buffer_addr = (u32) gmu->debug->iova;
- msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.dbg_buffer_addr = (u32) gmu->debug.iova;
+ msg.dbg_buffer_size = (u32) gmu->debug.size;
msg.boot_state = boot_state;
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
@@ -195,6 +208,28 @@ static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
version, sizeof(*version));
}
+static int a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table_v1 msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
{
struct a6xx_hfi_msg_perf_table msg = { 0 };
@@ -205,6 +240,7 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
for (i = 0; i < gmu->nr_gpu_freqs; i++) {
msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].acd = 0xffffffff;
msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
}
@@ -306,7 +342,45 @@ static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
NULL, 0);
}
-int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+static int a6xx_hfi_send_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_START, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_core_fw_start msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_CORE_FW_START, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ struct a6xx_hfi_gx_bw_perf_vote_cmd msg = { 0 };
+
+ msg.ack_type = 1; /* blocking */
+ msg.freq = index;
+ msg.bw = 0; /* TODO: bus scaling */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_GX_BW_PERF_VOTE, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+int a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_prep_slumber_cmd msg = { 0 };
+
+ /* TODO: should the freq and bw fields be non-zero? */
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PREPARE_SLUMBER, &msg,
+ sizeof(msg), NULL, 0);
+}
+
+static int a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)
{
int ret;
@@ -324,7 +398,7 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
* the GMU firmware
*/
- ret = a6xx_hfi_send_perf_table(gmu);
+ ret = a6xx_hfi_send_perf_table_v1(gmu);
if (ret)
return ret;
@@ -341,6 +415,37 @@ int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
return 0;
}
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ if (gmu->legacy)
+ return a6xx_hfi_start_v1(gmu, boot_state);
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_core_fw_start(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * The downstream driver sends this in its "a6xx_hw_init" equivalent,
+ * but there seems to be no harm in sending it here too
+ */
+ ret = a6xx_hfi_send_start(gmu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
void a6xx_hfi_stop(struct a6xx_gmu *gmu)
{
int i;
@@ -385,7 +490,7 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
void a6xx_hfi_init(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_gmu_bo *hfi = &gmu->hfi;
struct a6xx_hfi_queue_table_header *table = hfi->virt;
struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
u64 offset;
@@ -415,5 +520,5 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu)
/* GMU response queue */
offset += SZ_4K;
a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
- hfi->iova + offset, 4);
+ hfi->iova + offset, gmu->legacy ? 4 : 1);
}
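
The read side rounds its index up to the next 4-dword boundary on non-legacy
GMUs, while the write side pads the skipped dwords with a 0xfafafafa cookie so
stale data stands out in queue dumps. A standalone sketch of that index
arithmetic (function names here are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

    static uint32_t reader_advance(uint32_t index, uint32_t queue_size)
    {
        /* round up to a 4-dword boundary, wrapping at queue_size */
        return ALIGN(index, 4) % queue_size;
    }

    static uint32_t writer_pad(uint32_t *data, uint32_t index,
                               uint32_t queue_size)
    {
        /* cookie the dwords that the aligned reader will skip */
        for (; index % 4; index = (index + 1) % queue_size)
            data[index] = 0xfafafafa;
        return index;
    }

    int main(void)
    {
        uint32_t q[16] = { 0 };
        uint32_t widx = writer_pad(q, 6, 16);    /* cookies q[6], q[7] */

        printf("write 6 -> %u, read 6 -> %u\n",
               widx, reader_advance(6, 16));     /* both print 8 */
        return 0;
    }
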
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 60d1319fa44f..2bd670ca42d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -51,7 +51,8 @@ struct a6xx_hfi_queue {
/* HFI message types */
#define HFI_MSG_CMD 0
-#define HFI_MSG_ACK 2
+#define HFI_MSG_ACK 1
+#define HFI_MSG_ACK_V1 2
#define HFI_F2H_MSG_ACK 126
@@ -94,7 +95,13 @@ struct perf_level {
u32 freq;
};
-struct a6xx_hfi_msg_perf_table {
+struct perf_gx_level {
+ u32 vote;
+ u32 acd;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table_v1 {
u32 header;
u32 num_gpu_levels;
u32 num_gmu_levels;
@@ -103,6 +110,15 @@ struct a6xx_hfi_msg_perf_table {
struct perf_level cx_votes[4];
};
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_gx_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
#define HFI_H2F_MSG_BW_TABLE 3
struct a6xx_hfi_msg_bw_table {
@@ -124,4 +140,34 @@ struct a6xx_hfi_msg_test {
u32 header;
};
+#define HFI_H2F_MSG_START 10
+
+struct a6xx_hfi_msg_start {
+ u32 header;
+};
+
+#define HFI_H2F_MSG_CORE_FW_START 14
+
+struct a6xx_hfi_msg_core_fw_start {
+ u32 header;
+ u32 handle;
+};
+
+#define HFI_H2F_MSG_GX_BW_PERF_VOTE 30
+
+struct a6xx_hfi_gx_bw_perf_vote_cmd {
+ u32 header;
+ u32 ack_type;
+ u32 freq;
+ u32 bw;
+};
+
+#define HFI_H2F_MSG_PREPARE_SLUMBER 33
+
+struct a6xx_hfi_prep_slumber_cmd {
+ u32 header;
+ u32 bw;
+ u32 freq;
+};
+
#endif
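
The two perf-table layouts differ only in the per-level GX entry: the newer
message inserts an ACD word between vote and freq. A standalone size check,
assuming the v1 gx_votes array is also 16 entries (its declaration falls
outside this hunk, so that count is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;

    struct perf_level { u32 vote; u32 freq; };
    struct perf_gx_level { u32 vote; u32 acd; u32 freq; };

    struct a6xx_hfi_msg_perf_table_v1 {
        u32 header;
        u32 num_gpu_levels;
        u32 num_gmu_levels;
        struct perf_level gx_votes[16];    /* length assumed from v2 */
        struct perf_level cx_votes[4];
    };

    struct a6xx_hfi_msg_perf_table {
        u32 header;
        u32 num_gpu_levels;
        u32 num_gmu_levels;
        struct perf_gx_level gx_votes[16];
        struct perf_level cx_votes[4];
    };

    int main(void)
    {
        /* the GMU firmware generation dictates which layout is sent */
        printf("v1: %zu bytes, v2: %zu bytes\n",
               sizeof(struct a6xx_hfi_msg_perf_table_v1),
               sizeof(struct a6xx_hfi_msg_perf_table));
        return 0;
    }
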
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index cb3a6e597d76..7732f03d9e3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -93,6 +93,17 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
+ .rev = ADRENO_REV(4, 0, 5, ANY_ID),
+ .revn = 405,
+ .name = "A405",
+ .fw = {
+ [ADRENO_FW_PM4] = "a420_pm4.fw",
+ [ADRENO_FW_PFP] = "a420_pfp.fw",
+ },
+ .gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a4xx_gpu_init,
+ }, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
.revn = 420,
.name = "A420",
@@ -189,6 +200,30 @@ static const struct adreno_info gpulist[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
.zapfw = "a630_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 4, 0, ANY_ID),
+ .revn = 640,
+ .name = "A640",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a640_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a640_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 5, 0, ANY_ID),
+ .revn = 650,
+ .name = "A650",
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a650_gmu.bin",
+ },
+ .gmem = SZ_1M + SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a650_zap.mdt",
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 1d5c43c22269..89673c7ed473 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -185,6 +185,23 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
}
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev)
+{
+ struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+ struct msm_gem_address_space *aspace;
+
+ aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
+ 0xfffffff);
+
+ if (IS_ERR(aspace) && !IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
+
+ return aspace;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -197,7 +214,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
*value = adreno_gpu->gmem;
return 0;
case MSM_PARAM_GMEM_BASE:
- *value = 0x100000;
+ *value = !adreno_is_a650(adreno_gpu) ? 0x100000 : 0;
return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
@@ -459,7 +476,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
break;
/* fall-thru */
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
@@ -988,12 +1005,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
- adreno_gpu_config.va_start = SZ_16M;
- adreno_gpu_config.va_end = 0xffffffff;
- /* maximum range of a2xx mmu */
- if (adreno_is_a2xx(adreno_gpu))
- adreno_gpu_config.va_end = SZ_16M + 0xfff * SZ_64K;
-
adreno_gpu_config.nr_rings = nr_rings;
adreno_get_pwrlevels(&pdev->dev, gpu);
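
adreno_iommu_create_address_space() destroys the msm_mmu only when the
address space failed but the mmu pointer itself was valid, because both
constructors report failure through ERR_PTR-encoded pointers rather than
NULL. A minimal userspace model of that encoding (a sketch of the standard
kernel idiom, not the kernel's headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
        /* the top 4095 addresses encode negative errnos */
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        int obj = 0;
        void *ok = &obj;
        void *bad = ERR_PTR(-12);    /* -ENOMEM */

        printf("ok:  IS_ERR=%d\n", IS_ERR(ok));    /* 0 */
        printf("bad: IS_ERR=%d, err=%ld\n", IS_ERR(bad), PTR_ERR(bad));
        return 0;
    }

Destroying a pointer that IS_ERR() flags would be a wild dereference, hence
the !IS_ERR(mmu) guard in the cleanup path above.
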
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 9ff4e550e7bd..2f5d2c3acc3a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -202,6 +202,11 @@ static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
return (gpu->revn >= 400) && (gpu->revn < 500);
}
+static inline int adreno_is_a405(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 405;
+}
+
static inline int adreno_is_a420(struct adreno_gpu *gpu)
{
return gpu->revn == 420;
@@ -237,6 +242,16 @@ static inline int adreno_is_a630(struct adreno_gpu *gpu)
return gpu->revn == 630;
}
+static inline int adreno_is_a640(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 640;
+}
+
+static inline int adreno_is_a650(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 650;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
const struct firmware *adreno_request_fw(struct adreno_gpu *adreno_gpu,
const char *fwname);
@@ -273,6 +288,14 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
/*
+ * Common helper function to initialize the default address space for arm-smmu
+ * attached targets
+ */
+struct msm_gem_address_space *
+adreno_iommu_create_address_space(struct msm_gpu *gpu,
+ struct platform_device *pdev);
+
+/*
* For a5xx and a6xx targets load the zap shader that is used to pull the GPU
* out of secure mode
*/
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
index 11f2bebe3869..7c230f719ad3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -36,22 +36,6 @@ static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
return to_dpu_kms(priv->kms);
}
-static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
-{
- struct drm_crtc *tmp_crtc;
-
- drm_for_each_crtc(tmp_crtc, crtc->dev) {
- if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
- tmp_crtc->enabled) {
- DPU_DEBUG("video interface connected crtc:%d\n",
- tmp_crtc->base.id);
- return true;
- }
- }
-
- return false;
-}
-
static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
struct drm_crtc *crtc,
struct drm_crtc_state *state,
@@ -94,7 +78,6 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
u32 bw, threshold;
u64 bw_sum_of_intfs = 0;
enum dpu_crtc_client_type curr_client_type;
- bool is_video_mode;
struct dpu_crtc_state *dpu_cstate;
struct drm_crtc *tmp_crtc;
struct dpu_kms *kms;
@@ -144,11 +127,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
DPU_DEBUG("calculated bandwidth=%uk\n", bw);
- is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
- threshold = (is_video_mode ||
- _dpu_core_video_mode_intf_connected(crtc)) ?
- kms->catalog->perf.max_bw_low :
- kms->catalog->perf.max_bw_high;
+ threshold = kms->catalog->perf.max_bw_high;
DPU_DEBUG("final threshold bw limit = %d\n", threshold);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 17448505a9b5..e15b42a780e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -9,6 +9,7 @@
#include <linux/sort.h>
#include <linux/debugfs.h>
#include <linux/ktime.h>
+#include <linux/bits.h>
#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
@@ -20,6 +21,7 @@
#include "dpu_kms.h"
#include "dpu_hw_lm.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
#include "dpu_crtc.h"
#include "dpu_plane.h"
#include "dpu_encoder.h"
@@ -40,6 +42,9 @@
/* timeout in ms waiting for frame done */
#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+#define CONVERT_S3_15(val) \
+ (((((u64)(val)) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -88,11 +93,9 @@ static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *crtc_state;
int lm_idx, lm_horiz_position;
- dpu_crtc = to_dpu_crtc(crtc);
crtc_state = to_dpu_crtc_state(crtc->state);
lm_horiz_position = 0;
@@ -422,6 +425,74 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(adj_mode);
}
+static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ struct drm_color_ctm *ctm;
+
+ memset(cfg, 0, sizeof(struct dpu_hw_pcc_cfg));
+
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (!ctm)
+ return;
+
+ cfg->r.r = CONVERT_S3_15(ctm->matrix[0]);
+ cfg->g.r = CONVERT_S3_15(ctm->matrix[1]);
+ cfg->b.r = CONVERT_S3_15(ctm->matrix[2]);
+
+ cfg->r.g = CONVERT_S3_15(ctm->matrix[3]);
+ cfg->g.g = CONVERT_S3_15(ctm->matrix[4]);
+ cfg->b.g = CONVERT_S3_15(ctm->matrix[5]);
+
+ cfg->r.b = CONVERT_S3_15(ctm->matrix[6]);
+ cfg->g.b = CONVERT_S3_15(ctm->matrix[7]);
+ cfg->b.b = CONVERT_S3_15(ctm->matrix[8]);
+}
+
+static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
+{
+ struct drm_crtc_state *state = crtc->state;
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
+ struct dpu_crtc_mixer *mixer = cstate->mixers;
+ struct dpu_hw_pcc_cfg cfg;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_dspp *dspp;
+ int i;
+
+ if (!state->color_mgmt_changed)
+ return;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+ ctl = mixer[i].lm_ctl;
+ lm = mixer[i].hw_lm;
+ dspp = mixer[i].hw_dspp;
+
+ if (!dspp || !dspp->ops.setup_pcc)
+ continue;
+
+ if (!state->ctm) {
+ dspp->ops.setup_pcc(dspp, NULL);
+ } else {
+ _dpu_crtc_get_pcc_coeff(state, &cfg);
+ dspp->ops.setup_pcc(dspp, &cfg);
+ }
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_dspp(ctl,
+ mixer[i].hw_dspp->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+ }
+}
+
static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -430,7 +501,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_encoder *encoder;
struct drm_device *dev;
unsigned long flags;
- struct dpu_crtc_smmu_state_data *smmu_state;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
@@ -448,7 +518,6 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
dev = crtc->dev;
- smmu_state = &dpu_crtc->smmu_state;
_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
@@ -475,6 +544,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
_dpu_crtc_blend_setup(crtc);
+ _dpu_crtc_setup_cp_blocks(crtc);
+
/*
* PP_DONE irq is only used by command mode for now.
* It is better to request pending before FLUSH and START trigger
@@ -491,7 +562,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_device *dev;
struct drm_plane *plane;
struct msm_drm_private *priv;
- struct msm_drm_thread *event_thread;
unsigned long flags;
struct dpu_crtc_state *cstate;
@@ -513,8 +583,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
return;
}
- event_thread = &priv->event_thread[crtc->index];
-
if (dpu_crtc->event) {
DPU_DEBUG("already received dpu_crtc->event\n");
} else {
@@ -567,7 +635,6 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
if (!crtc || !state) {
@@ -575,7 +642,6 @@ static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
return;
}
- dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(state);
DPU_DEBUG("crtc%d\n", crtc->base.id);
@@ -662,11 +728,9 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
/**
* dpu_crtc_duplicate_state - state duplicate hook
* @crtc: Pointer to drm crtc structure
- * @Returns: Pointer to new drm_crtc_state structure
*/
static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
{
- struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate, *old_cstate;
if (!crtc || !crtc->state) {
@@ -674,7 +738,6 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
return NULL;
}
- dpu_crtc = to_dpu_crtc(crtc);
old_cstate = to_dpu_crtc_state(crtc->state);
cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
if (!cstate) {
@@ -693,9 +756,7 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct dpu_crtc_state *cstate;
- struct drm_display_mode *mode;
struct drm_encoder *encoder;
- struct msm_drm_private *priv;
unsigned long flags;
bool release_bandwidth = false;
@@ -705,8 +766,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
}
dpu_crtc = to_dpu_crtc(crtc);
cstate = to_dpu_crtc_state(crtc->state);
- mode = &cstate->base.adjusted_mode;
- priv = crtc->dev->dev_private;
DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
@@ -768,14 +827,12 @@ static void dpu_crtc_enable(struct drm_crtc *crtc,
{
struct dpu_crtc *dpu_crtc;
struct drm_encoder *encoder;
- struct msm_drm_private *priv;
bool request_bandwidth;
if (!crtc) {
DPU_ERROR("invalid crtc\n");
return;
}
- priv = crtc->dev->dev_private;
pm_runtime_get_sync(crtc->dev->dev);
@@ -1319,6 +1376,8 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane,
drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ drm_crtc_enable_color_mgmt(crtc, 0, true, 0);
+
/* save user friendly CRTC name for later */
snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
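
CONVERT_S3_15() above takes a drm_color_ctm coefficient (sign-magnitude
S31.32 fixed point), drops the sign bit, and keeps three integer plus
fifteen fractional bits for the 18-bit PCC registers. A standalone check of
that arithmetic, with BIT_ULL/GENMASK_ULL re-derived for userspace:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)        (1ULL << (n))
    #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
    #define CONVERT_S3_15(val) \
        (((((uint64_t)(val)) & ~BIT_ULL(63)) >> 17) & GENMASK_ULL(17, 0))

    int main(void)
    {
        uint64_t one  = 1ULL << 32;    /* 1.0 in S31.32 */
        uint64_t half = 1ULL << 31;    /* 0.5 in S31.32 */

        printf("1.0 -> 0x%05llx\n",
               (unsigned long long)CONVERT_S3_15(one));     /* 0x08000 */
        printf("0.5 -> 0x%05llx\n",
               (unsigned long long)CONVERT_S3_15(half));    /* 0x04000 */
        return 0;
    }
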
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index 5174e86124cc..cec3474340e8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -73,12 +73,14 @@ struct dpu_crtc_smmu_state_data {
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
+ * @hw_dspp: DSPP HW driver context
* @mixer_op_mode: mixer blending operation mode
* @flush_mask: mixer flush mask for ctl, mixer and pipe
*/
struct dpu_crtc_mixer {
struct dpu_hw_mixer *hw_lm;
struct dpu_hw_ctl *lm_ctl;
+ struct dpu_hw_dspp *hw_dspp;
u32 mixer_op_mode;
u32 flush_mask;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index a1b79ee2bd9d..797e8fd4c16f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -20,6 +20,7 @@
#include "dpu_hw_catalog.h"
#include "dpu_hw_intf.h"
#include "dpu_hw_ctl.h"
+#include "dpu_hw_dspp.h"
#include "dpu_formats.h"
#include "dpu_encoder_phys.h"
#include "dpu_crtc.h"
@@ -498,23 +499,6 @@ void dpu_encoder_helper_split_config(
}
}
-static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
- struct drm_display_mode *adj_mode)
-{
- struct drm_display_mode *cur_mode;
-
- if (!connector || !adj_mode)
- return;
-
- list_for_each_entry(cur_mode, &connector->modes, head) {
- if (cur_mode->vdisplay == adj_mode->vdisplay &&
- cur_mode->hdisplay == adj_mode->hdisplay &&
- drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
- adj_mode->private_flags |= cur_mode->private_flags;
- }
- }
-}
-
static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_encoder_virt *dpu_enc,
struct dpu_kms *dpu_kms,
@@ -536,6 +520,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
+ * DSPP (color processing) blocks are added only for the primary (DSI) interface
*/
if (intf_count == 2)
topology.num_lm = 2;
@@ -544,6 +529,9 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
+ topology.num_dspp = topology.num_lm;
+
topology.num_enc = 0;
topology.num_intf = intf_count;
@@ -580,15 +568,6 @@ static int dpu_encoder_virt_atomic_check(
global_state = dpu_kms_get_existing_global_state(dpu_kms);
trace_dpu_enc_atomic_check(DRMID(drm_enc));
- /*
- * display drivers may populate private fields of the drm display mode
- * structure while registering possible modes of a connector with DRM.
- * These private fields are not populated back while DRM invokes
- * the mode_set callbacks. This module retrieves and populates the
- * private fields of the given mode.
- */
- _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
-
/* perform atomic check on the first physical encoder (master) */
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -621,8 +600,7 @@ static int dpu_encoder_virt_atomic_check(
}
}
- trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
- adj_mode->private_flags);
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
return ret;
}
@@ -959,7 +937,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
- int num_lm, num_ctl, num_pp;
+ struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm, num_ctl, num_pp, num_dspp;
int i, j;
if (!drm_enc) {
@@ -1008,6 +987,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+ num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+ drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+ ARRAY_SIZE(hw_dspp));
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1020,6 +1002,7 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+ cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
}
cstate->num_mixers = num_lm;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index c567917541e8..29d4fde3172b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -41,6 +41,8 @@
#define PINGPONG_SDM845_SPLIT_MASK \
(PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+#define DSPP_SC7180_MASK BIT(DPU_DSPP_PCC)
+
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
#define DEFAULT_DPU_LINE_WIDTH 2048
#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
@@ -291,29 +293,30 @@ static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
},
};
-#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair) \
+#define LM_BLK(_name, _id, _base, _fmask, _sblk, _pp, _lmpair, _dspp) \
{ \
.name = _name, .id = _id, \
.base = _base, .len = 0x320, \
.features = _fmask, \
.sblk = _sblk, \
.pingpong = _pp, \
- .lm_pair_mask = (1 << _lmpair) \
+ .lm_pair_mask = (1 << _lmpair), \
+ .dspp = _dspp \
}
static const struct dpu_lm_cfg sdm845_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_0, LM_1),
+ &sdm845_lm_sblk, PINGPONG_0, LM_1, 0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_1, LM_0),
+ &sdm845_lm_sblk, PINGPONG_1, LM_0, 0),
LM_BLK("lm_2", LM_2, 0x46000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_2, LM_5),
+ &sdm845_lm_sblk, PINGPONG_2, LM_5, 0),
LM_BLK("lm_3", LM_3, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_4", LM_4, 0x0, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_MAX, 0),
+ &sdm845_lm_sblk, PINGPONG_MAX, 0, 0),
LM_BLK("lm_5", LM_5, 0x49000, MIXER_SDM845_MASK,
- &sdm845_lm_sblk, PINGPONG_3, LM_2),
+ &sdm845_lm_sblk, PINGPONG_3, LM_2, 0),
};
/* SC7180 */
@@ -328,11 +331,30 @@ static const struct dpu_lm_sub_blks sc7180_lm_sblk = {
static const struct dpu_lm_cfg sc7180_lm[] = {
LM_BLK("lm_0", LM_0, 0x44000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_0, LM_1),
+ &sc7180_lm_sblk, PINGPONG_0, LM_1, DSPP_0),
LM_BLK("lm_1", LM_1, 0x45000, MIXER_SC7180_MASK,
- &sc7180_lm_sblk, PINGPONG_1, LM_0),
+ &sc7180_lm_sblk, PINGPONG_1, LM_0, 0),
+};
+
+/*************************************************************
+ * DSPP sub blocks config
+ *************************************************************/
+static const struct dpu_dspp_sub_blks sc7180_dspp_sblk = {
+ .pcc = {.id = DPU_DSPP_PCC, .base = 0x1700,
+ .len = 0x90, .version = 0x10000},
};
+#define DSPP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1800, \
+ .features = DSPP_SC7180_MASK, \
+ .sblk = &sc7180_dspp_sblk \
+ }
+
+static const struct dpu_dspp_cfg sc7180_dspp[] = {
+ DSPP_BLK("dspp_0", DSPP_0, 0x54000),
+};
/*************************************************************
* PINGPONG sub blocks config
*************************************************************/
@@ -515,8 +537,8 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
};
static const struct dpu_perf_cfg sc7180_perf_data = {
- .max_bw_low = 3900000,
- .max_bw_high = 5500000,
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
@@ -587,6 +609,8 @@ static void sc7180_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
.sspp = sc7180_sspp,
.mixer_count = ARRAY_SIZE(sc7180_lm),
.mixer = sc7180_lm,
+ .dspp_count = ARRAY_SIZE(sc7180_dspp),
+ .dspp = sc7180_dspp,
.pingpong_count = ARRAY_SIZE(sc7180_pp),
.pingpong = sc7180_pp,
.intf_count = ARRAY_SIZE(sc7180_intf),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index 09df7d87dd43..f7de43838c69 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -146,6 +146,17 @@ enum {
};
/**
+ * DSPP sub-blocks
+ * @DPU_DSPP_PCC Panel color correction block
+ * @DPU_DSPP_GC Gamma correction block
+ */
+enum {
+ DPU_DSPP_PCC = 0x1,
+ DPU_DSPP_GC,
+ DPU_DSPP_MAX
+};
+
+/**
* PINGPONG sub-blocks
* @DPU_PINGPONG_TE Tear check block
* @DPU_PINGPONG_TE2 Additional tear check block for split pipes
@@ -377,6 +388,16 @@ struct dpu_lm_sub_blks {
struct dpu_pp_blk gc;
};
+/**
+ * struct dpu_dspp_sub_blks - information of DSPP sub-blocks
+ * @gc : gamma correction block
+ * @pcc: pixel color correction block
+ */
+struct dpu_dspp_sub_blks {
+ struct dpu_pp_blk gc;
+ struct dpu_pp_blk pcc;
+};
+
struct dpu_pingpong_sub_blks {
struct dpu_pp_blk te;
struct dpu_pp_blk te2;
@@ -471,10 +492,24 @@ struct dpu_lm_cfg {
DPU_HW_BLK_INFO;
const struct dpu_lm_sub_blks *sblk;
u32 pingpong;
+ u32 dspp;
unsigned long lm_pair_mask;
};
/**
+ * struct dpu_dspp_cfg - information of DSPP blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * supported by this block
+ * @sblk sub-blocks information
+ */
+struct dpu_dspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_dspp_sub_blks *sblk;
+};
+
+/**
* struct dpu_pingpong_cfg - information of PING-PONG blocks
* @id enum identifying this block
* @base register offset of this block
@@ -688,6 +723,9 @@ struct dpu_mdss_cfg {
u32 ad_count;
+ u32 dspp_count;
+ const struct dpu_dspp_cfg *dspp;
+
/* Add additional block data structures here */
struct dpu_perf_cfg perf;
@@ -716,6 +754,7 @@ struct dpu_mdss_hw_cfg_handler {
#define BLK_PINGPONG(s) ((s)->pingpong)
#define BLK_INTF(s) ((s)->intf)
#define BLK_AD(s) ((s)->ad)
+#define BLK_DSPP(s) ((s)->dspp)
/**
* dpu_hw_catalog_init - dpu hardware catalog init API retrieves
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
index 831e5f7a9b7f..613ae8f0cfcd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -272,6 +272,31 @@ static int dpu_hw_ctl_active_get_bitmask_intf(struct dpu_hw_ctl *ctx,
return 0;
}
+static uint32_t dpu_hw_ctl_get_bitmask_dspp(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp dspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (dspp) {
+ case DSPP_0:
+ flushbits = BIT(13);
+ break;
+ case DSPP_1:
+ flushbits = BIT(14);
+ break;
+ case DSPP_2:
+ flushbits = BIT(15);
+ break;
+ case DSPP_3:
+ flushbits = BIT(21);
+ break;
+ default:
+ return 0;
+ }
+
+ return flushbits;
+}
+
static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
struct dpu_hw_blk_reg_map *c = &ctx->hw;
@@ -548,6 +573,7 @@ static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_dspp = dpu_hw_ctl_get_bitmask_dspp;
};
static struct dpu_hw_blk_ops dpu_hw_ops;
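
dpu_hw_ctl_get_bitmask_dspp() maps each DSPP instance to its bit in the CTL
flush register: bits 13-15 for DSPP_0..DSPP_2 and bit 21 for DSPP_3. The
same mapping as a lookup table, in a standalone sketch (the enum values
below are illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    enum dpu_dspp { DSPP_0 = 1, DSPP_1, DSPP_2, DSPP_3 };

    static const uint32_t dspp_flush_bit[] = { 13, 14, 15, 21 };

    static uint32_t get_bitmask_dspp(enum dpu_dspp dspp)
    {
        if (dspp < DSPP_0 || dspp > DSPP_3)
            return 0;
        return BIT(dspp_flush_bit[dspp - DSPP_0]);
    }

    int main(void)
    {
        printf("DSPP_0 -> 0x%08x\n", get_bitmask_dspp(DSPP_0));
        return 0;    /* prints 0x00002000, i.e. BIT(13) */
    }
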
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
index 09e1263c72e2..ec579b470a80 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -139,6 +139,9 @@ struct dpu_hw_ctl_ops {
uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
enum dpu_lm blk);
+ uint32_t (*get_bitmask_dspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_dspp blk);
+
/**
* Query the value of the intf flush mask
* No effect on hardware
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
new file mode 100644
index 000000000000..a7a24539921f
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_dspp.h"
+#include "dpu_kms.h"
+
+
+#define PCC_EN BIT(0)
+#define PCC_DIS 0
+#define PCC_RED_R_OFF 0x10
+#define PCC_RED_G_OFF 0x1C
+#define PCC_RED_B_OFF 0x28
+#define PCC_GREEN_R_OFF 0x14
+#define PCC_GREEN_G_OFF 0x20
+#define PCC_GREEN_B_OFF 0x2C
+#define PCC_BLUE_R_OFF 0x18
+#define PCC_BLUE_G_OFF 0x24
+#define PCC_BLUE_B_OFF 0x30
+
+static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
+ struct dpu_hw_pcc_cfg *cfg)
+{
+ u32 base;
+
+ if (!ctx) {
+ DRM_ERROR("invalid ctx %pK\n", ctx);
+ return;
+ }
+
+ /* read the PCC block offset only after the NULL check above */
+ base = ctx->cap->sblk->pcc.base;
+ if (!base) {
+ DRM_ERROR("invalid pcc base 0x%x\n", base);
+ return;
+ }
+
+ if (!cfg) {
+ DRM_DEBUG_DRIVER("disable pcc feature\n");
+ DPU_REG_WRITE(&ctx->hw, base, PCC_DIS);
+ return;
+ }
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_R_OFF, cfg->r.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_G_OFF, cfg->r.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_RED_B_OFF, cfg->r.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_R_OFF, cfg->g.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_G_OFF, cfg->g.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_GREEN_B_OFF, cfg->g.b);
+
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_R_OFF, cfg->b.r);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_G_OFF, cfg->b.g);
+ DPU_REG_WRITE(&ctx->hw, base + PCC_BLUE_B_OFF, cfg->b.b);
+
+ DPU_REG_WRITE(&ctx->hw, base, PCC_EN);
+}
+
+static void _setup_dspp_ops(struct dpu_hw_dspp *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_DSPP_PCC, &features) &&
+ IS_SC7180_TARGET(c->hw.hwversion))
+ c->ops.setup_pcc = dpu_setup_dspp_pcc;
+}
+
+static const struct dpu_dspp_cfg *_dspp_offset(enum dpu_dspp dspp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->dspp_count; i++) {
+ if (dspp == m->dspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dspp[i].base;
+ b->length = m->dspp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_DSPP;
+ return &m->dspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops;
+
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_dspp *c;
+ const struct dpu_dspp_cfg *cfg;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _dspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_dspp_ops(c, c->cap->features);
+
+ dpu_hw_blk_init(&c->base, DPU_HW_BLK_DSPP, idx, &dpu_hw_ops);
+
+ return c;
+}
+
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp)
+{
+ if (dspp)
+ dpu_hw_blk_destroy(&dspp->base);
+
+ kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
new file mode 100644
index 000000000000..7fa189cfcb06
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DPU_HW_DSPP_H
+#define _DPU_HW_DSPP_H
+
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_dspp;
+
+/**
+ * struct dpu_hw_pcc_coeff - PCC coefficient structure for each color
+ * component.
+ * @r: red coefficient.
+ * @g: green coefficient.
+ * @b: blue coefficient.
+ */
+struct dpu_hw_pcc_coeff {
+ __u32 r;
+ __u32 g;
+ __u32 b;
+};
+
+/**
+ * struct dpu_hw_pcc_cfg - pcc configuration structure
+ * @r: red coefficients.
+ * @g: green coefficients.
+ * @b: blue coefficients.
+ */
+struct dpu_hw_pcc_cfg {
+ struct dpu_hw_pcc_coeff r;
+ struct dpu_hw_pcc_coeff g;
+ struct dpu_hw_pcc_coeff b;
+};
+
+/**
+ * struct dpu_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_dspp_ops {
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct dpu_hw_dspp *ctx, struct dpu_hw_pcc_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_dspp - dspp description
+ * @base: Hardware block base structure
+ * @hw: Block hardware details
+ * @idx: DSPP index
+ * @cap: Pointer to layer_cfg
+ * @ops: Pointer to operations possible for this DSPP
+ */
+struct dpu_hw_dspp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* dspp */
+ int idx;
+ const struct dpu_dspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_dspp_ops ops;
+};
+
+/**
+ * dpu_hw_dspp - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_dspp *to_dpu_hw_dspp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_dspp, base);
+}
+
+/**
+ * dpu_hw_dspp_init - initializes the dspp hw driver object.
+ * should be called once before accessing every dspp.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @Return: pointer to structure or ERR_PTR
+ */
+struct dpu_hw_dspp *dpu_hw_dspp_init(enum dpu_dspp idx,
+ void __iomem *addr, const struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void dpu_hw_dspp_destroy(struct dpu_hw_dspp *dspp);
+
+#endif /* _DPU_HW_DSPP_H */
+
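
For orientation, a hypothetical caller of the new op: programming an
identity PCC matrix, where 0x8000 is 1.0 in the u3.15 format produced by
CONVERT_S3_15() in dpu_crtc.c. The function below is an illustrative sketch,
not part of the driver:

    /* assumes dspp was obtained from dpu_hw_dspp_init() */
    static void example_identity_pcc(struct dpu_hw_dspp *dspp)
    {
        struct dpu_hw_pcc_cfg cfg = { 0 };

        cfg.r.r = 0x8000;    /* 1.0 in u3.15 */
        cfg.g.g = 0x8000;
        cfg.b.b = 0x8000;

        if (dspp && dspp->ops.setup_pcc)
            dspp->ops.setup_pcc(dspp, &cfg);
    }
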
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
index 686882132bf6..402dc5832361 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -95,6 +95,7 @@ enum dpu_hw_blk_type {
DPU_HW_BLK_PINGPONG,
DPU_HW_BLK_INTF,
DPU_HW_BLK_WB,
+ DPU_HW_BLK_DSPP,
DPU_HW_BLK_MAX,
};
@@ -425,5 +426,6 @@ struct dpu_mdss_color {
#define DPU_DBG_MASK_TOP (1 << 7)
#define DPU_DBG_MASK_VBIF (1 << 8)
#define DPU_DBG_MASK_ROT (1 << 9)
+#define DPU_DBG_MASK_DSPP (1 << 10)
#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index ce19f1d39367..b8615d4fe8a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -772,29 +772,21 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
{
struct iommu_domain *domain;
struct msm_gem_address_space *aspace;
- int ret;
+ struct msm_mmu *mmu;
domain = iommu_domain_alloc(&platform_bus_type);
if (!domain)
return 0;
- domain->geometry.aperture_start = 0x1000;
- domain->geometry.aperture_end = 0xffffffff;
+ mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+ aspace = msm_gem_address_space_create(mmu, "dpu1",
+ 0x1000, 0xfffffff);
- aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
- domain, "dpu1");
if (IS_ERR(aspace)) {
- iommu_domain_free(domain);
+ mmu->funcs->destroy(mmu);
return PTR_ERR(aspace);
}
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- DPU_ERROR("failed to attach iommu %d\n", ret);
- msm_gem_address_space_put(aspace);
- return ret;
- }
-
dpu_kms->base.aspace = aspace;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 211f5de99a44..a3b122bfb676 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -158,6 +158,7 @@ struct dpu_global_state {
uint32_t mixer_to_enc_id[LM_MAX - LM_0];
uint32_t ctl_to_enc_id[CTL_MAX - CTL_0];
uint32_t intf_to_enc_id[INTF_MAX - INTF_0];
+ uint32_t dspp_to_enc_id[DSPP_MAX - DSPP_0];
};
struct dpu_global_state
@@ -170,7 +171,7 @@ struct dpu_global_state
*
* Main debugfs documentation is located at,
*
- * Documentation/filesystems/debugfs.txt
+ * Documentation/filesystems/debugfs.rst
*
* @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
* @dpu_debugfs_create_regset32: Create 32-bit register dump file
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 9b62451b01ee..9b2b5044e8e0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -9,6 +9,7 @@
#include "dpu_hw_ctl.h"
#include "dpu_hw_pingpong.h"
#include "dpu_hw_intf.h"
+#include "dpu_hw_dspp.h"
#include "dpu_encoder.h"
#include "dpu_trace.h"
@@ -174,6 +175,23 @@ int dpu_rm_init(struct dpu_rm *rm,
rm->ctl_blks[ctl->id - CTL_0] = &hw->base;
}
+ for (i = 0; i < cat->dspp_count; i++) {
+ struct dpu_hw_dspp *hw;
+ const struct dpu_dspp_cfg *dspp = &cat->dspp[i];
+
+ if (dspp->id < DSPP_0 || dspp->id >= DSPP_MAX) {
+ DPU_ERROR("skip dspp %d with invalid id\n", dspp->id);
+ continue;
+ }
+ hw = dpu_hw_dspp_init(dspp->id, mmio, cat);
+ if (IS_ERR_OR_NULL(hw)) {
+ rc = PTR_ERR(hw);
+ DPU_ERROR("failed dspp object creation: err %d\n", rc);
+ goto fail;
+ }
+ rm->dspp_blks[dspp->id - DSPP_0] = &hw->base;
+ }
+
return 0;
fail:
@@ -222,12 +240,17 @@ static bool _dpu_rm_check_lm_peer(struct dpu_rm *rm, int primary_idx,
* if lm, and all other hardwired blocks connected to the lm (pp) is
* available and appropriate
* @pp_idx: output parameter, index of pingpong block attached to the layer
- * mixer in rm->pongpong_blks[].
+ * mixer in rm->pingpong_blks[].
+ * @dspp_idx: output parameter, index of dspp block attached to the layer
+ * mixer in rm->dspp_blks[].
+ * @reqs: input parameter, rm requirements for HW blocks needed in the
+ * datapath.
* @Return: true if lm matches all requirements, false otherwise
*/
static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
struct dpu_global_state *global_state,
- uint32_t enc_id, int lm_idx, int *pp_idx)
+ uint32_t enc_id, int lm_idx, int *pp_idx, int *dspp_idx,
+ struct dpu_rm_requirements *reqs)
{
const struct dpu_lm_cfg *lm_cfg;
int idx;
@@ -251,6 +274,23 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return false;
}
*pp_idx = idx;
+
+ if (!reqs->topology.num_dspp)
+ return true;
+
+ idx = lm_cfg->dspp - DSPP_0;
+ if (idx < 0 || idx >= ARRAY_SIZE(rm->dspp_blks)) {
+ DPU_ERROR("failed to get dspp on lm %d\n", lm_cfg->dspp);
+ return false;
+ }
+
+ if (reserved_by_other(global_state->dspp_to_enc_id, idx, enc_id)) {
+ DPU_DEBUG("lm %d dspp %d already reserved\n", lm_cfg->id,
+ lm_cfg->dspp);
+ return false;
+ }
+ *dspp_idx = idx;
+
return true;
}
@@ -262,6 +302,7 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
{
int lm_idx[MAX_BLOCKS];
int pp_idx[MAX_BLOCKS];
+ int dspp_idx[MAX_BLOCKS] = {0};
int i, j, lm_count = 0;
if (!reqs->topology.num_lm) {
@@ -279,7 +320,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
lm_idx[lm_count] = i;
if (!_dpu_rm_check_lm_and_get_connected_blks(rm, global_state,
- enc_id, i, &pp_idx[lm_count])) {
+ enc_id, i, &pp_idx[lm_count],
+ &dspp_idx[lm_count], reqs)) {
continue;
}
@@ -299,7 +341,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
if (!_dpu_rm_check_lm_and_get_connected_blks(rm,
global_state, enc_id, j,
- &pp_idx[lm_count])) {
+ &pp_idx[lm_count], &dspp_idx[lm_count],
+ reqs)) {
continue;
}
@@ -316,6 +359,8 @@ static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
for (i = 0; i < lm_count; i++) {
global_state->mixer_to_enc_id[lm_idx[i]] = enc_id;
global_state->pingpong_to_enc_id[pp_idx[i]] = enc_id;
+ global_state->dspp_to_enc_id[dspp_idx[i]] =
+ reqs->topology.num_dspp ? enc_id : 0;
trace_dpu_rm_reserve_lms(lm_idx[i] + LM_0, enc_id,
pp_idx[i] + PINGPONG_0);
@@ -560,6 +605,11 @@ int dpu_rm_get_assigned_resources(struct dpu_rm *rm,
hw_to_enc_id = global_state->intf_to_enc_id;
max_blks = ARRAY_SIZE(rm->intf_blks);
break;
+ case DPU_HW_BLK_DSPP:
+ hw_blks = rm->dspp_blks;
+ hw_to_enc_id = global_state->dspp_to_enc_id;
+ max_blks = ARRAY_SIZE(rm->dspp_blks);
+ break;
default:
DPU_ERROR("blk type %d not managed by rm\n", type);
return 0;
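
The dspp_to_enc_id array follows the same bookkeeping as the other
dpu_global_state maps: each slot holds the id of the reserving encoder, with
0 meaning free. reserved_by_other() itself is outside this hunk; a
standalone model of that assumed behavior:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* one slot per hardware block; nonzero means "owned by that encoder" */
    static bool reserved_by_other(const uint32_t *res_map, int idx,
                                  uint32_t enc_id)
    {
        return res_map[idx] != 0 && res_map[idx] != enc_id;
    }

    int main(void)
    {
        uint32_t dspp_to_enc_id[4] = { 0 };

        dspp_to_enc_id[0] = 42;    /* encoder 42 reserves DSPP_0 */

        printf("enc 42 blocked: %d\n",
               reserved_by_other(dspp_to_enc_id, 0, 42));    /* 0 */
        printf("enc 7 blocked:  %d\n",
               reserved_by_other(dspp_to_enc_id, 0, 7));     /* 1 */
        return 0;
    }
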
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
index 6d2b04f306f0..08726bb1063a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -19,6 +19,7 @@ struct dpu_global_state;
* @mixer_blks: array of layer mixer hardware resources
* @ctl_blks: array of ctl hardware resources
* @intf_blks: array of intf hardware resources
+ * @dspp_blks: array of dspp hardware resources
* @lm_max_width: cached layer mixer maximum width
* @rm_lock: resource manager mutex
*/
@@ -27,6 +28,7 @@ struct dpu_rm {
struct dpu_hw_blk *mixer_blks[LM_MAX - LM_0];
struct dpu_hw_blk *ctl_blks[CTL_MAX - CTL_0];
struct dpu_hw_blk *intf_blks[INTF_MAX - INTF_0];
+ struct dpu_hw_blk *dspp_blks[DSPP_MAX - DSPP_0];
uint32_t lm_max_width;
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
index eecfe9b3199e..6714b088970f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -327,20 +327,18 @@ DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
);
TRACE_EVENT(dpu_enc_atomic_check_flags,
- TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
- TP_ARGS(drm_id, flags, private_flags),
+ TP_PROTO(uint32_t drm_id, unsigned int flags),
+ TP_ARGS(drm_id, flags),
TP_STRUCT__entry(
__field( uint32_t, drm_id )
__field( unsigned int, flags )
- __field( int, private_flags )
),
TP_fast_assign(
__entry->drm_id = drm_id;
__entry->flags = flags;
- __entry->private_flags = private_flags;
),
- TP_printk("id=%u, flags=%u, private_flags=%d",
- __entry->drm_id, __entry->flags, __entry->private_flags)
+ TP_printk("id=%u, flags=%u",
+ __entry->drm_id, __entry->flags)
);
DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index c9239b07fe4f..a0253297bc76 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -119,7 +119,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp4_kms->base.base;
msm_gem_unpin_iova(val, kms->aspace);
- drm_gem_object_put_unlocked(val);
+ drm_gem_object_put(val);
}
static void mdp4_crtc_destroy(struct drm_crtc *crtc)
@@ -452,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail:
- drm_gem_object_put_unlocked(cursor_bo);
+ drm_gem_object_put(cursor_bo);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index dda05436f716..19291d77df40 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -165,7 +165,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (mdp4_kms->blank_cursor_iova)
msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->aspace);
- drm_gem_object_put_unlocked(mdp4_kms->blank_cursor_bo);
+ drm_gem_object_put(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu);
@@ -510,18 +510,20 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- aspace = msm_gem_address_space_create(&pdev->dev,
- config->iommu, "mdp4");
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev,
+ config->iommu);
+
+ aspace = msm_gem_address_space_create(mmu,
+ "mdp4", 0x1000, 0xffffffff);
+
if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret)
- goto fail;
} else {
DRM_DEV_INFO(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
@@ -569,10 +571,6 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
- if (config.iommu) {
- config.iommu->geometry.aperture_start = 0x1000;
- config.iommu->geometry.aperture_end = 0xffffffff;
- }
return &config;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index e3c4c250238b..25a13a2a57a9 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -342,6 +342,81 @@ static const struct mdp5_cfg_hw msm8x16_config = {
.max_clk = 320000000,
};
+static const struct mdp5_cfg_hw msm8x36_config = {
+ .name = "msm8x36",
+ .mdp = {
+ .count = 1,
+ .base = { 0x0 },
+ .caps = MDP_CAP_SMP |
+ 0,
+ },
+ .smp = {
+ .mmb_count = 8,
+ .mmb_size = 10240,
+ .clients = {
+ [SSPP_VIG0] = 1, [SSPP_DMA0] = 4,
+ [SSPP_RGB0] = 7, [SSPP_RGB1] = 8,
+ },
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0x4003ffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
+ },
+ .lm = {
+ .count = 2,
+ .base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
+ .nb_stages = 8,
+ .max_width = 2560,
+ .max_height = 0xFFFF,
+ },
+ .pp = {
+ .count = 1,
+ .base = { 0x70000 },
+ },
+ .ad = {
+ .count = 1,
+ .base = { 0x78000 },
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+ },
+ .intf = {
+ .base = { 0x00000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 366670000,
+};
+
static const struct mdp5_cfg_hw msm8x94_config = {
.name = "msm8x94",
.mdp = {
@@ -840,6 +915,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 2, .config = { .hw = &msm8x74v2_config } },
{ .revision = 3, .config = { .hw = &apq8084_config } },
{ .revision = 6, .config = { .hw = &msm8x16_config } },
+ { .revision = 8, .config = { .hw = &msm8x36_config } },
{ .revision = 9, .config = { .hw = &msm8x94_config } },
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
@@ -941,10 +1017,6 @@ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
- if (config.iommu) {
- config.iommu->geometry.aperture_start = 0x1000;
- config.iommu->geometry.aperture_end = 0xffffffff;
- }
return &config;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 998bef1190a3..e152016a6a7d 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -166,7 +166,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
struct msm_kms *kms = &mdp5_kms->base.base;
msm_gem_unpin_iova(val, kms->aspace);
- drm_gem_object_put_unlocked(val);
+ drm_gem_object_put(val);
}
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
@@ -959,7 +959,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!ctl)
return -EINVAL;
- /* don't support LM cursors when we we have source split enabled */
+ /* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
@@ -1030,7 +1030,7 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return -EINVAL;
}
- /* don't support LM cursors when we we have source split enabled */
+ /* don't support LM cursors when we have source split enabled */
if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 47b989834af1..19ec48695ffb 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -259,17 +259,9 @@ static struct drm_info_list mdp5_debugfs_list[] = {
static int mdp5_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(mdp5_debugfs_list,
- ARRAY_SIZE(mdp5_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install mdp5_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(mdp5_debugfs_list,
+ ARRAY_SIZE(mdp5_debugfs_list),
+ minor->debugfs_root, minor);
return 0;
}
@@ -632,25 +624,25 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
+ struct msm_mmu *mmu;
+
iommu_dev = &pdev->dev;
if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
- aspace = msm_gem_address_space_create(iommu_dev,
- config->platform.iommu, "mdp5");
+ mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
+
+ aspace = msm_gem_address_space_create(mmu, "mdp5",
+ 0x1000, 0xffffffff);
+
if (IS_ERR(aspace)) {
+ if (!IS_ERR(mmu))
+ mmu->funcs->destroy(mmu);
ret = PTR_ERR(aspace);
goto fail;
}
kms->aspace = aspace;
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- DRM_DEV_ERROR(&pdev->dev, "failed to attach iommu: %d\n",
- ret);
- goto fail;
- }
} else {
DRM_DEV_INFO(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
@@ -943,7 +935,8 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
return 0;
fail:
- mdp5_destroy(pdev);
+ if (mdp5_kms)
+ mdp5_destroy(pdev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 11ae5b8444c3..66ca0c009cfa 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1132,7 +1132,7 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
priv = dev->dev_private;
if (msm_host->tx_gem_obj) {
msm_gem_unpin_iova(msm_host->tx_gem_obj, priv->kms->aspace);
- drm_gem_object_put_unlocked(msm_host->tx_gem_obj);
+ drm_gem_object_put(msm_host->tx_gem_obj);
msm_host->tx_gem_obj = NULL;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1c74381a4fc9..ee2e270f464c 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -214,31 +214,20 @@ int msm_debugfs_late_init(struct drm_device *dev)
return ret;
}
-int msm_debugfs_init(struct drm_minor *minor)
+void msm_debugfs_init(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- ret = drm_debugfs_create_files(msm_debugfs_list,
- ARRAY_SIZE(msm_debugfs_list),
- minor->debugfs_root, minor);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "could not install msm_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
dev, &msm_gpu_fops);
- if (priv->kms && priv->kms->funcs->debugfs_init) {
- ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
- if (ret)
- return ret;
- }
-
- return ret;
+ if (priv->kms && priv->kms->funcs->debugfs_init)
+ priv->kms->funcs->debugfs_init(priv->kms, minor);
}
#endif
diff --git a/drivers/gpu/drm/msm/msm_debugfs.h b/drivers/gpu/drm/msm/msm_debugfs.h
index 2b91f8c178ad..ef58f66abbb3 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.h
+++ b/drivers/gpu/drm/msm/msm_debugfs.h
@@ -8,7 +8,7 @@
#define __MSM_DEBUGFS_H__
#ifdef CONFIG_DEBUG_FS
-int msm_debugfs_init(struct drm_minor *minor);
+void msm_debugfs_init(struct drm_minor *minor);
#endif
#endif /* __MSM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 29295dee2a2e..c981cc10aebf 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -37,9 +37,10 @@
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
* - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
+ * - 1.6.0 - Syncobj support
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 5
+#define MSM_VERSION_MINOR 6
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -757,7 +758,7 @@ static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
ret = msm_gem_cpu_prep(obj, args->op, &timeout);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -775,7 +776,7 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
ret = msm_gem_cpu_fini(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -867,7 +868,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
break;
}
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -932,7 +933,7 @@ static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
ret = 0;
}
- drm_gem_object_put(obj);
+ drm_gem_object_put_locked(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -1002,7 +1003,8 @@ static struct drm_driver msm_driver = {
.driver_features = DRIVER_GEM |
DRIVER_RENDER |
DRIVER_ATOMIC |
- DRIVER_MODESET,
+ DRIVER_MODESET |
+ DRIVER_SYNCOBJ,
.open = msm_open,
.postclose = msm_postclose,
.lastclose = drm_fb_helper_lastclose,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 194d900a460e..e2d6a6056418 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -105,6 +105,7 @@ struct msm_display_topology {
u32 num_lm;
u32 num_enc;
u32 num_intf;
+ u32 num_dspp;
};
/**
@@ -236,7 +237,8 @@ int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages);
+ struct msm_gem_vma *vma, int npages,
+ u64 range_start, u64 range_end);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
@@ -250,12 +252,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name);
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
- const char *name, uint64_t va_start, uint64_t va_end);
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size);
int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
@@ -276,6 +274,9 @@ vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end);
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
uint64_t msm_gem_iova(struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 37674e886e99..d42f0665359a 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -123,7 +123,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
out_unref:
for (i = 0; i < n; i++)
- drm_gem_object_put_unlocked(bos[i]);
+ drm_gem_object_put(bos[i]);
return ERR_PTR(ret);
}
@@ -238,7 +238,7 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_put_unlocked(bo);
+ drm_gem_object_put(bo);
return ERR_CAST(fb);
}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5a6a79fbc9d6..38b0c0e1f83e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -389,7 +389,8 @@ put_iova(struct drm_gem_object *obj)
}
static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_gem_vma *vma;
@@ -404,7 +405,8 @@ static int msm_gem_get_iova_locked(struct drm_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT);
+ ret = msm_gem_init_vma(aspace, vma, obj->size >> PAGE_SHIFT,
+ range_start, range_end);
if (ret) {
del_vma(vma);
return ret;
@@ -426,6 +428,9 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
if (!(msm_obj->flags & MSM_BO_GPU_READONLY))
prot |= IOMMU_WRITE;
+ if (msm_obj->flags & MSM_BO_MAP_PRIV)
+ prot |= IOMMU_PRIV;
+
WARN_ON(!mutex_is_locked(&msm_obj->lock));
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED))
@@ -443,9 +448,13 @@ static int msm_gem_pin_iova(struct drm_gem_object *obj,
msm_obj->sgt, obj->size >> PAGE_SHIFT);
}
-/* get iova and pin it. Should have a matching put */
-int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
- struct msm_gem_address_space *aspace, uint64_t *iova)
+/*
+ * Get the iova and pin it. Should have a matching put.
+ * Limits the iova to the specified range (in pages).
+ */
+int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova,
+ u64 range_start, u64 range_end)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
u64 local;
@@ -453,7 +462,8 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, &local);
+ ret = msm_gem_get_iova_locked(obj, aspace, &local,
+ range_start, range_end);
if (!ret)
ret = msm_gem_pin_iova(obj, aspace);
@@ -465,6 +475,13 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
return ret;
}
+/* get iova and pin it. Should have a matching put */
+int msm_gem_get_and_pin_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ return msm_gem_get_and_pin_iova_range(obj, aspace, iova, 0, U64_MAX);
+}
+
/*
* Get an iova but don't pin it. Doesn't need a put because iovas are currently
* valid for the life of the object
@@ -476,7 +493,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
int ret;
mutex_lock(&msm_obj->lock);
- ret = msm_gem_get_iova_locked(obj, aspace, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova, 0, U64_MAX);
mutex_unlock(&msm_obj->lock);
return ret;
@@ -543,7 +560,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = msm_gem_mmap_offset(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
fail:
return ret;
@@ -554,6 +571,9 @@ static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret = 0;
+ if (obj->import_attach)
+ return ERR_PTR(-ENODEV);
+
mutex_lock(&msm_obj->lock);
if (WARN_ON(msm_obj->madv > madv)) {
@@ -879,7 +899,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
}
#endif
-/* don't call directly! Use drm_gem_object_put() and friends */
+/* don't call directly! Use drm_gem_object_put_locked() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -907,8 +927,7 @@ static void free_object(struct msm_gem_object *msm_obj)
put_iova(obj);
if (obj->import_attach) {
- if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ WARN_ON(msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -970,7 +989,7 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
ret = drm_gem_handle_create(file, obj, handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -1089,7 +1108,7 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
@@ -1149,7 +1168,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
return obj;
fail:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
@@ -1183,9 +1202,9 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
return vaddr;
err:
if (locked)
- drm_gem_object_put(obj);
+ drm_gem_object_put_locked(obj);
else
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
@@ -1215,9 +1234,9 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
msm_gem_unpin_iova(bo, aspace);
if (locked)
- drm_gem_object_put(bo);
+ drm_gem_object_put_locked(bo);
else
- drm_gem_object_put_unlocked(bo);
+ drm_gem_object_put(bo);
}
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...)
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 30584eaf8cc8..972490b14ba5 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -13,6 +13,7 @@
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+#define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */
struct msm_gem_address_space {
const char *name;
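MSM_BO_MAP_PRIV is consumed in msm_gem_pin_iova() above, where it adds IOMMU_PRIV to the mapping protection bits. A hedged allocation sketch, assuming the existing msm_gem_kernel_new() helper keeps its usual argument order (an assumption here):

    struct drm_gem_object *bo;
    uint64_t iova;
    void *vaddr;

    /* Sketch only: request a privileged IOMMU mapping for a kernel BO,
     * e.g. one that GPU firmware may access but ordinary GPU jobs
     * should not.
     */
    vaddr = msm_gem_kernel_new(dev, size, MSM_BO_WC | MSM_BO_MAP_PRIV,
                               gpu->aspace, &bo, &iova);
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);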
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 385d4965a8d0..8cb9aa15ff90 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -8,7 +8,9 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
#include "msm_drv.h"
#include "msm_gpu.h"
@@ -387,7 +389,187 @@ static void submit_cleanup(struct msm_gem_submit *submit)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i, false);
list_del_init(&msm_obj->submit_entry);
- drm_gem_object_put(&msm_obj->base);
+ drm_gem_object_put_locked(&msm_obj->base);
+ }
+}
+
+struct msm_submit_post_dep {
+ struct drm_syncobj *syncobj;
+ uint64_t point;
+ struct dma_fence_chain *chain;
+};
+
+static struct drm_syncobj **msm_wait_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t in_syncobjs_addr,
+ uint32_t nr_in_syncobjs,
+ size_t syncobj_stride,
+ struct msm_ringbuffer *ring)
+{
+ struct drm_syncobj **syncobjs = NULL;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ syncobjs = kcalloc(nr_in_syncobjs, sizeof(*syncobjs),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!syncobjs)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_in_syncobjs; ++i) {
+ uint64_t address = in_syncobjs_addr + i * syncobj_stride;
+ struct dma_fence *fence;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (syncobj_desc.point &&
+ !drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (syncobj_desc.flags & ~MSM_SUBMIT_SYNCOBJ_FLAGS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = drm_syncobj_find_fence(file, syncobj_desc.handle,
+ syncobj_desc.point, 0, &fence);
+ if (ret)
+ break;
+
+ if (!dma_fence_match_context(fence, ring->fctx->context))
+ ret = dma_fence_wait(fence, true);
+
+ dma_fence_put(fence);
+ if (ret)
+ break;
+
+ if (syncobj_desc.flags & MSM_SUBMIT_SYNCOBJ_RESET) {
+ syncobjs[i] =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!syncobjs[i]) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ if (syncobjs[j])
+ drm_syncobj_put(syncobjs[j]);
+ }
+ kfree(syncobjs);
+ return ERR_PTR(ret);
+ }
+ return syncobjs;
+}
+
+static void msm_reset_syncobjs(struct drm_syncobj **syncobjs,
+ uint32_t nr_syncobjs)
+{
+ uint32_t i;
+
+ for (i = 0; syncobjs && i < nr_syncobjs; ++i) {
+ if (syncobjs[i])
+ drm_syncobj_replace_fence(syncobjs[i], NULL);
+ }
+}
+
+static struct msm_submit_post_dep *msm_parse_post_deps(struct drm_device *dev,
+ struct drm_file *file,
+ uint64_t syncobjs_addr,
+ uint32_t nr_syncobjs,
+ size_t syncobj_stride)
+{
+ struct msm_submit_post_dep *post_deps;
+ struct drm_msm_gem_submit_syncobj syncobj_desc = {0};
+ int ret = 0;
+ uint32_t i, j;
+
+ post_deps = kmalloc_array(nr_syncobjs, sizeof(*post_deps),
+ GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!post_deps)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_syncobjs; ++i) {
+ uint64_t address = syncobjs_addr + i * syncobj_stride;
+
+ if (copy_from_user(&syncobj_desc,
+ u64_to_user_ptr(address),
+ min(syncobj_stride, sizeof(syncobj_desc)))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ post_deps[i].point = syncobj_desc.point;
+ post_deps[i].chain = NULL;
+
+ if (syncobj_desc.flags) {
+ ret = -EINVAL;
+ break;
+ }
+
+ if (syncobj_desc.point) {
+ if (!drm_core_check_feature(dev,
+ DRIVER_SYNCOBJ_TIMELINE)) {
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ post_deps[i].chain =
+ kmalloc(sizeof(*post_deps[i].chain),
+ GFP_KERNEL);
+ if (!post_deps[i].chain) {
+ ret = -ENOMEM;
+ break;
+ }
+ }
+
+ post_deps[i].syncobj =
+ drm_syncobj_find(file, syncobj_desc.handle);
+ if (!post_deps[i].syncobj) {
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ for (j = 0; j <= i; ++j) {
+ kfree(post_deps[j].chain);
+ if (post_deps[j].syncobj)
+ drm_syncobj_put(post_deps[j].syncobj);
+ }
+
+ kfree(post_deps);
+ return ERR_PTR(ret);
+ }
+
+ return post_deps;
+}
+
+static void msm_process_post_deps(struct msm_submit_post_dep *post_deps,
+ uint32_t count, struct dma_fence *fence)
+{
+ uint32_t i;
+
+ for (i = 0; post_deps && i < count; ++i) {
+ if (post_deps[i].chain) {
+ drm_syncobj_add_point(post_deps[i].syncobj,
+ post_deps[i].chain,
+ fence, post_deps[i].point);
+ post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(post_deps[i].syncobj,
+ fence);
+ }
}
}
@@ -403,6 +585,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct sync_file *sync_file = NULL;
struct msm_gpu_submitqueue *queue;
struct msm_ringbuffer *ring;
+ struct msm_submit_post_dep *post_deps = NULL;
+ struct drm_syncobj **syncobjs_to_reset = NULL;
int out_fence_fd = -1;
struct pid *pid = get_pid(task_pid(current));
bool has_ww_ticket = false;
@@ -411,6 +595,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (!gpu)
return -ENXIO;
+ if (args->pad)
+ return -EINVAL;
+
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
@@ -458,9 +645,29 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
return ret;
}
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_IN) {
+ syncobjs_to_reset = msm_wait_deps(dev, file,
+ args->in_syncobjs,
+ args->nr_in_syncobjs,
+ args->syncobj_stride, ring);
+ if (IS_ERR(syncobjs_to_reset))
+ return PTR_ERR(syncobjs_to_reset);
+ }
+
+ if (args->flags & MSM_SUBMIT_SYNCOBJ_OUT) {
+ post_deps = msm_parse_post_deps(dev, file,
+ args->out_syncobjs,
+ args->nr_out_syncobjs,
+ args->syncobj_stride);
+ if (IS_ERR(post_deps)) {
+ ret = PTR_ERR(post_deps);
+ goto out_post_unlock;
+ }
+ }
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out_post_unlock;
if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
@@ -587,6 +794,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->fence_fd = out_fence_fd;
}
+ msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
+ msm_process_post_deps(post_deps, args->nr_out_syncobjs,
+ submit->fence);
+
out:
submit_cleanup(submit);
if (has_ww_ticket)
@@ -597,5 +809,23 @@ out_unlock:
if (ret && (out_fence_fd >= 0))
put_unused_fd(out_fence_fd);
mutex_unlock(&dev->struct_mutex);
+
+out_post_unlock:
+ if (!IS_ERR_OR_NULL(post_deps)) {
+ for (i = 0; i < args->nr_out_syncobjs; ++i) {
+ kfree(post_deps[i].chain);
+ drm_syncobj_put(post_deps[i].syncobj);
+ }
+ kfree(post_deps);
+ }
+
+ if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
+ for (i = 0; i < args->nr_in_syncobjs; ++i) {
+ if (syncobjs_to_reset[i])
+ drm_syncobj_put(syncobjs_to_reset[i]);
+ }
+ kfree(syncobjs_to_reset);
+ }
+
return ret;
}
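From userspace, the two new submit flags pair the ioctl with arrays of struct drm_msm_gem_submit_syncobj descriptors, walked with syncobj_stride exactly as msm_wait_deps()/msm_parse_post_deps() do above. A rough sketch, assuming libdrm-style ioctl plumbing and previously created syncobj handles (everything except the field and flag names is illustrative):

    struct drm_msm_gem_submit_syncobj in = {
            .handle = wait_handle,
            .flags  = MSM_SUBMIT_SYNCOBJ_RESET,  /* clear fence after wait */
    };
    struct drm_msm_gem_submit_syncobj out = {
            .handle = signal_handle,
            .point  = 0,            /* non-zero selects a timeline point */
    };
    struct drm_msm_gem_submit req = {
            .flags           = MSM_SUBMIT_SYNCOBJ_IN | MSM_SUBMIT_SYNCOBJ_OUT,
            .in_syncobjs     = (uintptr_t)&in,
            .nr_in_syncobjs  = 1,
            .out_syncobjs    = (uintptr_t)&out,
            .nr_out_syncobjs = 1,
            .syncobj_stride  = sizeof(in),
            /* cmds, bos, queueid, ... as before; pad must stay zero */
    };
    drmIoctl(fd, DRM_IOCTL_MSM_GEM_SUBMIT, &req);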
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 1af5354bcd46..5f6a11211b64 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -103,7 +103,8 @@ void msm_gem_close_vma(struct msm_gem_address_space *aspace,
/* Initialize a new vma and allocate an iova for it */
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, int npages)
+ struct msm_gem_vma *vma, int npages,
+ u64 range_start, u64 range_end)
{
int ret;
@@ -111,7 +112,8 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return -EBUSY;
spin_lock(&aspace->lock);
- ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
+ ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages, 0,
+ 0, range_start, range_end, 0);
spin_unlock(&aspace->lock);
if (ret)
@@ -125,37 +127,14 @@ int msm_gem_init_vma(struct msm_gem_address_space *aspace,
return 0;
}
-
struct msm_gem_address_space *
-msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
- const char *name)
+msm_gem_address_space_create(struct msm_mmu *mmu, const char *name,
+ u64 va_start, u64 size)
{
struct msm_gem_address_space *aspace;
- u64 size = domain->geometry.aperture_end -
- domain->geometry.aperture_start;
-
- aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
- if (!aspace)
- return ERR_PTR(-ENOMEM);
-
- spin_lock_init(&aspace->lock);
- aspace->name = name;
- aspace->mmu = msm_iommu_new(dev, domain);
-
- drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
- size >> PAGE_SHIFT);
- kref_init(&aspace->kref);
-
- return aspace;
-}
-
-struct msm_gem_address_space *
-msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
- const char *name, uint64_t va_start, uint64_t va_end)
-{
- struct msm_gem_address_space *aspace;
- u64 size = va_end - va_start;
+ if (IS_ERR(mmu))
+ return ERR_CAST(mmu);
aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
if (!aspace)
@@ -163,10 +142,9 @@ msm_gem_address_space_create_a2xx(struct device *dev, struct msm_gpu *gpu,
spin_lock_init(&aspace->lock);
aspace->name = name;
- aspace->mmu = msm_gpummu_new(dev, gpu);
+ aspace->mmu = mmu;
- drm_mm_init(&aspace->mm, (va_start >> PAGE_SHIFT),
- size >> PAGE_SHIFT);
+ drm_mm_init(&aspace->mm, va_start >> PAGE_SHIFT, size >> PAGE_SHIFT);
kref_init(&aspace->kref);
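The switch to drm_mm_insert_node_in_range() preserves the old behaviour for unrestricted callers: in the DRM core, plain drm_mm_insert_node() is a thin wrapper over the ranged form, so the removed call was already equivalent to:

    /* alignment 0, color 0, full address range, default insert mode */
    ret = drm_mm_insert_node_in_range(&aspace->mm, &vma->node, npages,
                                      0, 0, 0, U64_MAX, 0);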
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 615c5cda5389..86a138641477 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -694,7 +694,7 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
/* move to inactive: */
msm_gem_move_to_inactive(&msm_obj->base);
msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
- drm_gem_object_put(&msm_obj->base);
+ drm_gem_object_put_locked(&msm_obj->base);
}
pm_runtime_mark_last_busy(&gpu->pdev->dev);
@@ -821,51 +821,6 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
return 0;
}
-static struct msm_gem_address_space *
-msm_gpu_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev,
- uint64_t va_start, uint64_t va_end)
-{
- struct msm_gem_address_space *aspace;
- int ret;
-
- /*
- * Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- if (!adreno_is_a2xx(to_adreno_gpu(gpu))) {
- struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
- if (!iommu)
- return NULL;
-
- iommu->geometry.aperture_start = va_start;
- iommu->geometry.aperture_end = va_end;
-
- DRM_DEV_INFO(gpu->dev->dev, "%s: using IOMMU\n", gpu->name);
-
- aspace = msm_gem_address_space_create(&pdev->dev, iommu, "gpu");
- if (IS_ERR(aspace))
- iommu_domain_free(iommu);
- } else {
- aspace = msm_gem_address_space_create_a2xx(&pdev->dev, gpu, "gpu",
- va_start, va_end);
- }
-
- if (IS_ERR(aspace)) {
- DRM_DEV_ERROR(gpu->dev->dev, "failed to init mmu: %ld\n",
- PTR_ERR(aspace));
- return ERR_CAST(aspace);
- }
-
- ret = aspace->mmu->funcs->attach(aspace->mmu);
- if (ret) {
- msm_gem_address_space_put(aspace);
- return ERR_PTR(ret);
- }
-
- return aspace;
-}
-
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, struct msm_gpu_config *config)
@@ -938,8 +893,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
msm_devfreq_init(gpu);
- gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
- config->va_start, config->va_end);
+
+ gpu->aspace = gpu->funcs->create_address_space(gpu, pdev);
if (gpu->aspace == NULL)
DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
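Each GPU backend now supplies its own create_address_space hook in msm_gpu_funcs. A minimal IOMMU-backed sketch built only from helpers touched in this series (the aperture values are placeholders, not what any real backend uses):

    static struct msm_gem_address_space *
    example_create_address_space(struct msm_gpu *gpu,
                                 struct platform_device *pdev)
    {
            struct iommu_domain *domain;
            struct msm_mmu *mmu;

            domain = iommu_domain_alloc(&platform_bus_type);
            if (!domain)
                    return NULL;  /* caller falls back to VRAM carveout */

            /* msm_iommu_new() now attaches internally and returns an
             * ERR_PTR() on failure (see msm_iommu.c below).
             */
            mmu = msm_iommu_new(&pdev->dev, domain);
            if (IS_ERR(mmu)) {
                    iommu_domain_free(domain);
                    return ERR_CAST(mmu);
            }

            return msm_gem_address_space_create(mmu, "gpu", SZ_16M,
                                                0xffffffffULL - SZ_16M);
    }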
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index be5bc2e8425c..429cb40f7931 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -21,8 +21,6 @@ struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
- uint64_t va_start;
- uint64_t va_end;
unsigned int nr_rings;
};
@@ -57,13 +55,15 @@ struct msm_gpu_funcs {
void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
struct drm_printer *p);
/* for generation specific debugfs: */
- int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
+ void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
unsigned long (*gpu_busy)(struct msm_gpu *gpu);
struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
int (*gpu_state_put)(struct msm_gpu_state *state);
unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
void (*gpu_set_freq)(struct msm_gpu *gpu, unsigned long freq);
+ struct msm_gem_address_space *(*create_address_space)
+ (struct msm_gpu *gpu, struct platform_device *pdev);
};
struct msm_gpu {
diff --git a/drivers/gpu/drm/msm/msm_gpummu.c b/drivers/gpu/drm/msm/msm_gpummu.c
index 34980d8eb7ad..310a31b05faa 100644
--- a/drivers/gpu/drm/msm/msm_gpummu.c
+++ b/drivers/gpu/drm/msm/msm_gpummu.c
@@ -21,17 +21,12 @@ struct msm_gpummu {
#define GPUMMU_PAGE_SIZE SZ_4K
#define TABLE_SIZE (sizeof(uint32_t) * GPUMMU_VA_RANGE / GPUMMU_PAGE_SIZE)
-static int msm_gpummu_attach(struct msm_mmu *mmu)
-{
- return 0;
-}
-
static void msm_gpummu_detach(struct msm_mmu *mmu)
{
}
static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, size_t len, int prot)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
@@ -59,7 +54,7 @@ static int msm_gpummu_map(struct msm_mmu *mmu, uint64_t iova,
return 0;
}
-static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_gpummu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_gpummu *gpummu = to_msm_gpummu(mmu);
unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE;
@@ -85,7 +80,6 @@ static void msm_gpummu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .attach = msm_gpummu_attach,
.detach = msm_gpummu_detach,
.map = msm_gpummu_map,
.unmap = msm_gpummu_unmap,
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index ad58cfe5998e..3a381a9674c9 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -23,13 +23,6 @@ static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu)
-{
- struct msm_iommu *iommu = to_msm_iommu(mmu);
-
- return iommu_attach_device(iommu->domain, mmu->dev);
-}
-
static void msm_iommu_detach(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -38,7 +31,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu)
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+ struct sg_table *sgt, size_t len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
@@ -49,7 +42,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
return (ret == len) ? 0 : -EINVAL;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
@@ -66,7 +59,6 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
}
static const struct msm_mmu_funcs funcs = {
- .attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
@@ -76,6 +68,10 @@ static const struct msm_mmu_funcs funcs = {
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
{
struct msm_iommu *iommu;
+ int ret;
+
+ if (!domain)
+ return ERR_PTR(-ENODEV);
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu)
@@ -85,5 +81,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
msm_mmu_init(&iommu->base, dev, &funcs);
iommu_set_fault_handler(domain, msm_fault_handler, iommu);
+ ret = iommu_attach_device(iommu->domain, dev);
+ if (ret) {
+ kfree(iommu);
+ return ERR_PTR(ret);
+ }
+
return &iommu->base;
}
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 67a623f14319..3a534ee59bf6 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -10,11 +10,10 @@
#include <linux/iommu.h>
struct msm_mmu_funcs {
- int (*attach)(struct msm_mmu *mmu);
void (*detach)(struct msm_mmu *mmu);
int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint64_t iova, unsigned len);
+ size_t len, int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
void (*destroy)(struct msm_mmu *mmu);
};
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 732f65df5c4f..fea30e7aa9e8 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -29,8 +29,6 @@
* or shader programs (if not emitted inline in cmdstream).
*/
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/circ_buf.h>
#include <linux/debugfs.h>
#include <linux/kfifo.h>
@@ -47,6 +45,8 @@ bool rd_full = false;
MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
module_param_named(rd_full, rd_full, bool, 0600);
+#ifdef CONFIG_DEBUG_FS
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 497cf443a9af..47c7dce03da4 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -356,16 +356,7 @@ static struct drm_driver mxsfb_driver = {
.irq_handler = mxsfb_irq_handler,
.irq_preinstall = mxsfb_irq_preinstall,
.irq_uninstall = mxsfb_irq_preinstall,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &fops,
.name = "mxsfb-drm",
.desc = "MXSFB Controller DRM",
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 7a62fa04272d..49e57fba4925 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,8 +1,10 @@
+NOUVEAU_PATH ?= $(srctree)
+
# SPDX-License-Identifier: MIT
-ccflags-y += -I $(srctree)/$(src)/include
-ccflags-y += -I $(srctree)/$(src)/include/nvkm
-ccflags-y += -I $(srctree)/$(src)/nvkm
-ccflags-y += -I $(srctree)/$(src)
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/include/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)/nvkm
+ccflags-y += -I $(NOUVEAU_PATH)/$(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 1f08de4241e0..640738f3196c 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -605,15 +605,16 @@ static int
nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
struct nv04_display *disp = nv04_display(crtc->dev);
- struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->primary->fb);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
int ret;
- ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret == 0) {
if (disp->image[nv_crtc->index])
nouveau_bo_unpin(disp->image[nv_crtc->index]);
- nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+ nouveau_bo_ref(nvbo, &disp->image[nv_crtc->index]);
}
return ret;
@@ -822,8 +823,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
+ struct nouveau_bo *nvbo;
struct drm_framebuffer *drm_fb;
- struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -839,13 +840,12 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
*/
if (atomic) {
drm_fb = passed_fb;
- fb = nouveau_framebuffer(passed_fb);
} else {
drm_fb = crtc->primary->fb;
- fb = nouveau_framebuffer(crtc->primary->fb);
}
- nv_crtc->fb.offset = fb->nvbo->bo.offset;
+ nvbo = nouveau_gem_object(drm_fb->obj[0]);
+ nv_crtc->fb.offset = nvbo->bo.offset;
if (nv_crtc->lut.depth != drm_fb->format->depth) {
nv_crtc->lut.depth = drm_fb->format->depth;
@@ -1017,7 +1017,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return ret;
}
@@ -1143,8 +1143,9 @@ nv04_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
- struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ struct nouveau_bo *old_bo = nouveau_gem_object(old_fb->obj[0]);
+ struct nouveau_bo *new_bo = nouveau_gem_object(fb->obj[0]);
struct nv04_page_flip_state *s;
struct nouveau_channel *chan;
struct nouveau_cli *cli;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 44ee82d0c9b6..0f4ebefed1fd 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -30,6 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_bo.h"
+#include "nouveau_gem.h"
#include <nvif/if0004.h>
@@ -52,13 +53,13 @@ nv04_display_fini(struct drm_device *dev, bool suspend)
/* Un-pin FB and cursors so they'll be evicted to system memory. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo;
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
+ if (!fb || !fb->obj[0])
continue;
-
- nouveau_bo_unpin(nouveau_fb->nvbo);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -104,13 +105,13 @@ nv04_display_init(struct drm_device *dev, bool resume, bool runtime)
/* Re-pin FB/cursors. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_framebuffer *nouveau_fb;
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct nouveau_bo *nvbo;
- nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
- if (!nouveau_fb || !nouveau_fb->nvbo)
+ if (!fb || !fb->obj[0])
continue;
-
- ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
NV_ERROR(drm, "Could not pin framebuffer\n");
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index a3a0a73ae8ab..6248fd1dbc6d 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -31,6 +31,7 @@
#include "nouveau_bo.h"
#include "nouveau_connector.h"
#include "nouveau_display.h"
+#include "nouveau_gem.h"
#include "nvreg.h"
#include "disp.h"
@@ -120,9 +121,9 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nvif_object *dev = &drm->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_bo *cur = nv_plane->cur;
+ struct nouveau_bo *nvbo;
bool flip = nv_plane->flip;
int soff = NV_PCRTC0_SIZE * nv_crtc->index;
int soff2 = NV_PCRTC0_SIZE * !nv_crtc->index;
@@ -140,17 +141,18 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
- nv_plane->cur = nv_fb->nvbo;
+ nv_plane->cur = nvbo;
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff, NV_CRTC_FSEL_OVERLAY, NV_CRTC_FSEL_OVERLAY);
nvif_mask(dev, NV_PCRTC_ENGINE_CTRL + soff2, NV_CRTC_FSEL_OVERLAY, 0);
nvif_wr32(dev, NV_PVIDEO_BASE(flip), 0);
- nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset);
+ nvif_wr32(dev, NV_PVIDEO_OFFSET_BUFF(flip), nvbo->bo.offset);
nvif_wr32(dev, NV_PVIDEO_SIZE_IN(flip), src_h << 16 | src_w);
nvif_wr32(dev, NV_PVIDEO_POINT_IN(flip), src_y << 16 | src_x);
nvif_wr32(dev, NV_PVIDEO_DS_DX(flip), (src_w << 20) / crtc_w);
@@ -172,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (format & NV_PVIDEO_FORMAT_PLANAR) {
nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0);
nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip),
- nv_fb->nvbo->bo.offset + fb->offsets[1]);
+ nvbo->bo.offset + fb->offsets[1]);
}
nvif_wr32(dev, NV_PVIDEO_FORMAT(flip), format | fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_STOP, 0);
@@ -368,8 +370,8 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
struct nouveau_plane *nv_plane =
container_of(plane, struct nouveau_plane, base);
- struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
struct nouveau_bo *cur = nv_plane->cur;
+ struct nouveau_bo *nvbo;
uint32_t overlay = 1;
int brightness = (nv_plane->brightness - 512) * 62 / 512;
int ret, i;
@@ -384,11 +386,12 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
if (ret)
return ret;
- ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
if (ret)
return ret;
- nv_plane->cur = nv_fb->nvbo;
+ nv_plane->cur = nvbo;
nvif_wr32(dev, NV_PVIDEO_OE_STATE, 0);
nvif_wr32(dev, NV_PVIDEO_SU_STATE, 0);
@@ -396,7 +399,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
for (i = 0; i < 2; i++) {
nvif_wr32(dev, NV_PVIDEO_BUFF0_START_ADDRESS + 4 * i,
- nv_fb->nvbo->bo.offset);
+ nvbo->bo.offset);
nvif_wr32(dev, NV_PVIDEO_BUFF0_PITCH_LENGTH + 4 * i,
fb->pitches[0]);
nvif_wr32(dev, NV_PVIDEO_BUFF0_OFFSET + 4 * i, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index ee782151d332..511258bfbcbc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -263,7 +263,8 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
struct nv50_disp_base_channel_dma_v0 args = {
.head = head,
};
- struct nv50_disp *disp = nv50_disp(drm->dev);
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ struct nv50_disp *disp50 = nv50_disp(drm->dev);
struct nv50_wndw *wndw;
int ret;
@@ -273,9 +274,9 @@ base507c_new_(const struct nv50_wndw_func *func, const u32 *format,
if (*pwndw = wndw, ret)
return ret;
- ret = nv50_dmac_create(&drm->client.device, &disp->disp->object,
+ ret = nv50_dmac_create(&drm->client.device, &disp->disp.object,
&oclass, head, &args, sizeof(args),
- disp->sync->bo.offset, &wndw->wndw);
+ disp50->sync->bo.offset, &wndw->wndw);
if (ret) {
NV_ERROR(drm, "base%04x allocation failed: %d\n", oclass, ret);
return ret;
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core.h b/drivers/gpu/drm/nouveau/dispnv50/core.h
index ff94f3f6f264..99157dc94d23 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/core.h
@@ -2,6 +2,7 @@
#define __NV50_KMS_CORE_H__
#include "disp.h"
#include "atom.h"
+#include <nouveau_encoder.h>
struct nv50_core {
const struct nv50_core_func *func;
@@ -15,6 +16,7 @@ void nv50_core_del(struct nv50_core **);
struct nv50_core_func {
void (*init)(struct nv50_core *);
void (*ntfy_init)(struct nouveau_bo *, u32 offset);
+ int (*caps_init)(struct nouveau_drm *, struct nv50_disp *);
int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset,
struct nvif_device *);
void (*update)(struct nv50_core *, u32 *interlock, bool ntfy);
@@ -27,6 +29,9 @@ struct nv50_core_func {
const struct nv50_outp_func {
void (*ctrl)(struct nv50_core *, int or, u32 ctrl,
struct nv50_head_atom *);
+ /* XXX: Only used by SORs and PIORs for now */
+ void (*get_caps)(struct nv50_disp *,
+ struct nouveau_encoder *, int or);
} *dac, *pior, *sor;
};
@@ -35,6 +40,7 @@ int core507d_new_(const struct nv50_core_func *, struct nouveau_drm *, s32,
struct nv50_core **);
void core507d_init(struct nv50_core *);
void core507d_ntfy_init(struct nouveau_bo *, u32);
+int core507d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int core507d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void core507d_update(struct nv50_core *, u32 *, bool);
@@ -51,6 +57,7 @@ extern const struct nv50_outp_func sor907d;
int core917d_new(struct nouveau_drm *, s32, struct nv50_core **);
int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **);
+int corec37d_caps_init(struct nouveau_drm *, struct nv50_disp *);
int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *);
void corec37d_update(struct nv50_core *, u32 *, bool);
void corec37d_wndw_owner(struct nv50_core *);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index c5152c39c684..e341f572c269 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -62,6 +62,20 @@ core507d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4, 0x00000000);
}
+int
+core507d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ u32 *push = evo_wait(&disp->core->chan, 2);
+
+ if (push) {
+ evo_mthd(push, 0x008c, 1);
+ evo_data(push, 0x0);
+ evo_kick(push, &disp->core->chan);
+ }
+
+ return 0;
+}
+
void
core507d_init(struct nv50_core *core)
{
@@ -77,6 +91,7 @@ static const struct nv50_core_func
core507d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head507d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core827d.c b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
index 6123a068f836..2e0c1c536afe 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core827d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core827d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core827d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head827d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core907d.c b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
index ef822f813435..271629832629 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core907d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core907d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head907d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core917d.c b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
index 392338df5bfd..5cc072d4c30f 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core917d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core917d.c
@@ -26,6 +26,7 @@ static const struct nv50_core_func
core917d = {
.init = core507d_init,
.ntfy_init = core507d_ntfy_init,
+ .caps_init = core507d_caps_init,
.ntfy_wait_done = core507d_ntfy_wait_done,
.update = core507d_update,
.head = &head917d,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index c03cb987856b..e0c8811fb8e4 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -22,6 +22,7 @@
#include "core.h"
#include "head.h"
+#include <nvif/class.h>
#include <nouveau_bo.h>
#include <nvif/timer.h>
@@ -87,6 +88,30 @@ corec37d_ntfy_init(struct nouveau_bo *bo, u32 offset)
nouveau_bo_wr32(bo, offset / 4 + 3, 0x00000000);
}
+int corec37d_caps_init(struct nouveau_drm *drm, struct nv50_disp *disp)
+{
+ int ret;
+
+ ret = nvif_object_init(&disp->disp->object, 0, GV100_DISP_CAPS,
+ NULL, 0, &disp->caps);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to init notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = nvif_object_map(&disp->caps, NULL, 0);
+ if (ret) {
+ NV_ERROR(drm,
+ "Failed to map notifier caps region: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static void
corec37d_init(struct nv50_core *core)
{
@@ -111,6 +136,7 @@ static const struct nv50_core_func
corec37d = {
.init = corec37d_init,
.ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
index 147adcd60937..10ba9e9e4ae6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec57d.c
@@ -46,6 +46,7 @@ static const struct nv50_core_func
corec57d = {
.init = corec57d_init,
.ntfy_init = corec37d_ntfy_init,
+ .caps_init = corec37d_caps_init,
.ntfy_wait_done = corec37d_ntfy_wait_done,
.update = corec37d_update,
.wndw.owner = corec37d_wndw_owner,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 8c5cf096f69b..658a200ab616 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -32,7 +32,7 @@
bool
curs507a_space(struct nv50_wndw *wndw)
{
- nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 100,
if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
return true;
);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 6be9df1820c5..d472942102f5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -277,7 +277,7 @@ nv50_outp_release(struct nouveau_encoder *nv_encoder)
}
static int
-nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
+nv50_outp_acquire(struct nouveau_encoder *nv_encoder, bool hda)
{
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
@@ -289,6 +289,7 @@ nv50_outp_acquire(struct nouveau_encoder *nv_encoder)
.base.method = NV50_DISP_MTHD_V1_ACQUIRE,
.base.hasht = nv_encoder->dcb->hasht,
.base.hashm = nv_encoder->dcb->hashm,
+ .info.hda = hda,
};
int ret;
@@ -393,7 +394,7 @@ nv50_dac_enable(struct drm_encoder *encoder)
struct nv50_head_atom *asyh = nv50_head_atom(nv_crtc->base.state);
struct nv50_core *core = nv50_disp(encoder->dev)->core;
- nv50_outp_acquire(nv_encoder);
+ nv50_outp_acquire(nv_encoder, false);
core->func->dac->ctrl(core, nv_encoder->or, 1 << nv_crtc->index, asyh);
asyh->or.depth = 0;
@@ -482,15 +483,16 @@ nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
* audio component binding for ELD notification
*/
static void
-nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port)
+nv50_audio_component_eld_notify(struct drm_audio_component *acomp, int port,
+ int dev_id)
{
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
- port, -1);
+ port, dev_id);
}
static int
-nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
+nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
bool *enabled, unsigned char *buf, int max_bytes)
{
struct drm_device *drm_dev = dev_get_drvdata(kdev);
@@ -506,9 +508,10 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int pipe,
nv_encoder = nouveau_encoder(encoder);
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_crtc = nouveau_crtc(encoder->crtc);
- if (!nv_connector || !nv_crtc || nv_crtc->index != port)
+ if (!nv_connector || !nv_crtc || nv_encoder->or != port ||
+ nv_crtc->index != dev_id)
continue;
- *enabled = drm_detect_monitor_audio(nv_connector->edid);
+ *enabled = nv_encoder->audio;
if (*enabled) {
ret = drm_eld_size(nv_connector->base.eld);
memcpy(buf, nv_connector->base.eld,
@@ -598,9 +601,11 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
+ nv_encoder->audio = false;
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
- nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+ nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+ nv_crtc->index);
}
static void
@@ -633,8 +638,10 @@ nv50_audio_enable(struct drm_encoder *encoder, struct drm_display_mode *mode)
nvif_mthd(&disp->disp->object, 0, &args,
sizeof(args.base) + drm_eld_size(args.data));
+ nv_encoder->audio = true;
- nv50_audio_component_eld_notify(drm->audio.component, nv_crtc->index);
+ nv50_audio_component_eld_notify(drm->audio.component, nv_encoder->or,
+ nv_crtc->index);
}
/******************************************************************************
@@ -904,15 +911,9 @@ nv50_msto_atomic_check(struct drm_encoder *encoder,
if (!state->duplicated) {
const int clock = crtc_state->adjusted_mode.clock;
- /*
- * XXX: Since we don't use HDR in userspace quite yet, limit
- * the bpc to 8 to save bandwidth on the topology. In the
- * future, we'll want to properly fix this by dynamically
- * selecting the highest possible bpc that would fit in the
- * topology
- */
- asyh->or.bpc = min(connector->display_info.bpc, 8U);
- asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3, false);
+ asyh->or.bpc = connector->display_info.bpc;
+ asyh->dp.pbn = drm_dp_calc_pbn_mode(clock, asyh->or.bpc * 3,
+ false);
}
slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port,
@@ -968,7 +969,7 @@ nv50_msto_enable(struct drm_encoder *encoder)
DRM_DEBUG_KMS("Failed to allocate VCPI\n");
if (!mstm->links++)
- nv50_outp_acquire(mstm->outp);
+ nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/);
if (mstm->outp->link & 1)
proto = 0x8;
@@ -1058,7 +1059,14 @@ static enum drm_mode_status
nv50_mstc_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- return MODE_OK;
+ struct nv50_mstc *mstc = nv50_mstc(connector);
+ struct nouveau_encoder *outp = mstc->mstm->outp;
+
+ /* TODO: calculate the PBN from the dotclock and validate against the
+ * MSTB's max possible PBN
+ */
+
+ return nv50_dp_mode_valid(connector, outp, mode, NULL);
}
static int
@@ -1072,8 +1080,17 @@ nv50_mstc_get_modes(struct drm_connector *connector)
if (mstc->edid)
ret = drm_add_edid_modes(&mstc->connector, mstc->edid);
- if (!mstc->connector.display_info.bpc)
- mstc->connector.display_info.bpc = 8;
+ /*
+ * XXX: Since we don't use HDR in userspace quite yet, limit the bpc
+ * to 8 to save bandwidth on the topology. In the future, we'll want
+ * to properly fix this by dynamically selecting the highest possible
+ * bpc that would fit in the topology
+ */
+ if (connector->display_info.bpc)
+ connector->display_info.bpc =
+ clamp(connector->display_info.bpc, 6U, 8U);
+ else
+ connector->display_info.bpc = 8;
if (mstc->native)
drm_mode_destroy(mstc->connector.dev, mstc->native);
@@ -1123,8 +1140,10 @@ nv50_mstc_detect(struct drm_connector *connector,
return connector_status_disconnected;
ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(connector->dev->dev);
return connector_status_disconnected;
+ }
ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr,
mstc->port);
@@ -1544,12 +1563,18 @@ nv50_sor_enable(struct drm_encoder *encoder)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector;
struct nvbios *bios = &drm->vbios;
+ bool hda = false;
u8 proto = 0xf;
u8 depth = 0x0;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
nv_encoder->crtc = encoder->crtc;
- nv50_outp_acquire(nv_encoder);
+
+ if ((disp->disp->object.oclass == GT214_DISP ||
+ disp->disp->object.oclass >= GF110_DISP) &&
+ drm_detect_monitor_audio(nv_connector->edid))
+ hda = true;
+ nv50_outp_acquire(nv_encoder, hda);
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_TMDS:
@@ -1659,6 +1684,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
+ struct nv50_disp *disp = nv50_disp(connector->dev);
int type, ret;
switch (dcbe->type) {
@@ -1685,10 +1711,12 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_connector_attach_encoder(connector, encoder);
+ disp->core->func->sor->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
if (dcbe->type == DCB_OUTPUT_DP) {
- struct nv50_disp *disp = nv50_disp(encoder->dev);
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
+
if (aux) {
if (disp->disp->object.oclass < GF110_DISP) {
/* HW has no support for address-only
@@ -1756,7 +1784,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
u8 owner = 1 << nv_crtc->index;
u8 proto;
- nv50_outp_acquire(nv_encoder);
+ nv50_outp_acquire(nv_encoder, false);
switch (asyh->or.bpc) {
case 10: asyh->or.depth = 0x6; break;
@@ -1801,7 +1829,9 @@ nv50_pior_func = {
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
+ struct drm_device *dev = connector->dev;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nv50_disp *disp = nv50_disp(dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
struct nvkm_i2c_aux *aux = NULL;
@@ -1840,6 +1870,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
drm_encoder_helper_add(encoder, &nv50_pior_help);
drm_connector_attach_encoder(connector, encoder);
+
+ disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
+
return 0;
}
@@ -2369,7 +2402,8 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
struct drm_encoder *encoder;
struct drm_plane *plane;
- core->func->init(core);
+ if (resume || runtime)
+ core->func->init(core);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
@@ -2396,6 +2430,8 @@ nv50_display_destroy(struct drm_device *dev)
nv50_audio_component_fini(nouveau_drm(dev));
+ nvif_object_unmap(&disp->caps);
+ nvif_object_fini(&disp->caps);
nv50_core_del(&disp->core);
nouveau_bo_unmap(disp->sync);
@@ -2456,6 +2492,22 @@ nv50_display_create(struct drm_device *dev)
if (ret)
goto out;
+ disp->core->func->init(disp->core);
+ if (disp->core->func->caps_init) {
+ ret = disp->core->func->caps_init(drm, disp);
+ if (ret)
+ goto out;
+ }
+
+ /* Assign the correct format modifiers */
+ if (disp->disp->object.oclass >= TU102_DISP)
+ nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
+ else
+ if (disp->disp->object.oclass >= GF110_DISP)
+ nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
+ else
+ nouveau_display(dev)->format_modifiers = disp50xx_modifiers;
+
/* create crtc objects to represent the hw heads */
if (disp->disp->object.oclass >= GV100_DISP)
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
@@ -2551,3 +2603,53 @@ out:
nv50_display_destroy(dev);
return ret;
}
+
+/******************************************************************************
+ * Format modifiers
+ *****************************************************************************/
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp50xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x7a, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x78, 5),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 1, 0x70, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 disp90xx_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 0, 0xfe, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h
index d54fe00ac3a3..696e70a6b98b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h
@@ -9,6 +9,7 @@ struct nv50_msto;
struct nv50_disp {
struct nvif_disp *disp;
struct nv50_core *core;
+ struct nvif_object caps;
#define NV50_DISP_SYNC(c, o) ((c) * 0x040 + (o))
#define NV50_DISP_CORE_NTFY NV50_DISP_SYNC(0 , 0x00)
@@ -78,6 +79,10 @@ void nv50_dmac_destroy(struct nv50_dmac *);
u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);
+extern const u64 disp50xx_modifiers[];
+extern const u64 disp90xx_modifiers[];
+extern const u64 wndwc57e_modifiers[];
+
#define evo_mthd(p, m, s) do { \
const u32 _m = (m), _s = (s); \
if (drm_debug_enabled(DRM_UT_KMS)) \
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
index 00011ce109a6..4a9a32b89f74 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc37d.c
@@ -168,14 +168,15 @@ headc37d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
struct nv50_head_mode *m = &asyh->mode;
u32 *push;
- if ((push = evo_wait(core, 12))) {
+ if ((push = evo_wait(core, 13))) {
evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
evo_data(push, (m->v.active << 16) | m->h.active );
evo_data(push, (m->v.synce << 16) | m->h.synce );
evo_data(push, (m->v.blanke << 16) | m->h.blanke );
evo_data(push, (m->v.blanks << 16) | m->h.blanks );
evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+ evo_data(push, m->interlace);
evo_data(push, m->clock * 1000);
evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
evo_data(push, m->clock * 1000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
index 938d910a1b1e..859131a8bc3c 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/headc57d.c
@@ -173,14 +173,15 @@ headc57d_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
struct nv50_dmac *core = &nv50_disp(head->base.base.dev)->core->chan;
struct nv50_head_mode *m = &asyh->mode;
u32 *push;
- if ((push = evo_wait(core, 12))) {
+ if ((push = evo_wait(core, 13))) {
evo_mthd(push, 0x2064 + (head->base.index * 0x400), 5);
evo_data(push, (m->v.active << 16) | m->h.active );
evo_data(push, (m->v.synce << 16) | m->h.synce );
evo_data(push, (m->v.blanke << 16) | m->h.blanke );
evo_data(push, (m->v.blanks << 16) | m->h.blanks );
evo_data(push, (m->v.blank2e << 16) | m->v.blank2s);
- evo_mthd(push, 0x200c + (head->base.index * 0x400), 1);
+ evo_mthd(push, 0x2008 + (head->base.index * 0x400), 2);
+ evo_data(push, m->interlace);
evo_data(push, m->clock * 1000);
evo_mthd(push, 0x2028 + (head->base.index * 0x400), 1);
evo_data(push, m->clock * 1000);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
index d2bac6a341dc..45d8ce7d2c28 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/pior507d.c
@@ -38,7 +38,15 @@ pior507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+pior507d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
+ int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
const struct nv50_outp_func
pior507d = {
.ctrl = pior507d_ctrl,
+ .get_caps = pior507d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
index 5222fe6a9b21..9a59fa7da00d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor507d.c
@@ -38,7 +38,14 @@ sor507d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sor507d_get_caps(struct nv50_disp *core, struct nouveau_encoder *outp, int or)
+{
+ outp->caps.dp_interlace = true;
+}
+
const struct nv50_outp_func
sor507d = {
.ctrl = sor507d_ctrl,
+ .get_caps = sor507d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
index b0314ec11fb3..9577ccf1c809 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sor907d.c
@@ -21,6 +21,7 @@
*/
#include "core.h"
+#include <nouveau_bo.h>
#include <nvif/class.h>
static void
@@ -35,7 +36,17 @@ sor907d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sor907d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ const int off = or * 2;
+ u32 tmp = nouveau_bo_rd32(disp->sync, 0x000014 + off);
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
const struct nv50_outp_func
sor907d = {
.ctrl = sor907d_ctrl,
+ .get_caps = sor907d_get_caps,
};
diff --git a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
index dff059241c5d..c86ca955fdcd 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/sorc37d.c
@@ -33,7 +33,16 @@ sorc37d_ctrl(struct nv50_core *core, int or, u32 ctrl,
}
}
+static void
+sorc37d_get_caps(struct nv50_disp *disp, struct nouveau_encoder *outp, int or)
+{
+ u32 tmp = nvif_rd32(&disp->caps, 0x000144 + (or * 8));
+
+ outp->caps.dp_interlace = !!(tmp & 0x04000000);
+}
+
const struct nv50_outp_func
sorc37d = {
.ctrl = sorc37d_ctrl,
+ .get_caps = sorc37d_get_caps,
};
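
The four get_caps hooks above share one job: populate outp->caps.dp_interlace per output resource. The pre-GF119 classes hard-code it, while GF119+ reads a per-OR capability dword (bit 26) from the sync buffer or the new caps object. A minimal sketch of the expected call pattern at display init time; nv50_outp_probe_caps() and the OR derivation are illustrative, not the exact upstream call site:

static void
nv50_outp_probe_caps(struct nv50_disp *disp, struct nouveau_encoder *outp,
		     const struct nv50_outp_func *func)
{
	/* ORs are a bitmask in the DCB; convert to an index for the hook. */
	int or = ffs(outp->dcb->or) - 1;

	outp->caps.dp_interlace = false;
	if (func->get_caps)
		func->get_caps(disp, outp, or);
}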
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index bb737f9281e6..99b9b681736d 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -29,6 +29,7 @@
#include <drm/drm_fourcc.h>
#include "nouveau_bo.h"
+#include "nouveau_gem.h"
static void
nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
@@ -39,12 +40,13 @@ nv50_wndw_ctxdma_del(struct nv50_wndw_ctxdma *ctxdma)
}
static struct nv50_wndw_ctxdma *
-nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
+nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct drm_framebuffer *fb)
{
- struct nouveau_drm *drm = nouveau_drm(fb->base.dev);
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
struct nv50_wndw_ctxdma *ctxdma;
- const u8 kind = fb->nvbo->kind;
- const u32 handle = 0xfb000000 | kind;
+ u32 handle;
+ u32 unused;
+ u8 kind;
struct {
struct nv_dma_v0 base;
union {
@@ -56,6 +58,9 @@ nv50_wndw_ctxdma_new(struct nv50_wndw *wndw, struct nouveau_framebuffer *fb)
u32 argc = sizeof(args.base);
int ret;
+ nouveau_framebuffer_get_layout(fb, &unused, &kind);
+ handle = 0xfb000000 | kind;
+
list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
if (ctxdma->object.handle == handle)
return ctxdma;
@@ -187,6 +192,8 @@ nv50_wndw_atomic_check_release(struct nv50_wndw *wndw,
wndw->func->release(wndw, asyw, asyh);
asyw->ntfy.handle = 0;
asyw->sema.handle = 0;
+ asyw->xlut.handle = 0;
+ memset(asyw->image.handle, 0x00, sizeof(asyw->image.handle));
}
static int
@@ -234,16 +241,20 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
struct nv50_wndw_atom *asyw,
struct nv50_head_atom *asyh)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(asyw->state.fb);
+ struct drm_framebuffer *fb = asyw->state.fb;
struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
+ uint8_t kind;
+ uint32_t tile_mode;
int ret;
NV_ATOMIC(drm, "%s acquire\n", wndw->plane.name);
- if (asyw->state.fb != armw->state.fb || !armw->visible || modeset) {
- asyw->image.w = fb->base.width;
- asyw->image.h = fb->base.height;
- asyw->image.kind = fb->nvbo->kind;
+ if (fb != armw->state.fb || !armw->visible || modeset) {
+ nouveau_framebuffer_get_layout(fb, &tile_mode, &kind);
+
+ asyw->image.w = fb->width;
+ asyw->image.h = fb->height;
+ asyw->image.kind = kind;
ret = nv50_wndw_atomic_check_acquire_rgb(asyw);
if (ret) {
@@ -255,16 +266,16 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw, bool modeset,
if (asyw->image.kind) {
asyw->image.layout = 0;
if (drm->client.device.info.chipset >= 0xc0)
- asyw->image.blockh = fb->nvbo->mode >> 4;
+ asyw->image.blockh = tile_mode >> 4;
else
- asyw->image.blockh = fb->nvbo->mode;
- asyw->image.blocks[0] = fb->base.pitches[0] / 64;
+ asyw->image.blockh = tile_mode;
+ asyw->image.blocks[0] = fb->pitches[0] / 64;
asyw->image.pitch[0] = 0;
} else {
asyw->image.layout = 1;
asyw->image.blockh = 0;
asyw->image.blocks[0] = 0;
- asyw->image.pitch[0] = fb->base.pitches[0];
+ asyw->image.pitch[0] = fb->pitches[0];
}
if (!asyh->state.async_flip)
@@ -471,47 +482,51 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
static void
nv50_wndw_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(old_state->fb);
struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ struct nouveau_bo *nvbo;
NV_ATOMIC(drm, "%s cleanup: %p\n", plane->name, old_state->fb);
if (!old_state->fb)
return;
- nouveau_bo_unpin(fb->nvbo);
+ nvbo = nouveau_gem_object(old_state->fb->obj[0]);
+ nouveau_bo_unpin(nvbo);
}
static int
nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(state->fb);
+ struct drm_framebuffer *fb = state->fb;
struct nouveau_drm *drm = nouveau_drm(plane->dev);
struct nv50_wndw *wndw = nv50_wndw(plane);
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
+ struct nouveau_bo *nvbo;
struct nv50_head_atom *asyh;
struct nv50_wndw_ctxdma *ctxdma;
int ret;
- NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, state->fb);
+ NV_ATOMIC(drm, "%s prepare: %p\n", plane->name, fb);
if (!asyw->state.fb)
return 0;
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM, true);
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
if (ret)
return ret;
if (wndw->ctxdma.parent) {
ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(fb->nvbo);
+ nouveau_bo_unpin(nvbo);
return PTR_ERR(ctxdma);
}
- asyw->image.handle[0] = ctxdma->object.handle;
+ if (asyw->visible)
+ asyw->image.handle[0] = ctxdma->object.handle;
}
- asyw->state.fence = dma_resv_get_excl_rcu(fb->nvbo->bo.base.resv);
- asyw->image.offset[0] = fb->nvbo->bo.offset;
+ asyw->state.fence = dma_resv_get_excl_rcu(nvbo->bo.base.resv);
+ asyw->image.offset[0] = nvbo->bo.offset;
if (wndw->func->prepare) {
asyh = nv50_head_atom_get(asyw->state.state, asyw->state.crtc);
@@ -603,6 +618,29 @@ nv50_wndw_destroy(struct drm_plane *plane)
kfree(wndw);
}
+/* This function assumes the format has already been validated against the plane
+ * and the modifier was validated against the device-wide modifier list at FB
+ * creation time.
+ */
+static bool nv50_plane_format_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ struct nouveau_drm *drm = nouveau_drm(plane->dev);
+ uint8_t i;
+
+ if (drm->client.device.info.chipset < 0xc0) {
+ const struct drm_format_info *info = drm_format_info(format);
+ const uint8_t kind = (modifier >> 12) & 0xff;
+
+ if (!format) return false;
+
+ for (i = 0; i < info->num_planes; i++)
+ if ((info->cpp[i] != 4) && kind != 0x70) return false;
+ }
+
+ return true;
+}
+
const struct drm_plane_funcs
nv50_wndw = {
.update_plane = drm_atomic_helper_update_plane,
@@ -611,6 +649,7 @@ nv50_wndw = {
.reset = nv50_wndw_reset,
.atomic_duplicate_state = nv50_wndw_atomic_duplicate_state,
.atomic_destroy_state = nv50_wndw_atomic_destroy_state,
+ .format_mod_supported = nv50_plane_format_mod_supported,
};
static int
@@ -658,7 +697,8 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev,
for (nformat = 0; format[nformat]; nformat++);
ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw,
- format, nformat, NULL,
+ format, nformat,
+ nouveau_display(dev)->format_modifiers,
type, "%s-%d", name, index);
if (ret) {
kfree(*pwndw);
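
nv50_wndw_ctxdma_new() now derives the page kind via nouveau_framebuffer_get_layout() instead of reaching into a nouveau_framebuffer, but the caching scheme is unchanged: one DMA object per kind, keyed by handle 0xfb000000 | kind. The lookup-or-create idiom, reduced to a sketch (allocation and nvif argument setup elided):

	u32 handle = 0xfb000000 | kind;
	struct nv50_wndw_ctxdma *ctxdma;

	list_for_each_entry(ctxdma, &wndw->ctxdma.list, head) {
		if (ctxdma->object.handle == handle)
			return ctxdma;	/* any FB with this kind shares it */
	}
	/* miss: kzalloc a ctxdma, add it to wndw->ctxdma.list, then build
	 * the nvif DMA object for this kind */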
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
index 35c9c52fab26..1d64741595ba 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndwc57e.c
@@ -173,6 +173,23 @@ wndwc57e_ilut(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, int size)
return true;
}
+/****************************************************************
+ * Log2(block height) ----------------------------+ *
+ * Page Kind ----------------------------------+ | *
+ * Gob Height/Page Kind Generation ------+ | | *
+ * Sector layout -------+ | | | *
+ * Compression ------+ | | | | */
+const u64 wndwc57e_modifiers[] = { /* | | | | | */
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 0),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 1),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 2),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 3),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 4),
+ DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(0, 1, 2, 0x06, 5),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static const struct nv50_wndw_func
wndwc57e = {
.acquire = wndwc37e_acquire,
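
The column key above corresponds to the bit layout of DRM_FORMAT_MOD_NVIDIA_BLOCK_LINEAR_2D(c, s, g, k, h) in drm_fourcc.h: log2(block height in GOBs) in bits 3:0, page kind in bits 19:12, GOB-height/page-kind generation in bits 21:20, sector layout in bit 22, compression in bits 25:23. A small decode sketch matching the shifts nouveau_decode_mod() uses later in this series:

static inline u8 mod_kind(u64 modifier)
{
	return (modifier >> 12) & 0xff;	/* page kind, e.g. 0x06 above */
}

static inline u8 mod_log_block_height(u64 modifier)
{
	return modifier & 0xf;		/* 0..5 in the table above */
}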
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
index 38bf4f38e869..53800fb46582 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl5070.h
@@ -46,7 +46,8 @@ struct nv50_disp_acquire_v0 {
__u8 version;
__u8 or;
__u8 link;
- __u8 pad03[5];
+ __u8 hda;
+ __u8 pad04[4];
};
struct nv50_disp_dac_load_v0 {
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 30659747ffe8..2c79beb41126 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -89,6 +89,8 @@
#define GV100_DISP /* cl5070.h */ 0x0000c370
#define TU102_DISP /* cl5070.h */ 0x0000c570
+#define GV100_DISP_CAPS 0x0000c373
+
#define NV31_MPEG 0x00003174
#define G82_MPEG 0x00008274
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
index 1218f28c14ba..76288c682e9e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/subdev.h
@@ -24,6 +24,8 @@ struct nvkm_subdev_func {
};
extern const char *nvkm_subdev_name[NVKM_SUBDEV_NR];
+int nvkm_subdev_new_(const struct nvkm_subdev_func *, struct nvkm_device *,
+ int index, struct nvkm_subdev **);
void nvkm_subdev_ctor(const struct nvkm_subdev_func *, struct nvkm_device *,
int index, struct nvkm_subdev *);
void nvkm_subdev_del(struct nvkm_subdev **);
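
The new nvkm_subdev_new_() declaration complements nvkm_subdev_ctor(): the ctor initialises caller-embedded storage, while new_ allocates it first. Its implementation is presumably the usual allocate-then-construct wrapper, along these lines:

int
nvkm_subdev_new_(const struct nvkm_subdev_func *func,
		 struct nvkm_device *device, int index,
		 struct nvkm_subdev **psubdev)
{
	if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(func, device, index, *psubdev);
	return 0;
}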
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index e2bae1424502..72c91991b96a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -139,7 +139,7 @@ nouveau_abi16_chan_fini(struct nouveau_abi16 *abi16,
if (chan->ntfy) {
nouveau_vma_del(&chan->ntfy_vma);
nouveau_bo_unpin(chan->ntfy);
- drm_gem_object_put_unlocked(&chan->ntfy->bo.base);
+ drm_gem_object_put(&chan->ntfy->bo.base);
}
if (chan->heap.block_size)
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index fe3a10255c36..69a84d0197d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -49,7 +49,6 @@ static struct nouveau_dsm_priv {
bool optimus_flags_detected;
bool optimus_skip_dsm;
acpi_handle dhandle;
- acpi_handle rom_handle;
} nouveau_dsm_priv;
bool nouveau_is_optimus(void) {
@@ -212,37 +211,6 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
.get_client_id = nouveau_dsm_get_client_id,
};
-/*
- * Firmware supporting Windows 8 or later do not use _DSM to put the device into
- * D3cold, they instead rely on disabling power resources on the parent.
- */
-static bool nouveau_pr3_present(struct pci_dev *pdev)
-{
- struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
- struct acpi_device *parent_adev;
-
- if (!parent_pdev)
- return false;
-
- if (!parent_pdev->bridge_d3) {
- /*
- * Parent PCI bridge is currently not power managed.
- * Since userspace can change these afterwards to be on
- * the safe side we stick with _DSM and prevent usage of
- * _PR3 from the bridge.
- */
- pci_d3cold_disable(pdev);
- return false;
- }
-
- parent_adev = ACPI_COMPANION(&parent_pdev->dev);
- if (!parent_adev)
- return false;
-
- return parent_adev->power.flags.power_resources &&
- acpi_has_method(parent_adev->handle, "_PR3");
-}
-
static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
bool *has_mux, bool *has_opt,
bool *has_opt_flags, bool *has_pr3)
@@ -250,6 +218,16 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
acpi_handle dhandle;
bool supports_mux;
int optimus_funcs;
+ struct pci_dev *parent_pdev;
+
+ *has_pr3 = false;
+ parent_pdev = pci_upstream_bridge(pdev);
+ if (parent_pdev) {
+ if (parent_pdev->bridge_d3)
+ *has_pr3 = pci_pr3_present(parent_pdev);
+ else
+ pci_d3cold_disable(pdev);
+ }
dhandle = ACPI_HANDLE(&pdev->dev);
if (!dhandle)
@@ -270,7 +248,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
*has_mux = supports_mux;
*has_opt = !!optimus_funcs;
*has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
- *has_pr3 = false;
if (optimus_funcs) {
uint32_t result;
@@ -280,8 +257,6 @@ static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out
(result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
(result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
(result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
-
- *has_pr3 = nouveau_pr3_present(pdev);
}
}
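
The inlined logic replaces nouveau_pr3_present(): when the upstream bridge is runtime-power-manageable, the PCI core's pci_pr3_present() decides whether power resources are used; otherwise D3cold is disabled and the driver stays on _DSM. The decision tree, restated as a standalone sketch:

static bool nouveau_want_pr3(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (!bridge)
		return false;		/* no parent port: stay on _DSM */
	if (!bridge->bridge_d3) {
		pci_d3cold_disable(pdev); /* userspace may re-enable later */
		return false;
	}
	return pci_pr3_present(bridge);	/* parent advertises _PR3 */
}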
@@ -385,59 +360,6 @@ void nouveau_unregister_dsm_handler(void) {}
void nouveau_switcheroo_optimus_dsm(void) {}
#endif
-/* retrieve the ROM in 4k blocks */
-static int nouveau_rom_call(acpi_handle rom_handle, uint8_t *bios,
- int offset, int len)
-{
- acpi_status status;
- union acpi_object rom_arg_elements[2], *obj;
- struct acpi_object_list rom_arg;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-
- rom_arg.count = 2;
- rom_arg.pointer = &rom_arg_elements[0];
-
- rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
- rom_arg_elements[0].integer.value = offset;
-
- rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
- rom_arg_elements[1].integer.value = len;
-
- status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
- if (ACPI_FAILURE(status)) {
- pr_info("failed to evaluate ROM got %s\n",
- acpi_format_exception(status));
- return -ENODEV;
- }
- obj = (union acpi_object *)buffer.pointer;
- len = min(len, (int)obj->buffer.length);
- memcpy(bios+offset, obj->buffer.pointer, len);
- kfree(buffer.pointer);
- return len;
-}
-
-bool nouveau_acpi_rom_supported(struct device *dev)
-{
- acpi_status status;
- acpi_handle dhandle, rom_handle;
-
- dhandle = ACPI_HANDLE(dev);
- if (!dhandle)
- return false;
-
- status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
- if (ACPI_FAILURE(status))
- return false;
-
- nouveau_dsm_priv.rom_handle = rom_handle;
- return true;
-}
-
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
- return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
-}
-
void *
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.h b/drivers/gpu/drm/nouveau/nouveau_acpi.h
index 1e6e8a8c0455..330f9b837066 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.h
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.h
@@ -10,8 +10,6 @@ bool nouveau_is_v1_dsm(void);
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
void nouveau_switcheroo_optimus_dsm(void);
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
void *nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline bool nouveau_is_optimus(void) { return false; };
@@ -19,8 +17,6 @@ static inline bool nouveau_is_v1_dsm(void) { return false; };
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline void nouveau_switcheroo_optimus_dsm(void) {}
-static inline bool nouveau_acpi_rom_supported(struct device *dev) { return false; }
-static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline void *nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return NULL; }
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 9a9a7f5003d3..ab2c2b2cab10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -38,6 +38,7 @@
#include "nouveau_reg.h"
#include "nouveau_drv.h"
#include "dispnv04/hw.h"
+#include "dispnv50/disp.h"
#include "nouveau_acpi.h"
#include "nouveau_display.h"
@@ -59,7 +60,6 @@ nouveau_conn_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &connector->probed_modes, head) {
- mode->vrefresh = drm_mode_vrefresh(mode);
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
@@ -80,12 +80,12 @@ nouveau_conn_native_mode(struct drm_connector *connector)
continue;
if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
- mode->vrefresh < high_v)
+ drm_mode_vrefresh(mode) < high_v)
continue;
high_w = mode->hdisplay;
high_h = mode->vdisplay;
- high_v = mode->vrefresh;
+ high_v = drm_mode_vrefresh(mode);
largest = mode;
}
@@ -509,7 +509,11 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
nv_connector->detected_encoder = nv_encoder;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
- connector->interlace_allowed = true;
+ if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
+ connector->interlace_allowed =
+ nv_encoder->caps.dp_interlace;
+ else
+ connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
@@ -1029,6 +1033,29 @@ get_tmds_link_bandwidth(struct drm_connector *connector)
return 112000 * duallink_scale;
}
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *mode,
+ const unsigned min_clock,
+ const unsigned max_clock,
+ unsigned int *clock_out)
+{
+ unsigned int clock = mode->clock;
+
+ if ((mode->flags & DRM_MODE_FLAG_3D_MASK) ==
+ DRM_MODE_FLAG_3D_FRAME_PACKING)
+ clock *= 2;
+
+ if (clock < min_clock)
+ return MODE_CLOCK_LOW;
+ if (clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (clock_out)
+ *clock_out = clock;
+
+ return MODE_OK;
+}
+
static enum drm_mode_status
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -1037,7 +1064,6 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
- unsigned clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
@@ -1060,25 +1086,14 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case DCB_OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
- max_clock = nv_encoder->dp.link_nr;
- max_clock *= nv_encoder->dp.link_bw;
- clock = clock * (connector->display_info.bpc * 3) / 10;
- break;
+ return nv50_dp_mode_valid(connector, nv_encoder, mode, NULL);
default:
BUG();
return MODE_BAD;
}
- if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
- clock *= 2;
-
- if (clock < min_clock)
- return MODE_CLOCK_LOW;
-
- if (clock > max_clock)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
+ return nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+ NULL);
}
static struct drm_encoder *
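
nouveau_conn_mode_clock_valid() factors out the one mode-dependent adjustment: HDMI 3D frame packing carries both eyes in a single frame, doubling the effective pixel clock before it is compared against the encoder limits. Worked example, using a 1080p24 frame-packed mode against a single-link TMDS limit:

	/* mode->clock == 74250 kHz; DRM_MODE_FLAG_3D_FRAME_PACKING doubles it */
	unsigned int clock;
	enum drm_mode_status status =
		nouveau_conn_mode_clock_valid(mode, 25000, 165000, &clock);
	/* status == MODE_OK, clock == 148500 (within the 165 MHz limit) */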
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index de84fb4708c7..9e062c7adec8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -195,6 +195,11 @@ int nouveau_conn_atomic_get_property(struct drm_connector *,
const struct drm_connector_state *,
struct drm_property *, u64 *);
struct drm_display_mode *nouveau_conn_native_mode(struct drm_connector *);
+enum drm_mode_status
+nouveau_conn_mode_clock_valid(const struct drm_display_mode *,
+ const unsigned min_clock,
+ const unsigned max_clock,
+ unsigned *clock);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_connector *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 15a3d40edf02..63b5c8cf9ae4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,8 +181,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
}
ret = pm_runtime_get_sync(drm->dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(drm->dev);
return ret;
+ }
+
ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
pm_runtime_put_autosuspend(drm->dev);
if (ret < 0)
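
The fix follows from pm_runtime_get_sync() semantics: the usage count is incremented even when the call fails, so every error path still owes a put. The canonical pattern:

	ret = pm_runtime_get_sync(drm->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put_autosuspend(drm->dev); /* drop the ref taken above */
		return ret;
	}
	/* ... runtime-resumed work ... */
	pm_runtime_put_autosuspend(drm->dev);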
@@ -217,7 +220,7 @@ static const struct nouveau_debugfs_files {
{"pstate", &nouveau_pstate_fops},
};
-int
+void
nouveau_drm_debugfs_init(struct drm_minor *minor)
{
struct nouveau_drm *drm = nouveau_drm(minor->dev);
@@ -240,12 +243,10 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
*/
dentry = debugfs_lookup("vbios.rom", minor->debugfs_root);
if (!dentry)
- return 0;
+ return;
d_inode(dentry)->i_size = drm->vbios.length;
dput(dentry);
-
- return 0;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
index 8909c010e8ea..77f0323b38ba 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h
@@ -18,15 +18,13 @@ nouveau_debugfs(struct drm_device *dev)
return nouveau_drm(dev)->debugfs;
}
-extern int nouveau_drm_debugfs_init(struct drm_minor *);
+extern void nouveau_drm_debugfs_init(struct drm_minor *);
extern int nouveau_debugfs_init(struct nouveau_drm *);
extern void nouveau_debugfs_fini(struct nouveau_drm *);
#else
-static inline int
+static inline void
nouveau_drm_debugfs_init(struct drm_minor *minor)
-{
- return 0;
-}
+{}
static inline int
nouveau_debugfs_init(struct nouveau_drm *drm)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 700817dc4fa0..901ac55506d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -31,6 +31,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -179,41 +180,164 @@ nouveau_display_vblank_init(struct drm_device *dev)
return 0;
}
+static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
+ .destroy = drm_gem_fb_destroy,
+ .create_handle = drm_gem_fb_create_handle,
+};
+
static void
-nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
+nouveau_decode_mod(struct nouveau_drm *drm,
+ uint64_t modifier,
+ uint32_t *tile_mode,
+ uint8_t *kind)
+{
+ BUG_ON(!tile_mode || !kind);
+
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ /* tile_mode will not be used in this case */
+ *tile_mode = 0;
+ *kind = 0;
+ } else {
+ /*
+ * Extract the block height and kind from the corresponding
+ * modifier fields. See drm_fourcc.h for details.
+ */
+ *tile_mode = (uint32_t)(modifier & 0xF);
+ *kind = (uint8_t)((modifier >> 12) & 0xFF);
+
+ if (drm->client.device.info.chipset >= 0xc0)
+ *tile_mode <<= 4;
+ }
+}
+
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
+ uint32_t *tile_mode,
+ uint8_t *kind)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ if (fb->flags & DRM_MODE_FB_MODIFIERS) {
+ struct nouveau_drm *drm = nouveau_drm(fb->dev);
- if (fb->nvbo)
- drm_gem_object_put_unlocked(&fb->nvbo->bo.base);
+ nouveau_decode_mod(drm, fb->modifier, tile_mode, kind);
+ } else {
+ const struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
- drm_framebuffer_cleanup(drm_fb);
- kfree(fb);
+ *tile_mode = nvbo->mode;
+ *kind = nvbo->kind;
+ }
}
static int
-nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
- struct drm_file *file_priv,
- unsigned int *handle)
+nouveau_validate_decode_mod(struct nouveau_drm *drm,
+ uint64_t modifier,
+ uint32_t *tile_mode,
+ uint8_t *kind)
{
- struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
+ struct nouveau_display *disp = nouveau_display(drm->dev);
+ int mod;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+ return -EINVAL;
+ }
- return drm_gem_handle_create(file_priv, &fb->nvbo->bo.base, handle);
+ BUG_ON(!disp->format_modifiers);
+
+ for (mod = 0;
+ (disp->format_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
+ (disp->format_modifiers[mod] != modifier);
+ mod++);
+
+ if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
+ return -EINVAL;
+
+ nouveau_decode_mod(drm, modifier, tile_mode, kind);
+
+ return 0;
}
-static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
- .destroy = nouveau_user_framebuffer_destroy,
- .create_handle = nouveau_user_framebuffer_create_handle,
-};
+static inline uint32_t
+nouveau_get_width_in_blocks(uint32_t stride)
+{
+ /* GOBs per block in the x direction is always one, and GOBs are
+ * 64 bytes wide
+ */
+ static const uint32_t log_block_width = 6;
+
+ return (stride + (1 << log_block_width) - 1) >> log_block_width;
+}
+
+static inline uint32_t
+nouveau_get_height_in_blocks(struct nouveau_drm *drm,
+ uint32_t height,
+ uint32_t log_block_height_in_gobs)
+{
+ uint32_t log_gob_height;
+ uint32_t log_block_height;
+
+ BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ log_gob_height = 2;
+ else
+ log_gob_height = 3;
+
+ log_block_height = log_block_height_in_gobs + log_gob_height;
+
+ return (height + (1 << log_block_height) - 1) >> log_block_height;
+}
+
+static int
+nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo,
+ uint32_t offset, uint32_t stride, uint32_t h,
+ uint32_t tile_mode)
+{
+ uint32_t gob_size, bw, bh;
+ uint64_t bl_size;
+
+ BUG_ON(drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA);
+
+ if (drm->client.device.info.chipset >= 0xc0) {
+ if (tile_mode & 0xF)
+ return -EINVAL;
+ tile_mode >>= 4;
+ }
+
+ if (tile_mode & 0xFFFFFFF0)
+ return -EINVAL;
+
+ if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
+ gob_size = 256;
+ else
+ gob_size = 512;
+
+ bw = nouveau_get_width_in_blocks(stride);
+ bh = nouveau_get_height_in_blocks(drm, h, tile_mode);
+
+ bl_size = bw * bh * (1 << tile_mode) * gob_size;
+
+ DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n",
+ offset, stride, h, tile_mode, bw, bh, gob_size, bl_size,
+ nvbo->bo.mem.size);
+
+ if (bl_size + offset > nvbo->bo.mem.size)
+ return -ERANGE;
+
+ return 0;
+}
int
nouveau_framebuffer_new(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
- struct nouveau_bo *nvbo,
- struct nouveau_framebuffer **pfb)
+ struct drm_gem_object *gem,
+ struct drm_framebuffer **pfb)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- struct nouveau_framebuffer *fb;
+ struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct drm_framebuffer *fb;
+ const struct drm_format_info *info;
+ unsigned int width, height, i;
+ uint32_t tile_mode;
+ uint8_t kind;
int ret;
/* YUV overlays have special requirements pre-NV50 */
@@ -236,13 +360,50 @@ nouveau_framebuffer_new(struct drm_device *dev,
return -EINVAL;
}
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ if (nouveau_validate_decode_mod(drm, mode_cmd->modifier[0],
+ &tile_mode, &kind)) {
+ DRM_DEBUG_KMS("Unsupported modifier: 0x%llx\n",
+ mode_cmd->modifier[0]);
+ return -EINVAL;
+ }
+ } else {
+ tile_mode = nvbo->mode;
+ kind = nvbo->kind;
+ }
+
+ info = drm_get_format_info(dev, mode_cmd);
+
+ for (i = 0; i < info->num_planes; i++) {
+ width = drm_format_info_plane_width(info,
+ mode_cmd->width,
+ i);
+ height = drm_format_info_plane_height(info,
+ mode_cmd->height,
+ i);
+
+ if (kind) {
+ ret = nouveau_check_bl_size(drm, nvbo,
+ mode_cmd->offsets[i],
+ mode_cmd->pitches[i],
+ height, tile_mode);
+ if (ret)
+ return ret;
+ } else {
+ uint32_t size = mode_cmd->pitches[i] * height;
+
+ if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size)
+ return -ERANGE;
+ }
+ }
+
if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL)))
return -ENOMEM;
- drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);
- fb->nvbo = nvbo;
+ drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);
+ fb->obj[0] = gem;
- ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs);
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret)
kfree(fb);
return ret;
@@ -253,21 +414,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct nouveau_framebuffer *fb;
- struct nouveau_bo *nvbo;
+ struct drm_framebuffer *fb;
struct drm_gem_object *gem;
int ret;
gem = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
- nvbo = nouveau_gem_object(gem);
- ret = nouveau_framebuffer_new(dev, mode_cmd, nvbo, &fb);
+ ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb);
if (ret == 0)
- return &fb->base;
+ return fb;
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return ERR_PTR(ret);
}
@@ -517,6 +676,7 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
+ dev->mode_config.allow_fb_modifiers = true;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
@@ -648,7 +808,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
return ret;
ret = drm_gem_handle_create(file_priv, &bo->bo.base, &args->handle);
- drm_gem_object_put_unlocked(&bo->bo.base);
+ drm_gem_object_put(&bo->bo.base);
return ret;
}
@@ -663,7 +823,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
*poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
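
nouveau_check_bl_size() bounds-checks a block-linear plane: GOBs are 64 bytes wide and 4 rows tall pre-Fermi (256-byte GOB) or 8 rows tall from Fermi on (512-byte GOB); a block is one GOB wide and 1 << tile_mode GOBs tall, where tile_mode has already been shifted back down on >= 0xc0 chips. Worked example for a Fermi+ surface:

	/* stride = 16384 bytes, height = 2160 rows, log2 block height = 4:
	 * bw      = DIV_ROUND_UP(16384, 64)     == 256 blocks
	 * bh      = DIV_ROUND_UP(2160, 8 << 4)  == 17 blocks
	 * bl_size = 256 * 17 * (1 << 4) * 512   == 35651584 bytes (34 MiB)
	 * offset + bl_size must not exceed nvbo->bo.mem.size. */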
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index de004018ab5c..6e0d900441d6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -8,26 +8,11 @@
#include <drm/drm_framebuffer.h>
-struct nouveau_framebuffer {
- struct drm_framebuffer base;
- struct nouveau_bo *nvbo;
- struct nouveau_vma *vma;
- u32 r_handle;
- u32 r_format;
- u32 r_pitch;
- struct nvif_object h_base[4];
- struct nvif_object h_core;
-};
-
-static inline struct nouveau_framebuffer *
-nouveau_framebuffer(struct drm_framebuffer *fb)
-{
- return container_of(fb, struct nouveau_framebuffer, base);
-}
-
-int nouveau_framebuffer_new(struct drm_device *,
- const struct drm_mode_fb_cmd2 *,
- struct nouveau_bo *, struct nouveau_framebuffer **);
+int
+nouveau_framebuffer_new(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *gem,
+ struct drm_framebuffer **pfb);
struct nouveau_display {
void *priv;
@@ -47,6 +32,8 @@ struct nouveau_display {
struct drm_property *color_vibrance_property;
struct drm_atomic_state *suspend;
+
+ const u64 *format_modifiers;
};
static inline struct nouveau_display *
@@ -75,6 +62,10 @@ int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+void
+nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode,
+ uint8_t *kind);
+
struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *,
const struct drm_mode_fb_cmd2 *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index ad89e09a0be3..e5c230d9ae24 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -25,12 +25,14 @@
#include "nouveau_dma.h"
#include "nouveau_mem.h"
#include "nouveau_bo.h"
+#include "nouveau_svm.h"
#include <nvif/class.h>
#include <nvif/object.h>
#include <nvif/if000c.h>
#include <nvif/if500b.h>
#include <nvif/if900b.h>
#include <linux/sched/mm.h>
#include <linux/hmm.h>
@@ -54,66 +56,69 @@ enum nouveau_aper {
typedef int (*nouveau_migrate_copy_t)(struct nouveau_drm *drm, u64 npages,
enum nouveau_aper, u64 dst_addr,
enum nouveau_aper, u64 src_addr);
+typedef int (*nouveau_clear_page_t)(struct nouveau_drm *drm, u32 length,
+ enum nouveau_aper, u64 dst_addr);
struct nouveau_dmem_chunk {
struct list_head list;
struct nouveau_bo *bo;
struct nouveau_drm *drm;
- unsigned long pfn_first;
unsigned long callocated;
- unsigned long bitmap[BITS_TO_LONGS(DMEM_CHUNK_NPAGES)];
- spinlock_t lock;
+ struct dev_pagemap pagemap;
};
struct nouveau_dmem_migrate {
nouveau_migrate_copy_t copy_func;
+ nouveau_clear_page_t clear_func;
struct nouveau_channel *chan;
};
struct nouveau_dmem {
struct nouveau_drm *drm;
- struct dev_pagemap pagemap;
struct nouveau_dmem_migrate migrate;
- struct list_head chunk_free;
- struct list_head chunk_full;
- struct list_head chunk_empty;
+ struct list_head chunks;
struct mutex mutex;
+ struct page *free_pages;
+ spinlock_t lock;
};
-static inline struct nouveau_dmem *page_to_dmem(struct page *page)
+static struct nouveau_dmem_chunk *nouveau_page_to_chunk(struct page *page)
{
- return container_of(page->pgmap, struct nouveau_dmem, pagemap);
+ return container_of(page->pgmap, struct nouveau_dmem_chunk, pagemap);
+}
+
+static struct nouveau_drm *page_to_drm(struct page *page)
+{
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+
+ return chunk->drm;
}
-static unsigned long nouveau_dmem_page_addr(struct page *page)
+unsigned long nouveau_dmem_page_addr(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ unsigned long off = (page_to_pfn(page) << PAGE_SHIFT) -
+ chunk->pagemap.res.start;
- return (idx << PAGE_SHIFT) + chunk->bo->bo.offset;
+ return chunk->bo->bo.offset + off;
}
static void nouveau_dmem_page_free(struct page *page)
{
- struct nouveau_dmem_chunk *chunk = page->zone_device_data;
- unsigned long idx = page_to_pfn(page) - chunk->pfn_first;
+ struct nouveau_dmem_chunk *chunk = nouveau_page_to_chunk(page);
+ struct nouveau_dmem *dmem = chunk->drm->dmem;
+
+ spin_lock(&dmem->lock);
+ page->zone_device_data = dmem->free_pages;
+ dmem->free_pages = page;
- /*
- * FIXME:
- *
- * This is really a bad example, we need to overhaul nouveau memory
- * management to be more page focus and allow lighter locking scheme
- * to be use in the process.
- */
- spin_lock(&chunk->lock);
- clear_bit(idx, chunk->bitmap);
WARN_ON(!chunk->callocated);
chunk->callocated--;
/*
* FIXME when chunk->callocated reach 0 we should add the chunk to
* a reclaim list so that it can be freed in case of memory pressure.
*/
- spin_unlock(&chunk->lock);
+ spin_unlock(&dmem->lock);
}
static void nouveau_dmem_fence_done(struct nouveau_fence **fence)
@@ -165,8 +170,8 @@ error_free_page:
static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
{
- struct nouveau_dmem *dmem = page_to_dmem(vmf->page);
- struct nouveau_drm *drm = dmem->drm;
+ struct nouveau_drm *drm = page_to_drm(vmf->page);
+ struct nouveau_dmem *dmem = drm->dmem;
struct nouveau_fence *fence;
unsigned long src = 0, dst = 0;
dma_addr_t dma_addr = 0;
@@ -209,131 +214,105 @@ static const struct dev_pagemap_ops nouveau_dmem_pagemap_ops = {
};
static int
-nouveau_dmem_chunk_alloc(struct nouveau_drm *drm)
+nouveau_dmem_chunk_alloc(struct nouveau_drm *drm, struct page **ppage)
{
struct nouveau_dmem_chunk *chunk;
+ struct resource *res;
+ struct page *page;
+ void *ptr;
+ unsigned long i, pfn_first;
int ret;
- if (drm->dmem == NULL)
- return -EINVAL;
-
- mutex_lock(&drm->dmem->mutex);
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
+ chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
- list_del(&chunk->list);
- mutex_unlock(&drm->dmem->mutex);
+ /* Allocate unused physical address space for device private pages. */
+ res = request_free_mem_region(&iomem_resource, DMEM_CHUNK_SIZE,
+ "nouveau_dmem");
+ if (IS_ERR(res)) {
+ ret = PTR_ERR(res);
+ goto out_free;
+ }
+
+ chunk->drm = drm;
+ chunk->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ chunk->pagemap.res = *res;
+ chunk->pagemap.ops = &nouveau_dmem_pagemap_ops;
+ chunk->pagemap.owner = drm->dev;
ret = nouveau_bo_new(&drm->client, DMEM_CHUNK_SIZE, 0,
TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL,
&chunk->bo);
if (ret)
- goto out;
+ goto out_release;
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- if (ret) {
- nouveau_bo_ref(NULL, &chunk->bo);
- goto out;
- }
+ if (ret)
+ goto out_bo_free;
- bitmap_zero(chunk->bitmap, DMEM_CHUNK_NPAGES);
- spin_lock_init(&chunk->lock);
+ ptr = memremap_pages(&chunk->pagemap, numa_node_id());
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ goto out_bo_unpin;
+ }
-out:
mutex_lock(&drm->dmem->mutex);
- if (chunk->bo)
- list_add(&chunk->list, &drm->dmem->chunk_empty);
- else
- list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
+ list_add(&chunk->list, &drm->dmem->chunks);
mutex_unlock(&drm->dmem->mutex);
- return ret;
-}
-
-static struct nouveau_dmem_chunk *
-nouveau_dmem_chunk_first_free_locked(struct nouveau_drm *drm)
-{
- struct nouveau_dmem_chunk *chunk;
-
- chunk = list_first_entry_or_null(&drm->dmem->chunk_free,
- struct nouveau_dmem_chunk,
- list);
- if (chunk)
- return chunk;
-
- chunk = list_first_entry_or_null(&drm->dmem->chunk_empty,
- struct nouveau_dmem_chunk,
- list);
- if (chunk->bo)
- return chunk;
-
- return NULL;
-}
-
-static int
-nouveau_dmem_pages_alloc(struct nouveau_drm *drm,
- unsigned long npages,
- unsigned long *pages)
-{
- struct nouveau_dmem_chunk *chunk;
- unsigned long c;
- int ret;
-
- memset(pages, 0xff, npages * sizeof(*pages));
-
- mutex_lock(&drm->dmem->mutex);
- for (c = 0; c < npages;) {
- unsigned long i;
-
- chunk = nouveau_dmem_chunk_first_free_locked(drm);
- if (chunk == NULL) {
- mutex_unlock(&drm->dmem->mutex);
- ret = nouveau_dmem_chunk_alloc(drm);
- if (ret) {
- if (c)
- return 0;
- return ret;
- }
- mutex_lock(&drm->dmem->mutex);
- continue;
- }
-
- spin_lock(&chunk->lock);
- i = find_first_zero_bit(chunk->bitmap, DMEM_CHUNK_NPAGES);
- while (i < DMEM_CHUNK_NPAGES && c < npages) {
- pages[c] = chunk->pfn_first + i;
- set_bit(i, chunk->bitmap);
- chunk->callocated++;
- c++;
-
- i = find_next_zero_bit(chunk->bitmap,
- DMEM_CHUNK_NPAGES, i);
- }
- spin_unlock(&chunk->lock);
+ pfn_first = chunk->pagemap.res.start >> PAGE_SHIFT;
+ page = pfn_to_page(pfn_first);
+ spin_lock(&drm->dmem->lock);
+ for (i = 0; i < DMEM_CHUNK_NPAGES - 1; ++i, ++page) {
+ page->zone_device_data = drm->dmem->free_pages;
+ drm->dmem->free_pages = page;
}
- mutex_unlock(&drm->dmem->mutex);
+ *ppage = page;
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+
+ NV_INFO(drm, "DMEM: registered %ldMB of device memory\n",
+ DMEM_CHUNK_SIZE >> 20);
return 0;
+
+out_bo_unpin:
+ nouveau_bo_unpin(chunk->bo);
+out_bo_free:
+ nouveau_bo_ref(NULL, &chunk->bo);
+out_release:
+ release_mem_region(chunk->pagemap.res.start,
+ resource_size(&chunk->pagemap.res));
+out_free:
+ kfree(chunk);
+out:
+ return ret;
}
static struct page *
nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm)
{
- unsigned long pfns[1];
- struct page *page;
+ struct nouveau_dmem_chunk *chunk;
+ struct page *page = NULL;
int ret;
- /* FIXME stop all the miss-match API ... */
- ret = nouveau_dmem_pages_alloc(drm, 1, pfns);
- if (ret)
- return NULL;
+ spin_lock(&drm->dmem->lock);
+ if (drm->dmem->free_pages) {
+ page = drm->dmem->free_pages;
+ drm->dmem->free_pages = page->zone_device_data;
+ chunk = nouveau_page_to_chunk(page);
+ chunk->callocated++;
+ spin_unlock(&drm->dmem->lock);
+ } else {
+ spin_unlock(&drm->dmem->lock);
+ ret = nouveau_dmem_chunk_alloc(drm, &page);
+ if (ret)
+ return NULL;
+ }
- page = pfn_to_page(pfns[0]);
get_page(page);
lock_page(page);
return page;
@@ -356,12 +335,7 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
return;
mutex_lock(&drm->dmem->mutex);
- list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
- ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
- /* FIXME handle pin failure */
- WARN_ON(ret);
- }
- list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ list_for_each_entry(chunk, &drm->dmem->chunks, list) {
ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
/* FIXME handle pin failure */
WARN_ON(ret);
@@ -378,12 +352,8 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
return;
mutex_lock(&drm->dmem->mutex);
- list_for_each_entry (chunk, &drm->dmem->chunk_free, list) {
- nouveau_bo_unpin(chunk->bo);
- }
- list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
+ list_for_each_entry(chunk, &drm->dmem->chunks, list)
nouveau_bo_unpin(chunk->bo);
- }
mutex_unlock(&drm->dmem->mutex);
}
@@ -397,15 +367,13 @@ nouveau_dmem_fini(struct nouveau_drm *drm)
mutex_lock(&drm->dmem->mutex);
- WARN_ON(!list_empty(&drm->dmem->chunk_free));
- WARN_ON(!list_empty(&drm->dmem->chunk_full));
-
- list_for_each_entry_safe (chunk, tmp, &drm->dmem->chunk_empty, list) {
- if (chunk->bo) {
- nouveau_bo_unpin(chunk->bo);
- nouveau_bo_ref(NULL, &chunk->bo);
- }
+ list_for_each_entry_safe(chunk, tmp, &drm->dmem->chunks, list) {
+ nouveau_bo_unpin(chunk->bo);
+ nouveau_bo_ref(NULL, &chunk->bo);
list_del(&chunk->list);
+ memunmap_pages(&chunk->pagemap);
+ release_mem_region(chunk->pagemap.res.start,
+ resource_size(&chunk->pagemap.res));
kfree(chunk);
}
@@ -472,6 +440,52 @@ nvc0b5_migrate_copy(struct nouveau_drm *drm, u64 npages,
}
static int
+nvc0b5_migrate_clear(struct nouveau_drm *drm, u32 length,
+ enum nouveau_aper dst_aper, u64 dst_addr)
+{
+ struct nouveau_channel *chan = drm->dmem->migrate.chan;
+ u32 launch_dma = (1 << 10) /* REMAP_ENABLE_TRUE */ |
+ (1 << 8) /* DST_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 7) /* SRC_MEMORY_LAYOUT_PITCH. */ |
+ (1 << 2) /* FLUSH_ENABLE_TRUE. */ |
+ (2 << 0) /* DATA_TRANSFER_TYPE_NON_PIPELINED. */;
+ u32 remap = (4 << 0) /* DST_X_CONST_A */ |
+ (5 << 4) /* DST_Y_CONST_B */ |
+ (3 << 16) /* COMPONENT_SIZE_FOUR */ |
+ (1 << 24) /* NUM_DST_COMPONENTS_TWO */;
+ int ret;
+
+ ret = RING_SPACE(chan, 12);
+ if (ret)
+ return ret;
+
+ switch (dst_aper) {
+ case NOUVEAU_APER_VRAM:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 0);
+ break;
+ case NOUVEAU_APER_HOST:
+ BEGIN_IMC0(chan, NvSubCopy, 0x0264, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ launch_dma |= 0x00002000; /* DST_TYPE_PHYSICAL. */
+
+ BEGIN_NVC0(chan, NvSubCopy, 0x0700, 3);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, 0);
+ OUT_RING(chan, remap);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0408, 2);
+ OUT_RING(chan, upper_32_bits(dst_addr));
+ OUT_RING(chan, lower_32_bits(dst_addr));
+ BEGIN_NVC0(chan, NvSubCopy, 0x0418, 1);
+ OUT_RING(chan, length >> 3);
+ BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
+ OUT_RING(chan, launch_dma);
+ return 0;
+}
+
+static int
nouveau_dmem_migrate_init(struct nouveau_drm *drm)
{
switch (drm->ttm.copy.oclass) {
@@ -480,6 +494,7 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
case VOLTA_DMA_COPY_A:
case TURING_DMA_COPY_A:
drm->dmem->migrate.copy_func = nvc0b5_migrate_copy;
+ drm->dmem->migrate.clear_func = nvc0b5_migrate_clear;
drm->dmem->migrate.chan = drm->ttm.chan;
return 0;
default:
@@ -491,9 +506,6 @@ nouveau_dmem_migrate_init(struct nouveau_drm *drm)
void
nouveau_dmem_init(struct nouveau_drm *drm)
{
- struct device *device = drm->dev->dev;
- struct resource *res;
- unsigned long i, size, pfn_first;
int ret;
/* This only makes sense on PASCAL or newer */
@@ -505,84 +517,53 @@ nouveau_dmem_init(struct nouveau_drm *drm)
drm->dmem->drm = drm;
mutex_init(&drm->dmem->mutex);
- INIT_LIST_HEAD(&drm->dmem->chunk_free);
- INIT_LIST_HEAD(&drm->dmem->chunk_full);
- INIT_LIST_HEAD(&drm->dmem->chunk_empty);
-
- size = ALIGN(drm->client.device.info.ram_user, DMEM_CHUNK_SIZE);
+ INIT_LIST_HEAD(&drm->dmem->chunks);
+ spin_lock_init(&drm->dmem->lock);
/* Initialize migration dma helpers before registering memory */
ret = nouveau_dmem_migrate_init(drm);
- if (ret)
- goto out_free;
-
- /*
- * FIXME we need some kind of policy to decide how much VRAM we
- * want to register with HMM. For now just register everything
- * and latter if we want to do thing like over commit then we
- * could revisit this.
- */
- res = devm_request_free_mem_region(device, &iomem_resource, size);
- if (IS_ERR(res))
- goto out_free;
- drm->dmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
- drm->dmem->pagemap.res = *res;
- drm->dmem->pagemap.ops = &nouveau_dmem_pagemap_ops;
- drm->dmem->pagemap.owner = drm->dev;
- if (IS_ERR(devm_memremap_pages(device, &drm->dmem->pagemap)))
- goto out_free;
-
- pfn_first = res->start >> PAGE_SHIFT;
- for (i = 0; i < (size / DMEM_CHUNK_SIZE); ++i) {
- struct nouveau_dmem_chunk *chunk;
- struct page *page;
- unsigned long j;
-
- chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
- if (chunk == NULL) {
- nouveau_dmem_fini(drm);
- return;
- }
-
- chunk->drm = drm;
- chunk->pfn_first = pfn_first + (i * DMEM_CHUNK_NPAGES);
- list_add_tail(&chunk->list, &drm->dmem->chunk_empty);
-
- page = pfn_to_page(chunk->pfn_first);
- for (j = 0; j < DMEM_CHUNK_NPAGES; ++j, ++page)
- page->zone_device_data = chunk;
+ if (ret) {
+ kfree(drm->dmem);
+ drm->dmem = NULL;
}
-
- NV_INFO(drm, "DMEM: registered %ldMB of device memory\n", size >> 20);
- return;
-out_free:
- kfree(drm->dmem);
- drm->dmem = NULL;
}
static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
- unsigned long src, dma_addr_t *dma_addr)
+ unsigned long src, dma_addr_t *dma_addr, u64 *pfn)
{
struct device *dev = drm->dev->dev;
struct page *dpage, *spage;
+ unsigned long paddr;
spage = migrate_pfn_to_page(src);
- if (!spage || !(src & MIGRATE_PFN_MIGRATE))
+ if (!(src & MIGRATE_PFN_MIGRATE))
goto out;
dpage = nouveau_dmem_page_alloc_locked(drm);
if (!dpage)
- return 0;
-
- *dma_addr = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, *dma_addr))
- goto out_free_page;
+ goto out;
- if (drm->dmem->migrate.copy_func(drm, 1, NOUVEAU_APER_VRAM,
- nouveau_dmem_page_addr(dpage), NOUVEAU_APER_HOST,
- *dma_addr))
- goto out_dma_unmap;
+ paddr = nouveau_dmem_page_addr(dpage);
+ if (spage) {
+ *dma_addr = dma_map_page(dev, spage, 0, page_size(spage),
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev, *dma_addr))
+ goto out_free_page;
+ if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+ NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
+ goto out_dma_unmap;
+ } else {
+ *dma_addr = DMA_MAPPING_ERROR;
+ if (drm->dmem->migrate.clear_func(drm, page_size(dpage),
+ NOUVEAU_APER_VRAM, paddr))
+ goto out_free_page;
+ }
+ *pfn = NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM |
+ ((paddr >> PAGE_SHIFT) << NVIF_VMM_PFNMAP_V0_ADDR_SHIFT);
+ if (src & MIGRATE_PFN_WRITE)
+ *pfn |= NVIF_VMM_PFNMAP_V0_W;
return migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
out_dma_unmap:
@@ -590,19 +571,21 @@ out_dma_unmap:
out_free_page:
nouveau_dmem_page_free_locked(drm, dpage);
out:
+ *pfn = NVIF_VMM_PFNMAP_V0_NONE;
return 0;
}
static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
- struct migrate_vma *args, dma_addr_t *dma_addrs)
+ struct nouveau_svmm *svmm, struct migrate_vma *args,
+ dma_addr_t *dma_addrs, u64 *pfns)
{
struct nouveau_fence *fence;
unsigned long addr = args->start, nr_dma = 0, i;
for (i = 0; addr < args->end; i++) {
args->dst[i] = nouveau_dmem_migrate_copy_one(drm, args->src[i],
- dma_addrs + nr_dma);
- if (args->dst[i])
+ dma_addrs + nr_dma, pfns + i);
+ if (!dma_mapping_error(drm->dev->dev, dma_addrs[nr_dma]))
nr_dma++;
addr += PAGE_SIZE;
}
@@ -610,20 +593,18 @@ static void nouveau_dmem_migrate_chunk(struct nouveau_drm *drm,
nouveau_fence_new(drm->dmem->migrate.chan, false, &fence);
migrate_vma_pages(args);
nouveau_dmem_fence_done(&fence);
+ nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
while (nr_dma--) {
dma_unmap_page(drm->dev->dev, dma_addrs[nr_dma], PAGE_SIZE,
DMA_BIDIRECTIONAL);
}
- /*
- * FIXME optimization: update GPU page table to point to newly migrated
- * memory.
- */
migrate_vma_finalize(args);
}
int
nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
@@ -635,9 +616,13 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
.vma = vma,
.start = start,
};
- unsigned long c, i;
+ unsigned long i;
+ u64 *pfns;
int ret = -ENOMEM;
+ if (drm->dmem == NULL)
+ return -ENODEV;
+
args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
if (!args.src)
goto out;
@@ -649,19 +634,25 @@ nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
if (!dma_addrs)
goto out_free_dst;
- for (i = 0; i < npages; i += c) {
- c = min(SG_MAX_SINGLE_ALLOC, npages);
- args.end = start + (c << PAGE_SHIFT);
+ pfns = nouveau_pfns_alloc(max);
+ if (!pfns)
+ goto out_free_dma;
+
+ for (i = 0; i < npages; i += max) {
+ args.end = start + (max << PAGE_SHIFT);
ret = migrate_vma_setup(&args);
if (ret)
- goto out_free_dma;
+ goto out_free_pfns;
if (args.cpages)
- nouveau_dmem_migrate_chunk(drm, &args, dma_addrs);
+ nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
+ pfns);
args.start = args.end;
}
ret = 0;
+out_free_pfns:
+ nouveau_pfns_free(pfns);
out_free_dma:
kfree(dma_addrs);
out_free_dst:
@@ -671,28 +662,3 @@ out_free_src:
out:
return ret;
}
-
-void
-nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range)
-{
- unsigned long i, npages;
-
- npages = (range->end - range->start) >> PAGE_SHIFT;
- for (i = 0; i < npages; ++i) {
- struct page *page;
- uint64_t addr;
-
- page = hmm_device_entry_to_page(range, range->pfns[i]);
- if (page == NULL)
- continue;
-
- if (!is_device_private_page(page))
- continue;
-
- addr = nouveau_dmem_page_addr(page);
- range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
- range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
- range->pfns[i] |= NVIF_VMM_PFNMAP_V0_VRAM;
- }
-}
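
The rework replaces the per-chunk bitmap allocator with a single free list of device-private pages threaded through page->zone_device_data, and gives each on-demand chunk its own dev_pagemap so the page-to-chunk lookup is a plain container_of(). The list discipline, as a sketch (helper names are illustrative):

static void dmem_page_push(struct nouveau_dmem *dmem, struct page *page)
{
	spin_lock(&dmem->lock);
	page->zone_device_data = dmem->free_pages;
	dmem->free_pages = page;
	spin_unlock(&dmem->lock);
}

static struct page *dmem_page_pop(struct nouveau_dmem *dmem)
{
	struct page *page;

	spin_lock(&dmem->lock);
	page = dmem->free_pages;
	if (page)
		dmem->free_pages = page->zone_device_data;
	spin_unlock(&dmem->lock);
	return page;	/* NULL: caller allocates a fresh chunk */
}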
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.h b/drivers/gpu/drm/nouveau/nouveau_dmem.h
index 92394be5d649..64da5d3635c8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.h
@@ -25,6 +25,7 @@
struct drm_device;
struct drm_file;
struct nouveau_drm;
+struct nouveau_svmm;
struct hmm_range;
#if IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM)
@@ -34,12 +35,12 @@ void nouveau_dmem_suspend(struct nouveau_drm *);
void nouveau_dmem_resume(struct nouveau_drm *);
int nouveau_dmem_migrate_vma(struct nouveau_drm *drm,
+ struct nouveau_svmm *svmm,
struct vm_area_struct *vma,
unsigned long start,
unsigned long end);
+unsigned long nouveau_dmem_page_addr(struct page *page);
-void nouveau_dmem_convert_pfn(struct nouveau_drm *drm,
- struct hmm_range *range);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_dmem_init(struct nouveau_drm *drm) {}
static inline void nouveau_dmem_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index 2674f1587457..8a0f7994e1ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -98,3 +98,34 @@ nouveau_dp_detect(struct nouveau_encoder *nv_encoder)
return NOUVEAU_DP_SST;
return ret;
}
+
+/* TODO:
+ * - Use the minimum possible BPC here, once we add support for the max bpc
+ * property.
+ * - Validate the mode against downstream port caps (see
+ * drm_dp_downstream_max_clock())
+ * - Validate against the DP caps advertised by the GPU (we don't check these
+ * yet)
+ */
+enum drm_mode_status
+nv50_dp_mode_valid(struct drm_connector *connector,
+ struct nouveau_encoder *outp,
+ const struct drm_display_mode *mode,
+ unsigned *out_clock)
+{
+ const unsigned min_clock = 25000;
+ unsigned max_clock, clock;
+ enum drm_mode_status ret;
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE && !outp->caps.dp_interlace)
+ return MODE_NO_INTERLACE;
+
+ max_clock = outp->dp.link_nr * outp->dp.link_bw;
+ clock = mode->clock * (connector->display_info.bpc * 3) / 10;
+
+ ret = nouveau_conn_mode_clock_valid(mode, min_clock, max_clock,
+ &clock);
+ if (out_clock)
+ *out_clock = clock;
+ return ret;
+}
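
The bandwidth check keeps nouveau's historical units: dp.link_bw is the per-lane symbol clock in kHz (27000 * the DPCD link-rate code), so link_nr * link_bw * 10 is the raw link bit rate in kbps, and a mode needs clock * bpc * 3 kbps, hence the / 10. Worked example, assuming those units:

	/* 4 lanes of HBR2: link_nr == 4, link_bw == 540000, budget == 2160000.
	 * A 533250 kHz pixel clock at 8 bpc needs
	 *	533250 * (8 * 3) / 10 == 1279800
	 * which fits, so the mode passes on to the clock-range check. */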
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ca4087f5a15b..ac93d12201dc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -681,8 +681,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
{
struct nvkm_device *device;
struct drm_device *drm_dev;
- struct apertures_struct *aper;
- bool boot = false;
int ret;
if (vga_switcheroo_client_probe_defer(pdev))
@@ -699,32 +697,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
nvkm_device_del(&device);
/* Remove conflicting drivers (vesafb, efifb etc). */
- aper = alloc_apertures(3);
- if (!aper)
- return -ENOMEM;
-
- aper->ranges[0].base = pci_resource_start(pdev, 1);
- aper->ranges[0].size = pci_resource_len(pdev, 1);
- aper->count = 1;
-
- if (pci_resource_len(pdev, 2)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 2);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 2);
- aper->count++;
- }
-
- if (pci_resource_len(pdev, 3)) {
- aper->ranges[aper->count].base = pci_resource_start(pdev, 3);
- aper->ranges[aper->count].size = pci_resource_len(pdev, 3);
- aper->count++;
- }
-
-#ifdef CONFIG_X86
- boot = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- if (nouveau_modeset != 2)
- drm_fb_helper_remove_conflicting_framebuffers(aper, "nouveaufb", boot);
- kfree(aper);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+ if (ret)
+ return ret;
ret = nvkm_device_pci_new(pdev, nouveau_config, nouveau_debug,
true, true, ~0ULL, &device);
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index 3517f920bf89..a72c412ac8b1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -52,6 +52,7 @@ struct nouveau_encoder {
* actually programmed on the hw, not the proposed crtc */
struct drm_crtc *crtc;
u32 ctrl;
+ bool audio;
struct drm_display_mode mode;
int last_dpms;
@@ -66,6 +67,10 @@ struct nouveau_encoder {
} dp;
};
+ struct {
+ bool dp_interlace : 1;
+ } caps;
+
void (*enc_save)(struct drm_encoder *encoder);
void (*enc_restore)(struct drm_encoder *encoder);
void (*update)(struct nouveau_encoder *, u8 head,
@@ -100,6 +105,10 @@ enum nouveau_dp_status {
};
int nouveau_dp_detect(struct nouveau_encoder *);
+enum drm_mode_status nv50_dp_mode_valid(struct drm_connector *,
+ struct nouveau_encoder *,
+ const struct drm_display_mode *,
+ unsigned *clock);
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 24d543a01f43..3d11b84d4cf9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -312,7 +312,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct fb_info *info;
- struct nouveau_framebuffer *fb;
+ struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
@@ -335,7 +335,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
goto out;
}
- ret = nouveau_framebuffer_new(dev, &mode_cmd, nvbo, &fb);
+ ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
if (ret)
goto out_unref;
@@ -353,7 +353,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
chan = nouveau_nofbaccel ? NULL : drm->channel;
if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
- ret = nouveau_vma_new(nvbo, chan->vmm, &fb->vma);
+ ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
if (ret) {
NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
chan = NULL;
@@ -367,7 +367,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
}
/* setup helper */
- fbcon->helper.fb = &fb->base;
+ fbcon->helper.fb = fb;
if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
@@ -376,12 +376,12 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
- info->fix.smem_start = fb->nvbo->bo.mem.bus.base +
- fb->nvbo->bo.mem.bus.offset;
- info->fix.smem_len = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->fix.smem_start = nvbo->bo.mem.bus.base +
+ nvbo->bo.mem.bus.offset;
+ info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
- info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
+ info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
@@ -393,19 +393,19 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
/* To allow resizing without swapping buffers */
NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
- fb->base.width, fb->base.height, fb->nvbo->bo.offset, nvbo);
+ fb->width, fb->height, nvbo->bo.offset, nvbo);
vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unlock:
if (chan)
- nouveau_vma_del(&fb->vma);
- nouveau_bo_unmap(fb->nvbo);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
out_unpin:
- nouveau_bo_unpin(fb->nvbo);
+ nouveau_bo_unpin(nvbo);
out_unref:
- nouveau_bo_ref(NULL, &fb->nvbo);
+ nouveau_bo_ref(NULL, &nvbo);
out:
return ret;
}
@@ -413,16 +413,18 @@ out:
static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
- struct nouveau_framebuffer *nouveau_fb = nouveau_framebuffer(fbcon->helper.fb);
+ struct drm_framebuffer *fb = fbcon->helper.fb;
+ struct nouveau_bo *nvbo;
drm_fb_helper_unregister_fbi(&fbcon->helper);
drm_fb_helper_fini(&fbcon->helper);
- if (nouveau_fb && nouveau_fb->nvbo) {
- nouveau_vma_del(&nouveau_fb->vma);
- nouveau_bo_unmap(nouveau_fb->nvbo);
- nouveau_bo_unpin(nouveau_fb->nvbo);
- drm_framebuffer_put(&nouveau_fb->base);
+ if (fb && fb->obj[0]) {
+ nvbo = nouveau_gem_object(fb->obj[0]);
+ nouveau_vma_del(&fbcon->vma);
+ nouveau_bo_unmap(nvbo);
+ nouveau_bo_unpin(nvbo);
+ drm_framebuffer_put(fb);
}
return 0;
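The struct nouveau_framebuffer wrapper is being retired in favor of plain struct drm_framebuffer, with per-fbdev state such as the channel VMA moving into struct nouveau_fbdev (see the nouveau_fbcon.h hunk below). The lookup that replaces the wrapper, condensed from the destroy path above:

	/* the BO now comes from the GEM object that drm_framebuffer
	 * tracks in fb->obj[0], rather than from a driver wrapper */
	struct drm_framebuffer *fb = fbcon->helper.fb;
	struct nouveau_bo *nvbo = nouveau_gem_object(fb->obj[0]);
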
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index 73a7eeba3973..1796d8824580 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -31,6 +31,8 @@
#include "nouveau_display.h"
+struct nouveau_vma;
+
struct nouveau_fbdev {
struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
@@ -41,6 +43,7 @@ struct nouveau_fbdev {
struct nvif_object gdi;
struct nvif_object blit;
struct nvif_object twod;
+ struct nouveau_vma *vma;
struct mutex hotplug_lock;
bool hotplug_waiting;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index f5ece1f94973..db61f3db96ea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -76,8 +76,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
return ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0 && ret != -EACCES)
+ if (ret < 0 && ret != -EACCES) {
+ pm_runtime_put_autosuspend(dev);
goto out;
+ }
ret = nouveau_vma_new(nvbo, vmm, &vma);
pm_runtime_mark_last_busy(dev);
@@ -157,8 +159,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
if (!WARN_ON(ret < 0 && ret != -EACCES)) {
nouveau_gem_object_unmap(nvbo, vma);
pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
}
+ pm_runtime_put_autosuspend(dev);
}
}
ttm_bo_unreserve(&nvbo->bo);
@@ -279,7 +281,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&nvbo->bo.base);
+ drm_gem_object_put(&nvbo->bo.base);
return ret;
}
@@ -358,7 +360,7 @@ validate_fini_no_ticket(struct validate_op *op, struct nouveau_channel *chan,
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
- drm_gem_object_put_unlocked(&nvbo->bo.base);
+ drm_gem_object_put(&nvbo->bo.base);
}
}
@@ -405,14 +407,14 @@ retry:
nvbo = nouveau_gem_object(gem);
if (nvbo == res_bo) {
res_bo = NULL;
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
continue;
}
if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
NV_PRINTK(err, cli, "multiple instances of buffer %d on "
"validation list\n", b->handle);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
ret = -EINVAL;
break;
}
@@ -929,7 +931,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
ret = lret;
nouveau_bo_sync_for_cpu(nvbo);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return ret;
}
@@ -948,7 +950,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
nvbo = nouveau_gem_object(gem);
nouveau_bo_sync_for_device(nvbo);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
@@ -965,7 +967,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -ENOENT;
ret = nouveau_gem_info(file_priv, gem, req);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return ret;
}
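The first two hunks above follow the pm_runtime reference rule: pm_runtime_get_sync() raises the device usage count even when it fails, so every call must be balanced with a put on all paths, including error paths. A minimal sketch of the pattern (the function is hypothetical; the pm_runtime calls are the real API):

	static int do_work(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);
		if (ret < 0 && ret != -EACCES) {
			pm_runtime_put_autosuspend(dev); /* drop the ref taken above */
			return ret;
		}
		/* ... device is powered; do the work ... */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}

The remaining hunks are mechanical: drm_gem_object_put_unlocked() was renamed to drm_gem_object_put() once the locked variant went away.
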
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 039e23548e08..23cd43a7fd19 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -95,14 +95,3 @@ struct platform_driver nouveau_platform_driver = {
.probe = nouveau_platform_probe,
.remove = nouveau_platform_remove,
};
-
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
-MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
-MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
-MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
-#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 645fedd77e21..ba9f9359c30e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -70,6 +70,12 @@ struct nouveau_svm {
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
+struct nouveau_pfnmap_args {
+ struct nvif_ioctl_v0 i;
+ struct nvif_ioctl_mthd_v0 m;
+ struct nvif_vmm_pfnmap_v0 p;
+};
+
struct nouveau_ivmm {
struct nouveau_svmm *svmm;
u64 inst;
@@ -169,10 +175,10 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
mm = get_task_mm(current);
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
if (!cli->svm.svmm) {
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return -EINVAL;
}
@@ -187,7 +193,8 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
- nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
+ nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
+ next);
addr = next;
}
@@ -198,7 +205,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
*/
args->result = 0;
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
return 0;
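These hunks are part of the tree-wide conversion from open-coded rwsem operations on mm->mmap_sem to the mmap locking API; the wrappers take the mm_struct directly and keep call sites independent of how the lock is implemented. The mapping is one-to-one:

	mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem)   */
	mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem)     */
	mmap_write_lock(mm);	/* was: down_write(&mm->mmap_sem)  */
	mmap_write_unlock(mm);	/* was: up_write(&mm->mmap_sem)    */
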
@@ -348,7 +355,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
if (ret)
goto out_free;
- down_write(&current->mm->mmap_sem);
+ mmap_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
@@ -357,31 +364,18 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
cli->svm.svmm = svmm;
cli->svm.cli = cli;
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
- up_write(&current->mm->mmap_sem);
+ mmap_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
return ret;
}
-static const u64
-nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
- [HMM_PFN_VALID ] = NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_WRITE ] = NVIF_VMM_PFNMAP_V0_W,
-};
-
-static const u64
-nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
- [HMM_PFN_ERROR ] = ~NVIF_VMM_PFNMAP_V0_V,
- [HMM_PFN_NONE ] = NVIF_VMM_PFNMAP_V0_NONE,
- [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
-};
-
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
@@ -519,9 +513,45 @@ static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
.invalidate = nouveau_svm_range_invalidate,
};
+static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
+ struct hmm_range *range, u64 *ioctl_addr)
+{
+ unsigned long i, npages;
+
+ /*
+ * The ioctl_addr prepared here is passed through nvif_object_ioctl()
+ * to an eventual DMA map in something like gp100_vmm_pgt_pfn()
+ *
+ * This is all just encoding the internal hmm representation into a
+ * different nouveau internal representation.
+ */
+ npages = (range->end - range->start) >> PAGE_SHIFT;
+ for (i = 0; i < npages; ++i) {
+ struct page *page;
+
+ if (!(range->hmm_pfns[i] & HMM_PFN_VALID)) {
+ ioctl_addr[i] = 0;
+ continue;
+ }
+
+ page = hmm_pfn_to_page(range->hmm_pfns[i]);
+ if (is_device_private_page(page))
+ ioctl_addr[i] = nouveau_dmem_page_addr(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_VRAM;
+ else
+ ioctl_addr[i] = page_to_phys(page) |
+ NVIF_VMM_PFNMAP_V0_V |
+ NVIF_VMM_PFNMAP_V0_HOST;
+ if (range->hmm_pfns[i] & HMM_PFN_WRITE)
+ ioctl_addr[i] |= NVIF_VMM_PFNMAP_V0_W;
+ }
+}
+
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm, void *data, u32 size,
- u64 *pfns, struct svm_notifier *notifier)
+ unsigned long hmm_pfns[], u64 *ioctl_addr,
+ struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
@@ -530,26 +560,27 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.notifier = &notifier->notifier,
.start = notifier->notifier.interval_tree.start,
.end = notifier->notifier.interval_tree.last + 1,
- .pfns = pfns,
- .flags = nouveau_svm_pfn_flags,
- .values = nouveau_svm_pfn_values,
- .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
+ .pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
+ .hmm_pfns = hmm_pfns,
};
struct mm_struct *mm = notifier->notifier.mm;
- long ret;
+ int ret;
while (true) {
if (time_after(jiffies, timeout))
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
- range.default_flags = 0;
- range.pfn_flags_mask = -1UL;
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
ret = hmm_range_fault(&range);
- up_read(&mm->mmap_sem);
- if (ret <= 0) {
- if (ret == 0 || ret == -EBUSY)
+ mmap_read_unlock(mm);
+ if (ret) {
+ /*
+ * FIXME: the input PFN_REQ flags are destroyed on
+ * -EBUSY, we need to regenerate them, also for the
+ * other continue below
+ */
+ if (ret == -EBUSY)
continue;
return ret;
}
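hmm_range_fault() changed contract here: it now returns 0 on success rather than a count, takes plain HMM_PFN_* bits in range->hmm_pfns[], and leaves encoding device-specific PFN formats to the driver (nouveau_hmm_convert_pfn() above). The surrounding loop is the canonical retry pattern for mmu interval notifiers; a condensed sketch, assuming the svmm mutex guards against invalidation races as in the code above:

	/* retry until the notifier sequence is stable under our lock */
	while (true) {
		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* collision; retry */
			return ret;
		}
		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;		/* invalidated; retry */
		}
		break;				/* sequence still valid */
	}
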
@@ -563,7 +594,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
break;
}
- nouveau_dmem_convert_pfn(drm, &range);
+ nouveau_hmm_convert_pfn(drm, &range, ioctl_addr);
svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
@@ -590,6 +621,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
} i;
u64 phys[16];
} args;
+ unsigned long hmm_pfns[ARRAY_SIZE(args.phys)];
struct vm_area_struct *vma;
u64 inst, start, limit;
int fi, fn, pi, fill;
@@ -673,18 +705,18 @@ nouveau_svm_fault(struct nvif_notify *notify)
/* Intersect fault window with the CPU VMA, cancelling
* the fault if the address is invalid.
*/
- down_read(&mm->mmap_sem);
+ mmap_read_lock(mm);
vma = find_vma_intersection(mm, start, limit);
if (!vma) {
SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
mmput(mm);
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
start = max_t(u64, start, vma->vm_start);
limit = min_t(u64, limit, vma->vm_end);
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
if (buffer->fault[fi]->addr != start) {
@@ -705,12 +737,17 @@ nouveau_svm_fault(struct nvif_notify *notify)
* access flags.
*XXX: atomic?
*/
- if (buffer->fault[fn]->access != 0 /* READ. */ &&
- buffer->fault[fn]->access != 3 /* PREFETCH. */) {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
- NVIF_VMM_PFNMAP_V0_W;
- } else {
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
+ switch (buffer->fault[fn]->access) {
+ case 0: /* READ. */
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT;
+ break;
+ case 3: /* PREFETCH. */
+ hmm_pfns[pi++] = 0;
+ break;
+ default:
+ hmm_pfns[pi++] = HMM_PFN_REQ_FAULT |
+ HMM_PFN_REQ_WRITE;
+ break;
}
args.i.p.size = pi << PAGE_SHIFT;
@@ -738,7 +775,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
fill = (buffer->fault[fn ]->addr -
buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
while (--fill)
- args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
+ hmm_pfns[pi++] = 0;
}
SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
@@ -754,7 +791,7 @@ nouveau_svm_fault(struct nvif_notify *notify)
ret = nouveau_range_fault(
svmm, svm->drm, &args,
sizeof(args.i) + pi * sizeof(args.phys[0]),
- args.phys, &notifier);
+ hmm_pfns, args.phys, &notifier);
mmu_interval_notifier_remove(&notifier.notifier);
}
mmput(mm);
@@ -784,6 +821,56 @@ nouveau_svm_fault(struct nvif_notify *notify)
return NVIF_NOTIFY_KEEP;
}
+static struct nouveau_pfnmap_args *
+nouveau_pfns_to_args(void *pfns)
+{
+ return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
+}
+
+u64 *
+nouveau_pfns_alloc(unsigned long npages)
+{
+ struct nouveau_pfnmap_args *args;
+
+ args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
+ if (!args)
+ return NULL;
+
+ args->i.type = NVIF_IOCTL_V0_MTHD;
+ args->m.method = NVIF_VMM_V0_PFNMAP;
+ args->p.page = PAGE_SHIFT;
+
+ return args->p.phys;
+}
+
+void
+nouveau_pfns_free(u64 *pfns)
+{
+ struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+
+ kfree(args);
+}
+
+void
+nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+ unsigned long addr, u64 *pfns, unsigned long npages)
+{
+ struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
+ int ret;
+
+ args->p.addr = addr;
+ args->p.size = npages << PAGE_SHIFT;
+
+ mutex_lock(&svmm->mutex);
+
+ svmm->vmm->vmm.object.client->super = true;
+ ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
+ npages * sizeof(args->p.phys[0]), NULL);
+ svmm->vmm->vmm.object.client->super = false;
+
+ mutex_unlock(&svmm->mutex);
+}
+
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
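nouveau_pfns_alloc() hands callers only the flexible p.phys[] array, so nouveau_pfns_to_args() uses container_of() to get back to the enclosing ioctl arguments, and struct_size() computes the allocation size with overflow checking. A condensed sketch of the pattern with illustrative stand-in types:

	struct args {
		u32 hdr_a, hdr_b;	/* stand-ins for the nvif headers */
		u64 phys[];		/* flexible array handed to callers */
	};

	static struct args *to_args(void *pfns)
	{
		return container_of(pfns, struct args, phys);
	}

	/* header plus npages entries, with overflow checking */
	struct args *a = kzalloc(struct_size(a, phys, npages), GFP_KERNEL);
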
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.h b/drivers/gpu/drm/nouveau/nouveau_svm.h
index e839d8189461..f0fcd1b72e8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.h
@@ -18,6 +18,11 @@ void nouveau_svmm_fini(struct nouveau_svmm **);
int nouveau_svmm_join(struct nouveau_svmm *, u64 inst);
void nouveau_svmm_part(struct nouveau_svmm *, u64 inst);
int nouveau_svmm_bind(struct drm_device *, void *, struct drm_file *);
+
+u64 *nouveau_pfns_alloc(unsigned long npages);
+void nouveau_pfns_free(u64 *pfns);
+void nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
+ unsigned long addr, u64 *pfns, unsigned long npages);
#else /* IS_ENABLED(CONFIG_DRM_NOUVEAU_SVM) */
static inline void nouveau_svm_init(struct nouveau_drm *drm) {}
static inline void nouveau_svm_fini(struct nouveau_drm *drm) {}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index facd18564e0d..47428f79ede8 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -149,7 +149,6 @@ int
nv50_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct drm_device *dev = nfbdev->helper.dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
@@ -240,8 +239,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma->addr));
- OUT_RING(chan, lower_32_bits(fb->vma->addr));
+ OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NV04(chan, NvSub2D, 0x0230, 2);
OUT_RING(chan, format);
OUT_RING(chan, 1);
@@ -249,8 +248,8 @@ nv50_fbcon_accel_init(struct fb_info *info)
OUT_RING(chan, info->fix.line_length);
OUT_RING(chan, info->var.xres_virtual);
OUT_RING(chan, info->var.yres_virtual);
- OUT_RING(chan, upper_32_bits(fb->vma->addr));
- OUT_RING(chan, lower_32_bits(fb->vma->addr));
+ OUT_RING(chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING(chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING(chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index c0deef4fe727..cb56163ed608 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -150,7 +150,6 @@ nvc0_fbcon_accel_init(struct fb_info *info)
{
struct nouveau_fbdev *nfbdev = info->par;
struct drm_device *dev = nfbdev->helper.dev;
- struct nouveau_framebuffer *fb = nouveau_framebuffer(nfbdev->helper.fb);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_channel *chan = drm->channel;
int ret, format;
@@ -240,8 +239,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma->addr));
- OUT_RING (chan, lower_32_bits(fb->vma->addr));
+ OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
BEGIN_NVC0(chan, NvSub2D, 0x0230, 10);
OUT_RING (chan, format);
OUT_RING (chan, 1);
@@ -251,8 +250,8 @@ nvc0_fbcon_accel_init(struct fb_info *info)
OUT_RING (chan, info->fix.line_length);
OUT_RING (chan, info->var.xres_virtual);
OUT_RING (chan, info->var.yres_virtual);
- OUT_RING (chan, upper_32_bits(fb->vma->addr));
- OUT_RING (chan, lower_32_bits(fb->vma->addr));
+ OUT_RING (chan, upper_32_bits(nfbdev->vma->addr));
+ OUT_RING (chan, lower_32_bits(nfbdev->vma->addr));
FIRE_RING (chan);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/memory.c b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
index 4cc186262d34..38130ef272d6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/memory.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/memory.c
@@ -140,7 +140,7 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
{
struct nvkm_instmem *imem = device->imem;
struct nvkm_memory *memory;
- int ret = -ENOSYS;
+ int ret;
if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
return -ENOSYS;
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 79a8f9d305c5..49d468b45d3f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -221,3 +221,14 @@ nvkm_subdev_ctor(const struct nvkm_subdev_func *func,
__mutex_init(&subdev->mutex, name, &nvkm_subdev_lock_class[index]);
subdev->debug = nvkm_dbgopt(device->dbgopt, name);
}
+
+int
+nvkm_subdev_new_(const struct nvkm_subdev_func *func,
+ struct nvkm_device *device, int index,
+ struct nvkm_subdev **psubdev)
+{
+ if (!(*psubdev = kzalloc(sizeof(**psubdev), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(func, device, index, *psubdev);
+ return 0;
+}
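nvkm_subdev_new_() factors out the kzalloc-plus-constructor boilerplate that every plain subdev constructor repeated. As the ibus hunks later in this diff show, the callers collapse to a single line:

	int
	gf100_ibus_new(struct nvkm_device *device, int index,
		       struct nvkm_subdev **pibus)
	{
		return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
	}
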
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 8ebbe1656008..5b90c2a1bf3d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2924,6 +2924,20 @@ nvkm_device_del(struct nvkm_device **pdevice)
}
}
+static inline bool
+nvkm_device_endianness(struct nvkm_device *device)
+{
+ u32 boot1 = nvkm_rd32(device, 0x000004) & 0x01000001;
+#ifdef __BIG_ENDIAN
+ if (!boot1)
+ return false;
+#else
+ if (boot1)
+ return false;
+#endif
+ return true;
+}
+
int
nvkm_device_ctor(const struct nvkm_device_func *func,
const struct nvkm_device_quirk *quirk,
@@ -2934,8 +2948,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
{
struct nvkm_subdev *subdev;
u64 mmio_base, mmio_size;
- u32 boot0, strap;
- void __iomem *map;
+ u32 boot0, boot1, strap;
int ret = -EEXIST, i;
unsigned chipset;
@@ -2961,26 +2974,30 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
mmio_base = device->func->resource_addr(device, 0);
mmio_size = device->func->resource_size(device, 0);
- /* identify the chipset, and determine classes of subdev/engines */
- if (detect) {
- map = ioremap(mmio_base, 0x102000);
- if (ret = -ENOMEM, map == NULL)
+ if (detect || mmio) {
+ device->pri = ioremap(mmio_base, mmio_size);
+ if (device->pri == NULL) {
+ nvdev_error(device, "unable to map PRI\n");
+ ret = -ENOMEM;
goto done;
+ }
+ }
+ /* identify the chipset, and determine classes of subdev/engines */
+ if (detect) {
/* switch mmio to cpu's native endianness */
-#ifndef __BIG_ENDIAN
- if (ioread32_native(map + 0x000004) != 0x00000000) {
-#else
- if (ioread32_native(map + 0x000004) == 0x00000000) {
-#endif
- iowrite32_native(0x01000001, map + 0x000004);
- ioread32_native(map);
+ if (!nvkm_device_endianness(device)) {
+ nvkm_wr32(device, 0x000004, 0x01000001);
+ nvkm_rd32(device, 0x000000);
+ if (!nvkm_device_endianness(device)) {
+ nvdev_error(device,
+ "GPU not supported on big-endian\n");
+ ret = -ENOSYS;
+ goto done;
+ }
}
- /* read boot0 and strapping information */
- boot0 = ioread32_native(map + 0x000000);
- strap = ioread32_native(map + 0x101000);
- iounmap(map);
+ boot0 = nvkm_rd32(device, 0x000000);
/* chipset can be overridden for devel/testing purposes */
chipset = nvkm_longopt(device->cfgopt, "NvChipset", 0);
@@ -3138,6 +3155,17 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
nvdev_info(device, "NVIDIA %s (%08x)\n",
device->chip->name, boot0);
+ /* vGPU detection */
+ boot1 = nvkm_rd32(device, 0x0000004);
+ if (device->card_type >= TU100 && (boot1 & 0x00030000)) {
+ nvdev_info(device, "vGPUs are not supported\n");
+ ret = -ENODEV;
+ goto done;
+ }
+
+ /* read strapping information */
+ strap = nvkm_rd32(device, 0x101000);
+
/* determine frequency of timing crystal */
if ( device->card_type <= NV_10 || device->chipset < 0x17 ||
(device->chipset >= 0x20 && device->chipset < 0x25))
@@ -3158,15 +3186,6 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
if (!device->name)
device->name = device->chip->name;
- if (mmio) {
- device->pri = ioremap(mmio_base, mmio_size);
- if (!device->pri) {
- nvdev_error(device, "unable to map PRI\n");
- ret = -ENOMEM;
- goto done;
- }
- }
-
mutex_init(&device->mutex);
for (i = 0; i < NVKM_SUBDEV_NR; i++) {
@@ -3254,6 +3273,10 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
ret = 0;
done:
+ if (device->pri && (!mmio || ret)) {
+ iounmap(device->pri);
+ device->pri = NULL;
+ }
mutex_unlock(&nv_devices_mutex);
return ret;
}
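Two things change in nvkm_device_ctor(): the PRI aperture is mapped once up front (serving both the detect path and mmio users) and unmapped in the done: path when unneeded or on error, and the endian check reads boot1 (0x000004) through the normal nvkm_rd32() accessor instead of a temporary ioremap. Writing the 0x01000001 magic and reading it back verifies the GPU actually switched byte order instead of silently running mismatched; condensed from the hunk above:

	/* verify-after-write: request the endian switch, then re-check */
	if (!nvkm_device_endianness(device)) {
		nvkm_wr32(device, 0x000004, 0x01000001);  /* request switch */
		nvkm_rd32(device, 0x000000);              /* flush posted write */
		if (!nvkm_device_endianness(device))      /* still mismatched */
			return -ENOSYS;
	}
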
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index 0d584d0da59c..cf075311cdd2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -39,6 +39,7 @@ nvkm-y += nvkm/engine/disp/sorgf119.o
nvkm-y += nvkm/engine/disp/sorgk104.o
nvkm-y += nvkm/engine/disp/sorgm107.o
nvkm-y += nvkm/engine/disp/sorgm200.o
+nvkm-y += nvkm/engine/disp/sorgp100.o
nvkm-y += nvkm/engine/disp/sorgv100.o
nvkm-y += nvkm/engine/disp/sortu102.o
@@ -47,6 +48,7 @@ nvkm-y += nvkm/engine/disp/dp.o
nvkm-y += nvkm/engine/disp/hdagt215.o
nvkm-y += nvkm/engine/disp/hdagf119.o
+nvkm-y += nvkm/engine/disp/hdagv100.o
nvkm-y += nvkm/engine/disp/hdmi.o
nvkm-y += nvkm/engine/disp/hdmig84.o
@@ -74,6 +76,8 @@ nvkm-y += nvkm/engine/disp/rootgp102.o
nvkm-y += nvkm/engine/disp/rootgv100.o
nvkm-y += nvkm/engine/disp/roottu102.o
+nvkm-y += nvkm/engine/disp/capsgv100.o
+
nvkm-y += nvkm/engine/disp/channv50.o
nvkm-y += nvkm/engine/disp/changf119.o
nvkm-y += nvkm/engine/disp/changv100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
new file mode 100644
index 000000000000..5026e530f4bb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/capsgv100.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#define gv100_disp_caps(p) container_of((p), struct gv100_disp_caps, object)
+#include "rootnv50.h"
+
+struct gv100_disp_caps {
+ struct nvkm_object object;
+ struct nv50_disp *disp;
+};
+
+static int
+gv100_disp_caps_map(struct nvkm_object *object, void *argv, u32 argc,
+ enum nvkm_object_map *type, u64 *addr, u64 *size)
+{
+ struct gv100_disp_caps *caps = gv100_disp_caps(object);
+ struct nvkm_device *device = caps->disp->base.engine.subdev.device;
+ *type = NVKM_OBJECT_MAP_IO;
+ *addr = 0x640000 + device->func->resource_addr(device, 0);
+ *size = 0x1000;
+ return 0;
+}
+
+static const struct nvkm_object_func
+gv100_disp_caps = {
+ .map = gv100_disp_caps_map,
+};
+
+int
+gv100_disp_caps_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+ struct nv50_disp *disp, struct nvkm_object **pobject)
+{
+ struct gv100_disp_caps *caps;
+
+ if (!(caps = kzalloc(sizeof(*caps), GFP_KERNEL)))
+ return -ENOMEM;
+ *pobject = &caps->object;
+
+ nvkm_object_ctor(&gv100_disp_caps, oclass, &caps->object);
+ caps->disp = disp;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
index fd6216684f6d..8471de3f3b61 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -36,7 +36,7 @@ gp100_disp = {
.super = gf119_disp_super,
.root = &gp100_disp_root_oclass,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
- .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+ .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
index 3468ddec1270..a3779c5046ea 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp102.c
@@ -63,7 +63,7 @@ gp102_disp = {
.super = gf119_disp_super,
.root = &gp102_disp_root_oclass,
.head = { .cnt = gf119_head_cnt, .new = gf119_head_new },
- .sor = { .cnt = gf119_sor_cnt, .new = gm200_sor_new },
+ .sor = { .cnt = gf119_sor_cnt, .new = gp100_sor_new },
};
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
index 0fa0ec0a1de0..19d2d58344e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagf119.c
@@ -24,10 +24,18 @@
#include "ior.h"
void
-gf119_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gf119_hda_device_entry(struct nvkm_ior *ior, int head)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 soff = 0x030 * ior->id;
+ const u32 hoff = 0x800 * head;
+ nvkm_mask(device, 0x616548 + hoff, 0x00000070, head << 4);
+}
+
+void
+gf119_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
+{
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 soff = 0x030 * ior->id + (head * 0x04);
int i;
for (i = 0; i < size; i++)
@@ -41,14 +49,14 @@ void
gf119_hda_hpd(struct nvkm_ior *ior, int head, bool present)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 hoff = 0x800 * head;
+ const u32 soff = 0x030 * ior->id + (head * 0x04);
u32 data = 0x80000000;
u32 mask = 0x80000001;
if (present) {
- nvkm_mask(device, 0x616548 + hoff, 0x00000070, 0x00000000);
+ ior->func->hda.device_entry(ior, head);
data |= 0x00000001;
} else {
mask |= 0x00000002;
}
- nvkm_mask(device, 0x10ec10 + ior->id * 0x030, mask, data);
+ nvkm_mask(device, 0x10ec10 + soff, mask, data);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 4509d2ba880e..0d1b81fe1093 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -24,7 +24,7 @@
#include "ior.h"
void
-gt215_hda_eld(struct nvkm_ior *ior, u8 *data, u8 size)
+gt215_hda_eld(struct nvkm_ior *ior, int head, u8 *data, u8 size)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
const u32 soff = ior->id * 0x800;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
index 723af0b2dda0..57d374ecfeef 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml_common_defs.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagv100.c
@@ -1,5 +1,5 @@
/*
- * Copyright 2017 Advanced Micro Devices, Inc.
+ * Copyright 2020 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -18,26 +18,13 @@
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
*/
+#include "ior.h"
-#include "dml_common_defs.h"
-#include "dcn_calc_math.h"
-
-#include "dml_inline_defs.h"
-
-double dml_round(double a)
+void
+gv100_hda_device_entry(struct nvkm_ior *ior, int head)
{
- double round_pt = 0.5;
- double ceil = dml_ceil(a, 1);
- double floor = dml_floor(a, 1);
-
- if (a - floor >= round_pt)
- return ceil;
- else
- return floor;
+ struct nvkm_device *device = ior->disp->engine.subdev.device;
+ const u32 hoff = 0x800 * head;
+ nvkm_mask(device, 0x616528 + hoff, 0x00000070, head << 4);
}
-
-
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
index 9b16a08eb4d9..bf6d41fb0c9f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdmigm200.c
@@ -27,10 +27,10 @@ void
gm200_hdmi_scdc(struct nvkm_ior *ior, int head, u8 scdc)
{
struct nvkm_device *device = ior->disp->engine.subdev.device;
- const u32 hoff = head * 0x800;
+ const u32 soff = nv50_ior_base(ior);
const u32 ctrl = scdc & 0x3;
- nvkm_mask(device, 0x61c5bc + hoff, 0x00000003, ctrl);
+ nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);
ior->tmds.high_speed = !!(scdc & 0x2);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
index 009d3a8b7a50..1a200a9ba4e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
@@ -87,7 +87,8 @@ struct nvkm_ior_func {
struct {
void (*hpd)(struct nvkm_ior *, int head, bool present);
- void (*eld)(struct nvkm_ior *, u8 *data, u8 size);
+ void (*eld)(struct nvkm_ior *, int head, u8 *data, u8 size);
+ void (*device_entry)(struct nvkm_ior *, int head);
} hda;
};
@@ -158,10 +159,13 @@ void gv100_hdmi_ctrl(struct nvkm_ior *, int, bool, u8, u8, u8 *, u8 , u8 *, u8);
void gm200_hdmi_scdc(struct nvkm_ior *, int, u8);
void gt215_hda_hpd(struct nvkm_ior *, int, bool);
-void gt215_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gt215_hda_eld(struct nvkm_ior *, int, u8 *, u8);
void gf119_hda_hpd(struct nvkm_ior *, int, bool);
-void gf119_hda_eld(struct nvkm_ior *, u8 *, u8);
+void gf119_hda_eld(struct nvkm_ior *, int, u8 *, u8);
+void gf119_hda_device_entry(struct nvkm_ior *, int);
+
+void gv100_hda_device_entry(struct nvkm_ior *, int);
#define IOR_MSG(i,l,f,a...) do { \
struct nvkm_ior *_ior = (i); \
@@ -197,6 +201,7 @@ int gf119_sor_new(struct nvkm_disp *, int);
int gk104_sor_new(struct nvkm_disp *, int);
int gm107_sor_new(struct nvkm_disp *, int);
int gm200_sor_new(struct nvkm_disp *, int);
+int gp100_sor_new(struct nvkm_disp *, int);
int gv100_sor_cnt(struct nvkm_disp *, unsigned long *);
int gv100_sor_new(struct nvkm_disp *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
index c62030c96fba..dcf08249374a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
@@ -111,8 +111,44 @@ nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
return 0;
}
+static inline int
+nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
+ u8 user, bool hda)
+{
+ struct nvkm_ior *ior;
+
+ /* First preference is to reuse the OR that is currently armed
+ * on HW, if any, in order to prevent unnecessary switching.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->arm.outp == outp)
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ /* Failing that, a completely unused OR is the next best thing. */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->type == type && !ior->arm.outp &&
+ (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ /* Last resort is to assign an OR that's already active on HW,
+ * but will be released during the next modeset.
+ */
+ list_for_each_entry(ior, &outp->disp->ior, head) {
+ if (!ior->identity && !!ior->func->hda.hpd == hda &&
+ !ior->asy.outp && ior->type == type &&
+ (ior->func->route.set || ior->id == __ffs(outp->info.or)))
+ return nvkm_outp_acquire_ior(outp, user, ior);
+ }
+
+ return -ENOSPC;
+}
+
int
-nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
+nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
{
struct nvkm_ior *ior = outp->ior;
enum nvkm_ior_proto proto;
@@ -137,32 +173,25 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user)
return nvkm_outp_acquire_ior(outp, user, ior);
}
- /* First preference is to reuse the OR that is currently armed
- * on HW, if any, in order to prevent unnecessary switching.
+ /* If we don't need HDA, first try to acquire an OR that doesn't
+ * support it to leave free the ones that do.
*/
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp)
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
+ if (!hda) {
+ if (!nvkm_outp_acquire_hda(outp, type, user, false))
+ return 0;
- /* Failing that, a completely unused OR is the next best thing. */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity &&
- !ior->asy.outp && ior->type == type && !ior->arm.outp &&
- (ior->func->route.set || ior->id == __ffs(outp->info.or)))
- return nvkm_outp_acquire_ior(outp, user, ior);
+ /* Use a HDA-supporting SOR anyway. */
+ return nvkm_outp_acquire_hda(outp, type, user, true);
}
- /* Last resort is to assign an OR that's already active on HW,
- * but will be released during the next modeset.
- */
- list_for_each_entry(ior, &outp->disp->ior, head) {
- if (!ior->identity && !ior->asy.outp && ior->type == type &&
- (ior->func->route.set || ior->id == __ffs(outp->info.or)))
- return nvkm_outp_acquire_ior(outp, user, ior);
- }
+ /* We want HDA, try to acquire an OR that supports it. */
+ if (!nvkm_outp_acquire_hda(outp, type, user, true))
+ return 0;
- return -ENOSPC;
+ /* There weren't any free ORs that support HDA, grab one that
+ * doesn't and at least allow display to work still.
+ */
+ return nvkm_outp_acquire_hda(outp, type, user, false);
}
void
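nvkm_outp_acquire_hda() keeps the original three-tier preference (armed OR, idle OR, reclaimable OR) but additionally requires the OR's HDA capability to match the request. The !!ior->func->hda.hpd test normalizes the function pointer to a boolean before comparing; a sketch of that idiom:

	/* normalize the capability before comparing with the request */
	bool or_has_hda = !!ior->func->hda.hpd; /* pointer -> true/false */
	if (or_has_hda == hda && ior->type == type && !ior->asy.outp)
		return nvkm_outp_acquire_ior(outp, user, ior);

The caller then prefers a non-HDA OR for audio-less requests, so ORs that do support HDA stay free for outputs that need them, and falls back in either direction when nothing matches exactly.
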
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
index 721b068b87ef..ee028d30cfe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.h
@@ -32,7 +32,7 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *,
void nvkm_outp_del(struct nvkm_outp **);
void nvkm_outp_init(struct nvkm_outp *);
void nvkm_outp_fini(struct nvkm_outp *);
-int nvkm_outp_acquire(struct nvkm_outp *, u8 user);
+int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda);
void nvkm_outp_release(struct nvkm_outp *, u8 user);
void nvkm_outp_route(struct nvkm_disp *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
index 9c658d632d37..47efb48d769a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgv100.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
gv100_disp_root = {
.user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,GV100_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,GV100_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,GV100_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
index 5f758948d6e1..fb5de44e4b8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.c
@@ -99,7 +99,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
} *args = data;
int ret = -ENOSYS;
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER);
+ ret = nvkm_outp_acquire(outp, NVKM_OUTP_USER, args->v0.hda);
if (ret == 0) {
args->v0.or = outp->ior->id;
args->v0.link = outp->ior->asy.link;
@@ -119,7 +119,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
if (args->v0.data & 0xfff00000)
return -EINVAL;
- ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV);
+ ret = nvkm_outp_acquire(outp, NVKM_OUTP_PRIV, false);
if (ret)
return ret;
ret = outp->ior->func->sense(outp->ior, args->v0.data);
@@ -155,7 +155,7 @@ nv50_disp_root_mthd_(struct nvkm_object *object, u32 mthd, void *data, u32 size)
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, true);
ior->func->hda.hpd(ior, hidx, true);
- ior->func->hda.eld(ior, data, size);
+ ior->func->hda.eld(ior, hidx, data, size);
} else {
if (outp->info.type == DCB_OUTPUT_DP)
ior->func->dp.audio(ior, hidx, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index a1f942793f98..7070f5408d92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -24,6 +24,9 @@ int nv50_disp_root_new_(const struct nv50_disp_root_func *, struct nvkm_disp *,
const struct nvkm_oclass *, void *data, u32 size,
struct nvkm_object **);
+int gv100_disp_caps_new(const struct nvkm_oclass *, void *, u32,
+ struct nv50_disp *, struct nvkm_object **);
+
extern const struct nvkm_disp_oclass nv50_disp_root_oclass;
extern const struct nvkm_disp_oclass g84_disp_root_oclass;
extern const struct nvkm_disp_oclass g94_disp_root_oclass;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
index 579a5d02308a..d8719d38b98a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/roottu102.c
@@ -27,6 +27,7 @@
static const struct nv50_disp_root_func
tu102_disp_root = {
.user = {
+ {{-1,-1,GV100_DISP_CAPS }, gv100_disp_caps_new },
{{0,0,TU102_DISP_CURSOR }, gv100_disp_curs_new },
{{0,0,TU102_DISP_WINDOW_IMM_CHANNEL_DMA}, gv100_disp_wimm_new },
{{0,0,TU102_DISP_CORE_CHANNEL_DMA }, gv100_disp_core_new },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
index 456a5a143522..3b3643fb1019 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
@@ -177,6 +177,7 @@ gf119_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
index b94090edaebf..0c0925680790 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgk104.c
@@ -43,6 +43,7 @@ gk104_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
index e6965dec09c9..38045c92197f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
@@ -57,6 +57,7 @@ gm107_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
},
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
index 384f82652bec..4dd7f382968e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
@@ -89,7 +89,7 @@ gm200_sor_route_get(struct nvkm_outp *outp, int *link)
}
static const struct nvkm_ior_func
-gm200_sor = {
+gm200_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -115,11 +115,46 @@ gm200_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gm200_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
},
};
int
gm200_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x101034);
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gm200_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&gm200_sor, disp, SOR, id);
}
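Each SOR now picks an HDA or non-HDA function table at construction time: if bit 30 of register 0x08a15c is clear, a per-chipset register (0x101034 here on gm200) supplies the per-SOR HDA mask instead, and BIT(id) tests this SOR's bit. The selection, condensed:

	u32 hda;
	if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
		hda = nvkm_rd32(device, 0x101034);	/* gm200 fallback mask */
	return nvkm_ior_new_(hda & BIT(id) ? &gm200_sor_hda : &gm200_sor,
			     disp, SOR, id);

The sorgp100.c, sorgv100.c, and sortu102.c hunks below repeat the same split with their own fallback registers.
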
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c
new file mode 100644
index 000000000000..c54f88317a07
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgp100.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "ior.h"
+
+static const struct nvkm_ior_func
+gp100_sor_hda = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+ .hda = {
+ .hpd = gf119_hda_hpd,
+ .eld = gf119_hda_eld,
+ .device_entry = gf119_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gp100_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gf119_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gk104_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = gf119_sor_dp_vcpi,
+ .audio = gf119_sor_dp_audio,
+ .audio_sym = gf119_sor_dp_audio_sym,
+ .watermark = gf119_sor_dp_watermark,
+ },
+};
+
+int
+gp100_sor_new(struct nvkm_disp *disp, int id)
+{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x10ebb0) >> 8;
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gp100_sor_hda, disp, SOR, id);
+ return nvkm_ior_new_(&gp100_sor, disp, SOR, id);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
index b0597ff9a714..4441187e8ec9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c
@@ -78,7 +78,7 @@ gv100_sor_state(struct nvkm_ior *sor, struct nvkm_ior_state *state)
}
static const struct nvkm_ior_func
-gv100_sor = {
+gv100_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -103,12 +103,46 @@ gv100_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+gv100_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = gf119_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
},
};
int
gv100_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda;
+
+ if (!((hda = nvkm_rd32(device, 0x08a15c)) & 0x40000000))
+ hda = nvkm_rd32(device, 0x118fb0) >> 8;
+
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&gv100_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&gv100_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
index 4d5f3791ea7b..59865a934c4b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c
@@ -62,7 +62,7 @@ tu102_sor_dp_links(struct nvkm_ior *sor, struct nvkm_i2c_aux *aux)
}
static const struct nvkm_ior_func
-tu102_sor = {
+tu102_sor_hda = {
.route = {
.get = gm200_sor_route_get,
.set = gm200_sor_route_set,
@@ -88,11 +88,42 @@ tu102_sor = {
.hda = {
.hpd = gf119_hda_hpd,
.eld = gf119_hda_eld,
+ .device_entry = gv100_hda_device_entry,
+ },
+};
+
+static const struct nvkm_ior_func
+tu102_sor = {
+ .route = {
+ .get = gm200_sor_route_get,
+ .set = gm200_sor_route_set,
+ },
+ .state = gv100_sor_state,
+ .power = nv50_sor_power,
+ .clock = gf119_sor_clock,
+ .hdmi = {
+ .ctrl = gv100_hdmi_ctrl,
+ .scdc = gm200_hdmi_scdc,
+ },
+ .dp = {
+ .lanes = { 0, 1, 2, 3 },
+ .links = tu102_sor_dp_links,
+ .power = g94_sor_dp_power,
+ .pattern = gm107_sor_dp_pattern,
+ .drive = gm200_sor_dp_drive,
+ .vcpi = tu102_sor_dp_vcpi,
+ .audio = gv100_sor_dp_audio,
+ .audio_sym = gv100_sor_dp_audio_sym,
+ .watermark = gv100_sor_dp_watermark,
},
};
int
tu102_sor_new(struct nvkm_disp *disp, int id)
{
+ struct nvkm_device *device = disp->engine.subdev.device;
+ u32 hda = nvkm_rd32(device, 0x08a15c);
+ if (hda & BIT(id))
+ return nvkm_ior_new_(&tu102_sor_hda, disp, SOR, id);
return nvkm_ior_new_(&tu102_sor, disp, SOR, id);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4209b24a46d7..e56880f3e3bd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -319,6 +319,17 @@ gk20a_gr_load_sw(struct gf100_gr *gr, const char *path, int ver)
return 0;
}
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_124_SOC) || IS_ENABLED(CONFIG_ARCH_TEGRA_132_SOC)
+MODULE_FIRMWARE("nvidia/gk20a/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gk20a/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gk20a/sw_nonctx.bin");
+#endif
+
static int
gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
{
@@ -341,7 +352,7 @@ gk20a_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
static const struct gf100_gr_fwif
gk20a_gr_fwif[] = {
- { -1, gk20a_gr_load, &gk20a_gr },
+ { 0, gk20a_gr_load, &gk20a_gr },
{}
};
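The MODULE_FIRMWARE() lines removed from nouveau_platform.c earlier in this diff land here, next to gk20a_gr_load() which actually requests the blobs, so the declarations stay with their user. The fwif version also changes from -1 to 0; elsewhere in nvkm (for example the tu102 sec2 nofw entry above) negative versions appear to mark no-firmware fallback entries, whereas gk20a always loads external firmware:

	static const struct gf100_gr_fwif
	gk20a_gr_fwif[] = {
		{ 0, gk20a_gr_load, &gk20a_gr }, /* 0: firmware-backed entry */
		{}
	};
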
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
index 232a9d7c51e5..e770c9497871 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp108.c
@@ -25,6 +25,9 @@
MODULE_FIRMWARE("nvidia/gp108/sec2/desc.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/image.bin");
MODULE_FIRMWARE("nvidia/gp108/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gv100/sec2/sig.bin");
static const struct nvkm_sec2_fwif
gp108_sec2_fwif[] = {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
index b6ebd95c9ba1..a8295653ceab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/tu102.c
@@ -56,6 +56,22 @@ tu102_sec2_nofw(struct nvkm_sec2 *sec2, int ver,
return 0;
}
+MODULE_FIRMWARE("nvidia/tu102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu116/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/tu117/sec2/sig.bin");
+
static const struct nvkm_sec2_fwif
tu102_sec2_fwif[] = {
{ 0, gp102_sec2_load, &tu102_sec2, &gp102_sec2_acr_1 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
index 8eb2a930a9b5..e4866a02e457 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/base.c
@@ -250,6 +250,11 @@ nvkm_acr_oneinit(struct nvkm_subdev *subdev)
list_add_tail(&lsf->head, &acr->lsf);
}
+ /* Ensure the falcon that'll provide ACR functions is booted first. */
+ lsf = nvkm_acr_falcon(device);
+ if (lsf)
+ list_move(&lsf->head, &acr->lsf);
+
if (!acr->wpr_fw || acr->wpr_comp)
wpr_size = acr->func->wpr_layout(acr);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
index aecce2dac558..667fa016496e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/hsfw.c
@@ -100,25 +100,21 @@ nvkm_acr_hsfw_load_image(struct nvkm_acr *acr, const char *name, int ver,
hsfw->data_size = lhdr->data_size;
hsfw->sig.prod.size = fwhdr->sig_prod_size;
- hsfw->sig.prod.data = kmalloc(hsfw->sig.prod.size, GFP_KERNEL);
+ hsfw->sig.prod.data = kmemdup(fw->data + fwhdr->sig_prod_offset + sig,
+ hsfw->sig.prod.size, GFP_KERNEL);
if (!hsfw->sig.prod.data) {
ret = -ENOMEM;
goto done;
}
- memcpy(hsfw->sig.prod.data, fw->data + fwhdr->sig_prod_offset + sig,
- hsfw->sig.prod.size);
-
hsfw->sig.dbg.size = fwhdr->sig_dbg_size;
- hsfw->sig.dbg.data = kmalloc(hsfw->sig.dbg.size, GFP_KERNEL);
+ hsfw->sig.dbg.data = kmemdup(fw->data + fwhdr->sig_dbg_offset + sig,
+ hsfw->sig.dbg.size, GFP_KERNEL);
if (!hsfw->sig.dbg.data) {
ret = -ENOMEM;
goto done;
}
- memcpy(hsfw->sig.dbg.data, fw->data + fwhdr->sig_dbg_offset + sig,
- hsfw->sig.dbg.size);
-
hsfw->sig.patch_loc = loc;
done:
nvkm_firmware_put(fw);
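kmemdup() collapses the kmalloc()+memcpy() pair into a single call, which also removes any chance of the allocation size and the copy size drifting apart. The idiom:

	/* before */
	dst = kmalloc(len, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;
	memcpy(dst, src, len);

	/* after */
	dst = kmemdup(src, len, GFP_KERNEL);
	if (!dst)
		return -ENOMEM;
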
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
index 06572f8ce914..f9c427559538 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowacpi.c
@@ -22,22 +22,39 @@
*/
#include "priv.h"
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct device *);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct device *dev)
+static int
+acpi_read_bios(acpi_handle rom_handle, u8 *bios, u32 offset, u32 length)
{
- return false;
-}
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_status status;
+ union acpi_object rom_arg_elements[2], *obj;
+ struct acpi_object_list rom_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
-{
+ rom_arg.count = 2;
+ rom_arg.pointer = &rom_arg_elements[0];
+
+ rom_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[0].integer.value = offset;
+
+ rom_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ rom_arg_elements[1].integer.value = length;
+
+ status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ pr_info("failed to evaluate ROM got %s\n",
+ acpi_format_exception(status));
+ return -ENODEV;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ length = min(length, obj->buffer.length);
+ memcpy(bios+offset, obj->buffer.pointer, length);
+ kfree(buffer.pointer);
+ return length;
+#else
return -EINVAL;
-}
#endif
+}
/* This version of the shadow function disobeys the ACPI spec and tries
* to fetch in units of more than 4KiB at a time. This is a LOT faster
@@ -51,7 +68,7 @@ acpi_read_fast(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
u32 fetch = limit - start;
if (nvbios_extend(bios, limit) >= 0) {
- int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
+ int ret = acpi_read_bios(data, bios->data, start, fetch);
if (ret == fetch)
return fetch;
}
@@ -73,9 +90,8 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
if (nvbios_extend(bios, limit) >= 0) {
while (start + fetch < limit) {
- int ret = nouveau_acpi_get_bios_chunk(bios->data,
- start + fetch,
- 0x1000);
+ int ret = acpi_read_bios(data, bios->data,
+ start + fetch, 0x1000);
if (ret != 0x1000)
break;
fetch += 0x1000;
@@ -88,9 +104,22 @@ acpi_read_slow(void *data, u32 offset, u32 length, struct nvkm_bios *bios)
static void *
acpi_init(struct nvkm_bios *bios, const char *name)
{
- if (!nouveau_acpi_rom_supported(bios->subdev.device->dev))
+#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
+ acpi_status status;
+ acpi_handle dhandle, rom_handle;
+
+ dhandle = ACPI_HANDLE(bios->subdev.device->dev);
+ if (!dhandle)
return ERR_PTR(-ENODEV);
- return NULL;
+
+ status = acpi_get_handle(dhandle, "_ROM", &rom_handle);
+ if (ACPI_FAILURE(status))
+ return ERR_PTR(-ENODEV);
+
+ return rom_handle;
+#else
+ return ERR_PTR(-ENODEV);
+#endif
}
const struct nvbios_source
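acpi_init() now resolves the _ROM method handle itself and passes it as the source's private data, so acpi_read_bios() can evaluate _ROM directly instead of going through nouveau's DRM-level helpers. One ownership detail worth noting: with ACPI_ALLOCATE_BUFFER the ACPICA core allocates buffer.pointer and the caller must kfree() it, as the code above does after copying the chunk out. Condensed:

	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	status = acpi_evaluate_object(rom_handle, NULL, &rom_arg, &buf);
	if (ACPI_SUCCESS(status)) {
		/* ... copy out of buf.pointer ... */
		kfree(buf.pointer);	/* caller owns the allocated buffer */
	}
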
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index d80dbc8f09b2..2340040942c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -114,9 +114,5 @@ int
gf100_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gf100_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gf100_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
index 3905a80da811..1124dadac145 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf117.c
@@ -43,9 +43,5 @@ int
gf117_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gf117_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gf117_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index 9025ed1bd2a9..f3915f85838e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -117,9 +117,5 @@ int
gk104_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gk104_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gk104_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index 1a4ab825852c..187d544378b0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -81,9 +81,5 @@ int
gk20a_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gk20a_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gk20a_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
index c63328152bfa..0f1f0ad6377e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gm200.c
@@ -32,9 +32,5 @@ int
gm200_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gm200_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gm200_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
index 39db90aa2c80..0347b367cefe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
@@ -51,9 +51,5 @@ int
gp10b_ibus_new(struct nvkm_device *device, int index,
struct nvkm_subdev **pibus)
{
- struct nvkm_subdev *ibus;
- if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
- return -ENOMEM;
- nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
- return 0;
+ return nvkm_subdev_new_(&gp10b_ibus, device, index, pibus);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 41640e0584ac..199f94e15c5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -580,7 +580,7 @@ nvkm_vmm_iter(struct nvkm_vmm *vmm, const struct nvkm_vmm_page *page,
it.pte[it.lvl]++;
}
}
- };
+ }
nvkm_vmm_flush(&it);
return ~0ULL;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
index 5e55ecbd8005..d3f8f916d0db 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.h
@@ -304,7 +304,7 @@ int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
PTEI += _ptes; \
PTEN -= _ptes; \
- }; \
+ } \
nvkm_done((PT)->memory); \
} while(0)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
index 03b355dabab3..abf3eda683f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/ic.c
@@ -36,8 +36,8 @@ probe_monitoring_device(struct nvkm_i2c_bus *bus,
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
- client = i2c_new_device(&bus->i2c, info);
- if (!client)
+ client = i2c_new_client_device(&bus->i2c, info);
+ if (IS_ERR(client))
return false;
if (!client->dev.driver ||
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 3484b5d4a91c..faca5c873bde 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -1163,7 +1163,7 @@ static const struct omap_dss_driver dsicm_dss_driver = {
static int dsicm_probe_of(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
- struct device_node *backlight;
+ struct backlight_device *backlight;
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct display_timing timing;
int err;
@@ -1216,17 +1216,15 @@ static int dsicm_probe_of(struct platform_device *pdev)
ddata->vddi = NULL;
}
- backlight = of_parse_phandle(node, "backlight", 0);
- if (backlight) {
- ddata->extbldev = of_find_backlight_by_node(backlight);
- of_node_put(backlight);
+ backlight = devm_of_find_backlight(&pdev->dev);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
- if (!ddata->extbldev)
- return -EPROBE_DEFER;
- } else {
- /* assume native backlight support */
+ /* If no backlight device is found assume native backlight support */
+ if (backlight)
+ ddata->extbldev = backlight;
+ else
ddata->use_dsi_backlight = true;
- }
/* TODO: ulps */
diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig
index 72ae79c0c9b4..2658c521b702 100644
--- a/drivers/gpu/drm/omapdrm/dss/Kconfig
+++ b/drivers/gpu/drm/omapdrm/dss/Kconfig
@@ -71,7 +71,7 @@ config OMAP4_DSS_HDMI_CEC
depends on OMAP4_DSS_HDMI
select CEC_CORE
default y
- ---help---
+ help
When selected the HDMI transmitter will support the CEC feature.
config OMAP5_DSS_HDMI
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index dbb90f2d2ccd..6639ee9b05d3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -3137,33 +3137,12 @@ static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
dispc_write_reg(dispc, DISPC_TIMING_H(channel), timing_h);
dispc_write_reg(dispc, DISPC_TIMING_V(channel), timing_v);
- if (vm->flags & DISPLAY_FLAGS_VSYNC_HIGH)
- vs = false;
- else
- vs = true;
-
- if (vm->flags & DISPLAY_FLAGS_HSYNC_HIGH)
- hs = false;
- else
- hs = true;
-
- if (vm->flags & DISPLAY_FLAGS_DE_HIGH)
- de = false;
- else
- de = true;
-
- if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- ipc = false;
- else
- ipc = true;
-
- /* always use the 'rf' setting */
- onoff = true;
-
- if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
- rf = true;
- else
- rf = false;
+ vs = !!(vm->flags & DISPLAY_FLAGS_VSYNC_LOW);
+ hs = !!(vm->flags & DISPLAY_FLAGS_HSYNC_LOW);
+ de = !!(vm->flags & DISPLAY_FLAGS_DE_LOW);
+ ipc = !!(vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE);
+ onoff = true; /* always use the 'rf' setting */
+ rf = !!(vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE);
l = FLD_VAL(onoff, 17, 17) |
FLD_VAL(rf, 16, 16) |
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 766553bb2f87..9701843ccf09 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -208,49 +208,6 @@ static const struct venc_config venc_config_ntsc_trm = {
.gen_ctrl = 0x00F90000,
};
-static const struct venc_config venc_config_pal_bdghi = {
- .f_control = 0,
- .vidout_ctrl = 0,
- .sync_ctrl = 0,
- .hfltr_ctrl = 0,
- .x_color = 0,
- .line21 = 0,
- .ln_sel = 21,
- .htrigger_vtrigger = 0,
- .tvdetgp_int_start_stop_x = 0x00140001,
- .tvdetgp_int_start_stop_y = 0x00010001,
- .gen_ctrl = 0x00FB0000,
-
- .llen = 864-1,
- .flens = 625-1,
- .cc_carr_wss_carr = 0x2F7625ED,
- .c_phase = 0xDF,
- .gain_u = 0x111,
- .gain_v = 0x181,
- .gain_y = 0x140,
- .black_level = 0x3e,
- .blank_level = 0x3e,
- .m_control = 0<<2 | 1<<1,
- .bstamp_wss_data = 0x42,
- .s_carr = 0x2a098acb,
- .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0,
- .savid__eavid = 0x06A70108,
- .flen__fal = 23<<16 | 624<<0,
- .lal__phase_reset = 2<<17 | 310<<0,
- .hs_int_start_stop_x = 0x00920358,
- .hs_ext_start_stop_x = 0x000F035F,
- .vs_int_start_x = 0x1a7<<16,
- .vs_int_stop_x__vs_int_start_y = 0x000601A7,
- .vs_int_stop_y__vs_ext_start_x = 0x01AF0036,
- .vs_ext_stop_x__vs_ext_start_y = 0x27101af,
- .vs_ext_stop_y = 0x05,
- .avid_start_stop_x = 0x03530082,
- .avid_start_stop_y = 0x0270002E,
- .fid_int_start_x__fid_int_start_y = 0x0005008A,
- .fid_int_offset_y__fid_ext_start_x = 0x002E0138,
- .fid_ext_start_y__fid_ext_offset_y = 0x01380005,
-};
-
enum venc_videomode {
VENC_MODE_UNKNOWN,
VENC_MODE_PAL,
diff --git a/drivers/gpu/drm/omapdrm/omap_debugfs.c b/drivers/gpu/drm/omapdrm/omap_debugfs.c
index 34dfb33145b4..b57fbe8a0ac2 100644
--- a/drivers/gpu/drm/omapdrm/omap_debugfs.c
+++ b/drivers/gpu/drm/omapdrm/omap_debugfs.c
@@ -80,31 +80,16 @@ static struct drm_info_list omap_dmm_debugfs_list[] = {
{"tiler_map", tiler_map_show, 0},
};
-int omap_debugfs_init(struct drm_minor *minor)
+void omap_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
- int ret;
-
- ret = drm_debugfs_create_files(omap_debugfs_list,
- ARRAY_SIZE(omap_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install omap_debugfs_list\n");
- return ret;
- }
+ drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
if (dmm_is_available())
- ret = drm_debugfs_create_files(omap_dmm_debugfs_list,
- ARRAY_SIZE(omap_dmm_debugfs_list),
- minor->debugfs_root, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install omap_dmm_debugfs_list\n");
- return ret;
- }
-
- return ret;
+ drm_debugfs_create_files(omap_dmm_debugfs_list,
+ ARRAY_SIZE(omap_dmm_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index cdafd7ef1c32..242d28281784 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -503,7 +503,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
args->size = omap_gem_mmap_size(obj);
args->offset = omap_gem_mmap_offset(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 7c4b66efcaa7..8a1fac680138 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -82,6 +82,6 @@ struct omap_drm_private {
};
-int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_init(struct drm_minor *minor);
#endif /* __OMAPDRM_DRV_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 9aeab81dfb90..05f30e2618c9 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -326,7 +326,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
error:
while (--i >= 0)
- drm_gem_object_put_unlocked(bos[i]);
+ drm_gem_object_put(bos[i]);
return fb;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 09a84919ef73..3f6cfc24fb64 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -140,7 +140,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
/* note: if fb creation failed, we can't rely on fb destroy
* to unref the bo:
*/
- drm_gem_object_put_unlocked(fbdev->bo);
+ drm_gem_object_put(fbdev->bo);
ret = PTR_ERR(fb);
goto fail;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index d08ae95ecc0a..d0d12d5dd76c 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -629,7 +629,7 @@ int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
*offset = omap_gem_mmap_offset(obj);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
fail:
return ret;
@@ -1348,7 +1348,7 @@ int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return 0;
}
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index a1723c1b5fbf..39055c1f0e2f 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -18,6 +18,16 @@ config DRM_PANEL_ARM_VERSATILE
reference designs. The panel is detected using special registers
in the Versatile family syscon registers.
+config DRM_PANEL_ASUS_Z00T_TM5P5_NT35596
+ tristate "ASUS Z00T TM5P5 NT35596 panel"
+ depends on GPIOLIB && OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the ASUS TM5P5
+ NT35596 1080x1920 video mode panel as found in some Asus
+ Zenfone 2 Laser Z00T devices.
+
config DRM_PANEL_BOE_HIMAX8279D
tristate "Boe Himax8279d panel"
depends on OF
@@ -137,6 +147,17 @@ config DRM_PANEL_KINGDISPLAY_KD097D04
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_LEADTEK_LTK050H3146W
+ tristate "Leadtek LTK050H3146W panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Leadtek LTK050H3146W
+ TFT-LCD modules. The panel has a 720x1280 resolution and uses
+ 24 bit RGB per pixel. It provides a MIPI DSI interface to
+ the host and has a built-in LED backlight.
+
config DRM_PANEL_LEADTEK_LTK500HD1829
tristate "Leadtek LTK500HD1829 panel"
depends on OF
@@ -433,6 +454,14 @@ config DRM_PANEL_TRULY_NT35597_WQXGA
Say Y here if you want to enable support for Truly NT35597 WQXGA Dual DSI
Video Mode panel
+config DRM_PANEL_VISIONOX_RM69299
+ tristate "Visionox RM69299"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ help
+ Say Y here if you want to enable support for Visionox
+ RM69299 DSI Video Mode panel.
+
config DRM_PANEL_XINPENG_XPP055C272
tristate "Xinpeng XPP055C272 panel driver"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 96a883cd6630..de74f282c433 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
+obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o
+obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o
obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
@@ -46,4 +48,5 @@ obj-$(CONFIG_DRM_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
obj-$(CONFIG_DRM_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
obj-$(CONFIG_DRM_PANEL_TPO_TPG110) += panel-tpo-tpg110.o
obj-$(CONFIG_DRM_PANEL_TRULY_NT35597_WQXGA) += panel-truly-nt35597.o
+obj-$(CONFIG_DRM_PANEL_VISIONOX_RM69299) += panel-visionox-rm69299.o
obj-$(CONFIG_DRM_PANEL_XINPENG_XPP055C272) += panel-xinpeng-xpp055c272.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index 41444a73c980..47b37fef7ee8 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -143,7 +143,6 @@ static const struct versatile_panel_type versatile_panels[] = {
.vsync_start = 240 + 5,
.vsync_end = 240 + 5 + 6,
.vtotal = 240 + 5 + 6 + 5,
- .vrefresh = 116,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
},
@@ -167,7 +166,6 @@ static const struct versatile_panel_type versatile_panels[] = {
.vsync_start = 480 + 11,
.vsync_end = 480 + 11 + 2,
.vtotal = 480 + 11 + 2 + 32,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
},
@@ -190,7 +188,6 @@ static const struct versatile_panel_type versatile_panels[] = {
.vsync_start = 220 + 0,
.vsync_end = 220 + 0 + 2,
.vtotal = 220 + 0 + 2 + 1,
- .vrefresh = 390,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
@@ -214,7 +211,6 @@ static const struct versatile_panel_type versatile_panels[] = {
.vsync_start = 320 + 2,
.vsync_end = 320 + 2 + 2,
.vtotal = 320 + 2 + 2 + 2,
- .vrefresh = 116,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
diff --git a/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
new file mode 100644
index 000000000000..9a5b7644d756
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-asus-z00t-tm5p5-n35596.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct tm5p5_nt35596 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ bool prepared;
+};
+
+static inline struct tm5p5_nt35596 *to_tm5p5_nt35596(struct drm_panel *panel)
+{
+ return container_of(panel, struct tm5p5_nt35596, panel);
+}
+
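+/*
+ * Send a fixed byte sequence to the panel and, on failure, return the
+ * error code from the enclosing function; only usable in functions
+ * that return int.
+ */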
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
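+/*
+ * Pulse the (logical) reset line; the final ~15 ms delay gives the
+ * controller time to come out of reset before it is sent commands.
+ */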
+static void tm5p5_nt35596_reset(struct tm5p5_nt35596 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(15000, 16000);
+}
+
+static int tm5p5_nt35596_on(struct tm5p5_nt35596 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+
+ dsi_generic_write_seq(dsi, 0xff, 0x05);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0xc5, 0x31);
+ dsi_generic_write_seq(dsi, 0xff, 0x04);
+ dsi_generic_write_seq(dsi, 0x01, 0x84);
+ dsi_generic_write_seq(dsi, 0x05, 0x25);
+ dsi_generic_write_seq(dsi, 0x06, 0x01);
+ dsi_generic_write_seq(dsi, 0x07, 0x20);
+ dsi_generic_write_seq(dsi, 0x08, 0x06);
+ dsi_generic_write_seq(dsi, 0x09, 0x08);
+ dsi_generic_write_seq(dsi, 0x0a, 0x10);
+ dsi_generic_write_seq(dsi, 0x0b, 0x10);
+ dsi_generic_write_seq(dsi, 0x0c, 0x10);
+ dsi_generic_write_seq(dsi, 0x0d, 0x14);
+ dsi_generic_write_seq(dsi, 0x0e, 0x14);
+ dsi_generic_write_seq(dsi, 0x0f, 0x14);
+ dsi_generic_write_seq(dsi, 0x10, 0x14);
+ dsi_generic_write_seq(dsi, 0x11, 0x14);
+ dsi_generic_write_seq(dsi, 0x12, 0x14);
+ dsi_generic_write_seq(dsi, 0x17, 0xf3);
+ dsi_generic_write_seq(dsi, 0x18, 0xc0);
+ dsi_generic_write_seq(dsi, 0x19, 0xc0);
+ dsi_generic_write_seq(dsi, 0x1a, 0xc0);
+ dsi_generic_write_seq(dsi, 0x1b, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1c, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1d, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1e, 0xb3);
+ dsi_generic_write_seq(dsi, 0x1f, 0xb3);
+ dsi_generic_write_seq(dsi, 0x20, 0xb3);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0xff, 0x00);
+ dsi_generic_write_seq(dsi, 0xfb, 0x01);
+ dsi_generic_write_seq(dsi, 0x35, 0x01);
+ dsi_generic_write_seq(dsi, 0xd3, 0x06);
+ dsi_generic_write_seq(dsi, 0xd4, 0x04);
+ dsi_generic_write_seq(dsi, 0x5e, 0x0d);
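+ /* 0x11/0x29 mirror the DCS exit-sleep and display-on opcodes */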
+ dsi_generic_write_seq(dsi, 0x11, 0x00);
+ msleep(100);
+ dsi_generic_write_seq(dsi, 0x29, 0x00);
+ dsi_generic_write_seq(dsi, 0x53, 0x24);
+
+ return 0;
+}
+
+static int tm5p5_nt35596_off(struct tm5p5_nt35596 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(60);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+
+ dsi_dcs_write_seq(dsi, 0x4f, 0x01);
+
+ return 0;
+}
+
+static int tm5p5_nt35596_prepare(struct drm_panel *panel)
+{
+ struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ tm5p5_nt35596_reset(ctx);
+
+ ret = tm5p5_nt35596_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int tm5p5_nt35596_unprepare(struct drm_panel *panel)
+{
+ struct tm5p5_nt35596 *ctx = to_tm5p5_nt35596(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = tm5p5_nt35596_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
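+/*
+ * 1080x1920 video mode; .clock is htotal * vtotal * 60 Hz expressed
+ * in kHz, computed inline below.
+ */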
+static const struct drm_display_mode tm5p5_nt35596_mode = {
+ .clock = (1080 + 100 + 8 + 16) * (1920 + 4 + 2 + 4) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 100,
+ .hsync_end = 1080 + 100 + 8,
+ .htotal = 1080 + 100 + 8 + 16,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 4,
+ .vsync_end = 1920 + 4 + 2,
+ .vtotal = 1920 + 4 + 2 + 4,
+ .width_mm = 68,
+ .height_mm = 121,
+};
+
+static int tm5p5_nt35596_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &tm5p5_nt35596_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs tm5p5_nt35596_panel_funcs = {
+ .prepare = tm5p5_nt35596_prepare,
+ .unprepare = tm5p5_nt35596_unprepare,
+ .get_modes = tm5p5_nt35596_get_modes,
+};
+
+static int tm5p5_nt35596_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int ret;
+
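+ /* Treat any blanked or suspended state as zero brightness */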
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
+ brightness = 0;
+
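+ /* Send the brightness update in high-speed mode, then restore LPM */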
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int tm5p5_nt35596_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = bl->props.brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness & 0xff;
+}
+
+static const struct backlight_ops tm5p5_nt35596_bl_ops = {
+ .update_status = tm5p5_nt35596_bl_update_status,
+ .get_brightness = tm5p5_nt35596_bl_get_brightness,
+};
+
+static struct backlight_device *
+tm5p5_nt35596_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 255,
+ .max_brightness = 255,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &tm5p5_nt35596_bl_ops, &props);
+}
+
+static int tm5p5_nt35596_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tm5p5_nt35596 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd";
+ ctx->supplies[1].supply = "vddio";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_EOT_PACKET |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &tm5p5_nt35596_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ctx->panel.backlight = tm5p5_nt35596_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight)) {
+ ret = PTR_ERR(ctx->panel.backlight);
+ dev_err(dev, "Failed to create backlight: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tm5p5_nt35596_remove(struct mipi_dsi_device *dsi)
+{
+ struct tm5p5_nt35596 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev,
+ "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id tm5p5_nt35596_of_match[] = {
+ { .compatible = "asus,z00t-tm5p5-n35596" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, tm5p5_nt35596_of_match);
+
+static struct mipi_dsi_driver tm5p5_nt35596_driver = {
+ .probe = tm5p5_nt35596_probe,
+ .remove = tm5p5_nt35596_remove,
+ .driver = {
+ .name = "panel-tm5p5-nt35596",
+ .of_match_table = tm5p5_nt35596_of_match,
+ },
+};
+module_mipi_dsi_driver(tm5p5_nt35596_driver);
+
+MODULE_AUTHOR("Konrad Dybcio <konradybcio@gmail.com>");
+MODULE_DESCRIPTION("DRM driver for tm5p5 nt35596 1080p video mode dsi panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
index 74d58ee7d04c..7c27bd5e3486 100644
--- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c
+++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c
@@ -229,7 +229,7 @@ static int boe_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
DRM_DEV_ERROR(pinfo->base.dev, "failed to add mode %ux%u@%u\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
return -ENOMEM;
}
@@ -262,7 +262,6 @@ static const struct drm_display_mode default_display_mode = {
.vsync_start = 1920 + 10,
.vsync_end = 1920 + 10 + 14,
.vtotal = 1920 + 10 + 14 + 4,
- .vrefresh = 60,
};
/* 8 inch */
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index 48a164257d18..db5b866357f2 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -594,7 +594,6 @@ static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
.vsync_start = 1920 + 10,
.vsync_end = 1920 + 10 + 14,
.vtotal = 1920 + 10 + 14 + 4,
- .vrefresh = 60,
};
static const struct panel_desc boe_tv101wum_nl6_desc = {
@@ -622,7 +621,6 @@ static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,
.vtotal = 1920 + 16 + 4 + 16,
- .vrefresh = 60,
};
static const struct panel_desc auo_kd101n80_45na_desc = {
@@ -650,7 +648,6 @@ static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
.vsync_start = 1920 + 20,
.vsync_end = 1920 + 20 + 4,
.vtotal = 1920 + 20 + 4 + 10,
- .vrefresh = 60,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
@@ -678,7 +675,6 @@ static const struct drm_display_mode auo_b101uan08_3_default_mode = {
.vsync_start = 1920 + 34,
.vsync_end = 1920 + 34 + 2,
.vtotal = 1920 + 34 + 2 + 24,
- .vrefresh = 60,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
@@ -696,6 +692,33 @@ static const struct panel_desc auo_b101uan08_3_desc = {
.init_cmds = auo_b101uan08_3_init_cmd,
};
+static const struct drm_display_mode boe_tv105wum_nw0_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv105wum_nw0_desc = {
+ .modes = &boe_tv105wum_nw0_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 141,
+ .height_mm = 226,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
static int boe_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
@@ -706,7 +729,7 @@ static int boe_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
return -ENOMEM;
}
@@ -834,6 +857,9 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "auo,b101uan08.3",
.data = &auo_b101uan08_3_desc
},
+ { .compatible = "boe,tv105wum-nw0",
+ .data = &boe_tv105wum_nw0_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
index 711ded453c44..2338d22e23b1 100644
--- a/drivers/gpu/drm/panel/panel-elida-kd35t133.c
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -197,7 +197,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 480 + 2,
.vsync_end = 480 + 2 + 1,
.vtotal = 480 + 2 + 1 + 2,
- .vrefresh = 60,
.clock = 17000,
.width_mm = 42,
.height_mm = 82,
@@ -213,7 +212,7 @@ static int kd35t133_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
index fddbfddf6566..54610651ecdb 100644
--- a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -392,7 +392,6 @@ static int k101_im2ba02_unprepare(struct drm_panel *panel)
static const struct drm_display_mode k101_im2ba02_default_mode = {
.clock = 70000,
- .vrefresh = 60,
.hdisplay = 800,
.hsync_start = 800 + 20,
@@ -420,7 +419,7 @@ static int k101_im2ba02_get_modes(struct drm_panel *panel,
DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
k101_im2ba02_default_mode.hdisplay,
k101_im2ba02_default_mode.vdisplay,
- k101_im2ba02_default_mode.vrefresh);
+ drm_mode_vrefresh(&k101_im2ba02_default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
index 95b789ab9d29..19a6274b10f5 100644
--- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -153,7 +153,6 @@ static const struct drm_display_mode feiyang_default_mode = {
.vsync_start = 600 + 12,
.vsync_end = 600 + 12 + 2,
.vtotal = 600 + 12 + 2 + 21,
- .vrefresh = 60,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
@@ -169,7 +168,7 @@ static int feiyang_get_modes(struct drm_panel *panel,
DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
feiyang_default_mode.hdisplay,
feiyang_default_mode.vdisplay,
- feiyang_default_mode.vrefresh);
+ drm_mode_vrefresh(&feiyang_default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index 09935520e606..67a64d1999f6 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -379,7 +379,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
"can't set up VCOM amplitude (%d)\n", ret);
return ret;
}
- };
+ }
if (ili->vcom_high != U8_MAX) {
ret = regmap_write(ili->regmap, ILI9322_VCOM_HIGH,
@@ -388,7 +388,7 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
dev_err(ili->dev, "can't set up VCOM high (%d)\n", ret);
return ret;
}
- };
+ }
/* Set up gamma correction */
for (i = 0; i < ARRAY_SIZE(ili->gamma); i++) {
@@ -549,7 +549,6 @@ static const struct drm_display_mode srgb_320x240_mode = {
.vsync_start = 240 + 4,
.vsync_end = 240 + 4 + 1,
.vtotal = 262,
- .vrefresh = 60,
.flags = 0,
};
@@ -563,7 +562,6 @@ static const struct drm_display_mode srgb_360x240_mode = {
.vsync_start = 240 + 21,
.vsync_end = 240 + 21 + 1,
.vtotal = 262,
- .vrefresh = 60,
.flags = 0,
};
@@ -578,7 +576,6 @@ static const struct drm_display_mode prgb_320x240_mode = {
.vsync_start = 240 + 4,
.vsync_end = 240 + 4 + 1,
.vtotal = 262,
- .vrefresh = 60,
.flags = 0,
};
@@ -593,7 +590,6 @@ static const struct drm_display_mode yuv_640x320_mode = {
.vsync_start = 320 + 4,
.vsync_end = 320 + 4 + 1,
.vtotal = 320 + 4 + 1 + 18,
- .vrefresh = 60,
.flags = 0,
};
@@ -607,7 +603,6 @@ static const struct drm_display_mode yuv_720x360_mode = {
.vsync_start = 360 + 4,
.vsync_end = 360 + 4 + 1,
.vtotal = 360 + 4 + 1 + 18,
- .vrefresh = 60,
.flags = 0,
};
@@ -622,7 +617,6 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = {
.vsync_start = 480 + 4,
.vsync_end = 480 + 4 + 1,
.vtotal = 500,
- .vrefresh = 60,
.flags = 0,
};
@@ -637,7 +631,6 @@ static const struct drm_display_mode itu_r_bt_656_720_mode = {
.vsync_start = 480 + 4,
.vsync_end = 480 + 4 + 1,
.vtotal = 500,
- .vrefresh = 60,
.flags = 0,
};
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index f54077c216a3..3ed8635a6fbd 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -370,7 +370,6 @@ static int ili9881c_unprepare(struct drm_panel *panel)
static const struct drm_display_mode bananapi_default_mode = {
.clock = 62000,
- .vrefresh = 60,
.hdisplay = 720,
.hsync_start = 720 + 10,
@@ -394,7 +393,7 @@ static int ili9881c_get_modes(struct drm_panel *panel,
dev_err(&ctx->dsi->dev, "failed to add mode %ux%ux@%u\n",
bananapi_default_mode.hdisplay,
bananapi_default_mode.vdisplay,
- bananapi_default_mode.vrefresh);
+ drm_mode_vrefresh(&bananapi_default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
index 7419f1f0acee..fdf030f4cf92 100644
--- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c
+++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c
@@ -223,7 +223,6 @@ static const struct drm_display_mode innolux_p079zca_mode = {
.vsync_start = 1024 + 20,
.vsync_end = 1024 + 20 + 4,
.vtotal = 1024 + 20 + 4 + 20,
- .vrefresh = 60,
};
static const struct panel_desc innolux_p079zca_panel_desc = {
@@ -257,7 +256,6 @@ static const struct drm_display_mode innolux_p097pfg_mode = {
.vsync_start = 2048 + 100,
.vsync_end = 2048 + 100 + 2,
.vtotal = 2048 + 100 + 2 + 18,
- .vrefresh = 60,
};
/*
@@ -401,7 +399,7 @@ static int innolux_panel_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ m->hdisplay, m->vdisplay, drm_mode_vrefresh(m));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
index 4bfd8c877c8e..1e3fd6633981 100644
--- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
+++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c
@@ -296,7 +296,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1920 + 3,
.vsync_end = 1920 + 3 + 5,
.vtotal = 1920 + 3 + 5 + 6,
- .vrefresh = 60,
.flags = 0,
};
@@ -311,7 +310,7 @@ static int jdi_panel_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
index bac1a2a06c92..0d397af23afe 100644
--- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
+++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c
@@ -318,7 +318,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 2048 + 95,
.vsync_end = 2048 + 95 + 2,
.vtotal = 2048 + 95 + 2 + 23,
- .vrefresh = 60,
};
static int kingdisplay_panel_get_modes(struct drm_panel *panel,
@@ -330,7 +329,7 @@ static int kingdisplay_panel_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
new file mode 100644
index 000000000000..5a7a31c8513e
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk050h3146w.c
@@ -0,0 +1,691 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct ltk050h3146w_cmd {
+ char cmd;
+ char data;
+};
+
+struct ltk050h3146w;
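+/* Per-variant description: display mode plus the matching init sequence */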
+struct ltk050h3146w_desc {
+ const struct drm_display_mode *mode;
+ int (*init)(struct ltk050h3146w *ctx);
+};
+
+struct ltk050h3146w {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vci;
+ struct regulator *iovcc;
+ const struct ltk050h3146w_desc *panel_desc;
+ bool prepared;
+};
+
+static const struct ltk050h3146w_cmd page1_cmds[] = {
+ { 0x22, 0x0A }, /* BGR SS GS */
+ { 0x31, 0x00 }, /* column inversion */
+ { 0x53, 0xA2 }, /* VCOM1 */
+ { 0x55, 0xA2 }, /* VCOM2 */
+ { 0x50, 0x81 }, /* VREG1OUT=5V */
+ { 0x51, 0x85 }, /* VREG2OUT=-5V */
+ { 0x62, 0x0D }, /* EQT Time setting */
+/*
+ * The vendor init selected page 1 here _again_
+ * Is this supposed to be page 2?
+ */
+ { 0xA0, 0x00 },
+ { 0xA1, 0x1A },
+ { 0xA2, 0x28 },
+ { 0xA3, 0x13 },
+ { 0xA4, 0x16 },
+ { 0xA5, 0x29 },
+ { 0xA6, 0x1D },
+ { 0xA7, 0x1E },
+ { 0xA8, 0x84 },
+ { 0xA9, 0x1C },
+ { 0xAA, 0x28 },
+ { 0xAB, 0x75 },
+ { 0xAC, 0x1A },
+ { 0xAD, 0x19 },
+ { 0xAE, 0x4D },
+ { 0xAF, 0x22 },
+ { 0xB0, 0x28 },
+ { 0xB1, 0x54 },
+ { 0xB2, 0x66 },
+ { 0xB3, 0x39 },
+ { 0xC0, 0x00 },
+ { 0xC1, 0x1A },
+ { 0xC2, 0x28 },
+ { 0xC3, 0x13 },
+ { 0xC4, 0x16 },
+ { 0xC5, 0x29 },
+ { 0xC6, 0x1D },
+ { 0xC7, 0x1E },
+ { 0xC8, 0x84 },
+ { 0xC9, 0x1C },
+ { 0xCA, 0x28 },
+ { 0xCB, 0x75 },
+ { 0xCC, 0x1A },
+ { 0xCD, 0x19 },
+ { 0xCE, 0x4D },
+ { 0xCF, 0x22 },
+ { 0xD0, 0x28 },
+ { 0xD1, 0x54 },
+ { 0xD2, 0x66 },
+ { 0xD3, 0x39 },
+};
+
+static const struct ltk050h3146w_cmd page3_cmds[] = {
+ { 0x01, 0x00 },
+ { 0x02, 0x00 },
+ { 0x03, 0x73 },
+ { 0x04, 0x00 },
+ { 0x05, 0x00 },
+ { 0x06, 0x0a },
+ { 0x07, 0x00 },
+ { 0x08, 0x00 },
+ { 0x09, 0x01 },
+ { 0x0a, 0x00 },
+ { 0x0b, 0x00 },
+ { 0x0c, 0x01 },
+ { 0x0d, 0x00 },
+ { 0x0e, 0x00 },
+ { 0x0f, 0x1d },
+ { 0x10, 0x1d },
+ { 0x11, 0x00 },
+ { 0x12, 0x00 },
+ { 0x13, 0x00 },
+ { 0x14, 0x00 },
+ { 0x15, 0x00 },
+ { 0x16, 0x00 },
+ { 0x17, 0x00 },
+ { 0x18, 0x00 },
+ { 0x19, 0x00 },
+ { 0x1a, 0x00 },
+ { 0x1b, 0x00 },
+ { 0x1c, 0x00 },
+ { 0x1d, 0x00 },
+ { 0x1e, 0x40 },
+ { 0x1f, 0x80 },
+ { 0x20, 0x06 },
+ { 0x21, 0x02 },
+ { 0x22, 0x00 },
+ { 0x23, 0x00 },
+ { 0x24, 0x00 },
+ { 0x25, 0x00 },
+ { 0x26, 0x00 },
+ { 0x27, 0x00 },
+ { 0x28, 0x33 },
+ { 0x29, 0x03 },
+ { 0x2a, 0x00 },
+ { 0x2b, 0x00 },
+ { 0x2c, 0x00 },
+ { 0x2d, 0x00 },
+ { 0x2e, 0x00 },
+ { 0x2f, 0x00 },
+ { 0x30, 0x00 },
+ { 0x31, 0x00 },
+ { 0x32, 0x00 },
+ { 0x33, 0x00 },
+ { 0x34, 0x04 },
+ { 0x35, 0x00 },
+ { 0x36, 0x00 },
+ { 0x37, 0x00 },
+ { 0x38, 0x3C },
+ { 0x39, 0x35 },
+ { 0x3A, 0x01 },
+ { 0x3B, 0x40 },
+ { 0x3C, 0x00 },
+ { 0x3D, 0x01 },
+ { 0x3E, 0x00 },
+ { 0x3F, 0x00 },
+ { 0x40, 0x00 },
+ { 0x41, 0x88 },
+ { 0x42, 0x00 },
+ { 0x43, 0x00 },
+ { 0x44, 0x1F },
+ { 0x50, 0x01 },
+ { 0x51, 0x23 },
+ { 0x52, 0x45 },
+ { 0x53, 0x67 },
+ { 0x54, 0x89 },
+ { 0x55, 0xab },
+ { 0x56, 0x01 },
+ { 0x57, 0x23 },
+ { 0x58, 0x45 },
+ { 0x59, 0x67 },
+ { 0x5a, 0x89 },
+ { 0x5b, 0xab },
+ { 0x5c, 0xcd },
+ { 0x5d, 0xef },
+ { 0x5e, 0x11 },
+ { 0x5f, 0x01 },
+ { 0x60, 0x00 },
+ { 0x61, 0x15 },
+ { 0x62, 0x14 },
+ { 0x63, 0x0E },
+ { 0x64, 0x0F },
+ { 0x65, 0x0C },
+ { 0x66, 0x0D },
+ { 0x67, 0x06 },
+ { 0x68, 0x02 },
+ { 0x69, 0x07 },
+ { 0x6a, 0x02 },
+ { 0x6b, 0x02 },
+ { 0x6c, 0x02 },
+ { 0x6d, 0x02 },
+ { 0x6e, 0x02 },
+ { 0x6f, 0x02 },
+ { 0x70, 0x02 },
+ { 0x71, 0x02 },
+ { 0x72, 0x02 },
+ { 0x73, 0x02 },
+ { 0x74, 0x02 },
+ { 0x75, 0x01 },
+ { 0x76, 0x00 },
+ { 0x77, 0x14 },
+ { 0x78, 0x15 },
+ { 0x79, 0x0E },
+ { 0x7a, 0x0F },
+ { 0x7b, 0x0C },
+ { 0x7c, 0x0D },
+ { 0x7d, 0x06 },
+ { 0x7e, 0x02 },
+ { 0x7f, 0x07 },
+ { 0x80, 0x02 },
+ { 0x81, 0x02 },
+ { 0x82, 0x02 },
+ { 0x83, 0x02 },
+ { 0x84, 0x02 },
+ { 0x85, 0x02 },
+ { 0x86, 0x02 },
+ { 0x87, 0x02 },
+ { 0x88, 0x02 },
+ { 0x89, 0x02 },
+ { 0x8A, 0x02 },
+};
+
+static const struct ltk050h3146w_cmd page4_cmds[] = {
+ { 0x70, 0x00 },
+ { 0x71, 0x00 },
+ { 0x82, 0x0F }, /* VGH_MOD clamp level=15v */
+ { 0x84, 0x0F }, /* VGH clamp level 15V */
+ { 0x85, 0x0D }, /* VGL clamp level (-10V) */
+ { 0x32, 0xAC },
+ { 0x8C, 0x80 },
+ { 0x3C, 0xF5 },
+ { 0xB5, 0x07 }, /* GAMMA OP */
+ { 0x31, 0x45 }, /* SOURCE OP */
+ { 0x3A, 0x24 }, /* PS_EN OFF */
+ { 0x88, 0x33 }, /* LVD */
+};
+
+static inline
+struct ltk050h3146w *panel_to_ltk050h3146w(struct drm_panel *panel)
+{
+ return container_of(panel, struct ltk050h3146w, panel);
+}
+
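+/*
+ * Send a DCS command with payload and return the error code from the
+ * enclosing function on failure.
+ */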
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int ltk050h3146w_init_sequence(struct ltk050h3146w *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, 0xdf, 0x93, 0x65, 0xf8);
+ dsi_dcs_write_seq(dsi, 0xb0, 0x01, 0x03, 0x02, 0x00, 0x64, 0x06,
+ 0x01);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x00, 0xb5);
+ dsi_dcs_write_seq(dsi, 0xb3, 0x00, 0xb5);
+ dsi_dcs_write_seq(dsi, 0xb7, 0x00, 0xbf, 0x00, 0x00, 0xbf, 0x00);
+
+ dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xc4, 0x23, 0x07);
+ dsi_dcs_write_seq(dsi, 0xbb, 0x02, 0x01, 0x24, 0x00, 0x28, 0x0f,
+ 0x28, 0x04, 0xcc, 0xcc, 0xcc);
+ dsi_dcs_write_seq(dsi, 0xbc, 0x0f, 0x04);
+ dsi_dcs_write_seq(dsi, 0xbe, 0x1e, 0xf2);
+ dsi_dcs_write_seq(dsi, 0xc0, 0x26, 0x03);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x00, 0x12);
+ dsi_dcs_write_seq(dsi, 0xc3, 0x04, 0x02, 0x02, 0x76, 0x01, 0x80,
+ 0x80);
+ dsi_dcs_write_seq(dsi, 0xc4, 0x24, 0x80, 0xb4, 0x81, 0x12, 0x0f,
+ 0x16, 0x00, 0x00);
+ dsi_dcs_write_seq(dsi, 0xc8, 0x7f, 0x72, 0x67, 0x5d, 0x5d, 0x50,
+ 0x56, 0x41, 0x59, 0x57, 0x55, 0x70, 0x5b, 0x5f,
+ 0x4f, 0x47, 0x38, 0x23, 0x08, 0x7f, 0x72, 0x67,
+ 0x5d, 0x5d, 0x50, 0x56, 0x41, 0x59, 0x57, 0x55,
+ 0x70, 0x5b, 0x5f, 0x4f, 0x47, 0x38, 0x23, 0x08);
+ dsi_dcs_write_seq(dsi, 0xd0, 0x1e, 0x1f, 0x57, 0x58, 0x48, 0x4a,
+ 0x44, 0x46, 0x40, 0x1f, 0x42, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd1, 0x1e, 0x1f, 0x57, 0x58, 0x49, 0x4b,
+ 0x45, 0x47, 0x41, 0x1f, 0x43, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd2, 0x1f, 0x1e, 0x17, 0x18, 0x07, 0x05,
+ 0x0b, 0x09, 0x03, 0x1f, 0x01, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd3, 0x1f, 0x1e, 0x17, 0x18, 0x06, 0x04,
+ 0x0a, 0x08, 0x02, 0x1f, 0x00, 0x1f, 0x1f, 0x1f,
+ 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f);
+ dsi_dcs_write_seq(dsi, 0xd4, 0x00, 0x00, 0x00, 0x0c, 0x06, 0x20,
+ 0x01, 0x02, 0x00, 0x60, 0x15, 0xb0, 0x30, 0x03,
+ 0x04, 0x00, 0x60, 0x72, 0x0a, 0x00, 0x60, 0x08);
+ dsi_dcs_write_seq(dsi, 0xd5, 0x00, 0x06, 0x06, 0x00, 0x30, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0xbc, 0x50, 0x00, 0x05,
+ 0x21, 0x00, 0x60);
+ dsi_dcs_write_seq(dsi, 0xdd, 0x2c, 0xa3, 0x00);
+ dsi_dcs_write_seq(dsi, 0xde, 0x02);
+ dsi_dcs_write_seq(dsi, 0xb2, 0x32, 0x1c);
+ dsi_dcs_write_seq(dsi, 0xb7, 0x3b, 0x70, 0x00, 0x04);
+ dsi_dcs_write_seq(dsi, 0xc1, 0x11);
+ dsi_dcs_write_seq(dsi, 0xbb, 0x21, 0x22, 0x23, 0x24, 0x36, 0x37);
+ dsi_dcs_write_seq(dsi, 0xc2, 0x20, 0x38, 0x1e, 0x84);
+ dsi_dcs_write_seq(dsi, 0xde, 0x00);
+
+ ret = mipi_dsi_dcs_set_tear_on(dsi, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(60);
+
+ return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 42,
+ .hsync_end = 720 + 42 + 8,
+ .htotal = 720 + 42 + 8 + 42,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 12,
+ .vsync_end = 1280 + 12 + 4,
+ .vtotal = 1280 + 12 + 4 + 18,
+ .clock = 64018,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_data = {
+ .mode = &ltk050h3146w_mode,
+ .init = ltk050h3146w_init_sequence,
+};
+
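+/*
+ * Command 0xff with the 0x98/0x81 magic selects a register page for
+ * subsequent writes; judging by the magic bytes this is Ilitek-style
+ * paging, though the vendor docs do not say.
+ */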
+static int ltk050h3146w_a2_select_page(struct ltk050h3146w *ctx, int page)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ u8 d[3] = { 0x98, 0x81, page };
+
+ return mipi_dsi_dcs_write(dsi, 0xff, d, ARRAY_SIZE(d));
+}
+
+static int ltk050h3146w_a2_write_page(struct ltk050h3146w *ctx, int page,
+ const struct ltk050h3146w_cmd *cmds,
+ int num)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int i, ret;
+
+ ret = ltk050h3146w_a2_select_page(ctx, page);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to select page %d: %d\n",
+ page, ret);
+ return ret;
+ }
+
+ for (i = 0; i < num; i++) {
+ ret = mipi_dsi_generic_write(dsi, &cmds[i],
+ sizeof(struct ltk050h3146w_cmd));
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to write page %d init cmds: %d\n",
+ page, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ltk050h3146w_a2_init_sequence(struct ltk050h3146w *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor without much
+ * documentation.
+ */
+ ret = ltk050h3146w_a2_write_page(ctx, 3, page3_cmds,
+ ARRAY_SIZE(page3_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_write_page(ctx, 4, page4_cmds,
+ ARRAY_SIZE(page4_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_write_page(ctx, 1, page1_cmds,
+ ARRAY_SIZE(page1_cmds));
+ if (ret < 0)
+ return ret;
+
+ ret = ltk050h3146w_a2_select_page(ctx, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to select page 0: %d\n", ret);
+ return ret;
+ }
+
+ /* vendor code called this without param, where there should be one */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set tear on: %d\n",
+ ret);
+ return ret;
+ }
+
+ msleep(60);
+
+ return 0;
+}
+
+static const struct drm_display_mode ltk050h3146w_a2_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 42,
+ .hsync_end = 720 + 42 + 10,
+ .htotal = 720 + 42 + 10 + 60,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 18,
+ .vsync_end = 1280 + 18 + 4,
+ .vtotal = 1280 + 18 + 4 + 12,
+ .clock = 65595,
+ .width_mm = 62,
+ .height_mm = 110,
+};
+
+static const struct ltk050h3146w_desc ltk050h3146w_a2_data = {
+ .mode = &ltk050h3146w_a2_mode,
+ .init = ltk050h3146w_a2_init_sequence,
+};
+
+static int ltk050h3146w_unprepare(struct drm_panel *panel)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vci);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int ltk050h3146w_prepare(struct drm_panel *panel)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vci);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vci supply: %d\n", ret);
+ return ret;
+ }
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vci;
+ }
+
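+ /* ~5 ms reset pulse, then 20 ms settle before the init sequence */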
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+
+ ret = ctx->panel_desc->init(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ /* T9: 120ms */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vci:
+ regulator_disable(ctx->vci);
+ return ret;
+}
+
+static int ltk050h3146w_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct ltk050h3146w *ctx = panel_to_ltk050h3146w(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, ctx->panel_desc->mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs ltk050h3146w_funcs = {
+ .unprepare = ltk050h3146w_unprepare,
+ .prepare = ltk050h3146w_prepare,
+ .get_modes = ltk050h3146w_get_modes,
+};
+
+static int ltk050h3146w_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct ltk050h3146w *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->panel_desc = of_device_get_match_data(dev);
+ if (!ctx->panel_desc)
+ return -EINVAL;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vci = devm_regulator_get(dev, "vci");
+ if (IS_ERR(ctx->vci)) {
+ ret = PTR_ERR(ctx->vci);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vci regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &ltk050h3146w_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ltk050h3146w_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int ltk050h3146w_remove(struct mipi_dsi_device *dsi)
+{
+ struct ltk050h3146w *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ltk050h3146w_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id ltk050h3146w_of_match[] = {
+ {
+ .compatible = "leadtek,ltk050h3146w",
+ .data = &ltk050h3146w_data,
+ },
+ {
+ .compatible = "leadtek,ltk050h3146w-a2",
+ .data = &ltk050h3146w_a2_data,
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, ltk050h3146w_of_match);
+
+static struct mipi_dsi_driver ltk050h3146w_driver = {
+ .driver = {
+ .name = "panel-leadtek-ltk050h3146w",
+ .of_match_table = ltk050h3146w_of_match,
+ },
+ .probe = ltk050h3146w_probe,
+ .remove = ltk050h3146w_remove,
+ .shutdown = ltk050h3146w_shutdown,
+};
+module_mipi_dsi_driver(ltk050h3146w_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <heiko.stuebner@theobroma-systems.com>");
+MODULE_DESCRIPTION("DRM driver for Leadtek LTK050H3146W MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
index 76ecf2de9c44..0f6a248c47a5 100644
--- a/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
+++ b/drivers/gpu/drm/panel/panel-leadtek-ltk500hd1829.c
@@ -376,8 +376,7 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1280 + 30,
.vsync_end = 1280 + 30 + 4,
.vtotal = 1280 + 30 + 4 + 12,
- .vrefresh = 60,
- .clock = 41600,
+ .clock = 69217,
.width_mm = 62,
.height_mm = 110,
};
@@ -392,7 +391,7 @@ static int ltk500hd1829_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_DEV_ERROR(ctx->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
index e90efeaba4ad..14456b9cd5c0 100644
--- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c
+++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c
@@ -134,7 +134,6 @@ static const struct drm_display_mode lb035q02_mode = {
.vsync_start = 240 + 4,
.vsync_end = 240 + 4 + 2,
.vtotal = 240 + 4 + 2 + 18,
- .vrefresh = 60,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 70,
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index 5907f2503755..aedc485d0727 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -206,7 +206,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 800 + 15,
.vsync_end = 800 + 15 + 15,
.vtotal = 800 + 15 + 15 + 15,
- .vrefresh = 60,
};
static int lg4573_get_modes(struct drm_panel *panel,
@@ -218,7 +217,7 @@ static int lg4573_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
index c4f83f6384e1..f894971c1c7c 100644
--- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
@@ -116,7 +116,6 @@ static const struct drm_display_mode nl8048_mode = {
.vsync_start = 480 + 3,
.vsync_end = 480 + 3 + 1,
.vtotal = 480 + 3 + 1 + 4,
- .vrefresh = 60,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 89,
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
index 4a8fa908a2cf..e98d54df00e7 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -1028,7 +1028,6 @@ static const struct nt35510_config nt35510_hydis_hva40wv1 = {
.vsync_start = 800 + 2, /* VFP = 2 */
.vsync_end = 800 + 2 + 0, /* VSync = 0 */
.vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
- .vrefresh = 60, /* Calculated */
.flags = 0,
},
/* 0x09: AVDD = 5.6V */
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
index a470810f7dbe..79be3dc4e817 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c
@@ -49,7 +49,8 @@ enum nt39016_regs {
#define NT39016_SYSTEM_STANDBY BIT(1)
struct nt39016_panel_info {
- struct drm_display_mode display_mode;
+ const struct drm_display_mode *display_modes;
+ unsigned int num_modes;
u16 width_mm, height_mm;
u32 bus_format, bus_flags;
};
@@ -212,15 +213,22 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
struct nt39016 *panel = to_nt39016(drm_panel);
const struct nt39016_panel_info *panel_info = panel->panel_info;
struct drm_display_mode *mode;
+ unsigned int i;
- mode = drm_mode_duplicate(connector->dev, &panel_info->display_mode);
- if (!mode)
- return -ENOMEM;
+ for (i = 0; i < panel_info->num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel_info->display_modes[i]);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
- drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER;
+ if (panel_info->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
+ drm_mode_probed_add(connector, mode);
+ }
connector->display_info.bpc = 8;
connector->display_info.width_mm = panel_info->width_mm;
@@ -230,7 +238,7 @@ static int nt39016_get_modes(struct drm_panel *drm_panel,
&panel_info->bus_format, 1);
connector->display_info.bus_flags = panel_info->bus_flags;
- return 1;
+ return panel_info->num_modes;
}
static const struct drm_panel_funcs nt39016_funcs = {
@@ -316,8 +324,8 @@ static int nt39016_remove(struct spi_device *spi)
return 0;
}
-static const struct nt39016_panel_info kd035g6_info = {
- .display_mode = {
+static const struct drm_display_mode kd035g6_display_modes[] = {
+ { /* 60 Hz */
.clock = 6000,
.hdisplay = 320,
.hsync_start = 320 + 10,
@@ -327,9 +335,25 @@ static const struct nt39016_panel_info kd035g6_info = {
.vsync_start = 240 + 5,
.vsync_end = 240 + 5 + 1,
.vtotal = 240 + 5 + 1 + 4,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
+ { /* 50 Hz */
+ .clock = 5400,
+ .hdisplay = 320,
+ .hsync_start = 320 + 42,
+ .hsync_end = 320 + 42 + 50,
+ .htotal = 320 + 42 + 50 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 5,
+ .vsync_end = 240 + 5 + 1,
+ .vtotal = 240 + 5 + 1 + 4,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct nt39016_panel_info kd035g6_info = {
+ .display_modes = kd035g6_display_modes,
+ .num_modes = ARRAY_SIZE(kd035g6_display_modes),
.width_mm = 71,
.height_mm = 53,
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
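
As a sanity check, the new 50 Hz entry agrees with drm_mode_vrefresh():

	vrefresh = clock * 1000 / (htotal * vtotal)
	         = 5,400,000 / ((320 + 42 + 50 + 20) * (240 + 5 + 1 + 4))
	         = 5,400,000 / (432 * 250)
	         = 50 Hz

Note also that the get_modes() loop above only sets DRM_MODE_TYPE_PREFERRED when a single mode is exposed; with both entries present, mode selection is left to userspace.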
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 09deb99981a4..ecd76b5391d3 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -170,7 +170,6 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel,
lcd_mode->vpw;
mode->vtotal = lcd_mode->vactive + lcd_mode->vfp +
lcd_mode->vpw + lcd_mode->vbp;
- mode->vrefresh = lcd_mode->refresh;
/* Always make the first mode preferred */
if (i == 0)
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index bb0c992171e8..895ee3d1371e 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -81,7 +81,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 800 + 15,
.vsync_end = 800 + 15 + 10,
.vtotal = 800 + 15 + 10 + 14,
- .vrefresh = 50,
.flags = 0,
.width_mm = 52,
.height_mm = 86,
@@ -358,7 +357,7 @@ static int otm8009a_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
index 3a0229d60095..11b3d01aca56 100644
--- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
+++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c
@@ -102,7 +102,6 @@ static const struct drm_display_mode default_mode_osd101t2587 = {
.vsync_start = 1200 + 24,
.vsync_end = 1200 + 24 + 6,
.vtotal = 1200 + 24 + 6 + 48,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -117,7 +116,7 @@ static int osd101t2587_panel_get_modes(struct drm_panel *panel,
dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
osd101t2587->default_mode->hdisplay,
osd101t2587->default_mode->vdisplay,
- osd101t2587->default_mode->vrefresh);
+ drm_mode_vrefresh(osd101t2587->default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
index 69693451462e..627dfcf8adb4 100644
--- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
+++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c
@@ -149,7 +149,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1200 + 24,
.vsync_end = 1200 + 24 + 6,
.vtotal = 1200 + 24 + 6 + 48,
- .vrefresh = 60,
};
static int wuxga_nt_panel_get_modes(struct drm_panel *panel,
@@ -161,7 +160,7 @@ static int wuxga_nt_panel_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
index 8f078b7dd89e..e50ee26474cf 100644
--- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
+++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c
@@ -209,7 +209,6 @@ static const struct drm_display_mode rpi_touchscreen_modes[] = {
.vsync_start = 480 + 7,
.vsync_end = 480 + 7 + 2,
.vtotal = 480 + 7 + 2 + 21,
- .vrefresh = 60,
},
};
@@ -322,7 +321,8 @@ static int rpi_touchscreen_get_modes(struct drm_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ m->hdisplay, m->vdisplay,
+ drm_mode_vrefresh(m));
continue;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
index 313637d53d28..d001c52e0ca9 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c
@@ -218,7 +218,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1920 + 10,
.vsync_end = 1920 + 10 + 2,
.vtotal = 1920 + 10 + 2 + 4,
- .vrefresh = 60,
.width_mm = 68,
.height_mm = 121,
.flags = DRM_MODE_FLAG_NHSYNC |
@@ -445,7 +444,7 @@ static int rad_panel_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_DEV_ERROR(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index e8982948e0ea..81ae8be62d15 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -92,7 +92,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1280 + 12,
.vsync_end = 1280 + 12 + 4,
.vtotal = 1280 + 12 + 4 + 12,
- .vrefresh = 50,
.flags = 0,
.width_mm = 68,
.height_mm = 122,
@@ -339,7 +338,7 @@ static int rm68200_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
index 38ff742bc120..da4e373291f9 100644
--- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -223,7 +223,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1440 + 20,
.vsync_end = 1440 + 20 + 4,
.vtotal = 1440 + 20 + 4 + 12,
- .vrefresh = 60,
.clock = 75276,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 65,
@@ -240,7 +239,7 @@ static int jh057n_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
@@ -360,7 +359,7 @@ static int jh057n_probe(struct mipi_dsi_device *dsi)
DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh,
+ drm_mode_vrefresh(&default_mode),
mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
jh057n_debugfs_init(ctx);
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
index ef18559e237e..a7b0b3e39e1a 100644
--- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -103,7 +103,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 600 + 12,
.vsync_end = 600 + 12 + 10,
.vtotal = 600 + 12 + 10 + 13,
- .vrefresh = 60,
.width_mm = 154,
.height_mm = 85,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 2150043dcf6b..f02645d396ac 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -37,12 +37,6 @@ static const struct drm_display_mode samsung_s6d16d0_mode = {
.vsync_start = 480 + 1,
.vsync_end = 480 + 1 + 1,
.vtotal = 480 + 1 + 1 + 1,
- /*
- * This depends on the clocking HS vs LP rate, this value
- * is calculated as:
- * vrefresh = (clock * 1000) / (htotal*vtotal)
- */
- .vrefresh = 816,
.width_mm = 84,
.height_mm = 48,
};
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
index 36ebd5a4ac7b..80ef122e7466 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -617,7 +617,6 @@ static const struct drm_display_mode s6e3ha2_mode = {
.vsync_start = 2560 + 1,
.vsync_end = 2560 + 1 + 1,
.vtotal = 2560 + 1 + 1 + 15,
- .vrefresh = 60,
.flags = 0,
};
@@ -636,7 +635,6 @@ static const struct drm_display_mode s6e3hf2_mode = {
.vsync_start = 2560 + 1,
.vsync_end = 2560 + 1 + 1,
.vtotal = 2560 + 1 + 1 + 15,
- .vrefresh = 60,
.flags = 0,
};
@@ -655,7 +653,7 @@ static int s6e3ha2_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
ctx->desc->mode->hdisplay, ctx->desc->mode->vdisplay,
- ctx->desc->mode->vrefresh);
+ drm_mode_vrefresh(ctx->desc->mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
index a3570e0a90a8..1247656d73bf 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c
@@ -52,7 +52,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 320 + 150,
.vsync_end = 320 + 150 + 1,
.vtotal = 320 + 150 + 1 + 2,
- .vrefresh = 30,
.flags = 0,
};
@@ -409,7 +408,7 @@ static int s6e63j0x03_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index a5f76eb4fa25..64421347bfd4 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -117,7 +117,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 800 + 28,
.vsync_end = 800 + 28 + 2,
.vtotal = 800 + 28 + 2 + 1,
- .vrefresh = 60,
.width_mm = 53,
.height_mm = 89,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
@@ -371,7 +370,7 @@ static int s6e63m0_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_ERROR("failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
index 9d843fcc3a22..485eabecfcc9 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -177,7 +177,6 @@ static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
.vsync_start = 960 + 14,
.vsync_end = 960 + 14 + 2,
.vtotal = 960 + 14 + 2 + 8,
- .vrefresh = 60,
.width_mm = 56,
.height_mm = 100,
};
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 40fcbbbacb2c..e417dc4921c2 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -92,7 +92,8 @@ static int seiko_panel_get_fixed_modes(struct seiko_panel *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ m->hdisplay, m->vdisplay,
+ drm_mode_vrefresh(m));
continue;
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
index b5d1977221a7..f07324b705b3 100644
--- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -269,7 +269,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1600 + 4,
.vsync_end = 1600 + 4 + 8,
.vtotal = 1600 + 4 + 8 + 32,
- .vrefresh = 60,
};
static int sharp_panel_get_modes(struct drm_panel *panel,
@@ -281,7 +280,7 @@ static int sharp_panel_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
index 1cf3f02435c1..d7bf13b9e1d6 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c
@@ -93,7 +93,6 @@ static const struct drm_display_mode ls037v7dw01_mode = {
.vsync_start = 640 + 1,
.vsync_end = 640 + 1 + 1,
.vtotal = 640 + 1 + 1 + 1,
- .vrefresh = 58,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 56,
diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
index ce586c6d70c7..b2e58935529c 100644
--- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
+++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c
@@ -201,7 +201,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 960 + 3,
.vsync_end = 960 + 3 + 15,
.vtotal = 960 + 3 + 15 + 1,
- .vrefresh = 60,
};
static int sharp_nt_panel_get_modes(struct drm_panel *panel,
@@ -213,7 +212,7 @@ static int sharp_nt_panel_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3ad828eaefe1..6764ac630e22 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,6 +23,7 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -108,6 +109,7 @@ struct panel_simple {
struct i2c_adapter *ddc;
struct gpio_desc *enable_gpio;
+ struct gpio_desc *hpd_gpio;
struct drm_display_mode override_mode;
};
@@ -161,7 +163,8 @@ static unsigned int panel_simple_get_display_modes(struct panel_simple *panel,
mode = drm_mode_duplicate(connector->dev, m);
if (!mode) {
dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ m->hdisplay, m->vdisplay,
+ drm_mode_vrefresh(m));
continue;
}
@@ -259,11 +262,37 @@ static int panel_simple_unprepare(struct drm_panel *panel)
return 0;
}
+static int panel_simple_get_hpd_gpio(struct device *dev,
+ struct panel_simple *p, bool from_probe)
+{
+ int err;
+
+ p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(p->hpd_gpio)) {
+ err = PTR_ERR(p->hpd_gpio);
+
+ /*
+ * If we're called from probe, we don't treat -EPROBE_DEFER as an
+ * error: the error code is left in "hpd_gpio" and the lookup is
+ * retried the next time the GPIO is actually used. This allows
+ * for circular dependencies, where the component providing the
+ * hpd GPIO needs the panel to initialize before it can probe.
+ */
+ if (err != -EPROBE_DEFER || !from_probe) {
+ dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int panel_simple_prepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
unsigned int delay;
int err;
+ int hpd_asserted;
if (p->prepared)
return 0;
@@ -282,6 +311,26 @@ static int panel_simple_prepare(struct drm_panel *panel)
if (delay)
msleep(delay);
+ if (p->hpd_gpio) {
+ if (IS_ERR(p->hpd_gpio)) {
+ err = panel_simple_get_hpd_gpio(panel->dev, p, false);
+ if (err)
+ return err;
+ }
+
+ err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+ hpd_asserted, hpd_asserted,
+ 1000, 2000000);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+
+ if (err) {
+ dev_err(panel->dev,
+ "error waiting for hpd GPIO: %d\n", err);
+ return err;
+ }
+ }
+
p->prepared = true;
return 0;
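
readx_poll_timeout() (hence the new <linux/iopoll.h> include) samples gpiod_get_value_cansleep() on the HPD line roughly every 1 ms for up to 2 s, returning -ETIMEDOUT if the condition never becomes true. Conceptually it behaves like the loop below (a sketch, not the exact macro expansion):

	ktime_t timeout = ktime_add_us(ktime_get(), 2000000);

	for (;;) {
		hpd_asserted = gpiod_get_value_cansleep(p->hpd_gpio);
		if (hpd_asserted)	/* condition met, or a negative errno */
			break;
		if (ktime_compare(ktime_get(), timeout) > 0) {
			err = -ETIMEDOUT;
			break;
		}
		usleep_range(500, 1000);	/* ~the 1000 us sleep argument */
	}

The "if (hpd_asserted < 0)" test afterwards then promotes a GPIO read error into the returned error code.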
@@ -462,6 +511,11 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->desc = desc;
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+ if (!panel->no_hpd) {
+ err = panel_simple_get_hpd_gpio(dev, panel, true);
+ if (err)
+ return err;
+ }
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -549,7 +603,6 @@ static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 10,
.vtotal = 272 + 2 + 10 + 2,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -574,7 +627,6 @@ static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
.vsync_start = 480 + 2,
.vsync_end = 480 + 2 + 45,
.vtotal = 480 + 2 + 45 + 0,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -625,7 +677,6 @@ static const struct drm_display_mode auo_b101aw03_mode = {
.vsync_start = 600 + 16,
.vsync_end = 600 + 16 + 6,
.vtotal = 600 + 16 + 6 + 16,
- .vrefresh = 60,
};
static const struct panel_desc auo_b101aw03 = {
@@ -670,7 +721,6 @@ static const struct drm_display_mode auo_b101xtn01_mode = {
.vsync_start = 768 + 14,
.vsync_end = 768 + 14 + 42,
.vtotal = 768 + 14 + 42,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -694,7 +744,6 @@ static const struct drm_display_mode auo_b116xak01_mode = {
.vsync_start = 768 + 4,
.vsync_end = 768 + 4 + 6,
.vtotal = 768 + 4 + 6 + 15,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -723,7 +772,6 @@ static const struct drm_display_mode auo_b116xw03_mode = {
.vsync_start = 768 + 10,
.vsync_end = 768 + 10 + 12,
.vtotal = 768 + 10 + 12 + 6,
- .vrefresh = 60,
};
static const struct panel_desc auo_b116xw03 = {
@@ -746,7 +794,6 @@ static const struct drm_display_mode auo_b133xtn01_mode = {
.vsync_start = 768 + 3,
.vsync_end = 768 + 3 + 6,
.vtotal = 768 + 3 + 6 + 13,
- .vrefresh = 60,
};
static const struct panel_desc auo_b133xtn01 = {
@@ -769,7 +816,6 @@ static const struct drm_display_mode auo_b133htn01_mode = {
.vsync_start = 1080 + 25,
.vsync_end = 1080 + 25 + 10,
.vtotal = 1080 + 25 + 10 + 10,
- .vrefresh = 60,
};
static const struct panel_desc auo_b133htn01 = {
@@ -825,7 +871,6 @@ static const struct drm_display_mode auo_g101evn010_mode = {
.vsync_start = 800 + 8,
.vsync_end = 800 + 8 + 2,
.vtotal = 800 + 8 + 2 + 6,
- .vrefresh = 60,
};
static const struct panel_desc auo_g101evn010 = {
@@ -836,7 +881,8 @@ static const struct panel_desc auo_g101evn010 = {
.width = 216,
.height = 135,
},
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
};
static const struct drm_display_mode auo_g104sn02_mode = {
@@ -849,7 +895,6 @@ static const struct drm_display_mode auo_g104sn02_mode = {
.vsync_start = 600 + 10,
.vsync_end = 600 + 10 + 35,
.vtotal = 600 + 10 + 35 + 2,
- .vrefresh = 60,
};
static const struct panel_desc auo_g104sn02 = {
@@ -862,6 +907,30 @@ static const struct panel_desc auo_g104sn02 = {
},
};
+static const struct drm_display_mode auo_g121ean01_mode = {
+ .clock = 66700,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 58,
+ .hsync_end = 1280 + 58 + 8,
+ .htotal = 1280 + 58 + 8 + 70,
+ .vdisplay = 800,
+ .vsync_start = 800 + 6,
+ .vsync_end = 800 + 6 + 4,
+ .vtotal = 800 + 6 + 4 + 10,
+};
+
+static const struct panel_desc auo_g121ean01 = {
+ .modes = &auo_g121ean01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 261,
+ .height = 163,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g133han01_timings = {
.pixelclock = { 134000000, 141200000, 149000000 },
.hactive = { 1920, 1920, 1920 },
@@ -892,6 +961,30 @@ static const struct panel_desc auo_g133han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct drm_display_mode auo_g156xtn01_mode = {
+ .clock = 76000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 33,
+ .hsync_end = 1366 + 33 + 67,
+ .htotal = 1560,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 4,
+ .vtotal = 806,
+};
+
+static const struct panel_desc auo_g156xtn01 = {
+ .modes = &auo_g156xtn01_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 344,
+ .height = 194,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing auo_g185han01_timings = {
.pixelclock = { 120000000, 144000000, 175000000 },
.hactive = { 1920, 1920, 1920 },
@@ -922,6 +1015,36 @@ static const struct panel_desc auo_g185han01 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
+static const struct display_timing auo_g190ean01_timings = {
+ .pixelclock = { 90000000, 108000000, 135000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 126, 184, 1266 },
+ .hback_porch = { 84, 122, 844 },
+ .hsync_len = { 70, 102, 704 },
+ .vactive = { 1024, 1024, 1024 },
+ .vfront_porch = { 4, 26, 76 },
+ .vback_porch = { 2, 8, 25 },
+ .vsync_len = { 2, 8, 25 },
+};
+
+static const struct panel_desc auo_g190ean01 = {
+ .timings = &auo_g190ean01_timings,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 376,
+ .height = 301,
+ },
+ .delay = {
+ .prepare = 50,
+ .enable = 200,
+ .disable = 110,
+ .unprepare = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
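
Unlike the fixed drm_display_mode entries, a display_timing describes each parameter as a { min, typ, max } triplet, and panel-simple advertises a mode built from the typical values. For auo_g190ean01 the typical timings work out to the nominal 60 Hz:

	htotal(typ) = 1280 + 184 + 122 + 102 = 1688
	vtotal(typ) = 1024 + 26 + 8 + 8 = 1066
	108,000,000 / (1688 * 1066) ~= 60.0 Hz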
+
static const struct display_timing auo_p320hvn03_timings = {
.pixelclock = { 106000000, 148500000, 164000000 },
.hactive = { 1920, 1920, 1920 },
@@ -961,7 +1084,6 @@ static const struct drm_display_mode auo_t215hvn01_mode = {
.vsync_start = 1080 + 4,
.vsync_end = 1080 + 4 + 5,
.vtotal = 1080 + 4 + 5 + 36,
- .vrefresh = 60,
};
static const struct panel_desc auo_t215hvn01 = {
@@ -988,7 +1110,6 @@ static const struct drm_display_mode avic_tm070ddh03_mode = {
.vsync_start = 600 + 17,
.vsync_end = 600 + 17 + 1,
.vtotal = 600 + 17 + 1 + 17,
- .vrefresh = 60,
};
static const struct panel_desc avic_tm070ddh03 = {
@@ -1038,7 +1159,6 @@ static const struct drm_display_mode boe_hv070wsa_mode = {
.vsync_start = 600 + 10,
.vsync_end = 600 + 10 + 10,
.vtotal = 600 + 10 + 10 + 10,
- .vrefresh = 60,
};
static const struct panel_desc boe_hv070wsa = {
@@ -1061,7 +1181,6 @@ static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
.vsync_start = 800 + 3,
.vsync_end = 800 + 3 + 5,
.vtotal = 800 + 3 + 5 + 24,
- .vrefresh = 60,
},
{
.clock = 57500,
@@ -1073,7 +1192,6 @@ static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
.vsync_start = 800 + 3,
.vsync_end = 800 + 3 + 5,
.vtotal = 800 + 3 + 5 + 24,
- .vrefresh = 48,
},
};
@@ -1092,6 +1210,37 @@ static const struct panel_desc boe_nv101wxmn51 = {
},
};
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+ .clock = 147840,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 6,
+ .vtotal = 1080 + 3 + 6 + 31,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+ .modes = &boe_nv133fhm_n61_modes,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
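
The hpd_absent_delay entry covers boards where the panel's HPD signal is not wired to anything: with the "no-hpd" DT property set, panel-simple substitutes a fixed worst-case wait for the HPD handshake. A sketch of the prepare-path handling (close to what panel-simple actually does):

	delay = p->desc->delay.prepare;
	if (p->no_hpd)
		delay += p->desc->delay.hpd_absent_delay;
	if (delay)
		msleep(delay);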
static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
{
.clock = 148500,
@@ -1103,7 +1252,6 @@ static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
.vsync_start = 1080 + 3,
.vsync_end = 1080 + 3 + 5,
.vtotal = 1125,
- .vrefresh = 60,
},
};
@@ -1134,7 +1282,6 @@ static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.vsync_start = 272 + 8,
.vsync_end = 272 + 8 + 8,
.vtotal = 272 + 8 + 8 + 8,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1159,7 +1306,6 @@ static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
.vsync_start = 480 + 29,
.vsync_end = 480 + 29 + 13,
.vtotal = 480 + 29 + 13 + 3,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1183,7 +1329,6 @@ static const struct drm_display_mode chunghwa_claa070wp03xg_mode = {
.vsync_start = 1280 + 1,
.vsync_end = 1280 + 1 + 7,
.vtotal = 1280 + 1 + 7 + 15,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -1207,7 +1352,6 @@ static const struct drm_display_mode chunghwa_claa101wa01a_mode = {
.vsync_start = 768 + 4,
.vsync_end = 768 + 4 + 4,
.vtotal = 768 + 4 + 4 + 4,
- .vrefresh = 60,
};
static const struct panel_desc chunghwa_claa101wa01a = {
@@ -1230,7 +1374,6 @@ static const struct drm_display_mode chunghwa_claa101wb01_mode = {
.vsync_start = 768 + 16,
.vsync_end = 768 + 16 + 8,
.vtotal = 768 + 16 + 8 + 16,
- .vrefresh = 60,
};
static const struct panel_desc chunghwa_claa101wb01 = {
@@ -1253,7 +1396,6 @@ static const struct drm_display_mode dataimage_scf0700c48ggu18_mode = {
.vsync_start = 480 + 10,
.vsync_end = 480 + 10 + 2,
.vtotal = 480 + 10 + 2 + 33,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -1340,7 +1482,6 @@ static const struct drm_display_mode edt_et035012dm6_mode = {
.vsync_start = 240 + 4,
.vsync_end = 240 + 4 + 4,
.vtotal = 240 + 4 + 4 + 14,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -1372,7 +1513,6 @@ static const struct drm_display_mode edt_etm043080dh6gp_mode = {
.vsync_start = 288 + 2,
.vsync_end = 288 + 2 + 4,
.vtotal = 288 + 2 + 4 + 10,
- .vrefresh = 60,
};
static const struct panel_desc edt_etm043080dh6gp = {
@@ -1397,7 +1537,6 @@ static const struct drm_display_mode edt_etm0430g0dh6_mode = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 10,
.vtotal = 272 + 2 + 10 + 2,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1421,7 +1560,6 @@ static const struct drm_display_mode edt_et057090dhu_mode = {
.vsync_start = 480 + 10,
.vsync_end = 480 + 10 + 3,
.vtotal = 480 + 10 + 3 + 32,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -1447,7 +1585,6 @@ static const struct drm_display_mode edt_etm0700g0dh6_mode = {
.vsync_start = 480 + 10,
.vsync_end = 480 + 10 + 2,
.vtotal = 480 + 10 + 2 + 33,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1512,7 +1649,6 @@ static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
.vsync_start = 480 + 37,
.vsync_end = 480 + 37 + 2,
.vtotal = 480 + 37 + 2 + 8,
- .vrefresh = 60,
};
static const struct panel_desc foxlink_fl500wvr00_a0t = {
@@ -1536,7 +1672,6 @@ static const struct drm_display_mode frida_frd350h54004_mode = {
.vsync_start = 240 + 2,
.vsync_end = 240 + 2 + 6,
.vtotal = 240 + 2 + 6 + 2,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -1563,7 +1698,6 @@ static const struct drm_display_mode friendlyarm_hd702e_mode = {
.vsync_start = 1280 + 4,
.vsync_end = 1280 + 4 + 8,
.vtotal = 1280 + 4 + 8 + 4,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -1586,7 +1720,6 @@ static const struct drm_display_mode giantplus_gpg482739qs5_mode = {
.vsync_start = 272 + 8,
.vsync_end = 272 + 8 + 1,
.vtotal = 272 + 8 + 1 + 8,
- .vrefresh = 60,
};
static const struct panel_desc giantplus_gpg482739qs5 = {
@@ -1690,7 +1823,6 @@ static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
.vsync_start = 480 + 16,
.vsync_end = 480 + 16 + 13,
.vtotal = 480 + 16 + 13 + 16,
- .vrefresh = 60,
};
static const struct panel_desc hitachi_tx23d38vm0caa = {
@@ -1717,7 +1849,6 @@ static const struct drm_display_mode innolux_at043tn24_mode = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 10,
.vtotal = 272 + 2 + 10 + 2,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1743,7 +1874,6 @@ static const struct drm_display_mode innolux_at070tn92_mode = {
.vsync_start = 480 + 22,
.vsync_end = 480 + 22 + 10,
.vtotal = 480 + 22 + 23 + 10,
- .vrefresh = 60,
};
static const struct panel_desc innolux_at070tn92 = {
@@ -1854,7 +1984,6 @@ static const struct drm_display_mode innolux_g121x1_l03_mode = {
.vsync_start = 768 + 38,
.vsync_end = 768 + 38 + 1,
.vtotal = 768 + 38 + 1 + 0,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -1916,7 +2045,6 @@ static const struct drm_display_mode innolux_n156bge_l21_mode = {
.vsync_start = 768 + 2,
.vsync_end = 768 + 2 + 6,
.vtotal = 768 + 2 + 6 + 12,
- .vrefresh = 60,
};
static const struct panel_desc innolux_n156bge_l21 = {
@@ -1939,7 +2067,6 @@ static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
.vsync_start = 1440 + 3,
.vsync_end = 1440 + 3 + 10,
.vtotal = 1440 + 3 + 10 + 27,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -1967,7 +2094,6 @@ static const struct drm_display_mode innolux_zj070na_01p_mode = {
.vsync_start = 600 + 16,
.vsync_end = 600 + 16 + 4,
.vtotal = 600 + 16 + 4 + 16,
- .vrefresh = 60,
};
static const struct panel_desc innolux_zj070na_01p = {
@@ -1980,6 +2106,36 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+ .clock = 138778,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 24,
+ .hsync_end = 1920 + 24 + 48,
+ .htotal = 1920 + 24 + 48 + 88,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 12,
+ .vtotal = 1080 + 3 + 12 + 17,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+ .modes = &ivo_m133nwf4_r0_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent_delay = 200,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2003,6 +2159,37 @@ static const struct panel_desc koe_tx14d24vm1bpa = {
},
};
+static const struct display_timing koe_tx26d202vm0bwa_timing = {
+ .pixelclock = { 151820000, 156720000, 159780000 },
+ .hactive = { 1920, 1920, 1920 },
+ .hfront_porch = { 105, 130, 142 },
+ .hback_porch = { 45, 70, 82 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 1200, 1200, 1200},
+ .vfront_porch = { 3, 5, 10 },
+ .vback_porch = { 2, 5, 10 },
+ .vsync_len = { 5, 5, 5 },
+};
+
+static const struct panel_desc koe_tx26d202vm0bwa = {
+ .timings = &koe_tx26d202vm0bwa_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 1000,
+ .enable = 1000,
+ .unprepare = 1000,
+ .disable = 1000,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct display_timing koe_tx31d200vm0baa_timing = {
.pixelclock = { 39600000, 43200000, 48000000 },
.hactive = { 1280, 1280, 1280 },
@@ -2063,7 +2250,6 @@ static const struct drm_display_mode lemaker_bl035_rgb_002_mode = {
.vsync_start = 240 + 4,
.vsync_end = 240 + 4 + 3,
.vtotal = 240 + 4 + 3 + 15,
- .vrefresh = 60,
};
static const struct panel_desc lemaker_bl035_rgb_002 = {
@@ -2087,7 +2273,6 @@ static const struct drm_display_mode lg_lb070wv8_mode = {
.vsync_start = 480 + 10,
.vsync_end = 480 + 10 + 25,
.vtotal = 480 + 10 + 25 + 10,
- .vrefresh = 60,
};
static const struct panel_desc lg_lb070wv8 = {
@@ -2112,7 +2297,6 @@ static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
.vsync_start = 2048 + 8,
.vsync_end = 2048 + 8 + 4,
.vtotal = 2048 + 8 + 4 + 8,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2135,7 +2319,6 @@ static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
.vsync_start = 1536 + 3,
.vsync_end = 1536 + 3 + 1,
.vtotal = 1536 + 3 + 1 + 9,
- .vrefresh = 60,
};
static const struct panel_desc lg_lp097qx1_spa1 = {
@@ -2157,7 +2340,6 @@ static const struct drm_display_mode lg_lp120up1_mode = {
.vsync_start = 1280 + 4,
.vsync_end = 1280 + 4 + 4,
.vtotal = 1280 + 4 + 4 + 12,
- .vrefresh = 60,
};
static const struct panel_desc lg_lp120up1 = {
@@ -2168,6 +2350,7 @@ static const struct panel_desc lg_lp120up1 = {
.width = 267,
.height = 183,
},
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
};
static const struct drm_display_mode lg_lp129qe_mode = {
@@ -2180,7 +2363,6 @@ static const struct drm_display_mode lg_lp129qe_mode = {
.vsync_start = 1700 + 3,
.vsync_end = 1700 + 3 + 10,
.vtotal = 1700 + 3 + 10 + 36,
- .vrefresh = 60,
};
static const struct panel_desc lg_lp129qe = {
@@ -2261,7 +2443,6 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.vsync_start = 480 + 0,
.vsync_end = 480 + 48 + 1,
.vtotal = 480 + 48 + 1 + 0,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -2276,7 +2457,6 @@ static const struct drm_display_mode logicpd_type_28_mode = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 11,
.vtotal = 272 + 2 + 11 + 3,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -2356,7 +2536,6 @@ static const struct drm_display_mode nec_nl4827hc19_05b_mode = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 4,
.vtotal = 272 + 2 + 4 + 2,
- .vrefresh = 74,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2382,7 +2561,6 @@ static const struct drm_display_mode netron_dy_e231732_mode = {
.vsync_start = 600 + 127,
.vsync_end = 600 + 127 + 20,
.vtotal = 600 + 127 + 20 + 3,
- .vrefresh = 60,
};
static const struct panel_desc netron_dy_e231732 = {
@@ -2406,7 +2584,6 @@ static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
.vsync_start = 1080 + 3,
.vsync_end = 1080 + 3 + 5,
.vtotal = 1080 + 3 + 5 + 23,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
}, {
.clock = 110920,
@@ -2418,7 +2595,6 @@ static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
.vsync_start = 1080 + 3,
.vsync_end = 1080 + 3 + 5,
.vtotal = 1080 + 3 + 5 + 23,
- .vrefresh = 48,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
}
};
@@ -2450,7 +2626,6 @@ static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 10,
.vtotal = 272 + 2 + 10 + 2,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2558,7 +2733,6 @@ static const struct drm_display_mode olimex_lcd_olinuxino_43ts_mode = {
.vsync_start = 272 + 8,
.vsync_end = 272 + 8 + 5,
.vtotal = 272 + 8 + 5 + 3,
- .vrefresh = 60,
};
static const struct panel_desc olimex_lcd_olinuxino_43ts = {
@@ -2586,7 +2760,6 @@ static const struct drm_display_mode ontat_yx700wv03_mode = {
.vsync_start = 483,
.vsync_end = 493,
.vtotal = 500,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2615,7 +2788,6 @@ static const struct drm_display_mode ortustech_com37h3m_mode = {
.vsync_start = 640 + 4,
.vsync_end = 640 + 4 + 2,
.vtotal = 640 + 4 + 2 + 4,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2642,7 +2814,6 @@ static const struct drm_display_mode ortustech_com43h4m85ulc_mode = {
.vsync_start = 800 + 3,
.vsync_end = 800 + 3 + 3,
.vtotal = 800 + 3 + 3 + 3,
- .vrefresh = 60,
};
static const struct panel_desc ortustech_com43h4m85ulc = {
@@ -2668,7 +2839,6 @@ static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
.vsync_start = 480 + 22,
.vsync_end = 480 + 22 + 13,
.vtotal = 480 + 22 + 13 + 10,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2696,7 +2866,6 @@ static const struct drm_display_mode pda_91_00156_a0_mode = {
.vsync_start = 480 + 1,
.vsync_end = 480 + 1 + 23,
.vtotal = 480 + 1 + 23 + 22,
- .vrefresh = 60,
};
static const struct panel_desc pda_91_00156_a0 = {
@@ -2720,7 +2889,6 @@ static const struct drm_display_mode qd43003c0_40_mode = {
.vsync_start = 272 + 4,
.vsync_end = 272 + 4 + 10,
.vtotal = 272 + 4 + 10 + 2,
- .vrefresh = 60,
};
static const struct panel_desc qd43003c0_40 = {
@@ -2774,7 +2942,6 @@ static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
.vsync_start = 800 + 2,
.vsync_end = 800 + 2 + 5,
.vtotal = 800 + 2 + 5 + 16,
- .vrefresh = 60,
};
static const struct panel_desc rocktech_rk101ii01d_ct = {
@@ -2803,7 +2970,6 @@ static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.vsync_start = 1600 + 2,
.vsync_end = 1600 + 2 + 5,
.vtotal = 1600 + 2 + 5 + 57,
- .vrefresh = 60,
};
static const struct panel_desc samsung_lsn122dl01_c01 = {
@@ -2825,7 +2991,6 @@ static const struct drm_display_mode samsung_ltn101nt05_mode = {
.vsync_start = 600 + 3,
.vsync_end = 600 + 3 + 6,
.vtotal = 600 + 3 + 6 + 61,
- .vrefresh = 60,
};
static const struct panel_desc samsung_ltn101nt05 = {
@@ -2848,7 +3013,6 @@ static const struct drm_display_mode samsung_ltn140at29_301_mode = {
.vsync_start = 768 + 2,
.vsync_end = 768 + 2 + 5,
.vtotal = 768 + 2 + 5 + 17,
- .vrefresh = 60,
};
static const struct panel_desc samsung_ltn140at29_301 = {
@@ -2895,7 +3059,6 @@ static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
.vsync_start = 1280 + 3,
.vsync_end = 1280 + 3 + 10,
.vtotal = 1280 + 3 + 10 + 57,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
};
@@ -2921,7 +3084,6 @@ static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
.vsync_start = 480 + 8,
.vsync_end = 480 + 8 + 2,
.vtotal = 480 + 8 + 2 + 35,
- .vrefresh = 60,
.flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
@@ -2948,7 +3110,6 @@ static const struct drm_display_mode sharp_lq035q7db03_mode = {
.vsync_start = 320 + 9,
.vsync_end = 320 + 9 + 1,
.vtotal = 320 + 9 + 1 + 7,
- .vrefresh = 60,
};
static const struct panel_desc sharp_lq035q7db03 = {
@@ -3052,7 +3213,6 @@ static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
.vsync_start = 480 + 1,
.vsync_end = 480 + 1 + 23,
.vtotal = 480 + 1 + 23 + 22,
- .vrefresh = 60,
};
static const struct panel_desc shelly_sca07010_bfn_lnn = {
@@ -3065,6 +3225,31 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode starry_kr070pe2t_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 209,
+ .hsync_end = 800 + 209 + 1,
+ .htotal = 800 + 209 + 1 + 45,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 1,
+ .vtotal = 480 + 22 + 1 + 22,
+};
+
+static const struct panel_desc starry_kr070pe2t = {
+ .modes = &starry_kr070pe2t_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode starry_kr122ea0sra_mode = {
.clock = 147000,
.hdisplay = 1920,
@@ -3075,7 +3260,6 @@ static const struct drm_display_mode starry_kr122ea0sra_mode = {
.vsync_start = 1200 + 15,
.vsync_end = 1200 + 15 + 2,
.vtotal = 1200 + 15 + 2 + 18,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -3103,7 +3287,6 @@ static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
.vsync_start = 480 + 13,
.vsync_end = 480 + 13 + 2,
.vtotal = 480 + 13 + 2 + 29,
- .vrefresh = 62,
};
static const struct panel_desc tfc_s9700rtwv43tr_01b = {
@@ -3179,7 +3362,6 @@ static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = {
.vsync_start = 240 + 3,
.vsync_end = 240 + 3 + 1,
.vtotal = 240 + 3 + 1 + 17,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
},
};
@@ -3207,7 +3389,6 @@ static const struct drm_display_mode ti_nspire_classic_lcd_mode[] = {
.vsync_start = 240 + 0,
.vsync_end = 240 + 0 + 1,
.vtotal = 240 + 0 + 1 + 0,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
};
@@ -3236,7 +3417,6 @@ static const struct drm_display_mode toshiba_lt089ac29000_mode = {
.vsync_start = 768 + 20,
.vsync_end = 768 + 20 + 7,
.vtotal = 768 + 20 + 7 + 3,
- .vrefresh = 60,
};
static const struct panel_desc toshiba_lt089ac29000 = {
@@ -3261,7 +3441,6 @@ static const struct drm_display_mode tpk_f07a_0102_mode = {
.vsync_start = 480 + 10,
.vsync_end = 480 + 10 + 2,
.vtotal = 480 + 10 + 2 + 33,
- .vrefresh = 60,
};
static const struct panel_desc tpk_f07a_0102 = {
@@ -3284,7 +3463,6 @@ static const struct drm_display_mode tpk_f10a_0102_mode = {
.vsync_start = 600 + 20,
.vsync_end = 600 + 20 + 5,
.vtotal = 600 + 20 + 5 + 25,
- .vrefresh = 60,
};
static const struct panel_desc tpk_f10a_0102 = {
@@ -3343,7 +3521,6 @@ static const struct drm_display_mode vl050_8048nt_c01_mode = {
.vsync_start = 480 + 22,
.vsync_end = 480 + 22 + 10,
.vtotal = 480 + 22 + 10 + 23,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -3369,7 +3546,6 @@ static const struct drm_display_mode winstar_wf35ltiacd_mode = {
.vsync_start = 240 + 4,
.vsync_end = 240 + 4 + 3,
.vtotal = 240 + 4 + 3 + 15,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -3395,7 +3571,6 @@ static const struct drm_display_mode arm_rtsm_mode[] = {
.vsync_start = 768 + 3,
.vsync_end = 768 + 3 + 6,
.vtotal = 768 + 3 + 6 + 29,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
},
};
@@ -3455,12 +3630,21 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,g104sn02",
.data = &auo_g104sn02,
}, {
+ .compatible = "auo,g121ean01",
+ .data = &auo_g121ean01,
+ }, {
.compatible = "auo,g133han01",
.data = &auo_g133han01,
}, {
+ .compatible = "auo,g156xtn01",
+ .data = &auo_g156xtn01,
+ }, {
.compatible = "auo,g185han01",
.data = &auo_g185han01,
}, {
+ .compatible = "auo,g190ean01",
+ .data = &auo_g190ean01,
+ }, {
.compatible = "auo,p320hvn03",
.data = &auo_p320hvn03,
}, {
@@ -3479,6 +3663,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,nv101wxmn51",
.data = &boe_nv101wxmn51,
}, {
+ .compatible = "boe,nv133fhm-n61",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n62",
+ .data = &boe_nv133fhm_n61,
+ }, {
.compatible = "boe,nv140fhmn49",
.data = &boe_nv140fhmn49,
}, {
@@ -3587,9 +3777,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
+ .compatible = "ivo,m133nwf4-r0",
+ .data = &ivo_m133nwf4_r0,
+ }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
+ .compatible = "koe,tx26d202vm0bwa",
+ .data = &koe_tx26d202vm0bwa,
+ }, {
.compatible = "koe,tx31d200vm0baa",
.data = &koe_tx31d200vm0baa,
}, {
@@ -3716,6 +3912,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "shelly,sca07010-bfn-lnn",
.data = &shelly_sca07010_bfn_lnn,
}, {
+ .compatible = "starry,kr070pe2t",
+ .data = &starry_kr070pe2t,
+ }, {
.compatible = "starry,kr122ea0sra",
.data = &starry_kr122ea0sra,
}, {
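
Each new compatible string simply binds a device-tree node to its panel_desc; at probe time the matched descriptor is looked up and handed to the common probe path. A sketch of the platform-driver side (close to panel-simple's actual code):

	static int panel_simple_platform_probe(struct platform_device *pdev)
	{
		const struct of_device_id *id;

		id = of_match_node(platform_of_match, pdev->dev.of_node);
		if (!id)
			return -ENODEV;

		return panel_simple_probe(&pdev->dev, id->data);
	}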
@@ -3825,7 +4024,6 @@ static const struct drm_display_mode auo_b080uan01_mode = {
.vsync_start = 1920 + 9,
.vsync_end = 1920 + 9 + 2,
.vtotal = 1920 + 9 + 2 + 8,
- .vrefresh = 60,
};
static const struct panel_desc_dsi auo_b080uan01 = {
@@ -3837,6 +4035,7 @@ static const struct panel_desc_dsi auo_b080uan01 = {
.width = 108,
.height = 272,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
@@ -3853,7 +4052,6 @@ static const struct drm_display_mode boe_tv080wum_nl0_mode = {
.vsync_start = 1920 + 21,
.vsync_end = 1920 + 21 + 3,
.vtotal = 1920 + 21 + 3 + 18,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -3865,6 +4063,7 @@ static const struct panel_desc_dsi boe_tv080wum_nl0 = {
.width = 107,
.height = 172,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_BURST |
@@ -3883,7 +4082,6 @@ static const struct drm_display_mode lg_ld070wx3_sl01_mode = {
.vsync_start = 1280 + 28,
.vsync_end = 1280 + 28 + 1,
.vtotal = 1280 + 28 + 1 + 14,
- .vrefresh = 60,
};
static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
@@ -3895,6 +4093,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
.width = 94,
.height = 151,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS,
.format = MIPI_DSI_FMT_RGB888,
@@ -3911,7 +4110,6 @@ static const struct drm_display_mode lg_lh500wx1_sd03_mode = {
.vsync_start = 1280 + 8,
.vsync_end = 1280 + 8 + 4,
.vtotal = 1280 + 8 + 4 + 12,
- .vrefresh = 60,
};
static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
@@ -3923,6 +4121,7 @@ static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
.width = 62,
.height = 110,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO,
.format = MIPI_DSI_FMT_RGB888,
@@ -3939,7 +4138,6 @@ static const struct drm_display_mode panasonic_vvx10f004b00_mode = {
.vsync_start = 1200 + 17,
.vsync_end = 1200 + 17 + 2,
.vtotal = 1200 + 17 + 2 + 16,
- .vrefresh = 60,
};
static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
@@ -3951,6 +4149,7 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.width = 217,
.height = 136,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
MIPI_DSI_CLOCK_NON_CONTINUOUS,
@@ -3968,7 +4167,6 @@ static const struct drm_display_mode lg_acx467akm_7_mode = {
.vsync_start = 1920 + 2,
.vsync_end = 1920 + 2 + 2,
.vtotal = 1920 + 2 + 2 + 2,
- .vrefresh = 60,
};
static const struct panel_desc_dsi lg_acx467akm_7 = {
@@ -3980,6 +4178,7 @@ static const struct panel_desc_dsi lg_acx467akm_7 = {
.width = 62,
.height = 110,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = 0,
.format = MIPI_DSI_FMT_RGB888,
@@ -3996,7 +4195,6 @@ static const struct drm_display_mode osd101t2045_53ts_mode = {
.vsync_start = 1200 + 16,
.vsync_end = 1200 + 16 + 2,
.vtotal = 1200 + 16 + 2 + 16,
- .vrefresh = 60,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
};
@@ -4009,6 +4207,7 @@ static const struct panel_desc_dsi osd101t2045_53ts = {
.width = 217,
.height = 136,
},
+ .connector_type = DRM_MODE_CONNECTOR_DSI,
},
.flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
index 4b4f2558e3b4..692041ae4eb6 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c
@@ -272,7 +272,7 @@ static int st7701_get_modes(struct drm_panel *panel,
DRM_DEV_ERROR(&st7701->dsi->dev,
"failed to add mode %ux%ux@%u\n",
desc_mode->hdisplay, desc_mode->vdisplay,
- desc_mode->vrefresh);
+ drm_mode_vrefresh(desc_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
index cc02c54c1b2e..3513ae40efa8 100644
--- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -165,7 +165,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 320 + 8,
.vsync_end = 320 + 8 + 4,
.vtotal = 320 + 8 + 4 + 4,
- .vrefresh = 60,
};
static int st7789v_get_modes(struct drm_panel *panel,
@@ -177,7 +176,7 @@ static int st7789v_get_modes(struct drm_panel *panel,
if (!mode) {
dev_err(panel->dev, "failed to add mode %ux%ux@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index c91e55b2d7a3..97a1b4790d3c 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -57,7 +57,6 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = {
.vsync_start = 864 + 14,
.vsync_end = 864 + 14 + 1,
.vtotal = 864 + 14 + 1 + 11,
- .vrefresh = 60,
.width_mm = 48,
.height_mm = 84,
.flags = DRM_MODE_FLAG_PVSYNC,
@@ -81,7 +80,6 @@ static const struct drm_display_mode sony_acx424akp_cmd_mode = {
* Some desired refresh rate; experiments at the maximum "pixel"
* clock speed (HS clock 420 MHz) yield around 117 Hz.
*/
- .vrefresh = 60,
.width_mm = 48,
.height_mm = 84,
};
diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
index 5c4b6f6e5c2d..fc6a7e451abe 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c
@@ -514,7 +514,6 @@ static const struct drm_display_mode acx565akm_mode = {
.vsync_start = 480 + 3,
.vsync_end = 480 + 3 + 3,
.vtotal = 480 + 3 + 3 + 4,
- .vrefresh = 57,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 77,
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index aeca15dfeb3c..58d683cc5215 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -281,7 +281,6 @@ static const struct drm_display_mode td028ttec1_mode = {
.vsync_start = 640 + 4,
.vsync_end = 640 + 4 + 2,
.vtotal = 640 + 4 + 2 + 2,
- .vrefresh = 66,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 43,
diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
index 75f1f1f1b6de..9b2a356c4d9a 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
@@ -339,7 +339,6 @@ static const struct drm_display_mode td043mtea1_mode = {
.vsync_start = 480 + 39,
.vsync_end = 480 + 39 + 1,
.vtotal = 480 + 39 + 1 + 34,
- .vrefresh = 60,
.type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 94,
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 8472d018c16f..c7a2f0ae5ba5 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -112,7 +112,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vsync_start = 480 + 10,
.vsync_end = 480 + 10 + 1,
.vtotal = 480 + 10 + 1 + 35,
- .vrefresh = 60,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
@@ -129,7 +128,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vsync_start = 480 + 18,
.vsync_end = 480 + 18 + 1,
.vtotal = 480 + 18 + 1 + 27,
- .vrefresh = 60,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
@@ -146,7 +144,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vsync_start = 272 + 2,
.vsync_end = 272 + 2 + 1,
.vtotal = 272 + 2 + 1 + 12,
- .vrefresh = 60,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
@@ -163,7 +160,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vsync_start = 640 + 4,
.vsync_end = 640 + 4 + 1,
.vtotal = 640 + 4 + 1 + 8,
- .vrefresh = 60,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
@@ -180,7 +176,6 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vsync_start = 240 + 2,
.vsync_end = 240 + 2 + 1,
.vtotal = 240 + 2 + 1 + 20,
- .vrefresh = 60,
},
.bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c
index 012ca62bf30e..9b9c167b8dc8 100644
--- a/drivers/gpu/drm/panel/panel-truly-nt35597.c
+++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c
@@ -490,9 +490,7 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx)
{
struct device *dev = ctx->dev;
int ret, i;
- const struct nt35597_config *config;
- config = ctx->config;
for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
ctx->supplies[i].supply = regulator_names[i];
@@ -536,7 +534,6 @@ static const struct drm_display_mode qcom_sdm845_mtp_2k_mode = {
.vsync_start = 2560 + 8,
.vsync_end = 2560 + 8 + 1,
.vtotal = 2560 + 8 + 1 + 7,
- .vrefresh = 60,
.flags = 0,
};
diff --git a/drivers/gpu/drm/panel/panel-visionox-rm69299.c b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
new file mode 100644
index 000000000000..a12976b497ce
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-visionox-rm69299.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct visionox_rm69299 {
+ struct drm_panel panel;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ struct mipi_dsi_device *dsi;
+ bool prepared;
+ bool enabled;
+};
+
+static inline struct visionox_rm69299 *panel_to_ctx(struct drm_panel *panel)
+{
+ return container_of(panel, struct visionox_rm69299, panel);
+}
+
+static int visionox_rm69299_power_on(struct visionox_rm69299 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The reset sequence of the Visionox panel requires the panel to
+ * be out of reset for 10ms, then held in reset for 10ms, and then
+ * released again.
+ */
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(10000, 20000);
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int visionox_rm69299_power_off(struct visionox_rm69299 *ctx)
+{
+ gpiod_set_value(ctx->reset_gpio, 0);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int visionox_rm69299_unprepare(struct drm_panel *panel)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ ctx->dsi->mode_flags = 0;
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "set_display_off cmd failed ret = %d\n", ret);
+
+ /* 120ms delay required here as per DCS spec */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "enter_sleep cmd failed ret = %d\n", ret);
+ }
+
+ ret = visionox_rm69299_power_off(ctx);
+
+ ctx->prepared = false;
+ return ret;
+}
+
+static int visionox_rm69299_prepare(struct drm_panel *panel)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = visionox_rm69299_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xfe, 0x00 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 0 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0xc2, 0x08 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 1 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x35, 0x00 }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 2 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write_buffer(ctx->dsi, (u8[]) { 0x51, 0xff }, 2);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "cmd set tx 3 failed, ret = %d\n", ret);
+ goto power_off;
+ }
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "exit_sleep_mode cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending exit sleep DCS command */
+ msleep(120);
+
+ ret = mipi_dsi_dcs_write(ctx->dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "set_display_on cmd failed ret = %d\n", ret);
+ goto power_off;
+ }
+
+ /* Per DSI spec wait 120ms after sending set_display_on DCS command */
+ msleep(120);
+
+ ctx->prepared = true;
+
+ return 0;
+
+power_off:
+ visionox_rm69299_power_off(ctx);
+ return ret;
+}
+
+static const struct drm_display_mode visionox_rm69299_1080x2248_60hz = {
+ .name = "1080x2248",
+ .clock = 158695,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 26,
+ .hsync_end = 1080 + 26 + 2,
+ .htotal = 1080 + 26 + 2 + 36,
+ .vdisplay = 2248,
+ .vsync_start = 2248 + 56,
+ .vsync_end = 2248 + 56 + 4,
+ .vtotal = 2248 + 56 + 4 + 4,
+ .flags = 0,
+};
+
+static int visionox_rm69299_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct visionox_rm69299 *ctx = panel_to_ctx(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->panel.dev,
+ "failed to create a new display mode\n");
+ return 0;
+ }
+
+ connector->display_info.width_mm = 74;
+ connector->display_info.height_mm = 131;
+ drm_mode_copy(mode, &visionox_rm69299_1080x2248_60hz);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs visionox_rm69299_drm_funcs = {
+ .unprepare = visionox_rm69299_unprepare,
+ .prepare = visionox_rm69299_prepare,
+ .get_modes = visionox_rm69299_get_modes,
+};
+
+static int visionox_rm69299_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct visionox_rm69299 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->panel.dev = dev;
+ ctx->dsi = dsi;
+
+ ctx->supplies[0].supply = "vdda";
+ ctx->supplies[1].supply = "vdd3p3";
+
+ ret = devm_regulator_bulk_get(ctx->panel.dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ ctx->reset_gpio = devm_gpiod_get(ctx->panel.dev,
+ "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ drm_panel_init(&ctx->panel, dev, &visionox_rm69299_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ drm_panel_add(&ctx->panel);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_LPM |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS;
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "dsi attach failed ret = %d\n", ret);
+ goto err_dsi_attach;
+ }
+
+ ret = regulator_set_load(ctx->supplies[0].consumer, 32000);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "regulator set load failed for vdda supply ret = %d\n",
+ ret);
+ goto err_set_load;
+ }
+
+ ret = regulator_set_load(ctx->supplies[1].consumer, 13200);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "regulator set load failed for vdd3p3 supply ret = %d\n",
+ ret);
+ goto err_set_load;
+ }
+
+ return 0;
+
+err_set_load:
+ mipi_dsi_detach(dsi);
+err_dsi_attach:
+ drm_panel_remove(&ctx->panel);
+ return ret;
+}
+
+static int visionox_rm69299_remove(struct mipi_dsi_device *dsi)
+{
+ struct visionox_rm69299 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(ctx->dsi);
+ mipi_dsi_device_unregister(ctx->dsi);
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id visionox_rm69299_of_match[] = {
+ { .compatible = "visionox,rm69299-1080p-display", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, visionox_rm69299_of_match);
+
+static struct mipi_dsi_driver visionox_rm69299_driver = {
+ .driver = {
+ .name = "panel-visionox-rm69299",
+ .of_match_table = visionox_rm69299_of_match,
+ },
+ .probe = visionox_rm69299_probe,
+ .remove = visionox_rm69299_remove,
+};
+module_mipi_dsi_driver(visionox_rm69299_driver);
+
+MODULE_DESCRIPTION("Visionox RM69299 DSI Panel Driver");
+MODULE_LICENSE("GPL v2");
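
The prepare hook above issues four fixed two-byte DCS writes with identical error handling. Panels with longer init sequences often fold this into a table-driven loop; a minimal sketch of that alternative (the rm69299_init table and rm69299_send_init() helper are hypothetical, not part of this driver):

	struct rm69299_cmd {
		u8 data[2];
	};

	/* hypothetical table mirroring the writes in visionox_rm69299_prepare() */
	static const struct rm69299_cmd rm69299_init[] = {
		{ { 0xfe, 0x00 } },	/* command page select */
		{ { 0xc2, 0x08 } },
		{ { 0x35, 0x00 } },	/* tearing effect on */
		{ { 0x51, 0xff } },	/* maximum brightness */
	};

	static int rm69299_send_init(struct mipi_dsi_device *dsi)
	{
		int i, ret;

		for (i = 0; i < ARRAY_SIZE(rm69299_init); i++) {
			ret = mipi_dsi_dcs_write_buffer(dsi, rm69299_init[i].data,
							sizeof(rm69299_init[i].data));
			if (ret < 0)
				return ret;
		}

		return 0;
	}
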
diff --git a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
index 1645aceab597..8a3b2f906e63 100644
--- a/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
+++ b/drivers/gpu/drm/panel/panel-xinpeng-xpp055c272.c
@@ -243,7 +243,6 @@ static const struct drm_display_mode default_mode = {
.vsync_start = 1280 + 22,
.vsync_end = 1280 + 22 + 4,
.vtotal = 1280 + 22 + 4 + 11,
- .vrefresh = 60,
.clock = 64000,
.width_mm = 68,
.height_mm = 121,
@@ -259,7 +258,7 @@ static int xpp055c272_get_modes(struct drm_panel *panel,
if (!mode) {
DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
default_mode.hdisplay, default_mode.vdisplay,
- default_mode.vrefresh);
+ drm_mode_vrefresh(&default_mode));
return -ENOMEM;
}
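
With the cached vrefresh field removed from struct drm_display_mode, the refresh rate is derived from the timings on demand via drm_mode_vrefresh(). Ignoring the interlace/doublescan adjustments the real helper also applies, the computation is roughly:

	/* mode->clock is the pixel clock in kHz, hence the factor of 1000 */
	static int approx_vrefresh(const struct drm_display_mode *mode)
	{
		unsigned int den = mode->htotal * mode->vtotal;

		return den ? DIV_ROUND_CLOSEST(mode->clock * 1000, den) : 0;
	}
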
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 8136babd3ba9..b172087eee6a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -101,7 +101,9 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
pfdev->comp->num_supplies,
pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(pfdev->dev, "failed to get regulators: %d\n",
+ ret);
return ret;
}
@@ -213,10 +215,8 @@ int panfrost_device_init(struct panfrost_device *pfdev)
}
err = panfrost_regulator_init(pfdev);
- if (err) {
- dev_err(pfdev->dev, "regulator init failed %d\n", err);
+ if (err)
goto err_out0;
- }
err = panfrost_reset_init(pfdev);
if (err) {
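
panfrost_regulator_init() now stays quiet on -EPROBE_DEFER: a deferred probe only means the supplies have not registered yet and probe will be retried, so only real failures deserve an error message. The pattern, sketched generically (later kernels add a dev_err_probe() helper that encapsulates exactly this):

	ret = devm_regulator_bulk_get(dev, num_supplies, supplies);
	if (ret < 0) {
		/* probe deferral is expected and retried, not an error */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "failed to get regulators: %d\n", ret);
		return ret;
	}

The duplicate "regulator init failed" message in panfrost_device_init() is dropped for the same reason.
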
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 882fecc33fdb..ada51df9a7a3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -99,7 +99,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
mapping = panfrost_gem_mapping_get(bo, priv);
if (!mapping) {
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
return -EINVAL;
}
@@ -317,7 +317,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
if (!ret)
ret = timeout ? -ETIMEDOUT : -EBUSY;
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return ret;
}
@@ -351,7 +351,7 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
out:
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return ret;
}
@@ -372,7 +372,7 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
bo = to_panfrost_bo(gem_obj);
mapping = panfrost_gem_mapping_get(bo, priv);
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
if (!mapping)
return -EINVAL;
@@ -438,7 +438,7 @@ out_unlock_mappings:
mutex_unlock(&bo->mappings.lock);
mutex_unlock(&pfdev->shrinker_lock);
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return ret;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 17b654e1eb94..33355dd302f1 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -46,7 +46,7 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
sg_free_table(&bo->sgts[i]);
}
}
- kfree(bo->sgts);
+ kvfree(bo->sgts);
}
drm_gem_shmem_free_object(obj);
@@ -93,7 +93,7 @@ static void panfrost_gem_mapping_release(struct kref *kref)
mapping = container_of(kref, struct panfrost_gem_mapping, refcount);
panfrost_gem_teardown_mapping(mapping);
- drm_gem_object_put_unlocked(&mapping->obj->base.base);
+ drm_gem_object_put(&mapping->obj->base.base);
kfree(mapping);
}
@@ -261,7 +261,7 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
*/
ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(&shmem->base);
+ drm_gem_object_put(&shmem->base);
if (ret)
return ERR_PTR(ret);
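
The kfree() to kvfree() change matters because bo->sgts is allocated with kvmalloc_array() (see the panfrost_mmu.c hunk below), which may fall back to vmalloc for large allocations; kfree() on a vmalloc-backed pointer is invalid. Allocation and release must use the matching kv* pair:

	/* may be kmalloc- or vmalloc-backed depending on size */
	sgts = kvmalloc_array(n, sizeof(*sgts), GFP_KERNEL | __GFP_ZERO);
	if (!sgts)
		return -ENOMEM;

	/* ... use sgts ... */

	kvfree(sgts);	/* correct for both backings; kfree() would not be */
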
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 7914b1570841..360146f6f3d9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -145,17 +145,17 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
u64 jc_head = job->jc;
int ret;
+ panfrost_devfreq_record_busy(pfdev);
+
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
return;
if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) {
- pm_runtime_put_sync_autosuspend(pfdev->dev);
return;
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -281,7 +281,7 @@ static void panfrost_job_cleanup(struct kref *ref)
if (job->bos) {
for (i = 0; i < job->bo_count; i++)
- drm_gem_object_put_unlocked(job->bos[i]);
+ drm_gem_object_put(job->bos[i]);
kvfree(job->bos);
}
@@ -410,12 +410,12 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
for (i = 0; i < NUM_JOB_SLOTS; i++) {
if (pfdev->jobs[i]) {
pm_runtime_put_noidle(pfdev->dev);
+ panfrost_devfreq_record_idle(pfdev);
pfdev->jobs[i] = NULL;
}
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index ed28aeba6d59..1a49e619aacf 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -486,7 +486,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
if (!pages) {
- kfree(bo->sgts);
+ kvfree(bo->sgts);
bo->sgts = NULL;
mutex_unlock(&bo->base.pages_lock);
ret = -ENOMEM;
@@ -538,7 +538,7 @@ err_map:
err_pages:
drm_gem_shmem_put_pages(&bo->base);
err_bo:
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 6913578d5aa7..ec4695cf3caf 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -156,7 +156,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
/* The BO ref is retained by the mapping. */
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put(&bo->base);
return 0;
@@ -167,7 +167,7 @@ err_put_mapping:
err_close_bo:
panfrost_gem_close(&bo->base, file_priv);
err_put_bo:
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put(&bo->base);
return ret;
}
diff --git a/drivers/gpu/drm/pl111/Makefile b/drivers/gpu/drm/pl111/Makefile
index 0c70f0e91d21..67d430d433e0 100644
--- a/drivers/gpu/drm/pl111/Makefile
+++ b/drivers/gpu/drm/pl111/Makefile
@@ -3,7 +3,6 @@ pl111_drm-y += pl111_display.o \
pl111_versatile.o \
pl111_drv.o
-pl111_drm-$(CONFIG_ARCH_VEXPRESS) += pl111_vexpress.o
pl111_drm-$(CONFIG_ARCH_NOMADIK) += pl111_nomadik.o
pl111_drm-$(CONFIG_DEBUG_FS) += pl111_debugfs.o
diff --git a/drivers/gpu/drm/pl111/pl111_debugfs.c b/drivers/gpu/drm/pl111/pl111_debugfs.c
index 3c8e82016854..26ca8cdf3e60 100644
--- a/drivers/gpu/drm/pl111/pl111_debugfs.c
+++ b/drivers/gpu/drm/pl111/pl111_debugfs.c
@@ -51,10 +51,10 @@ static const struct drm_info_list pl111_debugfs_list[] = {
{"regs", pl111_debugfs_regs, 0},
};
-int
+void
pl111_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(pl111_debugfs_list,
- ARRAY_SIZE(pl111_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(pl111_debugfs_list,
+ ARRAY_SIZE(pl111_debugfs_list),
+ minor->debugfs_root, minor);
}
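
drm_debugfs_create_files() no longer reports errors (debugfs failures are deliberately non-fatal), so the driver's debugfs_init hook loses its return value too. The hook is wired into struct drm_driver, whose debugfs_init member now has a void signature; sketched for this driver:

	static struct drm_driver pl111_drm_driver = {
		/* ... other members as before ... */
	#if defined(CONFIG_DEBUG_FS)
		.debugfs_init = pl111_debugfs_init,
	#endif
	};
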
diff --git a/drivers/gpu/drm/pl111/pl111_drm.h b/drivers/gpu/drm/pl111/pl111_drm.h
index 77d2da9a8a7c..ba399bcb792f 100644
--- a/drivers/gpu/drm/pl111/pl111_drm.h
+++ b/drivers/gpu/drm/pl111/pl111_drm.h
@@ -84,6 +84,6 @@ struct pl111_drm_dev_private {
int pl111_display_init(struct drm_device *dev);
irqreturn_t pl111_irq(int irq, void *data);
-int pl111_debugfs_init(struct drm_minor *minor);
+void pl111_debugfs_init(struct drm_minor *minor);
#endif /* _PL111_DRM_H_ */
diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c
index aa8aa8d9e405..da0c39dae874 100644
--- a/drivers/gpu/drm/pl111/pl111_drv.c
+++ b/drivers/gpu/drm/pl111/pl111_drv.c
@@ -90,10 +90,13 @@ static int pl111_modeset_init(struct drm_device *dev)
struct drm_panel *panel = NULL;
struct drm_bridge *bridge = NULL;
bool defer = false;
- int ret = 0;
+ int ret;
int i;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
mode_config = &dev->mode_config;
mode_config->funcs = &mode_config_funcs;
mode_config->min_width = 1;
@@ -154,7 +157,7 @@ static int pl111_modeset_init(struct drm_device *dev)
DRM_MODE_CONNECTOR_Unknown);
if (IS_ERR(bridge)) {
ret = PTR_ERR(bridge);
- goto out_config;
+ goto finish;
}
} else if (bridge) {
dev_info(dev->dev, "Using non-panel bridge\n");
@@ -197,8 +200,6 @@ static int pl111_modeset_init(struct drm_device *dev)
out_bridge:
if (panel)
drm_panel_bridge_remove(bridge);
-out_config:
- drm_mode_config_cleanup(dev);
finish:
return ret;
}
@@ -343,7 +344,6 @@ static int pl111_amba_remove(struct amba_device *amba_dev)
drm_dev_unregister(drm);
if (priv->panel)
drm_panel_bridge_remove(priv->bridge);
- drm_mode_config_cleanup(drm);
drm_dev_put(drm);
of_reserved_mem_device_release(dev);
@@ -444,6 +444,7 @@ static const struct amba_id pl111_id_table[] = {
},
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, pl111_id_table);
static struct amba_driver pl111_amba_driver __maybe_unused = {
.drv = {
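
drmm_mode_config_init() registers drm_mode_config_cleanup() as a managed action tied to the drm_device's lifetime, which is why both explicit cleanup calls (the out_config error path above and the call in pl111_amba_remove()) disappear. The resulting shape of an init function using managed mode-config, as a sketch:

	static int example_modeset_init(struct drm_device *dev)
	{
		int ret;

		/* cleanup now runs automatically when the device is
		 * released; no drm_mode_config_cleanup() in error paths
		 * or in remove()
		 */
		ret = drmm_mode_config_init(dev);
		if (ret)
			return ret;

		dev->mode_config.funcs = &mode_config_funcs;
		return 0;
	}

(mode_config_funcs stands in for the driver's own funcs table, as above.)
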
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 4f325c410b5d..64f01a4e6767 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -8,9 +8,9 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
+#include <linux/vexpress.h>
#include "pl111_versatile.h"
-#include "pl111_vexpress.h"
#include "pl111_drm.h"
static struct regmap *versatile_syscon_map;
@@ -361,13 +361,110 @@ static const struct pl111_variant_data pl111_vexpress = {
.broken_clockdivider = true,
};
+#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
+#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
+
+static int pl111_vexpress_clcd_init(struct device *dev, struct device_node *np,
+ struct pl111_drm_dev_private *priv)
+{
+ struct platform_device *pdev;
+ struct device_node *root;
+ struct device_node *child;
+ struct device_node *ct_clcd = NULL;
+ struct regmap *map;
+ bool has_coretile_clcd = false;
+ bool has_coretile_hdlcd = false;
+ bool mux_motherboard = true;
+ u32 val;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_VEXPRESS_CONFIG))
+ return -ENODEV;
+
+ /*
+ * Check if we have a CLCD or HDLCD on the core tile by checking if a
+ * CLCD or HDLCD is available in the root of the device tree.
+ */
+ root = of_find_node_by_path("/");
+ if (!root)
+ return -EINVAL;
+
+ for_each_available_child_of_node(root, child) {
+ if (of_device_is_compatible(child, "arm,pl111")) {
+ has_coretile_clcd = true;
+ ct_clcd = child;
+ break;
+ }
+ if (of_device_is_compatible(child, "arm,hdlcd")) {
+ has_coretile_hdlcd = true;
+ of_node_put(child);
+ break;
+ }
+ }
+
+ of_node_put(root);
+
+ /*
+ * If there is a coretile HDLCD and it has a driver,
+ * do not mux the CLCD on the motherboard to the DVI.
+ */
+ if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
+ mux_motherboard = false;
+
+ /*
+ * On the Vexpress CA9 we let the CLCD on the coretile
+ * take precedence, so also in this case do not mux the
+ * motherboard to the DVI.
+ */
+ if (has_coretile_clcd)
+ mux_motherboard = false;
+
+ if (mux_motherboard) {
+ dev_info(dev, "DVI muxed to motherboard CLCD\n");
+ val = VEXPRESS_FPGAMUX_MOTHERBOARD;
+ } else if (ct_clcd == dev->of_node) {
+ dev_info(dev,
+ "DVI muxed to daughterboard 1 (core tile) CLCD\n");
+ val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
+ } else {
+ dev_info(dev, "core tile graphics present\n");
+ dev_info(dev, "this device will be deactivated\n");
+ return -ENODEV;
+ }
+
+ /* Call into deep Vexpress configuration API */
+ pdev = of_find_device_by_node(np);
+ if (!pdev) {
+ dev_err(dev, "can't find the sysreg device, deferring\n");
+ return -EPROBE_DEFER;
+ }
+
+ map = devm_regmap_init_vexpress_config(&pdev->dev);
+ if (IS_ERR(map)) {
+ platform_device_put(pdev);
+ return PTR_ERR(map);
+ }
+
+ ret = regmap_write(map, 0, val);
+ platform_device_put(pdev);
+ if (ret) {
+ dev_err(dev, "error setting DVI muxmode\n");
+ return -ENODEV;
+ }
+
+ priv->variant = &pl111_vexpress;
+ dev_info(dev, "initializing Versatile Express PL111\n");
+
+ return 0;
+}
+
int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
{
const struct of_device_id *clcd_id;
enum versatile_clcd versatile_clcd_type;
struct device_node *np;
struct regmap *map;
- int ret;
np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match,
&clcd_id);
@@ -378,6 +475,15 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /* Versatile Express special handling */
+ if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
+ int ret = pl111_vexpress_clcd_init(dev, np, priv);
+ of_node_put(np);
+ if (ret)
+ dev_err(dev, "Versatile Express init failed - %d\n", ret);
+ return ret;
+ }
+
/*
* On the Integrator, check if we should use the IM-PD1 instead,
* if we find it, it will take precedence. This is on the Integrator/AP
@@ -390,37 +496,8 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
}
- /* Versatile Express special handling */
- if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
- struct platform_device *pdev;
-
- /* Registers a driver for the muxfpga */
- ret = vexpress_muxfpga_init();
- if (ret) {
- dev_err(dev, "unable to initialize muxfpga driver\n");
- of_node_put(np);
- return ret;
- }
-
- /* Call into deep Vexpress configuration API */
- pdev = of_find_device_by_node(np);
- if (!pdev) {
- dev_err(dev, "can't find the sysreg device, deferring\n");
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- map = dev_get_drvdata(&pdev->dev);
- if (!map) {
- dev_err(dev, "sysreg has not yet probed\n");
- platform_device_put(pdev);
- of_node_put(np);
- return -EPROBE_DEFER;
- }
- } else {
- map = syscon_node_to_regmap(np);
- }
+ map = syscon_node_to_regmap(np);
of_node_put(np);
-
if (IS_ERR(map)) {
dev_err(dev, "no Versatile syscon regmap\n");
return PTR_ERR(map);
@@ -466,13 +543,6 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_disable = pl111_realview_clcd_disable;
dev_info(dev, "set up callbacks for RealView PL111\n");
break;
- case VEXPRESS_CLCD_V2M:
- priv->variant = &pl111_vexpress;
- dev_info(dev, "initializing Versatile Express PL111\n");
- ret = pl111_vexpress_clcd_init(dev, priv, map);
- if (ret)
- return ret;
- break;
default:
dev_info(dev, "unknown Versatile system controller\n");
break;
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
deleted file mode 100644
index 350570fe06b5..000000000000
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ /dev/null
@@ -1,138 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Versatile Express PL111 handling
- * Copyright (C) 2018 Linus Walleij
- *
- * This module binds to the "arm,vexpress-muxfpga" device on the
- * Versatile Express configuration bus and sets up which CLCD instance
- * gets muxed out on the DVI bridge.
- */
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/regmap.h>
-#include <linux/vexpress.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include "pl111_drm.h"
-#include "pl111_vexpress.h"
-
-#define VEXPRESS_FPGAMUX_MOTHERBOARD 0x00
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_1 0x01
-#define VEXPRESS_FPGAMUX_DAUGHTERBOARD_2 0x02
-
-int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map)
-{
- struct device_node *root;
- struct device_node *child;
- struct device_node *ct_clcd = NULL;
- bool has_coretile_clcd = false;
- bool has_coretile_hdlcd = false;
- bool mux_motherboard = true;
- u32 val;
- int ret;
-
- /*
- * Check if we have a CLCD or HDLCD on the core tile by checking if a
- * CLCD or HDLCD is available in the root of the device tree.
- */
- root = of_find_node_by_path("/");
- if (!root)
- return -EINVAL;
-
- for_each_available_child_of_node(root, child) {
- if (of_device_is_compatible(child, "arm,pl111")) {
- has_coretile_clcd = true;
- ct_clcd = child;
- break;
- }
- if (of_device_is_compatible(child, "arm,hdlcd")) {
- has_coretile_hdlcd = true;
- of_node_put(child);
- break;
- }
- }
-
- of_node_put(root);
-
- /*
- * If there is a coretile HDLCD and it has a driver,
- * do not mux the CLCD on the motherboard to the DVI.
- */
- if (has_coretile_hdlcd && IS_ENABLED(CONFIG_DRM_HDLCD))
- mux_motherboard = false;
-
- /*
- * On the Vexpress CA9 we let the CLCD on the coretile
- * take precedence, so also in this case do not mux the
- * motherboard to the DVI.
- */
- if (has_coretile_clcd)
- mux_motherboard = false;
-
- if (mux_motherboard) {
- dev_info(dev, "DVI muxed to motherboard CLCD\n");
- val = VEXPRESS_FPGAMUX_MOTHERBOARD;
- } else if (ct_clcd == dev->of_node) {
- dev_info(dev,
- "DVI muxed to daughterboard 1 (core tile) CLCD\n");
- val = VEXPRESS_FPGAMUX_DAUGHTERBOARD_1;
- } else {
- dev_info(dev, "core tile graphics present\n");
- dev_info(dev, "this device will be deactivated\n");
- return -ENODEV;
- }
-
- ret = regmap_write(map, 0, val);
- if (ret) {
- dev_err(dev, "error setting DVI muxmode\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-/*
- * This sets up the regmap pointer that will then be retrieved by
- * the detection code in pl111_versatile.c and passed in to the
- * pl111_vexpress_clcd_init() function above.
- */
-static int vexpress_muxfpga_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct regmap *map;
-
- map = devm_regmap_init_vexpress_config(&pdev->dev);
- if (IS_ERR(map))
- return PTR_ERR(map);
- dev_set_drvdata(dev, map);
-
- return 0;
-}
-
-static const struct of_device_id vexpress_muxfpga_match[] = {
- { .compatible = "arm,vexpress-muxfpga", },
- {}
-};
-
-static struct platform_driver vexpress_muxfpga_driver = {
- .driver = {
- .name = "vexpress-muxfpga",
- .of_match_table = of_match_ptr(vexpress_muxfpga_match),
- },
- .probe = vexpress_muxfpga_probe,
-};
-
-int vexpress_muxfpga_init(void)
-{
- int ret;
-
- ret = platform_driver_register(&vexpress_muxfpga_driver);
- /* -EBUSY just means this driver is already registered */
- if (ret == -EBUSY)
- ret = 0;
- return ret;
-}
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.h b/drivers/gpu/drm/pl111/pl111_vexpress.h
deleted file mode 100644
index 5d3681bb4c00..000000000000
--- a/drivers/gpu/drm/pl111/pl111_vexpress.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-
-struct device;
-struct pl111_drm_dev_private;
-struct regmap;
-
-#ifdef CONFIG_ARCH_VEXPRESS
-
-int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map);
-
-int vexpress_muxfpga_init(void);
-
-#else
-
-static inline int pl111_vexpress_clcd_init(struct device *dev,
- struct pl111_drm_dev_private *priv,
- struct regmap *map)
-{
- return -ENODEV;
-}
-
-static inline int vexpress_muxfpga_init(void)
-{
- return 0;
-}
-
-#endif
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index d1086b2a6892..798f9dd7ad75 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -377,7 +377,7 @@ void qxl_io_destroy_primary(struct qxl_device *qdev)
{
wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC);
qdev->primary_bo->is_primary = false;
- drm_gem_object_put_unlocked(&qdev->primary_bo->tbo.base);
+ drm_gem_object_put(&qdev->primary_bo->tbo.base);
qdev->primary_bo = NULL;
}
@@ -480,9 +480,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
return ret;
ret = qxl_release_reserve_list(release, true);
- if (ret)
+ if (ret) {
+ qxl_release_free(qdev, release);
return ret;
-
+ }
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -499,8 +500,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
/* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -542,9 +543,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
return 0;
}
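
The qxl hunks here and below (qxl_display.c, qxl_draw.c, qxl_ioctl.c) all apply the same reordering: qxl_release_fence_buffer_objects() is now called before qxl_push_command_ring_release(). Once a command is on the ring the host may process and free the release at any point, so the buffer objects must already carry their fence by then. The intended submission ordering, sketched:

	cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
	/* ... fill in the command ... */
	qxl_release_unmap(qdev, release, &cmd->release_info);

	/* fence first: after the push the host owns the release and
	 * may complete it immediately
	 */
	qxl_release_fence_buffer_objects(release);
	qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
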
diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
index a4f4175bbdbe..524d35b648d8 100644
--- a/drivers/gpu/drm/qxl/qxl_debugfs.c
+++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
@@ -39,7 +39,7 @@ static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(node->minor->dev);
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
@@ -53,7 +53,7 @@ static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct qxl_device *qdev = node->minor->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(node->minor->dev);
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
@@ -79,36 +79,29 @@ static struct drm_info_list qxl_debugfs_list[] = {
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
#endif
-int
+void
qxl_debugfs_init(struct drm_minor *minor)
{
#if defined(CONFIG_DEBUG_FS)
- int r;
- struct qxl_device *dev =
- (struct qxl_device *) minor->dev->dev_private;
+ struct qxl_device *dev = to_qxl(minor->dev);
drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
- r = qxl_ttm_debugfs_init(dev);
- if (r) {
- DRM_ERROR("Failed to init TTM debugfs\n");
- return r;
- }
+ qxl_ttm_debugfs_init(dev);
#endif
- return 0;
}
-int qxl_debugfs_add_files(struct qxl_device *qdev,
- struct drm_info_list *files,
- unsigned int nfiles)
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned int nfiles)
{
unsigned int i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
/* Already registered */
- return 0;
+ return;
}
}
@@ -116,7 +109,7 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
- return -EINVAL;
+ return;
}
qdev->debugfs[qdev->debugfs_count].files = files;
qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
@@ -126,5 +119,4 @@ int qxl_debugfs_add_files(struct qxl_device *qdev,
qdev->ddev.primary->debugfs_root,
qdev->ddev.primary);
#endif
- return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 09583a08e141..099dca48b0ff 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,7 @@ static int qxl_add_mode(struct drm_connector *connector,
bool preferred)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_display_mode *mode = NULL;
int rc;
@@ -242,7 +242,7 @@ static int qxl_add_mode(struct drm_connector *connector,
static int qxl_add_monitors_config_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
int h = output->index;
struct qxl_head *head;
@@ -310,7 +310,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
const char *reason)
{
struct drm_device *dev = crtc->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
struct qxl_head head;
int oldcount, i = qcrtc->index;
@@ -400,7 +400,7 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
unsigned int num_clips)
{
/* TODO: vmwgfx where this was cribbed from had locking. Why? */
- struct qxl_device *qdev = fb->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(fb->dev);
struct drm_clip_rect norect;
struct qxl_bo *qobj;
bool is_primary;
@@ -462,7 +462,7 @@ static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = {
static int qxl_primary_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo;
if (!state->crtc || !state->fb)
@@ -476,7 +476,7 @@ static int qxl_primary_atomic_check(struct drm_plane *plane,
static int qxl_primary_apply_cursor(struct drm_plane *plane)
{
struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_framebuffer *fb = plane->state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_cursor_cmd *cmd;
@@ -510,8 +510,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
return ret;
@@ -523,7 +523,7 @@ out_free_release:
static void qxl_primary_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
struct qxl_bo *primary;
struct drm_clip_rect norect = {
@@ -554,7 +554,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
static void qxl_primary_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
if (old_state->fb) {
struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]);
@@ -570,7 +570,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *dev = plane->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_framebuffer *fb = plane->state->fb;
struct qxl_crtc *qcrtc = to_qxl_crtc(plane->state->crtc);
struct qxl_release *release;
@@ -652,8 +652,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.position.y = plane->state->crtc_y + fb->hot_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
if (old_cursor_bo != NULL)
qxl_bo_unpin(old_cursor_bo);
@@ -679,7 +679,7 @@ out_free_release:
static void qxl_cursor_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct qxl_release *release;
struct qxl_cursor_cmd *cmd;
int ret;
@@ -700,8 +700,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
}
static void qxl_update_dumb_head(struct qxl_device *qdev,
@@ -762,7 +762,7 @@ static void qxl_calc_dumb_shadow(struct qxl_device *qdev,
static int qxl_plane_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
- struct qxl_device *qdev = plane->dev->dev_private;
+ struct qxl_device *qdev = to_qxl(plane->dev);
struct drm_gem_object *obj;
struct qxl_bo *user_bo;
struct qxl_surface surf;
@@ -783,7 +783,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
qdev->dumb_shadow_bo->surf.width != surf.width ||
qdev->dumb_shadow_bo->surf.height != surf.height) {
if (qdev->dumb_shadow_bo) {
- drm_gem_object_put_unlocked
+ drm_gem_object_put
(&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
}
@@ -793,7 +793,7 @@ static int qxl_plane_prepare_fb(struct drm_plane *plane,
}
if (user_bo->shadow != qdev->dumb_shadow_bo) {
if (user_bo->shadow) {
- drm_gem_object_put_unlocked
+ drm_gem_object_put
(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
@@ -828,7 +828,7 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
qxl_bo_unpin(user_bo);
if (old_state->fb != plane->state->fb && user_bo->shadow) {
- drm_gem_object_put_unlocked(&user_bo->shadow->tbo.base);
+ drm_gem_object_put(&user_bo->shadow->tbo.base);
user_bo->shadow = NULL;
}
}
@@ -923,7 +923,7 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
{
struct qxl_crtc *qxl_crtc;
struct drm_plane *primary, *cursor;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
int r;
qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL);
@@ -965,7 +965,7 @@ free_mem:
static int qxl_conn_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *output = drm_connector_to_qxl_output(connector);
unsigned int pwidth = 1024;
unsigned int pheight = 768;
@@ -991,7 +991,7 @@ static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *ddev = connector->dev;
- struct qxl_device *qdev = ddev->dev_private;
+ struct qxl_device *qdev = to_qxl(ddev);
if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0)
return MODE_BAD;
@@ -1021,7 +1021,7 @@ static enum drm_connector_status qxl_conn_detect(
struct qxl_output *output =
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
- struct qxl_device *qdev = ddev->dev_private;
+ struct qxl_device *qdev = to_qxl(ddev);
bool connected = false;
/* The first monitor is always connected */
@@ -1071,7 +1071,7 @@ static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
static int qdev_output_init(struct drm_device *dev, int num_output)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 5bebf1ea1c5d..3599db096973 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -209,9 +209,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
goto out_release_backoff;
rects = drawable_set_clipping(qdev, num_clips, clips_bo);
- if (!rects)
+ if (!rects) {
+ ret = -EINVAL;
goto out_release_backoff;
-
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -242,8 +243,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
}
qxl_bo_kunmap(clips_bo);
- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
out_release_backoff:
if (ret)
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 4fda3f9b29f4..13872b882775 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -81,13 +81,16 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -EINVAL; /* TODO: ENODEV ? */
}
- qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
- if (!qdev)
+ qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
+ struct qxl_device, ddev);
+ if (IS_ERR(qdev)) {
+ pr_err("Unable to init drm dev");
return -ENOMEM;
+ }
ret = pci_enable_device(pdev);
if (ret)
- goto free_dev;
+ return ret;
ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
@@ -101,7 +104,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- ret = qxl_device_init(qdev, &qxl_driver, pdev);
+ ret = qxl_device_init(qdev, pdev);
if (ret)
goto put_vga;
@@ -128,14 +131,13 @@ put_vga:
vga_put(pdev, VGA_RSRC_LEGACY_IO);
disable_pci:
pci_disable_device(pdev);
-free_dev:
- kfree(qdev);
+
return ret;
}
static void qxl_drm_release(struct drm_device *dev)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
/*
* TODO: qxl_device_fini() call should be in qxl_pci_remove(),
@@ -144,8 +146,6 @@ static void qxl_drm_release(struct drm_device *dev)
*/
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
- dev->dev_private = NULL;
- kfree(qdev);
}
static void
@@ -157,7 +157,6 @@ qxl_pci_remove(struct pci_dev *pdev)
drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
- drm_dev_put(dev);
}
DEFINE_DRM_GEM_FOPS(qxl_fops);
@@ -165,7 +164,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops);
static int qxl_drm_freeze(struct drm_device *dev)
{
struct pci_dev *pdev = dev->pdev;
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
int ret;
ret = drm_mode_config_helper_suspend(dev);
@@ -187,7 +186,7 @@ static int qxl_drm_freeze(struct drm_device *dev)
static int qxl_drm_resume(struct drm_device *dev, bool thaw)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (!thaw) {
@@ -246,7 +245,7 @@ static int qxl_pm_restore(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = drm_dev->dev_private;
+ struct qxl_device *qdev = to_qxl(drm_dev);
qxl_io_reset(qdev);
return qxl_drm_resume(drm_dev, false);
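
qxl moves to devm_drm_dev_alloc(), which allocates the driver structure with its embedded drm_device in one step and ties the release to the parent PCI device, eliminating the manual kzalloc()/kfree()/drm_dev_put() bookkeeping removed above. The general pattern (note the ERR_PTR() return convention, unlike kzalloc()'s NULL):

	qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver,
				  struct qxl_device, ddev);
	if (IS_ERR(qdev))
		return PTR_ERR(qdev);	/* usual idiom; the hunk above returns -ENOMEM */
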
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 27e45a2d6b52..31e35f787df2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -190,13 +190,8 @@ struct qxl_debugfs {
unsigned int num_files;
};
-int qxl_debugfs_add_files(struct qxl_device *rdev,
- struct drm_info_list *files,
- unsigned int nfiles);
int qxl_debugfs_fence_init(struct qxl_device *rdev);
-struct qxl_device;
-
struct qxl_device {
struct drm_device ddev;
@@ -276,11 +271,12 @@ struct qxl_device {
int monitors_config_height;
};
+#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)
+
extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
-int qxl_device_init(struct qxl_device *qdev, struct drm_driver *drv,
- struct pci_dev *pdev);
+int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev);
void qxl_device_fini(struct qxl_device *qdev);
int qxl_modeset_init(struct qxl_device *qdev);
@@ -442,8 +438,8 @@ int qxl_garbage_collect(struct qxl_device *qdev);
/* debugfs */
-int qxl_debugfs_init(struct drm_minor *minor);
-int qxl_ttm_debugfs_init(struct qxl_device *qdev);
+void qxl_debugfs_init(struct drm_minor *minor);
+void qxl_ttm_debugfs_init(struct qxl_device *qdev);
/* qxl_prime.c */
int qxl_gem_prime_pin(struct drm_gem_object *obj);
@@ -461,9 +457,9 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
int qxl_irq_init(struct qxl_device *qdev);
irqreturn_t qxl_irq_handler(int irq, void *arg);
-int qxl_debugfs_add_files(struct qxl_device *qdev,
- struct drm_info_list *files,
- unsigned int nfiles);
+void qxl_debugfs_add_files(struct qxl_device *qdev,
+ struct drm_info_list *files,
+ unsigned int nfiles);
int qxl_surface_id_alloc(struct qxl_device *qdev,
struct qxl_bo *surf);
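
With drm_device embedded in struct qxl_device, the dev_private back-pointer becomes redundant and every upcast goes through container_of() via the new to_qxl() macro:

	#define to_qxl(dev) container_of(dev, struct qxl_device, ddev)

	/* pure pointer arithmetic: subtracts offsetof(struct qxl_device,
	 * ddev), so it needs no extra storage and cannot be left stale
	 * the way dev_private could
	 */
	static void example(struct drm_device *dev)
	{
		struct qxl_device *qdev = to_qxl(dev);

		(void)qdev;	/* use qdev instead of dev->dev_private */
	}

The many one-line conversions throughout the qxl hunks are mechanical applications of this macro.
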
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 272d19b677d8..c04cd5a2553c 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -32,7 +32,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct qxl_bo *qobj;
uint32_t handle;
int r;
@@ -83,6 +83,6 @@ int qxl_mode_dumb_mmap(struct drm_file *file_priv,
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
*offset_p = qxl_bo_mmap_offset(qobj);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index 69f37db1027a..48e096285b4c 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -34,7 +34,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
struct qxl_device *qdev;
struct ttm_buffer_object *tbo;
- qdev = (struct qxl_device *)gobj->dev->dev_private;
+ qdev = to_qxl(gobj->dev);
qxl_surface_evict(qdev, qobj, false);
@@ -97,7 +97,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
return r;
/* drop reference from allocate - handle holds it now */
*qobj = gem_to_qxl_bo(gobj);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index 43688ecdd8a0..60ab7151b84d 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
break;
default:
DRM_ERROR("unsupported image bit depth\n");
- return -EINVAL; /* TODO: cleanup */
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
+ return -EINVAL;
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 8117a45b3610..8f605d5cc149 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -36,7 +36,7 @@
static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
struct qxl_bo *qobj;
@@ -64,7 +64,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data,
static int qxl_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_map *qxl_map = data;
return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
@@ -125,7 +125,7 @@ static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
qobj = gem_to_qxl_bo(gobj);
ret = qxl_release_list_add(release, qobj);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (ret)
return ret;
@@ -261,11 +261,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
apply_surf_reloc(qdev, &reloc_info[i]);
}
+ qxl_release_fence_buffer_objects(release);
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
- if (ret)
- qxl_release_backoff_reserve_list(release);
- else
- qxl_release_fence_buffer_objects(release);
out_free_bos:
out_free_release:
@@ -279,7 +276,7 @@ out_free_reloc:
static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_execbuffer *execbuffer = data;
struct drm_qxl_command user_cmd;
int cmd_num;
@@ -304,7 +301,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_update_area *update_area = data;
struct qxl_rect area = {.left = update_area->left,
.top = update_area->top,
@@ -347,14 +344,14 @@ out2:
qxl_bo_unreserve(qobj);
out:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return ret;
}
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_getparam *param = data;
switch (param->param) {
@@ -373,7 +370,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_clientcap *param = data;
int byte, idx;
@@ -394,7 +391,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
- struct qxl_device *qdev = dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_alloc_surf *param = data;
struct qxl_bo *qobj;
int handle;
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 8435af108632..1ba5a702d763 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -32,7 +32,7 @@
irqreturn_t qxl_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
- struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
+ struct qxl_device *qdev = to_qxl(dev);
uint32_t pending;
pending = xchg(&qdev->ram_header->int_pending, 0);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 70b20ee4741a..a6d873052cd4 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <drm/drm_drv.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "qxl_drv.h"
@@ -107,20 +108,12 @@ static void qxl_gc_work(struct work_struct *work)
}
int qxl_device_init(struct qxl_device *qdev,
- struct drm_driver *drv,
struct pci_dev *pdev)
{
int r, sb;
- r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
- if (r) {
- pr_err("Unable to init drm dev");
- goto error;
- }
-
qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
- qdev->ddev.dev_private = qdev;
mutex_init(&qdev->gem.mutex);
mutex_init(&qdev->update_area_mutex);
@@ -136,8 +129,7 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
if (!qdev->vram_mapping) {
pr_err("Unable to create vram_mapping");
- r = -ENOMEM;
- goto error;
+ return -ENOMEM;
}
if (pci_resource_len(pdev, 4) > 0) {
@@ -218,7 +210,7 @@ int qxl_device_init(struct qxl_device *qdev,
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
- qdev->io_base + QXL_IO_NOTIFY_CMD,
+ qdev->io_base + QXL_IO_NOTIFY_CURSOR,
false,
&qdev->cursor_event);
@@ -291,7 +283,6 @@ surface_mapping_free:
io_mapping_free(qdev->surface_mapping);
vram_mapping_free:
io_mapping_free(qdev->vram_mapping);
-error:
return r;
}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index ab72dc3476e9..80e7a17aaddd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -33,7 +33,7 @@ static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
struct qxl_device *qdev;
bo = to_qxl_bo(tbo);
- qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
+ qdev = to_qxl(bo->tbo.base.dev);
qxl_surface_evict(qdev, bo, false);
WARN_ON_ONCE(bo->map_count > 0);
@@ -224,7 +224,7 @@ void qxl_bo_unref(struct qxl_bo **bo)
if ((*bo) == NULL)
return;
- drm_gem_object_put_unlocked(&(*bo)->tbo.base);
+ drm_gem_object_put(&(*bo)->tbo.base);
*bo = NULL;
}
@@ -326,7 +326,7 @@ void qxl_bo_force_delete(struct qxl_device *qdev)
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->tbo.base);
+ drm_gem_object_put(&bo->tbo.base);
}
}
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 2feca734c7b1..4fae3e393da1 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -243,7 +243,7 @@ static int qxl_release_validate_bo(struct qxl_bo *bo)
return ret;
/* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->tbo.base.dev->dev_private, bo);
+ ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo);
if (ret)
return ret;
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 62a5e424971b..f09a712b1ed2 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -243,7 +243,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
if (!qxl_ttm_bo_is_qxl_bo(bo))
return;
qbo = to_qxl_bo(bo);
- qdev = qbo->tbo.base.dev->dev_private;
+ qdev = to_qxl(qbo->tbo.base.dev);
if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
qxl_surface_evict(qdev, qbo, new_mem ? true : false);
@@ -322,7 +322,7 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
}
#endif
-int qxl_ttm_debugfs_init(struct qxl_device *qdev)
+void qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
@@ -343,8 +343,6 @@ int qxl_ttm_debugfs_init(struct qxl_device *qdev)
qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV].priv;
}
- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
-#else
- return 0;
+ qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#endif
}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 9b4072f97215..3e76ae5a17ee 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -32,9 +32,10 @@
*/
#include <linux/export.h>
+#include <linux/pci.h>
#include <drm/drm_device.h>
-#include <drm/drm_pci.h>
+#include <drm/drm_legacy.h>
#include <drm/drm_print.h>
#include "ati_pcigart.h"
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c693b2ca0329..11c97edde54d 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -3,42 +3,13 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Idrivers/gpu/drm/amd/include
-
hostprogs := mkregtable
-clean-files := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
+targets := rn50_reg_safe.h r100_reg_safe.h r200_reg_safe.h rv515_reg_safe.h r300_reg_safe.h r420_reg_safe.h rs600_reg_safe.h r600_reg_safe.h evergreen_reg_safe.h cayman_reg_safe.h
-quiet_cmd_mkregtable = MKREGTABLE $@
+quiet_cmd_mkregtable = MKREG $@
cmd_mkregtable = $(obj)/mkregtable $< > $@
-$(obj)/rn50_reg_safe.h: $(src)/reg_srcs/rn50 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r100_reg_safe.h: $(src)/reg_srcs/r100 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r200_reg_safe.h: $(src)/reg_srcs/r200 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/evergreen_reg_safe.h: $(src)/reg_srcs/evergreen $(obj)/mkregtable
- $(call if_changed,mkregtable)
-
-$(obj)/cayman_reg_safe.h: $(src)/reg_srcs/cayman $(obj)/mkregtable
+$(obj)/%_reg_safe.h: $(src)/reg_srcs/% $(obj)/mkregtable FORCE
$(call if_changed,mkregtable)
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 2c27627b6659..f15b20da5315 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1211,8 +1211,7 @@ static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32
SDEBUG("<<\n");
free:
- if (ws)
- kfree(ectx.ws);
+ kfree(ectx.ws);
return ret;
}
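
kfree() is defined to be a no-op on NULL, so guarding it with an if is redundant; since ectx.ws is NULL whenever no workspace was allocated, the unconditional call is equivalent:

	kfree(ectx.ws);		/* safe even when ectx.ws was never allocated */
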
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index a9257bed3484..134aa2b01f90 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -65,13 +65,6 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};
-static const struct ci_pt_defaults defaults_bonaire_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
- { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
- { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
-};
-
static const struct ci_pt_defaults defaults_saturn_xt =
{
1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
@@ -79,13 +72,6 @@ static const struct ci_pt_defaults defaults_saturn_xt =
{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};
-static const struct ci_pt_defaults defaults_saturn_pro =
-{
- 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
- { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
- { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
-};
-
static const struct ci_pt_config_reg didt_config_ci[] =
{
{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 848ef68d9086..5d2591725189 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2111,7 +2111,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
ucOverdriveThermalController];
info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
num_modes = power_info->info.ucNumOfPowerModeEntries;
@@ -2351,7 +2351,7 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
const char *name = pp_lib_thermal_controller_names[controller->ucType];
info.addr = controller->ucI2cAddress >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index c3e49c973812..d3c04df7e75d 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2704,7 +2704,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
const char *name = thermal_controller_names[thermal_controller];
info.addr = i2c_addr >> 1;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
} else {
@@ -2721,7 +2721,7 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
const char *name = "f75375";
info.addr = 0x28;
strlcpy(info.type, name, sizeof(info.type));
- i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
+ i2c_new_client_device(&rdev->pm.i2c_bus->adapter, &info);
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
name, info.addr);
}
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0d0ab8e0ff3b..33ae1b883268 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -196,12 +196,12 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
&p->validated);
if (need_mmap_lock)
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);
if (need_mmap_lock)
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
return r;
}
@@ -443,7 +443,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (bo == NULL)
continue;
- drm_gem_object_put_unlocked(&bo->tbo.base);
+ drm_gem_object_put(&bo->tbo.base);
}
}
kfree(parser->track);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 9180bb51b913..3507805b34bc 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -309,7 +309,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
robj = gem_to_radeon_bo(obj);
ret = radeon_bo_reserve(robj, false);
if (ret != 0) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
/* Only 27 bit offset for legacy cursor */
@@ -319,7 +319,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
radeon_bo_unreserve(robj);
if (ret) {
DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -354,7 +354,7 @@ unpin:
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- drm_gem_object_put_unlocked(radeon_crtc->cursor_bo);
+ drm_gem_object_put(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 35db79a168bf..6222bdb74825 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -281,7 +281,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
} else
DRM_ERROR("failed to reserve buffer after flip\n");
- drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
+ drm_gem_object_put(&work->old_rbo->tbo.base);
kfree(work);
}
@@ -613,7 +613,7 @@ pflip_cleanup:
radeon_bo_unreserve(new_rbo);
cleanup:
- drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
+ drm_gem_object_put(&work->old_rbo->tbo.base);
dma_fence_put(work->fence);
kfree(work);
return r;
@@ -1337,14 +1337,14 @@ radeon_user_framebuffer_create(struct drm_device *dev,
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (fb == NULL) {
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(-ENOMEM);
}
ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj);
if (ret) {
kfree(fb);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 59f8186a2415..bbb0883e8ce6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -36,6 +36,7 @@
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <linux/pci.h>
#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
@@ -44,7 +45,6 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
-#include <drm/drm_pci.h>
#include <drm/drm_pciids.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index cf3156a65fc1..fc4212633bdf 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -119,7 +119,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
radeon_bo_unpin(rbo);
radeon_bo_unreserve(rbo);
}
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
@@ -298,7 +298,7 @@ out:
}
if (fb && ret) {
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
drm_framebuffer_unregister_private(fb);
drm_framebuffer_cleanup(fb);
kfree(fb);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 068c3e5da173..44157ada9b0e 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -275,7 +275,7 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
@@ -342,24 +342,24 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
r = radeon_bo_reserve(bo, true);
if (r) {
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
goto release_object;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (r)
goto release_object;
}
r = drm_gem_handle_create(filp, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (r)
goto handle_lockup;
@@ -368,7 +368,7 @@ int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
return 0;
release_object:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
handle_lockup:
up_read(&rdev->exclusive_lock);
@@ -402,7 +402,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
@@ -421,11 +421,11 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
}
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return -EPERM;
}
*offset_p = radeon_bo_mmap_offset(robj);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
@@ -460,7 +460,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
args->domain = radeon_mem_type_to_domain(cur_placement);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -492,7 +492,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
if (rdev->asic->mmio_hdp_flush &&
radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
robj->rdev->asic->mmio_hdp_flush(rdev);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
@@ -511,7 +511,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -534,7 +534,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -668,14 +668,14 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
r = radeon_bo_reserve(rbo, false);
if (r) {
args->operation = RADEON_VA_RESULT_ERROR;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
if (!bo_va) {
args->operation = RADEON_VA_RESULT_ERROR;
radeon_bo_unreserve(rbo);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return -ENOENT;
}
@@ -702,7 +702,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
args->operation = RADEON_VA_RESULT_ERROR;
}
out:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -743,7 +743,7 @@ int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
radeon_bo_unreserve(robj);
out:
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return r;
}
@@ -769,7 +769,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
r = drm_gem_handle_create(file_priv, gobj, &handle);
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 58176db85952..c5d1dc9618a4 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -158,7 +158,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
- dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -828,7 +828,7 @@ int radeon_enable_vblank_kms(struct drm_crtc *crtc)
unsigned long irqflags;
int r;
- if (pipe < 0 || pipe >= rdev->num_crtc) {
+ if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return -EINVAL;
}
@@ -854,7 +854,7 @@ void radeon_disable_vblank_kms(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (pipe < 0 || pipe >= rdev->num_crtc) {
+ if (pipe >= rdev->num_crtc) {
DRM_ERROR("Invalid crtc %d\n", pipe);
return;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 140d94cc080d..f3dee01250da 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -448,7 +448,7 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->tbo.base);
+ drm_gem_object_put(&bo->tbo.base);
}
}
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 2cb85dbe728f..a167e1c36d24 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -252,24 +252,6 @@ static const struct si_dte_data dte_data_tahiti =
false
};
-static const struct si_dte_data dte_data_tahiti_le =
-{
- { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
- { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
- 0x5,
- 0xAFC8,
- 0x64,
- 0x32,
- 1,
- 0,
- 0x10,
- { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
- { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
- { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
- 85,
- true
-};
-
static const struct si_dte_data dte_data_tahiti_pro =
{
{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 654e2dd08146..f53b0ec71085 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -476,16 +476,7 @@ DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
- .dumb_create = rcar_du_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(rcar_du_dumb_create),
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -530,7 +521,6 @@ static int rcar_du_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index c07c6a88aff0..b0335da0c161 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -13,6 +13,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
@@ -23,13 +24,6 @@
* Encoder
*/
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
{
struct device_node *ports;
@@ -110,13 +104,11 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
}
}
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(rcdu->ddev, encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret < 0)
goto done;
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
/*
* Attach the bridge to the encoder. The bridge will create the
* connector.
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fcfd916227d1..482329102f19 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -712,7 +712,9 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
unsigned int i;
int ret;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c6430027169f..a0021fc25b27 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -785,13 +785,15 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp)
drm_plane_create_alpha_property(&plane->plane);
- if (type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- drm_object_attach_property(&plane->plane.base,
- rcdu->props.colorkey,
- RCAR_DU_COLORKEY_NONE);
- drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
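+			/* The primary plane sits at the bottom: immutable zpos 0. */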
+ drm_plane_create_zpos_immutable_property(&plane->plane,
+ 0);
+ } else {
+ drm_object_attach_property(&plane->plane.base,
+ rcdu->props.colorkey,
+ RCAR_DU_COLORKEY_NONE);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1, 7);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 5e4faf258c31..f1a81c9b184d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -392,12 +392,14 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
drm_plane_helper_add(&plane->plane,
&rcar_du_vsp_plane_helper_funcs);
- if (type == DRM_PLANE_TYPE_PRIMARY)
- continue;
-
- drm_plane_create_alpha_property(&plane->plane);
- drm_plane_create_zpos_property(&plane->plane, 1, 1,
- vsp->num_planes - 1);
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
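+			/* Fixed zpos 0 keeps the primary plane at the bottom of the stack. */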
+ drm_plane_create_zpos_immutable_property(&plane->plane,
+ 0);
+ } else {
+ drm_plane_create_alpha_property(&plane->plane);
+ drm_plane_create_zpos_property(&plane->plane, 1, 1,
+ vsp->num_planes - 1);
+ }
}
return 0;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ce98c08aa8b4..ade2327a10e2 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -26,6 +26,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -258,10 +259,6 @@ static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
.atomic_check = rockchip_dp_drm_encoder_atomic_check,
};
-static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rockchip_dp_of_probe(struct rockchip_dp_device *dp)
{
struct device *dev = dp->dev;
@@ -309,8 +306,8 @@ static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
dev->of_node);
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("failed to initialize encoder with drm\n");
return ret;
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index eed594bd38d3..c634b95b50f7 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -20,6 +20,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "cdn-dp-core.h"
#include "cdn-dp-reg.h"
@@ -689,10 +690,6 @@ static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = {
.atomic_check = cdn_dp_encoder_atomic_check,
};
-static const struct drm_encoder_funcs cdn_dp_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
{
struct device *dev = dp->dev;
@@ -1030,8 +1027,8 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data)
dev->of_node);
DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
- ret = drm_encoder_init(drm_dev, encoder, &cdn_dp_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
DRM_ERROR("failed to initialize encoder with drm\n");
return ret;
@@ -1109,7 +1106,7 @@ static const struct component_ops cdn_dp_component_ops = {
.unbind = cdn_dp_unbind,
};
-int cdn_dp_suspend(struct device *dev)
+static int cdn_dp_suspend(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
int ret = 0;
@@ -1123,7 +1120,7 @@ int cdn_dp_suspend(struct device *dev)
return ret;
}
-int cdn_dp_resume(struct device *dev)
+static int cdn_dp_resume(struct device *dev)
{
struct cdn_dp_device *dp = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 7361c07cb4a7..9d2163ef4d6e 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -601,7 +601,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
case YCBCR_4_2_0:
val[0] = 5;
break;
- };
+ }
switch (video->color_depth) {
case 6:
@@ -619,7 +619,7 @@ static int cdn_dp_get_msa_misc(struct video_info *video,
case 16:
val[1] = 4;
break;
- };
+ }
msa_misc = 2 * val[0] + 32 * val[1] +
((video->color_fmt == Y_ONLY) ? (1 << 14) : 0);
@@ -700,7 +700,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp)
case 16:
val = BCS_16;
break;
- };
+ }
val += video->color_fmt << 8;
ret = cdn_dp_reg_write(dp, DP_FRAMER_PXL_REPR, val);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index 6e1270e45f97..3feff0c45b3f 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -21,6 +21,7 @@
#include <drm/bridge/dw_mipi_dsi.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -789,10 +790,6 @@ dw_mipi_dsi_encoder_helper_funcs = {
.disable = dw_mipi_dsi_encoder_disable,
};
-static const struct drm_encoder_funcs dw_mipi_dsi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
struct drm_device *drm_dev)
{
@@ -802,8 +799,7 @@ static int rockchip_dsi_drm_create_encoder(struct dw_mipi_dsi_rockchip *dsi,
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
dsi->dev->of_node);
- ret = drm_encoder_init(drm_dev, encoder, &dw_mipi_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_DSI);
if (ret) {
DRM_ERROR("Failed to initialize encoder with drm\n");
return ret;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 7f56d8c3491d..121aa8a63a76 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -14,6 +14,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -237,10 +238,6 @@ dw_hdmi_rockchip_mode_valid(struct drm_connector *connector,
return (valid) ? MODE_OK : MODE_BAD;
}
-static const struct drm_encoder_funcs dw_hdmi_rockchip_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void dw_hdmi_rockchip_encoder_disable(struct drm_encoder *encoder)
{
}
@@ -546,8 +543,7 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &dw_hdmi_rockchip_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
platform_set_drvdata(pdev, hdmi);
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index e5864e823020..7afdc54eb3ec 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -19,6 +19,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -532,10 +533,6 @@ static struct drm_encoder_helper_funcs inno_hdmi_encoder_helper_funcs = {
.atomic_check = inno_hdmi_encoder_atomic_check,
};
-static struct drm_encoder_funcs inno_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_connector_status
inno_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -617,8 +614,7 @@ static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &inno_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &inno_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
index fe203d38664e..1c546c3a8998 100644
--- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -6,6 +6,7 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/mfd/syscon.h>
@@ -451,10 +452,6 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
.atomic_check = rk3066_hdmi_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_connector_status
rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -557,8 +554,7 @@ rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
return -EPROBE_DEFER;
drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index 20ecb1508a22..0f3eb392fe39 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -135,14 +135,16 @@ static int rockchip_drm_bind(struct device *dev)
if (ret)
goto err_free;
- drm_mode_config_init(drm_dev);
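+	/* Managed: the mode config is cleaned up on the final drm_dev_put(). */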
+ ret = drmm_mode_config_init(drm_dev);
+ if (ret)
+ goto err_iommu_cleanup;
rockchip_drm_mode_config_init(drm_dev);
/* Try to bind all sub drivers. */
ret = component_bind_all(dev, drm_dev);
if (ret)
- goto err_mode_config_cleanup;
+ goto err_iommu_cleanup;
ret = drm_vblank_init(drm_dev, drm_dev->mode_config.num_crtc);
if (ret)
@@ -173,12 +175,9 @@ err_kms_helper_poll_fini:
rockchip_drm_fbdev_fini(drm_dev);
err_unbind_all:
component_unbind_all(dev, drm_dev);
-err_mode_config_cleanup:
- drm_mode_config_cleanup(drm_dev);
+err_iommu_cleanup:
rockchip_iommu_cleanup(drm_dev);
err_free:
- drm_dev->dev_private = NULL;
- dev_set_drvdata(dev, NULL);
drm_dev_put(drm_dev);
return ret;
}
@@ -194,11 +193,8 @@ static void rockchip_drm_unbind(struct device *dev)
drm_atomic_helper_shutdown(drm_dev);
component_unbind_all(dev, drm_dev);
- drm_mode_config_cleanup(drm_dev);
rockchip_iommu_cleanup(drm_dev);
- drm_dev->dev_private = NULL;
- dev_set_drvdata(dev, NULL);
drm_dev_put(drm_dev);
}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index c5b06048124e..e33c2dcd0d4b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -30,6 +30,7 @@ struct rockchip_crtc_state {
int output_mode;
int output_bpc;
int output_flags;
+ bool enable_afbc;
};
#define to_rockchip_crtc_state(s) \
container_of(s, struct rockchip_crtc_state, base)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 221e72e71432..3aa37e177667 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -57,8 +57,49 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers =
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
};
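+/*
+ * .fb_create hook that, for AFBC modifiers, also fills in the AFBC
+ * block-layout metadata via drm_gem_fb_afbc_init().
+ */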
+static struct drm_framebuffer *
+rockchip_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_afbc_framebuffer *afbc_fb;
+ const struct drm_format_info *info;
+ int ret;
+
+ info = drm_get_format_info(dev, mode_cmd);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL);
+ if (!afbc_fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd,
+ &rockchip_drm_fb_funcs);
+ if (ret) {
+ kfree(afbc_fb);
+ return ERR_PTR(ret);
+ }
+
+ if (drm_is_afbc(mode_cmd->modifier[0])) {
+		int i;
+
+ ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb);
+ if (ret) {
+ struct drm_gem_object **obj = afbc_fb->base.obj;
+
+ for (i = 0; i < info->num_planes; ++i)
+ drm_gem_object_put(obj[i]);
+
+ kfree(afbc_fb);
+ return ERR_PTR(ret);
+ }
+ }
+
+ return &afbc_fb->base;
+}
+
static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
+ .fb_create = rockchip_fb_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 0d1884684dcb..b9275ba7c5a5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -392,7 +392,7 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv,
goto err_handle_create;
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return rk_obj;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index cecb2cc781f5..c80f7d9fd13f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -91,9 +91,22 @@
#define VOP_WIN_TO_INDEX(vop_win) \
((vop_win) - (vop_win)->vop->win)
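+/* Set an AFBC register field; a no-op on VOP variants without AFBC data. */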
+#define VOP_AFBC_SET(vop, name, v) \
+ do { \
+ if ((vop)->data->afbc) \
+ vop_reg_set((vop), &(vop)->data->afbc->name, \
+ 0, ~0, v, #name); \
+ } while (0)
+
#define to_vop(x) container_of(x, struct vop, crtc)
#define to_vop_win(x) container_of(x, struct vop_win, base)
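+/* Format and tiling codes written to the AFBC decoder's format field. */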
+#define AFBC_FMT_RGB565 0x0
+#define AFBC_FMT_U8U8U8U8 0x5
+#define AFBC_FMT_U8U8U8 0x4
+
+#define AFBC_TILE_16x16 BIT(4)
+
/*
* The coefficients of the following matrix are all fixed points.
* The format is S2.10 for the 3x3 part of the matrix, and S9.12 for the offsets.
@@ -274,6 +287,29 @@ static enum vop_data_format vop_convert_format(uint32_t format)
}
}
+static int vop_convert_afbc_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return AFBC_FMT_U8U8U8U8;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ return AFBC_FMT_U8U8U8;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ return AFBC_FMT_RGB565;
+	default:
+		DRM_WARN_ONCE("unsupported AFBC format[%08x]\n", format);
+		return -EINVAL;
+	}
+}
+
static uint16_t scl_vop_cal_scale(enum scale_mode mode, uint32_t src,
uint32_t dst, bool is_horizontal,
int vsu_mode, int *vskiplines)
@@ -598,10 +634,21 @@ static int vop_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state)
vop_win_disable(vop, vop_win);
}
}
- spin_unlock(&vop->reg_lock);
+
+ if (vop->data->afbc) {
+ struct rockchip_crtc_state *s;
+ /*
+ * Disable AFBC and forget there was a vop window with AFBC
+ */
+ VOP_AFBC_SET(vop, enable, 0);
+ s = to_rockchip_crtc_state(crtc->state);
+ s->enable_afbc = false;
+ }
vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
/*
* At here, vop clock & iommu is enable, R/W vop regs would be safe.
*/
@@ -710,6 +757,26 @@ static void vop_plane_destroy(struct drm_plane *plane)
drm_plane_cleanup(plane);
}
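+/* Only the 16x16 sparse AFBC layout (ROCKCHIP_AFBC_MOD) is supported. */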
+static inline bool rockchip_afbc(u64 modifier)
+{
+ return modifier == ROCKCHIP_AFBC_MOD;
+}
+
+static bool rockchip_mod_supported(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ if (modifier == DRM_FORMAT_MOD_LINEAR)
+ return true;
+
+ if (!rockchip_afbc(modifier)) {
+ DRM_DEBUG_KMS("Unsupported format modifier 0x%llx\n", modifier);
+
+ return false;
+ }
+
+ return vop_convert_afbc_format(format) >= 0;
+}
+
static int vop_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -758,6 +825,30 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (rockchip_afbc(fb->modifier)) {
+ struct vop *vop = to_vop(crtc);
+
+ if (!vop->data->afbc) {
+ DRM_ERROR("vop does not support AFBC\n");
+ return -EINVAL;
+ }
+
+ ret = vop_convert_afbc_format(fb->format->format);
+ if (ret < 0)
+ return ret;
+
+ if (state->src.x1 || state->src.y1) {
+ DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", state->src.x1, state->src.y1, fb->offsets[0]);
+ return -EINVAL;
+ }
+
+ if (state->rotation && state->rotation != DRM_MODE_ROTATE_0) {
+ DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
+ state->rotation);
+ return -EINVAL;
+ }
+ }
+
return 0;
}
@@ -846,6 +937,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
spin_lock(&vop->reg_lock);
+ if (rockchip_afbc(fb->modifier)) {
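+		/*
+		 * Program the AFBC decoder: pixel format and tile size, the
+		 * window it feeds, and the buffer's header pointer and size.
+		 */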
+ int afbc_format = vop_convert_afbc_format(fb->format->format);
+
+ VOP_AFBC_SET(vop, format, afbc_format | AFBC_TILE_16x16);
+ VOP_AFBC_SET(vop, hreg_block_split, 0);
+ VOP_AFBC_SET(vop, win_sel, VOP_WIN_TO_INDEX(vop_win));
+ VOP_AFBC_SET(vop, hdr_ptr, dma_addr);
+ VOP_AFBC_SET(vop, pic_size, act_info);
+ }
+
VOP_WIN_SET(vop, win, format, format);
VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
@@ -906,6 +1007,10 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
SRC_FACTOR_M0(ALPHA_ONE);
VOP_WIN_SET(vop, win, src_alpha_ctl, val);
+
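+		/* Use per-pixel, pre-multiplied alpha for formats that carry it. */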
+ VOP_WIN_SET(vop, win, alpha_pre_mul, ALPHA_SRC_PRE_MUL);
+ VOP_WIN_SET(vop, win, alpha_mode, ALPHA_PER_PIX);
+ VOP_WIN_SET(vop, win, alpha_en, 1);
} else {
VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
}
@@ -1001,6 +1106,7 @@ static const struct drm_plane_funcs vop_plane_funcs = {
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .format_mod_supported = rockchip_mod_supported,
};
static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -1310,6 +1416,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state)
{
struct vop *vop = to_vop(crtc);
+ struct drm_plane *plane;
+ struct drm_plane_state *plane_state;
+ struct rockchip_crtc_state *s;
+ int afbc_planes = 0;
if (vop->lut_regs && crtc_state->color_mgmt_changed &&
crtc_state->gamma_lut) {
@@ -1323,6 +1433,27 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
}
}
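+	/* The VOP has a single AFBC decoder, so at most one plane may use AFBC. */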
+ drm_atomic_crtc_state_for_each_plane(plane, crtc_state) {
+ plane_state =
+ drm_atomic_get_plane_state(crtc_state->state, plane);
+ if (IS_ERR(plane_state)) {
+ DRM_DEBUG_KMS("Cannot get plane state for plane %s\n",
+ plane->name);
+ return PTR_ERR(plane_state);
+ }
+
+ if (drm_is_afbc(plane_state->fb->modifier))
+ ++afbc_planes;
+ }
+
+ if (afbc_planes > 1) {
+ DRM_DEBUG_KMS("Invalid number of AFBC planes; got %d, expected at most 1\n", afbc_planes);
+ return -EINVAL;
+ }
+
+ s = to_rockchip_crtc_state(crtc_state);
+ s->enable_afbc = afbc_planes > 0;
+
return 0;
}
@@ -1333,6 +1464,7 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_plane_state *old_plane_state, *new_plane_state;
struct vop *vop = to_vop(crtc);
struct drm_plane *plane;
+ struct rockchip_crtc_state *s;
int i;
if (WARN_ON(!vop->is_enabled))
@@ -1340,6 +1472,9 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
spin_lock(&vop->reg_lock);
+ /* Enable AFBC if there is some AFBC window, disable otherwise. */
+ s = to_rockchip_crtc_state(crtc->state);
+ VOP_AFBC_SET(vop, enable, s->enable_afbc);
vop_cfg_done(vop);
spin_unlock(&vop->reg_lock);
@@ -1634,7 +1769,8 @@ static int vop_create_crtc(struct vop *vop)
0, &vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- NULL, win_data->type, NULL);
+ win_data->phy->format_modifiers,
+ win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init plane %d\n",
ret);
@@ -1678,7 +1814,8 @@ static int vop_create_crtc(struct vop *vop)
&vop_plane_funcs,
win_data->phy->data_formats,
win_data->phy->nformats,
- NULL, win_data->type, NULL);
+ win_data->phy->format_modifiers,
+ win_data->type, NULL);
if (ret) {
DRM_DEV_ERROR(vop->dev, "failed to init overlay %d\n",
ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index cc672620d6e0..4a2099cb582e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -17,6 +17,11 @@
#define NUM_YUV2YUV_COEFFICIENTS 12
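+/* The only AFBC layout the VOP decoder handles: 16x16 superblocks, sparse. */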
+#define ROCKCHIP_AFBC_MOD \
+ DRM_FORMAT_MOD_ARM_AFBC( \
+ AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE \
+ )
+
enum vop_data_format {
VOP_FMT_ARGB8888 = 0,
VOP_FMT_RGB888,
@@ -34,6 +39,16 @@ struct vop_reg {
bool relaxed;
};
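+/* Register fields of a VOP's AFBC decoder block; NULL in vop_data if absent. */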
+struct vop_afbc {
+ struct vop_reg enable;
+ struct vop_reg win_sel;
+ struct vop_reg format;
+ struct vop_reg hreg_block_split;
+ struct vop_reg pic_size;
+ struct vop_reg hdr_ptr;
+ struct vop_reg rstn;
+};
+
struct vop_modeset {
struct vop_reg htotal_pw;
struct vop_reg hact_st_end;
@@ -134,6 +149,7 @@ struct vop_win_phy {
const struct vop_scl_regs *scl;
const uint32_t *data_formats;
uint32_t nformats;
+ const uint64_t *format_modifiers;
struct vop_reg enable;
struct vop_reg gate;
@@ -151,6 +167,9 @@ struct vop_win_phy {
struct vop_reg dst_alpha_ctl;
struct vop_reg src_alpha_ctl;
+ struct vop_reg alpha_pre_mul;
+ struct vop_reg alpha_mode;
+ struct vop_reg alpha_en;
struct vop_reg channel;
};
@@ -173,6 +192,7 @@ struct vop_data {
const struct vop_misc *misc;
const struct vop_modeset *modeset;
const struct vop_output *output;
+ const struct vop_afbc *afbc;
const struct vop_win_yuv2yuv_data *win_yuv2yuv;
const struct vop_win_data *win;
unsigned int win_size;
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 449a62908d21..63f967902c2d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -16,13 +16,14 @@
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
-
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -435,10 +436,6 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
.atomic_check = rockchip_lvds_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rockchip_lvds_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
@@ -607,8 +604,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
dev->of_node);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_lvds_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to initialize encoder: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index 90784781e515..9a771af5d0c9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -14,6 +14,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "rockchip_drm_drv.h"
#include "rockchip_drm_vop.h"
@@ -67,10 +68,6 @@ struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
.atomic_check = rockchip_rgb_encoder_atomic_check,
};
-static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
struct drm_crtc *crtc,
struct drm_device *drm_dev)
@@ -126,8 +123,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
encoder = &rgb->encoder;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(drm_dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret < 0) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to initialize encoder: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 7a9d979c8d5d..80053d91a301 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -50,6 +50,17 @@ static const uint32_t formats_win_full[] = {
DRM_FORMAT_NV24,
};
+static const uint64_t format_modifiers_win_full[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
+static const uint64_t format_modifiers_win_full_afbc[] = {
+ ROCKCHIP_AFBC_MOD,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
static const uint32_t formats_win_lite[] = {
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
@@ -61,6 +72,11 @@ static const uint32_t formats_win_lite[] = {
DRM_FORMAT_BGR565,
};
+static const uint64_t format_modifiers_win_lite[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID,
+};
+
static const struct vop_scl_regs rk3036_win_scl = {
.scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
.scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
@@ -72,6 +88,7 @@ static const struct vop_win_phy rk3036_win0_data = {
.scl = &rk3036_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 15),
@@ -87,6 +104,7 @@ static const struct vop_win_phy rk3036_win0_data = {
static const struct vop_win_phy rk3036_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -153,6 +171,7 @@ static const struct vop_data rk3036_vop = {
static const struct vop_win_phy rk3126_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3036_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3036_SYS_CTRL, 0x1, 19),
@@ -234,6 +253,7 @@ static const struct vop_win_phy px30_win0_data = {
.scl = &px30_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
@@ -244,11 +264,15 @@ static const struct vop_win_phy px30_win0_data = {
.uv_mst = VOP_REG(PX30_WIN0_CBR_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 0),
.uv_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 16),
+ .alpha_pre_mul = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 2),
+ .alpha_mode = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 1),
+ .alpha_en = VOP_REG(PX30_WIN0_ALPHA_CTRL, 0x1, 0),
};
static const struct vop_win_phy px30_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
@@ -256,11 +280,15 @@ static const struct vop_win_phy px30_win1_data = {
.dsp_st = VOP_REG(PX30_WIN1_DSP_ST, 0xffffffff, 0),
.yrgb_mst = VOP_REG(PX30_WIN1_MST, 0xffffffff, 0),
.yrgb_vir = VOP_REG(PX30_WIN1_VIR, 0x1fff, 0),
+ .alpha_pre_mul = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 2),
+ .alpha_mode = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 1),
+ .alpha_en = VOP_REG(PX30_WIN1_ALPHA_CTRL, 0x1, 0),
};
static const struct vop_win_phy px30_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
.enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
@@ -269,6 +297,9 @@ static const struct vop_win_phy px30_win2_data = {
.dsp_st = VOP_REG(PX30_WIN2_DSP_ST0, 0x1fff1fff, 0),
.yrgb_mst = VOP_REG(PX30_WIN2_MST0, 0xffffffff, 0),
.yrgb_vir = VOP_REG(PX30_WIN2_VIR0_1, 0x1fff, 0),
+ .alpha_pre_mul = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 2),
+ .alpha_mode = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 1),
+ .alpha_en = VOP_REG(PX30_WIN2_ALPHA_CTRL, 0x1, 0),
};
static const struct vop_win_data px30_vop_big_win_data[] = {
@@ -316,6 +347,7 @@ static const struct vop_win_phy rk3066_win0_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 0),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 4),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 19),
@@ -332,6 +364,7 @@ static const struct vop_win_phy rk3066_win1_data = {
.scl = &rk3066_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 1),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 7),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 23),
@@ -347,6 +380,7 @@ static const struct vop_win_phy rk3066_win1_data = {
static const struct vop_win_phy rk3066_win2_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3066_SYS_CTRL1, 0x1, 2),
.format = VOP_REG(RK3066_SYS_CTRL0, 0x7, 10),
.rb_swap = VOP_REG(RK3066_SYS_CTRL0, 0x1, 27),
@@ -426,6 +460,7 @@ static const struct vop_win_phy rk3188_win0_data = {
.scl = &rk3188_win_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
@@ -440,6 +475,7 @@ static const struct vop_win_phy rk3188_win0_data = {
static const struct vop_win_phy rk3188_win1_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
.format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
.rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
@@ -545,6 +581,7 @@ static const struct vop_win_phy rk3288_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
@@ -563,6 +600,7 @@ static const struct vop_win_phy rk3288_win01_data = {
static const struct vop_win_phy rk3288_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.enable = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 4),
.gate = VOP_REG(RK3288_WIN2_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN2_CTRL0, 0x7, 1),
@@ -677,6 +715,7 @@ static const struct vop_win_phy rk3368_win01_data = {
.scl = &rk3288_win_full_scl,
.data_formats = formats_win_full,
.nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full,
.enable = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3368_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3368_WIN0_CTRL0, 0x1, 12),
@@ -697,6 +736,7 @@ static const struct vop_win_phy rk3368_win01_data = {
static const struct vop_win_phy rk3368_win23_data = {
.data_formats = formats_win_lite,
.nformats = ARRAY_SIZE(formats_win_lite),
+ .format_modifiers = format_modifiers_win_lite,
.gate = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 0),
.enable = VOP_REG(RK3368_WIN2_CTRL0, 0x1, 4),
.format = VOP_REG(RK3368_WIN2_CTRL0, 0x3, 5),
@@ -817,6 +857,53 @@ static const struct vop_win_yuv2yuv_data rk3399_vop_big_win_yuv2yuv_data[] = {
.y2r_en = VOP_REG(RK3399_YUV2YUV_WIN, 0x1, 9) },
{ .base = 0xC0, .phy = &rk3399_yuv2yuv_win23_data },
{ .base = 0x120, .phy = &rk3399_yuv2yuv_win23_data },
+};
+
+static const struct vop_win_phy rk3399_win01_data = {
+ .scl = &rk3288_win_full_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .format_modifiers = format_modifiers_win_full_afbc,
+ .enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
+ .act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3288_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3288_WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3288_WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+/*
+ * The rk3399 vop big window register layout is the same as rk3288's,
+ * but we keep a separate rk3399 win data array here so that we can
+ * advertise AFBC on the primary plane.
+ */
+static const struct vop_win_data rk3399_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3399_win01_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &rk3288_win01_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &rk3288_win23_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
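+/* Field layout of the rk3399's AFBCD0 decoder registers. */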
+static const struct vop_afbc rk3399_vop_afbc = {
+ .rstn = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 3),
+ .enable = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 0),
+ .win_sel = VOP_REG(RK3399_AFBCD0_CTRL, 0x3, 1),
+ .format = VOP_REG(RK3399_AFBCD0_CTRL, 0x1f, 16),
+ .hreg_block_split = VOP_REG(RK3399_AFBCD0_CTRL, 0x1, 21),
+ .hdr_ptr = VOP_REG(RK3399_AFBCD0_HDR_PTR, 0xffffffff, 0),
+ .pic_size = VOP_REG(RK3399_AFBCD0_PIC_SIZE, 0xffffffff, 0),
};
static const struct vop_data rk3399_vop_big = {
@@ -826,9 +913,10 @@ static const struct vop_data rk3399_vop_big = {
.common = &rk3288_common,
.modeset = &rk3288_modeset,
.output = &rk3399_output,
+ .afbc = &rk3399_vop_afbc,
.misc = &rk3368_misc,
- .win = rk3368_vop_win_data,
- .win_size = ARRAY_SIZE(rk3368_vop_win_data),
+ .win = rk3399_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3399_vop_win_data),
.win_yuv2yuv = rk3399_vop_big_win_yuv2yuv_data,
};
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 8e731ed0d9d9..2f319102ae9f 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -676,7 +676,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
*/
if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
!cancel_delayed_work(&sched->work_tdr)) ||
- __kthread_should_park(sched->thread))
+ kthread_should_park())
return NULL;
spin_lock(&sched->job_list_lock);
diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index 6b943ea1c57d..8c87c964176b 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -14,6 +14,7 @@ selftest(insert, igt_insert)
selftest(replace, igt_replace)
selftest(insert_range, igt_insert_range)
selftest(align, igt_align)
+selftest(frag, igt_frag)
selftest(align32, igt_align32)
selftest(align64, igt_align64)
selftest(evict, igt_evict)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 9aabe82dcd3a..3846b0f5bae3 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -10,6 +10,7 @@
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
+#include <linux/ktime.h>
#include <drm/drm_mm.h>
@@ -1033,6 +1034,129 @@ static int igt_insert_range(void *ignored)
return 0;
}
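+/*
+ * Fill the range with 4 KiB nodes, then free every other one so that
+ * subsequent insertions have to search a fragmented address space.
+ */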
+static int prepare_igt_frag(struct drm_mm *mm,
+			    struct drm_mm_node *nodes,
+			    unsigned int num_insert,
+			    const struct insert_mode *mode)
+{
+	unsigned int size = 4096;
+	unsigned int i;
+
+	for (i = 0; i < num_insert; i++) {
+		if (!expect_insert(mm, &nodes[i], size, 0, i, mode)) {
+			pr_err("%s insert failed\n", mode->name);
+			return -EINVAL;
+		}
+	}
+
+	/* introduce fragmentation by freeing every other node */
+	for (i = 0; i < num_insert; i++) {
+		if (i % 2 == 0)
+			drm_mm_remove_node(&nodes[i]);
+	}
+
+	return 0;
+}
+
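+/* Time num_insert insertions; returns elapsed nanoseconds or -EINVAL. */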
+static s64 get_insert_time(struct drm_mm *mm,
+			   unsigned int num_insert,
+			   struct drm_mm_node *nodes,
+			   const struct insert_mode *mode)
+{
+	unsigned int size = 8192;
+	ktime_t start;
+	unsigned int i;
+
+	start = ktime_get();
+	for (i = 0; i < num_insert; i++) {
+		if (!expect_insert(mm, &nodes[i], size, 0, i, mode)) {
+			pr_err("%s insert failed\n", mode->name);
+			return -EINVAL;
+		}
+	}
+
+	return ktime_to_ns(ktime_sub(ktime_get(), start));
+}
+
+static int igt_frag(void *ignored)
+{
+ struct drm_mm mm;
+ const struct insert_mode *mode;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int insert_size = 10000;
+ unsigned int scale_factor = 4;
+ int ret = -EINVAL;
+
+	/* We need 4 * insert_size nodes to hold the intermediate allocated
+	 * drm_mm nodes: insert_size for prepare_igt_frag(), insert_size for
+	 * the first get_insert_time() pass and insert_size * 2 for the
+	 * second, doubled, get_insert_time() pass.
+	 */
+ nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
+ if (!nodes)
+ return -ENOMEM;
+
+	/* For BOTTOMUP and TOPDOWN, we first fragment the
+	 * address space using prepare_igt_frag() and then verify that
+	 * insertion time grows no worse than quadratically when going
+	 * from 10k to 20k insertions.
+	 */
+ drm_mm_init(&mm, 1, U64_MAX - 2);
+ for (mode = insert_modes; mode->name; mode++) {
+		s64 insert_time1, insert_time2;
+
+		if (mode->mode != DRM_MM_INSERT_LOW &&
+		    mode->mode != DRM_MM_INSERT_HIGH)
+			continue;
+
+		ret = prepare_igt_frag(&mm, nodes, insert_size, mode);
+		if (ret)
+			goto err;
+
+		insert_time1 = get_insert_time(&mm, insert_size,
+					       nodes + insert_size, mode);
+		if (insert_time1 < 0) {
+			ret = insert_time1;
+			goto err;
+		}
+
+		insert_time2 = get_insert_time(&mm, (insert_size * 2),
+					       nodes + insert_size * 2, mode);
+		if (insert_time2 < 0) {
+			ret = insert_time2;
+			goto err;
+		}
+
+		pr_info("%s fragmented insert of %u and %u insertions took %lld and %lld nsecs\n",
+			mode->name, insert_size, insert_size * 2,
+			insert_time1, insert_time2);
+
+		if (insert_time2 > (scale_factor * insert_time1)) {
+			pr_err("%s fragmented insert took %lld nsecs more\n",
+			       mode->name,
+			       insert_time2 - (scale_factor * insert_time1));
+			ret = -EINVAL;
+			goto err;
+		}
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ }
+
+ ret = 0;
+err:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+
+ return ret;
+}
+
static int igt_align(void *ignored)
{
const struct insert_mode *mode;
@@ -2359,7 +2483,7 @@ static int __init test_drm_mm_init(void)
while (!random_seed)
random_seed = get_random_int();
- pr_info("Testing DRM range manger (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
+ pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n",
random_seed, max_iterations, max_prime);
err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 75a752d59ef1..03556dbfcafb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -17,6 +17,7 @@
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
#include "shmob_drm_backlight.h"
@@ -558,15 +559,6 @@ static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.mode_set = shmob_drm_encoder_mode_set,
};
-static void shmob_drm_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = shmob_drm_encoder_destroy,
-};
-
int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
{
struct drm_encoder *encoder = &sdev->encoder.encoder;
@@ -576,8 +568,8 @@ int shmob_drm_encoder_create(struct shmob_drm_device *sdev)
encoder->possible_crtcs = 1;
- ret = drm_encoder_init(sdev->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(sdev->ddev, encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index b8c0930959c7..26a15c214bd3 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -131,16 +131,7 @@ DEFINE_DRM_GEM_CMA_FOPS(shmob_drm_fops);
static struct drm_driver shmob_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET,
.irq_handler = shmob_drm_irq,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
- .dumb_create = drm_gem_cma_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
@@ -192,7 +183,6 @@ static int shmob_drm_remove(struct platform_device *pdev)
drm_dev_unregister(ddev);
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
drm_irq_uninstall(ddev);
drm_dev_put(ddev);
@@ -288,7 +278,6 @@ err_irq_uninstall:
drm_irq_uninstall(ddev);
err_modeset_cleanup:
drm_kms_helper_poll_fini(ddev);
- drm_mode_config_cleanup(ddev);
err_free_drm_dev:
drm_dev_put(ddev);
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
index c51197b6fd85..7a866d6ce6bb 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_kms.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_kms.c
@@ -126,7 +126,11 @@ static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = {
int shmob_drm_modeset_init(struct shmob_drm_device *sdev)
{
- drm_mode_config_init(sdev->ddev);
+ int ret;
+
+ ret = drmm_mode_config_init(sdev->ddev);
+ if (ret)
+ return ret;
shmob_drm_crtc_create(sdev);
shmob_drm_encoder_create(sdev);
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index c7652584255d..319962a2c17b 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -42,8 +42,8 @@ static const struct sti_compositor_data stih407_compositor_data = {
},
};
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
- struct drm_minor *minor)
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor)
{
unsigned int i;
@@ -54,8 +54,6 @@ int sti_compositor_debugfs_init(struct sti_compositor *compo,
for (i = 0; i < STI_MAX_MIXER; i++)
if (compo->mixer[i])
sti_mixer_debugfs_init(compo->mixer[i], minor);
-
- return 0;
}
static int sti_compositor_bind(struct device *dev,
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index ac4bb3834810..25bb01bdd013 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -79,7 +79,7 @@ struct sti_compositor {
struct notifier_block vtg_vblank_nb[STI_MAX_MIXER];
};
-int sti_compositor_debugfs_init(struct sti_compositor *compo,
- struct drm_minor *minor);
+void sti_compositor_debugfs_init(struct sti_compositor *compo,
+ struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index 49e6cb8f5836..6f37c104c46f 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -319,7 +319,7 @@ static int sti_crtc_late_register(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
if (drm_crtc_index(crtc) == 0)
- return sti_compositor_debugfs_init(compo, crtc->dev->primary);
+ sti_compositor_debugfs_init(compo, crtc->dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index ea64c1dcaf63..a98057431023 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -131,17 +131,17 @@ static struct drm_info_list cursor_debugfs_files[] = {
{ "cursor", cursor_dbg_show, 0, NULL },
};
-static int cursor_debugfs_init(struct sti_cursor *cursor,
- struct drm_minor *minor)
+static void cursor_debugfs_init(struct sti_cursor *cursor,
+ struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
cursor_debugfs_files[i].data = cursor;
- return drm_debugfs_create_files(cursor_debugfs_files,
- ARRAY_SIZE(cursor_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(cursor_debugfs_files,
+ ARRAY_SIZE(cursor_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
@@ -342,7 +342,9 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane)
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_cursor *cursor = to_sti_cursor(plane);
- return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+ cursor_debugfs_init(cursor, drm_plane->dev->primary);
+
+ return 0;
}
static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 50870d8cbb76..3f54efa36098 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -92,24 +92,16 @@ static struct drm_info_list sti_drm_dbg_list[] = {
{"fps_get", sti_drm_fps_dbg_show, 0},
};
-static int sti_drm_dbg_init(struct drm_minor *minor)
+static void sti_drm_dbg_init(struct drm_minor *minor)
{
- int ret;
-
- ret = drm_debugfs_create_files(sti_drm_dbg_list,
- ARRAY_SIZE(sti_drm_dbg_list),
- minor->debugfs_root, minor);
- if (ret)
- goto err;
+ drm_debugfs_create_files(sti_drm_dbg_list,
+ ARRAY_SIZE(sti_drm_dbg_list),
+ minor->debugfs_root, minor);
debugfs_create_file("fps_show", S_IRUGO | S_IWUSR, minor->debugfs_root,
minor->dev, &sti_drm_fps_fops);
DRM_INFO("%s: debugfs installed\n", DRIVER_NAME);
- return 0;
-err:
- DRM_ERROR("%s: cannot install debugfs\n", DRIVER_NAME);
- return ret;
}
static const struct drm_mode_config_funcs sti_mode_config_funcs = {
@@ -140,18 +132,8 @@ DEFINE_DRM_GEM_CMA_FOPS(sti_driver_fops);
static struct drm_driver sti_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
.fops = &sti_driver_fops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.debugfs_init = sti_drm_dbg_init,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 3d04bfca21a0..de4af7735c46 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -196,16 +196,16 @@ static struct drm_info_list dvo_debugfs_files[] = {
{ "dvo", dvo_dbg_show, 0, NULL },
};
-static int dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
+static void dvo_debugfs_init(struct sti_dvo *dvo, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(dvo_debugfs_files); i++)
dvo_debugfs_files[i].data = dvo;
- return drm_debugfs_create_files(dvo_debugfs_files,
- ARRAY_SIZE(dvo_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(dvo_debugfs_files,
+ ARRAY_SIZE(dvo_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_dvo_disable(struct drm_bridge *bridge)
@@ -405,10 +405,7 @@ static int sti_dvo_late_register(struct drm_connector *connector)
= to_sti_dvo_connector(connector);
struct sti_dvo *dvo = dvo_connector->dvo;
- if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
- DRM_ERROR("DVO debugfs setup failed\n");
- return -EINVAL;
- }
+ dvo_debugfs_init(dvo, dvo->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 11595c748844..2d5a2b5b78b8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -343,9 +343,10 @@ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
for (i = 0; i < nb_files; i++)
gdp_debugfs_files[i].data = gdp;
- return drm_debugfs_create_files(gdp_debugfs_files,
- nb_files,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(gdp_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
+ return 0;
}
static int sti_gdp_fourcc2format(int fourcc)
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f3f28d79b0e4..5c2b650b561d 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -367,16 +367,16 @@ static struct drm_info_list hda_debugfs_files[] = {
{ "hda", hda_dbg_show, 0, NULL },
};
-static int hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
+static void hda_debugfs_init(struct sti_hda *hda, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hda_debugfs_files); i++)
hda_debugfs_files[i].data = hda;
- return drm_debugfs_create_files(hda_debugfs_files,
- ARRAY_SIZE(hda_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hda_debugfs_files,
+ ARRAY_SIZE(hda_debugfs_files),
+ minor->debugfs_root, minor);
}
/**
@@ -586,7 +586,6 @@ static int sti_hda_connector_get_modes(struct drm_connector *connector)
&hda_supported_modes[i].mode);
if (!mode)
continue;
- mode->vrefresh = drm_mode_vrefresh(mode);
/* the first mode is the preferred mode */
if (i == 0)
@@ -643,10 +642,7 @@ static int sti_hda_late_register(struct drm_connector *connector)
= to_sti_hda_connector(connector);
struct sti_hda *hda = hda_connector->hda;
- if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
- DRM_ERROR("HDA debugfs setup failed\n");
- return -EINVAL;
- }
+ hda_debugfs_init(hda, hda->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 18eaf786ffa4..5b15c4974e6b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -727,16 +727,16 @@ static struct drm_info_list hdmi_debugfs_files[] = {
{ "hdmi", hdmi_dbg_show, 0, NULL },
};
-static int hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
+static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_files); i++)
hdmi_debugfs_files[i].data = hdmi;
- return drm_debugfs_create_files(hdmi_debugfs_files,
- ARRAY_SIZE(hdmi_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hdmi_debugfs_files,
+ ARRAY_SIZE(hdmi_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_hdmi_disable(struct drm_bridge *bridge)
@@ -1113,10 +1113,7 @@ static int sti_hdmi_late_register(struct drm_connector *connector)
= to_sti_hdmi_connector(connector);
struct sti_hdmi *hdmi = hdmi_connector->hdmi;
- if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
- DRM_ERROR("HDMI debugfs setup failed\n");
- return -EINVAL;
- }
+ hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1015abe0ce08..5a4e12194a77 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -639,16 +639,16 @@ static struct drm_info_list hqvdp_debugfs_files[] = {
{ "hqvdp", hqvdp_dbg_show, 0, NULL },
};
-static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+static void hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
hqvdp_debugfs_files[i].data = hqvdp;
- return drm_debugfs_create_files(hqvdp_debugfs_files,
- ARRAY_SIZE(hqvdp_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(hqvdp_debugfs_files,
+ ARRAY_SIZE(hqvdp_debugfs_files),
+ minor->debugfs_root, minor);
}
/**
@@ -1274,7 +1274,9 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
struct sti_plane *plane = to_sti_plane(drm_plane);
struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
- return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+ hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
+
+ return 0;
}
static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index c3a3e1e5fc8a..7e5f14646625 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -178,7 +178,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
{ "mixer_aux", mixer_dbg_show, 0, NULL },
};
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
{
unsigned int i;
struct drm_info_list *mixer_debugfs_files;
@@ -194,15 +194,15 @@ int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
nb_files = ARRAY_SIZE(mixer1_debugfs_files);
break;
default:
- return -EINVAL;
+ return;
}
for (i = 0; i < nb_files; i++)
mixer_debugfs_files[i].data = mixer;
- return drm_debugfs_create_files(mixer_debugfs_files,
- nb_files,
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(mixer_debugfs_files,
+ nb_files,
+ minor->debugfs_root, minor);
}
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable)
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index d9544246913a..ab06beb7b258 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -58,7 +58,7 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
-int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
+void sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
/* depth in Cross-bar control = z order */
#define GAM_MIXER_NB_DEPTH_LEVEL 6
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index c36a8da373cb..df3817f0fd30 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -570,16 +570,16 @@ static struct drm_info_list tvout_debugfs_files[] = {
{ "tvout", tvout_dbg_show, 0, NULL },
};
-static int tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
+static void tvout_debugfs_init(struct sti_tvout *tvout, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(tvout_debugfs_files); i++)
tvout_debugfs_files[i].data = tvout;
- return drm_debugfs_create_files(tvout_debugfs_files,
- ARRAY_SIZE(tvout_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tvout_debugfs_files,
+ ARRAY_SIZE(tvout_debugfs_files),
+ minor->debugfs_root, minor);
}
static void sti_tvout_encoder_dpms(struct drm_encoder *encoder, int mode)
@@ -603,14 +603,11 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
static int sti_tvout_late_register(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- int ret;
if (tvout->debugfs_registered)
return 0;
- ret = tvout_debugfs_init(tvout, encoder->dev->primary);
- if (ret)
- return ret;
+ tvout_debugfs_init(tvout, encoder->dev->primary);
tvout->debugfs_registered = true;
return 0;
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 2d4230410464..2d818397918d 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -124,16 +124,16 @@ static struct drm_info_list vid_debugfs_files[] = {
{ "vid", vid_dbg_show, 0, NULL },
};
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(vid_debugfs_files); i++)
vid_debugfs_files[i].data = vid;
- return drm_debugfs_create_files(vid_debugfs_files,
- ARRAY_SIZE(vid_debugfs_files),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(vid_debugfs_files,
+ ARRAY_SIZE(vid_debugfs_files),
+ minor->debugfs_root, minor);
}
void sti_vid_commit(struct sti_vid *vid,
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 9dbd78461de1..991849ba50b5 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,6 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
int id, void __iomem *baseaddr);
-int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
+void vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
#endif
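
Editorial note: the sti hunks above all follow one pattern, since drm_debugfs_create_files() no longer reports failure, the wrappers become void and their callers return 0 unconditionally. A hedged userspace sketch of that int-to-void conversion (names illustrative only, not the driver's API):

    /* sketch: a registration helper converted from int to void */
    #include <stdio.h>

    static void debugfs_init(const char *name)
    {
            /* best-effort: errors while creating debugfs files are ignored */
            printf("registering debugfs files for %s\n", name);
    }

    static int late_register(const char *name)
    {
            debugfs_init(name);     /* was: return debugfs_init(name); */
            return 0;
    }

    int main(void)
    {
            return late_register("hdmi");
    }
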
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index ea9fcbdc68b3..411103f013e2 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -62,16 +62,7 @@ static struct drm_driver drv_driver = {
.minor = 0,
.patchlevel = 0,
.fops = &drv_driver_fops,
- .dumb_create = stm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(stm_gem_cma_dumb_create),
};
static int drv_load(struct drm_device *ddev)
@@ -88,7 +79,9 @@ static int drv_load(struct drm_device *ddev)
ddev->dev_private = (void *)ldev;
- drm_mode_config_init(ddev);
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
/*
* set max width and height as default value.
@@ -103,7 +96,7 @@ static int drv_load(struct drm_device *ddev)
ret = ltdc_load(ddev);
if (ret)
- goto err;
+ return ret;
drm_mode_config_reset(ddev);
drm_kms_helper_poll_init(ddev);
@@ -111,9 +104,6 @@ static int drv_load(struct drm_device *ddev)
platform_set_drvdata(pdev, ddev);
return 0;
-err:
- drm_mode_config_cleanup(ddev);
- return ret;
}
static void drv_unload(struct drm_device *ddev)
@@ -122,7 +112,6 @@ static void drv_unload(struct drm_device *ddev)
drm_kms_helper_poll_fini(ddev);
ltdc_unload(ddev);
- drm_mode_config_cleanup(ddev);
}
static __maybe_unused int drv_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index df585fe64f61..f894968d6e45 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -42,8 +42,6 @@
#define MAX_IRQ 4
-#define MAX_ENDPOINTS 2
-
#define HWVER_10200 0x010200
#define HWVER_10300 0x010300
#define HWVER_20101 0x020101
@@ -1201,36 +1199,20 @@ int ltdc_load(struct drm_device *ddev)
struct ltdc_device *ldev = ddev->dev_private;
struct device *dev = ddev->dev;
struct device_node *np = dev->of_node;
- struct drm_bridge *bridge[MAX_ENDPOINTS] = {NULL};
- struct drm_panel *panel[MAX_ENDPOINTS] = {NULL};
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct drm_crtc *crtc;
struct reset_control *rstc;
struct resource *res;
- int irq, ret, i, endpoint_not_ready = -ENODEV;
+ int irq, i, nb_endpoints;
+ int ret = -ENODEV;
DRM_DEBUG_DRIVER("\n");
- /* Get endpoints if any */
- for (i = 0; i < MAX_ENDPOINTS; i++) {
- ret = drm_of_find_panel_or_bridge(np, 0, i, &panel[i],
- &bridge[i]);
-
- /*
- * If at least one endpoint is -EPROBE_DEFER, defer probing,
- * else if at least one endpoint is ready, continue probing.
- */
- if (ret == -EPROBE_DEFER)
- return ret;
- else if (!ret)
- endpoint_not_ready = 0;
- }
-
- if (endpoint_not_ready)
- return endpoint_not_ready;
-
- rstc = devm_reset_control_get_exclusive(dev, NULL);
-
- mutex_init(&ldev->err_lock);
+ /* Get number of endpoints */
+ nb_endpoints = of_graph_get_endpoint_count(np);
+ if (!nb_endpoints)
+ return -ENODEV;
ldev->pixel_clk = devm_clk_get(dev, "lcd");
if (IS_ERR(ldev->pixel_clk)) {
@@ -1244,6 +1226,43 @@ int ltdc_load(struct drm_device *ddev)
return -ENODEV;
}
+ /* Get endpoints if any */
+ for (i = 0; i < nb_endpoints; i++) {
+ ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge);
+
+ /*
+ * If an endpoint is -ENODEV, continue probing the
+ * others; if an endpoint returned any other error
+ * (e.g. -EPROBE_DEFER), stop probing.
+ */
+ if (ret == -ENODEV)
+ continue;
+ else if (ret)
+ goto err;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ DRM_ERROR("panel-bridge endpoint %d\n", i);
+ ret = PTR_ERR(bridge);
+ goto err;
+ }
+ }
+
+ if (bridge) {
+ ret = ltdc_encoder_init(ddev, bridge);
+ if (ret) {
+ DRM_ERROR("init encoder endpoint %d\n", i);
+ goto err;
+ }
+ }
+ }
+
+ rstc = devm_reset_control_get_exclusive(dev, NULL);
+
+ mutex_init(&ldev->err_lock);
+
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
usleep_range(10, 20);
@@ -1285,27 +1304,7 @@ int ltdc_load(struct drm_device *ddev)
DRM_ERROR("Failed to register LTDC interrupt\n");
goto err;
}
- }
- /* Add endpoints panels or bridges if any */
- for (i = 0; i < MAX_ENDPOINTS; i++) {
- if (panel[i]) {
- bridge[i] = drm_panel_bridge_add_typed(panel[i],
- DRM_MODE_CONNECTOR_DPI);
- if (IS_ERR(bridge[i])) {
- DRM_ERROR("panel-bridge endpoint %d\n", i);
- ret = PTR_ERR(bridge[i]);
- goto err;
- }
- }
-
- if (bridge[i]) {
- ret = ltdc_encoder_init(ddev, bridge[i]);
- if (ret) {
- DRM_ERROR("init encoder endpoint %d\n", i);
- goto err;
- }
- }
}
crtc = devm_kzalloc(dev, sizeof(*crtc), GFP_KERNEL);
@@ -1340,8 +1339,8 @@ int ltdc_load(struct drm_device *ddev)
return 0;
err:
- for (i = 0; i < MAX_ENDPOINTS; i++)
- drm_panel_bridge_remove(bridge[i]);
+ for (i = 0; i < nb_endpoints; i++)
+ drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
clk_disable_unprepare(ldev->pixel_clk);
@@ -1350,11 +1349,14 @@ err:
void ltdc_unload(struct drm_device *ddev)
{
- int i;
+ struct device *dev = ddev->dev;
+ int nb_endpoints, i;
DRM_DEBUG_DRIVER("\n");
- for (i = 0; i < MAX_ENDPOINTS; i++)
+ nb_endpoints = of_graph_get_endpoint_count(dev->of_node);
+
+ for (i = 0; i < nb_endpoints; i++)
drm_of_panel_bridge_remove(ddev->dev->of_node, 0, i);
pm_runtime_disable(ddev->dev);
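
Editorial note: the reworked ltdc probe treats -ENODEV as "no device on this endpoint, keep going" and any other error as fatal. A minimal sketch of that policy, with a hypothetical stand-in for drm_of_find_panel_or_bridge():

    /* sketch: skip absent endpoints, abort on real errors */
    #include <errno.h>
    #include <stdio.h>

    /* invented stand-in: returns 0, -ENODEV, or another negative errno */
    static int find_panel_or_bridge(int endpoint)
    {
            return endpoint == 1 ? -ENODEV : 0;
    }

    static int probe_endpoints(int nb_endpoints)
    {
            int i, ret;

            for (i = 0; i < nb_endpoints; i++) {
                    ret = find_panel_or_bridge(i);
                    if (ret == -ENODEV)
                            continue;       /* nothing connected here */
                    else if (ret)
                            return ret;     /* e.g. -EPROBE_DEFER: stop */
                    printf("endpoint %d ready\n", i);
            }
            return 0;
    }

    int main(void)
    {
            return probe_endpoints(2) ? 1 : 0;
    }
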
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 328272ff77d8..29861fc81b35 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -52,8 +52,7 @@ static struct drm_driver sun4i_drv_driver = {
.minor = 0,
/* GEM Operations */
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
- .dumb_create = drm_sun4i_gem_dumb_create,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(drm_sun4i_gem_dumb_create),
};
static int sun4i_drv_bind(struct device *dev)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 7ad3f06c127e..00ca35f07ba5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -148,7 +148,7 @@
#define SUN4I_HDMI_DDC_CMD_IMPLICIT_WRITE 3
#define SUN4I_HDMI_DDC_CLK_REG 0x528
-#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0x7) << 3)
+#define SUN4I_HDMI_DDC_CLK_M(m) (((m) & 0xf) << 3)
#define SUN4I_HDMI_DDC_CLK_N(n) ((n) & 0x7)
#define SUN4I_HDMI_DDC_LINE_CTRL_REG 0x540
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
index 2ff780114106..12430b9d4e93 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_ddc_clk.c
@@ -33,7 +33,7 @@ static unsigned long sun4i_ddc_calc_divider(unsigned long rate,
unsigned long best_rate = 0;
u8 best_m = 0, best_n = 0, _m, _n;
- for (_m = 0; _m < 8; _m++) {
+ for (_m = 0; _m < 16; _m++) {
for (_n = 0; _n < 8; _n++) {
unsigned long tmp_rate;
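
Editorial note: widening SUN4I_HDMI_DDC_CLK_M from 3 to 4 bits doubles the divider search space, hence the loop bound change above. A hedged userspace sketch of such a brute-force search, using a representative placeholder formula rather than the exact sun4i DDC rate equation:

    /* sketch: scan a 4-bit m and 3-bit n for the best rate <= target */
    #include <stdio.h>

    int main(void)
    {
            const unsigned long parent = 24000000, target = 100000;
            unsigned long best_rate = 0;
            unsigned int best_m = 0, best_n = 0, m, n;

            for (m = 0; m < 16; m++) {      /* was: m < 8 */
                    for (n = 0; n < 8; n++) {
                            unsigned long rate = parent / ((m + 1) << n);

                            if (rate <= target && rate > best_rate) {
                                    best_rate = rate;
                                    best_m = m;
                                    best_n = n;
                            }
                    }
            }
            printf("best: m=%u n=%u rate=%lu\n", best_m, best_n, best_rate);
            return 0;
    }
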
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 68d4644ac2dc..ce07ddc3e058 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -22,6 +22,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
@@ -204,10 +205,6 @@ static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
.mode_valid = sun4i_hdmi_mode_valid,
};
-static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int sun4i_hdmi_get_modes(struct drm_connector *connector)
{
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
@@ -282,7 +279,7 @@ static const struct drm_connector_funcs sun4i_hdmi_connector_funcs = {
};
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
-static bool sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
+static int sun4i_hdmi_cec_pin_read(struct cec_adapter *adap)
{
struct sun4i_hdmi *hdmi = cec_get_drvdata(adap);
@@ -611,11 +608,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
- ret = drm_encoder_init(drm,
- &hdmi->encoder,
- &sun4i_hdmi_funcs,
- DRM_MODE_ENCODER_TMDS,
- NULL);
+ ret = drm_simple_encoder_init(drm, &hdmi->encoder,
+ DRM_MODE_ENCODER_TMDS);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
goto err_put_ddc_i2c;
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 26e5c7ceb8ff..ffda3184aa12 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -12,6 +12,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -96,10 +97,6 @@ static const struct drm_encoder_helper_funcs sun4i_lvds_enc_helper_funcs = {
.enable = sun4i_lvds_encoder_enable,
};
-static const struct drm_encoder_funcs sun4i_lvds_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
@@ -121,11 +118,8 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_encoder_helper_add(&lvds->encoder,
&sun4i_lvds_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &lvds->encoder,
- &sun4i_lvds_enc_funcs,
- DRM_MODE_ENCODER_LVDS,
- NULL);
+ ret = drm_simple_encoder_init(drm, &lvds->encoder,
+ DRM_MODE_ENCODER_LVDS);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the lvds encoder\n");
goto err_out;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 3b23d5be3cf3..5a7d43939ae6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -14,6 +14,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -188,15 +189,6 @@ static struct drm_encoder_helper_funcs sun4i_rgb_enc_helper_funcs = {
.mode_valid = sun4i_rgb_mode_valid,
};
-static void sun4i_rgb_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
- .destroy = sun4i_rgb_enc_destroy,
-};
-
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
@@ -218,11 +210,8 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_encoder_helper_add(&rgb->encoder,
&sun4i_rgb_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &rgb->encoder,
- &sun4i_rgb_enc_funcs,
- DRM_MODE_ENCODER_NONE,
- NULL);
+ ret = drm_simple_encoder_init(drm, &rgb->encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(drm->dev, "Couldn't initialise the rgb encoder\n");
goto err_out;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 624437b27cdc..359b56e43b83 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -812,10 +812,8 @@ static int sun4i_tcon_init_irq(struct device *dev,
int irq, ret;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "Couldn't retrieve the TCON interrupt\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, sun4i_tcon_handler, 0,
dev_name(dev), tcon);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index 39c15282e448..63f4428ac3bf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -19,6 +19,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
@@ -473,15 +474,6 @@ static struct drm_encoder_helper_funcs sun4i_tv_helper_funcs = {
.mode_set = sun4i_tv_mode_set,
};
-static void sun4i_tv_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static struct drm_encoder_funcs sun4i_tv_funcs = {
- .destroy = sun4i_tv_destroy,
-};
-
static int sun4i_tv_comp_get_modes(struct drm_connector *connector)
{
int i;
@@ -592,11 +584,8 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&tv->encoder,
&sun4i_tv_helper_funcs);
- ret = drm_encoder_init(drm,
- &tv->encoder,
- &sun4i_tv_funcs,
- DRM_MODE_ENCODER_TVDAC,
- NULL);
+ ret = drm_simple_encoder_init(drm, &tv->encoder,
+ DRM_MODE_ENCODER_TVDAC);
if (ret) {
dev_err(dev, "Couldn't initialise the TV encoder\n");
goto err_disable_clk;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 059939789730..aa67cb037e9d 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,6 +24,7 @@
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
@@ -717,7 +718,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
struct sun6i_dsi *dsi = encoder_to_sun6i_dsi(encoder);
struct mipi_dsi_device *device = dsi->device;
- union phy_configure_opts opts = { 0 };
+ union phy_configure_opts opts = { };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
int err;
@@ -846,10 +847,6 @@ static const struct drm_encoder_helper_funcs sun6i_dsi_enc_helper_funcs = {
.enable = sun6i_dsi_encoder_enable,
};
-static const struct drm_encoder_funcs sun6i_dsi_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi,
const struct mipi_dsi_msg *msg)
{
@@ -1062,11 +1059,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
- ret = drm_encoder_init(drm,
- &dsi->encoder,
- &sun6i_dsi_enc_funcs,
- DRM_MODE_ENCODER_DSI,
- NULL);
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
if (ret) {
dev_err(dsi->dev, "Couldn't initialise the DSI encoder\n");
return ret;
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index e8a317d5ba19..972682bb8000 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -10,6 +10,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "sun8i_dw_hdmi.h"
#include "sun8i_tcon_top.h"
@@ -29,10 +30,6 @@ sun8i_dw_hdmi_encoder_helper_funcs = {
.mode_set = sun8i_dw_hdmi_encoder_mode_set,
};
-static const struct drm_encoder_funcs sun8i_dw_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static enum drm_mode_status
sun8i_dw_hdmi_mode_valid_a83t(struct drm_connector *connector,
const struct drm_display_mode *mode)
@@ -220,8 +217,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, &sun8i_dw_hdmi_encoder_helper_funcs);
- drm_encoder_init(drm, encoder, &sun8i_dw_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
sun8i_hdmi_phy_init(hdmi->phy);
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 4a64f7ae437a..cc4fb916318f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -27,314 +27,225 @@
#include "sun8i_vi_layer.h"
#include "sunxi_engine.h"
+struct de2_fmt_info {
+ u32 drm_fmt;
+ u32 de2_fmt;
+};
+
static const struct de2_fmt_info de2_formats[] = {
{
.drm_fmt = DRM_FORMAT_ARGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_XRGB8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XRGB8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_XBGR8888,
.de2_fmt = SUN8I_MIXER_FBFMT_XBGR8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBX8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRX8888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRX8888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGB888,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGR888,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGB565,
.de2_fmt = SUN8I_MIXER_FBFMT_RGB565,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGR565,
.de2_fmt = SUN8I_MIXER_FBFMT_BGR565,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XRGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_XBGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_RGBX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
/* for DE2 VI layer which ignores alpha */
.drm_fmt = DRM_FORMAT_BGRX5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ARGB2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_ABGR2101010,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_RGBA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_BGRA1010102,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_OFF,
},
{
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_VYUY,
.de2_fmt = SUN8I_MIXER_FBFMT_VYUY,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUYV,
.de2_fmt = SUN8I_MIXER_FBFMT_YUYV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVYU,
.de2_fmt = SUN8I_MIXER_FBFMT_YVYU,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV16,
.de2_fmt = SUN8I_MIXER_FBFMT_NV16,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV61,
.de2_fmt = SUN8I_MIXER_FBFMT_NV61,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV12,
.de2_fmt = SUN8I_MIXER_FBFMT_NV12,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_NV21,
.de2_fmt = SUN8I_MIXER_FBFMT_NV21,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YUV411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU420,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV420,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_YVU411,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV411,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
},
{
.drm_fmt = DRM_FORMAT_P010,
.de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
.drm_fmt = DRM_FORMAT_P210,
.de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
- .rgb = false,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
},
};
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(de2_formats); ++i)
- if (de2_formats[i].drm_fmt == format)
- return &de2_formats[i];
+ if (de2_formats[i].drm_fmt == format) {
+ *hw_format = de2_formats[i].de2_fmt;
+ return 0;
+ }
- return NULL;
+ return -EINVAL;
}
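
Editorial note: sun8i_mixer_drm_format_to_hw() replaces the pointer-returning lookup with an out-parameter plus error code. A self-contained sketch of that contract; the hardware format values here are invented, only the fourcc codes are real:

    /* sketch: drm fourcc to hardware format lookup, 0 or -EINVAL */
    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fmt_map { uint32_t drm_fmt; uint32_t hw_fmt; };

    static const struct fmt_map formats[] = {
            { 0x34325241 /* 'AR24' ARGB8888 */, 0x00 },
            { 0x34324258 /* 'XB24' XBGR8888 */, 0x05 },
    };

    static int drm_format_to_hw(uint32_t format, uint32_t *hw_format)
    {
            unsigned int i;

            for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++)
                    if (formats[i].drm_fmt == format) {
                            *hw_format = formats[i].hw_fmt;
                            return 0;
                    }

            return -EINVAL;
    }

    int main(void)
    {
            uint32_t hw;

            if (drm_format_to_hw(0x34325241, &hw) == 0)
                    printf("hw format 0x%02x\n", hw);
            return 0;
    }
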
static void sun8i_mixer_commit(struct sunxi_engine *engine)
@@ -452,6 +363,19 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
+ if (of_find_property(dev->of_node, "iommus", NULL)) {
+ /*
+ * This assumes we have the same DMA constraints for
+ * all the mixers in our pipeline. This sounds
+ * bad, but it has always been the case for us, and
+ * DRM doesn't do per-device allocation either, so we
+ * would need to fix DRM first...
+ */
+ ret = of_dma_configure(drm->dev, dev->of_node, true);
+ if (ret)
+ return ret;
+ }
+
/*
* While this function can fail, we shouldn't do anything
* if this happens. Some early DE2 DT entries don't provide
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 345b28b0a80a..7576b523fdbb 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -10,7 +10,6 @@
#include <linux/regmap.h>
#include <linux/reset.h>
-#include "sun8i_csc.h"
#include "sunxi_engine.h"
#define SUN8I_MIXER_SIZE(w, h) (((h) - 1) << 16 | ((w) - 1))
@@ -144,13 +143,6 @@
#define SUN50I_MIXER_CDC0_EN 0xd0000
#define SUN50I_MIXER_CDC1_EN 0xd8000
-struct de2_fmt_info {
- u32 drm_fmt;
- u32 de2_fmt;
- bool rgb;
- enum sun8i_csc_mode csc;
-};
-
/**
* struct sun8i_mixer_cfg - mixer HW configuration
* @vi_num: number of VI channels
@@ -210,5 +202,5 @@ sun8i_channel_base(struct sun8i_mixer *mixer, int channel)
return DE2_CH_BASE + channel * DE2_CH_SIZE;
}
-const struct de2_fmt_info *sun8i_mixer_format_info(u32 format);
+int sun8i_mixer_drm_format_to_hw(u32 format, u32 *hw_format);
#endif /* _SUN8I_MIXER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
index c87fd842918e..54f937a7d5e7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_ui_layer.c
@@ -19,8 +19,8 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "sun8i_ui_layer.h"
#include "sun8i_mixer.h"
+#include "sun8i_ui_layer.h"
#include "sun8i_ui_scaler.h"
static void sun8i_ui_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -174,18 +174,20 @@ static int sun8i_ui_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- const struct de2_fmt_info *fmt_info;
- u32 val, ch_base;
+ const struct drm_format_info *fmt;
+ u32 val, ch_base, hw_fmt;
+ int ret;
ch_base = sun8i_channel_base(mixer, channel);
- fmt_info = sun8i_mixer_format_info(state->fb->format->format);
- if (!fmt_info || !fmt_info->rgb) {
+ fmt = state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret || fmt->is_yuv) {
DRM_DEBUG_DRIVER("Invalid format\n");
return -EINVAL;
}
- val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
+ val = hw_fmt << SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_UI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_UI_LAYER_ATTR_FBFMT_MASK, val);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index b8398ca18b0f..22c8c5375d0d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -12,8 +12,9 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include "sun8i_vi_layer.h"
+#include "sun8i_csc.h"
#include "sun8i_mixer.h"
+#include "sun8i_vi_layer.h"
#include "sun8i_vi_scaler.h"
static void sun8i_vi_layer_enable(struct sun8i_mixer *mixer, int channel,
@@ -210,28 +211,47 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
return 0;
}
+static u32 sun8i_vi_layer_get_csc_mode(const struct drm_format_info *format)
+{
+ if (!format->is_yuv)
+ return SUN8I_CSC_MODE_OFF;
+
+ switch (format->format) {
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YVU444:
+ return SUN8I_CSC_MODE_YVU2RGB;
+ default:
+ return SUN8I_CSC_MODE_YUV2RGB;
+ }
+}
+
static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
int overlay, struct drm_plane *plane)
{
struct drm_plane_state *state = plane->state;
- const struct de2_fmt_info *fmt_info;
- u32 val, ch_base;
+ u32 val, ch_base, csc_mode, hw_fmt;
+ const struct drm_format_info *fmt;
+ int ret;
ch_base = sun8i_channel_base(mixer, channel);
- fmt_info = sun8i_mixer_format_info(state->fb->format->format);
- if (!fmt_info) {
+ fmt = state->fb->format;
+ ret = sun8i_mixer_drm_format_to_hw(fmt->format, &hw_fmt);
+ if (ret) {
DRM_DEBUG_DRIVER("Invalid format\n");
- return -EINVAL;
+ return ret;
}
- val = fmt_info->de2_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
+ val = hw_fmt << SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_OFFSET;
regmap_update_bits(mixer->engine.regs,
SUN8I_MIXER_CHAN_VI_LAYER_ATTR(ch_base, overlay),
SUN8I_MIXER_CHAN_VI_LAYER_ATTR_FBFMT_MASK, val);
- if (fmt_info->csc != SUN8I_CSC_MODE_OFF) {
- sun8i_csc_set_ccsc_coefficients(mixer, channel, fmt_info->csc,
+ csc_mode = sun8i_vi_layer_get_csc_mode(fmt);
+ if (csc_mode != SUN8I_CSC_MODE_OFF) {
+ sun8i_csc_set_ccsc_coefficients(mixer, channel, csc_mode,
state->color_encoding,
state->color_range);
sun8i_csc_enable_ccsc(mixer, channel, true);
@@ -239,7 +259,7 @@ static int sun8i_vi_layer_update_formats(struct sun8i_mixer *mixer, int channel,
sun8i_csc_enable_ccsc(mixer, channel, false);
}
- if (fmt_info->rgb)
+ if (!fmt->is_yuv)
val = SUN8I_MIXER_CHAN_VI_LAYER_ATTR_RGB_MODE;
else
val = 0;
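
Editorial note: with the rgb/csc fields dropped from de2_fmt_info, the CSC mode is now derived from the generic drm_format_info, as in the hunk above. An illustrative userspace sketch of that selection logic; the enum values and the chroma_swapped flag are invented abstractions of the YVU format checks:

    /* sketch: pick a CSC mode from generic format properties */
    #include <stdbool.h>
    #include <stdio.h>

    enum csc_mode { CSC_OFF, CSC_YUV2RGB, CSC_YVU2RGB };

    struct fmt { bool is_yuv; bool chroma_swapped; };

    static enum csc_mode get_csc_mode(const struct fmt *f)
    {
            if (!f->is_yuv)
                    return CSC_OFF;         /* RGB bypasses the CSC */
            return f->chroma_swapped ? CSC_YVU2RGB : CSC_YUV2RGB;
    }

    int main(void)
    {
            const struct fmt yvu420 = { .is_yuv = true,
                                        .chroma_swapped = true };

            printf("csc mode: %d\n", get_csc_mode(&yvu420));
            return 0;
    }
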
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 1a7b08f35776..83f31c6e891c 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1496,7 +1496,6 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
struct tegra_dc *dc = to_tegra_dc(crtc);
- int err;
#ifdef CONFIG_DEBUG_FS
root = crtc->debugfs_entry;
@@ -1512,17 +1511,9 @@ static int tegra_dc_late_register(struct drm_crtc *crtc)
for (i = 0; i < count; i++)
dc->debugfs_files[i].data = dc;
- err = drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(dc->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(dc->debugfs_files);
- dc->debugfs_files = NULL;
-
- return err;
}
static void tegra_dc_early_unregister(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index 7dfb50f65067..105fb9cdbb3b 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -5,12 +5,10 @@
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index bd268028fb3d..ba9d1c3e7cac 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -328,7 +328,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
fail:
while (num_refs--)
- drm_gem_object_put_unlocked(refs[num_refs]);
+ drm_gem_object_put(refs[num_refs]);
kfree(refs);
@@ -368,7 +368,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
@@ -636,7 +636,7 @@ static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
bo->tiling.mode = mode;
bo->tiling.value = value;
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
@@ -676,7 +676,7 @@ static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
break;
}
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return err;
}
@@ -701,7 +701,7 @@ static int tegra_gem_set_flags(struct drm_device *drm, void *data,
if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
bo->flags |= TEGRA_BO_BOTTOM_UP;
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
@@ -723,7 +723,7 @@ static int tegra_gem_get_flags(struct drm_device *drm, void *data,
if (bo->flags & TEGRA_BO_BOTTOM_UP)
args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;
- drm_gem_object_put_unlocked(gem);
+ drm_gem_object_put(gem);
return 0;
}
@@ -839,11 +839,11 @@ static struct drm_info_list tegra_debugfs_list[] = {
{ "iova", tegra_debugfs_iova, 0 },
};
-static int tegra_debugfs_init(struct drm_minor *minor)
+static void tegra_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(tegra_debugfs_list,
- ARRAY_SIZE(tegra_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tegra_debugfs_list,
+ ARRAY_SIZE(tegra_debugfs_list),
+ minor->debugfs_root, minor);
}
#endif
@@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
static bool host1x_drm_wants_iommu(struct host1x_device *dev)
{
+ struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
struct iommu_domain *domain;
/*
@@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
* sufficient and whether or not the host1x is attached to an IOMMU
* doesn't matter.
*/
- if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
+ if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
return true;
return domain != NULL;
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index ed99b67deb29..b25443255be6 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -9,7 +9,7 @@
#include <linux/host1x.h>
#include <linux/iova.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <drm/drm_atomic.h>
#include <drm/drm_edid.h>
@@ -152,8 +152,6 @@ enum drm_connector_status
tegra_output_connector_detect(struct drm_connector *connector, bool force);
void tegra_output_connector_destroy(struct drm_connector *connector);
-void tegra_output_encoder_destroy(struct drm_encoder *encoder);
-
/* from dpaux.c */
struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np);
enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 88b9d64c77bf..38beab9ab4f8 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -22,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "drm.h"
@@ -234,7 +235,6 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_dsi *dsi = to_dsi(output);
- int err;
dsi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -244,17 +244,9 @@ static int tegra_dsi_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
dsi->debugfs_files[i].data = dsi;
- err = drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(dsi->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(dsi->debugfs_files);
- dsi->debugfs_files = NULL;
-
- return err;
}
static void tegra_dsi_early_unregister(struct drm_connector *connector)
@@ -824,10 +816,6 @@ static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs
.mode_valid = tegra_dsi_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_dsi_unprepare(struct tegra_dsi *dsi)
{
int err;
@@ -1058,9 +1046,8 @@ static int tegra_dsi_init(struct host1x_client *client)
&tegra_dsi_connector_helper_funcs);
dsi->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &dsi->output.encoder,
- &tegra_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ drm_simple_encoder_init(drm, &dsi->output.encoder,
+ DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(&dsi->output.encoder,
&tegra_dsi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index b8a328f53862..01939c57fc74 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -4,7 +4,7 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*
* Based on the KMS/FB CMA helpers
- * Copyright (C) 2012 Analog Device Inc.
+ * Copyright (C) 2012 Analog Devices Inc.
*/
#include <linux/console.h>
@@ -171,7 +171,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm,
unreference:
while (i--)
- drm_gem_object_put_unlocked(&planes[i]->gem);
+ drm_gem_object_put(&planes[i]->gem);
return ERR_PTR(err);
}
@@ -235,7 +235,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
dev_err(drm->dev, "failed to allocate framebuffer info\n");
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put(&bo->gem);
return PTR_ERR(info);
}
@@ -244,7 +244,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
err = PTR_ERR(fbdev->fb);
dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
err);
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put(&bo->gem);
return PTR_ERR(fbdev->fb);
}
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 623768100c6a..723df142a981 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -24,7 +24,7 @@ static void tegra_bo_put(struct host1x_bo *bo)
{
struct tegra_bo *obj = host1x_to_tegra_bo(bo);
- drm_gem_object_put_unlocked(&obj->gem);
+ drm_gem_object_put(&obj->gem);
}
/* XXX move this into lib/scatterlist.c? */
@@ -385,7 +385,7 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
return ERR_PTR(err);
}
- drm_gem_object_put_unlocked(&bo->gem);
+ drm_gem_object_put(&bo->gem);
return bo;
}
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 38252c0f068d..d09a24931c87 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -7,7 +7,6 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
#include <linux/hdmi.h>
#include <linux/math64.h>
#include <linux/module.h>
@@ -22,6 +21,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "hda.h"
#include "hdmi.h"
@@ -1064,7 +1064,6 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_hdmi *hdmi = to_hdmi(output);
- int err;
hdmi->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -1074,17 +1073,9 @@ static int tegra_hdmi_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
hdmi->debugfs_files[i].data = hdmi;
- err = drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(hdmi->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(hdmi->debugfs_files);
- hdmi->debugfs_files = NULL;
-
- return err;
}
static void tegra_hdmi_early_unregister(struct drm_connector *connector)
@@ -1136,10 +1127,6 @@ tegra_hdmi_connector_helper_funcs = {
.mode_valid = tegra_hdmi_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -1445,8 +1432,8 @@ static int tegra_hdmi_init(struct host1x_client *client)
&tegra_hdmi_connector_helper_funcs);
hdmi->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &hdmi->output.encoder, &tegra_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, &hdmi->output.encoder,
+ DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&hdmi->output.encoder,
&tegra_hdmi_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index a264259b97a2..e36e5e7c2f69 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -6,6 +6,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
@@ -79,11 +80,6 @@ void tegra_output_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
}
-void tegra_output_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
static irqreturn_t hpd_irq(int irq, void *data)
{
struct tegra_output *output = data;
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 4be4dfd4a68a..0562a7eb793f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -8,6 +8,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
#include "drm.h"
#include "dc.h"
@@ -110,10 +111,6 @@ static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs
.mode_valid = tegra_rgb_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static void tegra_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct tegra_output *output = encoder_to_output(encoder);
@@ -281,8 +278,7 @@ int tegra_dc_rgb_init(struct drm_device *drm, struct tegra_dc *dc)
&tegra_rgb_connector_helper_funcs);
output->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &output->encoder, &tegra_rgb_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ drm_simple_encoder_init(drm, &output->encoder, DRM_MODE_ENCODER_LVDS);
drm_encoder_helper_add(&output->encoder,
&tegra_rgb_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 81226a4953c1..7cbcf9617f5e 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -6,7 +6,6 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/debugfs.h>
-#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -23,6 +22,7 @@
#include <drm/drm_file.h>
#include <drm/drm_panel.h>
#include <drm/drm_scdc_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "dc.h"
#include "dp.h"
@@ -1687,7 +1687,6 @@ static int tegra_sor_late_register(struct drm_connector *connector)
struct drm_minor *minor = connector->dev->primary;
struct dentry *root = connector->debugfs_entry;
struct tegra_sor *sor = to_sor(output);
- int err;
sor->debugfs_files = kmemdup(debugfs_files, sizeof(debugfs_files),
GFP_KERNEL);
@@ -1697,17 +1696,9 @@ static int tegra_sor_late_register(struct drm_connector *connector)
for (i = 0; i < count; i++)
sor->debugfs_files[i].data = sor;
- err = drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
- if (err < 0)
- goto free;
+ drm_debugfs_create_files(sor->debugfs_files, count, root, minor);
return 0;
-
-free:
- kfree(sor->debugfs_files);
- sor->debugfs_files = NULL;
-
- return err;
}
static void tegra_sor_early_unregister(struct drm_connector *connector)
@@ -1805,10 +1796,6 @@ static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs
.mode_valid = tegra_sor_connector_mode_valid,
};
-static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
- .destroy = tegra_output_encoder_destroy,
-};
-
static int
tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
@@ -3102,8 +3089,7 @@ static int tegra_sor_init(struct host1x_client *client)
&tegra_sor_connector_helper_funcs);
sor->output.connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_init(drm, &sor->output.encoder, &tegra_sor_encoder_funcs,
- encoder, NULL);
+ drm_simple_encoder_init(drm, &sor->output.encoder, encoder);
drm_encoder_helper_add(&sor->output.encoder, helpers);
drm_connector_attach_encoder(&sor->output.connector,
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
index d4ce9bab8c7e..89a226912de8 100644
--- a/drivers/gpu/drm/tidss/tidss_crtc.c
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -24,7 +24,7 @@
static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
{
struct drm_device *ddev = tcrtc->crtc.dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct drm_pending_vblank_event *event;
unsigned long flags;
bool busy;
@@ -88,7 +88,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct dispc_device *dispc = tidss->dispc;
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
@@ -165,7 +165,7 @@ static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
dev_dbg(ddev->dev,
@@ -216,7 +216,7 @@ static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
unsigned long flags;
int r;
@@ -259,7 +259,7 @@ static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
@@ -295,7 +295,7 @@ enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
{
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
}
@@ -314,7 +314,7 @@ static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -328,7 +328,7 @@ static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -379,9 +379,17 @@ static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
+static void tidss_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(tcrtc);
+}
+
static const struct drm_crtc_funcs tidss_crtc_funcs = {
.reset = tidss_crtc_reset,
- .destroy = drm_crtc_cleanup,
+ .destroy = tidss_crtc_destroy,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = tidss_crtc_duplicate_state,
@@ -400,7 +408,7 @@ struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
int ret;
- tcrtc = devm_kzalloc(tidss->dev, sizeof(*tcrtc), GFP_KERNEL);
+ tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
if (!tcrtc)
return ERR_PTR(-ENOMEM);
@@ -411,8 +419,10 @@ struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
NULL, &tidss_crtc_funcs, NULL);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(tcrtc);
return ERR_PTR(ret);
+ }
drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);
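[Editorial sketch: the devm_kzalloc() to kzalloc() changes here are a lifetime fix, not churn. A devm allocation is freed when the underlying device unbinds, but DRM mode objects can outlive that point because userspace may still hold the DRM file open, so the memory has to be released from the object's own .destroy callback. The idiom with a hypothetical my_crtc wrapper:]

#include <linux/kernel.h>
#include <linux/slab.h>
#include <drm/drm_crtc.h>

struct my_crtc {
	struct drm_crtc base;
};

/* Frees the wrapper only once the DRM core is done with the CRTC. */
static void my_crtc_destroy(struct drm_crtc *crtc)
{
	struct my_crtc *mcrtc = container_of(crtc, struct my_crtc, base);

	drm_crtc_cleanup(crtc);
	kfree(mcrtc);
}

static const struct drm_crtc_funcs my_crtc_funcs = {
	.destroy = my_crtc_destroy,
	/* remaining callbacks elided for the sketch */
};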
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
index 29f42768e294..629dd06393f6 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.c
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -181,10 +181,6 @@ const struct dispc_features dispc_am65x_feats = {
.vid_name = { "vid", "vidl1" },
.vid_lite = { false, true, },
.vid_order = { 1, 0 },
-
- .errata = {
- .i2000 = true,
- },
};
static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
@@ -2674,12 +2670,9 @@ int dispc_init(struct tidss_device *tidss)
return -ENOMEM;
num_fourccs = 0;
- for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
- if (feat->errata.i2000 &&
- dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
- continue;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i)
dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
- }
+
dispc->num_fourccs = num_fourccs;
dispc->tidss = tidss;
dispc->dev = dev;
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
index a4a68249e44b..902e612ff7ac 100644
--- a/drivers/gpu/drm/tidss/tidss_dispc.h
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -46,10 +46,6 @@ struct dispc_features_scaling {
u32 xinc_max;
};
-struct dispc_errata {
- bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
-};
-
enum dispc_vp_bus_type {
DISPC_VP_DPI, /* DPI output */
DISPC_VP_OLDI, /* OLDI (LVDS) output */
@@ -83,8 +79,6 @@ struct dispc_features {
const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
bool vid_lite[TIDSS_MAX_PLANES];
u32 vid_order[TIDSS_MAX_PLANES];
-
- struct dispc_errata errata;
};
extern const struct dispc_features dispc_k2g_feats;
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
index d95e4be2c7b9..fee2f6fa3506 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.c
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_irq.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include "tidss_dispc.h"
@@ -102,15 +103,7 @@ static const struct dev_pm_ops tidss_pm_ops = {
static void tidss_release(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
-
drm_kms_helper_poll_fini(ddev);
-
- tidss_modeset_cleanup(tidss);
-
- drm_dev_fini(ddev);
-
- kfree(tidss);
}
DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
@@ -119,7 +112,7 @@ static struct drm_driver tidss_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &tidss_fops,
.release = tidss_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.name = "tidss",
.desc = "TI Keystone DSS",
.date = "20180215",
@@ -142,26 +135,18 @@ static int tidss_probe(struct platform_device *pdev)
dev_dbg(dev, "%s\n", __func__);
- /* Can't use devm_* since drm_device's lifetime may exceed dev's */
- tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
- if (!tidss)
- return -ENOMEM;
+ tidss = devm_drm_dev_alloc(&pdev->dev, &tidss_driver,
+ struct tidss_device, ddev);
+ if (IS_ERR(tidss))
+ return PTR_ERR(tidss);
ddev = &tidss->ddev;
- ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
- if (ret) {
- kfree(ddev);
- return ret;
- }
-
tidss->dev = dev;
tidss->feat = of_device_get_match_data(dev);
platform_set_drvdata(pdev, tidss);
- ddev->dev_private = tidss;
-
ret = dispc_init(tidss);
if (ret) {
dev_err(dev, "failed to initialize dispc: %d\n", ret);
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
index e2aa6436ad18..3b0a3d87b7c4 100644
--- a/drivers/gpu/drm/tidss/tidss_drv.h
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -29,10 +29,10 @@ struct tidss_device {
spinlock_t wait_lock; /* protects the irq masks */
dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
-
- struct drm_atomic_state *saved_state;
};
+#define to_tidss(__dev) container_of(__dev, struct tidss_device, ddev)
+
int tidss_runtime_get(struct tidss_device *tidss);
void tidss_runtime_put(struct tidss_device *tidss);
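[Editorial sketch: with the drm_device embedded in struct tidss_device, the untyped dev_private pointer becomes redundant; the wrapper is recovered with container_of(), which the compiler can type-check. The same pattern generically, reusing the hypothetical my_device shape from the previous sketch:]

#include <linux/kernel.h>	/* container_of() */
#include <drm/drm_device.h>

struct my_device {
	struct drm_device ddev;	/* embedded drm_device, as above */
};

#define to_my_device(__dev) container_of(__dev, struct my_device, ddev)

static struct my_device *my_from_drm(struct drm_device *ddev)
{
	return to_my_device(ddev);	/* type-checked, no void * cast */
}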
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
index 83785b0a66a9..30bf2a65949c 100644
--- a/drivers/gpu/drm/tidss/tidss_encoder.c
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -55,12 +55,18 @@ static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
return 0;
}
+static void tidss_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
.atomic_check = tidss_encoder_atomic_check,
};
static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
+ .destroy = tidss_encoder_destroy,
};
struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
@@ -69,7 +75,7 @@ struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
struct drm_encoder *enc;
int ret;
- enc = devm_kzalloc(tidss->dev, sizeof(*enc), GFP_KERNEL);
+ enc = kzalloc(sizeof(*enc), GFP_KERNEL);
if (!enc)
return ERR_PTR(-ENOMEM);
@@ -77,8 +83,10 @@ struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
encoder_type, NULL);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(enc);
return ERR_PTR(ret);
+ }
drm_encoder_helper_add(enc, &encoder_helper_funcs);
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
index 612c046738e5..1b80f2d62e0a 100644
--- a/drivers/gpu/drm/tidss/tidss_irq.c
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -23,7 +23,7 @@ static void tidss_irq_update(struct tidss_device *tidss)
void tidss_irq_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
@@ -38,7 +38,7 @@ void tidss_irq_enable_vblank(struct drm_crtc *crtc)
void tidss_irq_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *ddev = crtc->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
u32 hw_videoport = tcrtc->hw_videoport;
unsigned long flags;
@@ -53,7 +53,7 @@ void tidss_irq_disable_vblank(struct drm_crtc *crtc)
irqreturn_t tidss_irq_handler(int irq, void *arg)
{
struct drm_device *ddev = (struct drm_device *)arg;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned int id;
dispc_irq_t irqstatus;
@@ -95,7 +95,7 @@ void tidss_irq_resume(struct tidss_device *tidss)
void tidss_irq_preinstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
spin_lock_init(&tidss->wait_lock);
@@ -109,7 +109,7 @@ void tidss_irq_preinstall(struct drm_device *ddev)
int tidss_irq_postinstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
unsigned long flags;
unsigned int i;
@@ -138,7 +138,7 @@ int tidss_irq_postinstall(struct drm_device *ddev)
void tidss_irq_uninstall(struct drm_device *ddev)
{
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
tidss_runtime_get(tidss);
dispc_set_irqenable(tidss->dispc, 0);
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
index 7d419960b030..4b99e9fa84a5 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.c
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -25,7 +25,7 @@
static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *ddev = old_state->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -258,7 +258,9 @@ int tidss_modeset_init(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s\n", __func__);
- drm_mode_config_init(ddev);
+ ret = drmm_mode_config_init(ddev);
+ if (ret)
+ return ret;
ddev->mode_config.min_width = 8;
ddev->mode_config.min_height = 8;
@@ -270,11 +272,11 @@ int tidss_modeset_init(struct tidss_device *tidss)
ret = tidss_dispc_modeset_init(tidss);
if (ret)
- goto err_mode_config_cleanup;
+ return ret;
ret = drm_vblank_init(ddev, tidss->num_crtcs);
if (ret)
- goto err_mode_config_cleanup;
+ return ret;
/* Start with vertical blanking interrupt reporting disabled. */
for (i = 0; i < tidss->num_crtcs; ++i)
@@ -285,15 +287,4 @@ int tidss_modeset_init(struct tidss_device *tidss)
dev_dbg(tidss->dev, "%s done\n", __func__);
return 0;
-
-err_mode_config_cleanup:
- drm_mode_config_cleanup(ddev);
- return ret;
-}
-
-void tidss_modeset_cleanup(struct tidss_device *tidss)
-{
- struct drm_device *ddev = &tidss->ddev;
-
- drm_mode_config_cleanup(ddev);
}
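[Editorial sketch: drmm_mode_config_init() is the managed counterpart of drm_mode_config_init(); it registers drm_mode_config_cleanup() as a release action on the drm_device, which is why tidss_modeset_cleanup() and the error labels above could be deleted. Minimal usage:]

#include <drm/drm_device.h>
#include <drm/drm_mode_config.h>

static int my_modeset_init(struct drm_device *ddev)
{
	int ret;

	ret = drmm_mode_config_init(ddev);
	if (ret)
		return ret;

	/* Any failure from here on can plainly return: cleanup is
	 * handled by the managed-release machinery. */
	ddev->mode_config.min_width = 8;
	ddev->mode_config.min_height = 8;
	return 0;
}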
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
index dda5625d0128..99aaff099f22 100644
--- a/drivers/gpu/drm/tidss/tidss_kms.h
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -10,6 +10,5 @@
struct tidss_device;
int tidss_modeset_init(struct tidss_device *tidss);
-void tidss_modeset_cleanup(struct tidss_device *tidss);
#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
index ff99b2dd4a17..0a563eabcbb9 100644
--- a/drivers/gpu/drm/tidss/tidss_plane.c
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -22,7 +22,7 @@ static int tidss_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
const struct drm_format_info *finfo;
struct drm_crtc_state *crtc_state;
@@ -101,7 +101,7 @@ static void tidss_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
struct drm_plane_state *state = plane->state;
u32 hw_videoport;
@@ -133,7 +133,7 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_device *ddev = plane->dev;
- struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_device *tidss = to_tidss(ddev);
struct tidss_plane *tplane = to_tidss_plane(plane);
dev_dbg(ddev->dev, "%s\n", __func__);
@@ -141,6 +141,14 @@ static void tidss_plane_atomic_disable(struct drm_plane *plane,
dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
}
+static void drm_plane_destroy(struct drm_plane *plane)
+{
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ drm_plane_cleanup(plane);
+ kfree(tplane);
+}
+
static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
.atomic_check = tidss_plane_atomic_check,
.atomic_update = tidss_plane_atomic_update,
@@ -151,7 +159,7 @@ static const struct drm_plane_funcs tidss_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.reset = drm_atomic_helper_plane_reset,
- .destroy = drm_plane_cleanup,
+ .destroy = drm_plane_destroy,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
@@ -175,7 +183,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
BIT(DRM_MODE_BLEND_COVERAGE));
int ret;
- tplane = devm_kzalloc(tidss->dev, sizeof(*tplane), GFP_KERNEL);
+ tplane = kzalloc(sizeof(*tplane), GFP_KERNEL);
if (!tplane)
return ERR_PTR(-ENOMEM);
@@ -190,7 +198,7 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
formats, num_formats,
NULL, type, NULL);
if (ret < 0)
- return ERR_PTR(ret);
+ goto err;
drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
@@ -203,15 +211,19 @@ struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
default_encoding,
default_range);
if (ret)
- return ERR_PTR(ret);
+ goto err;
ret = drm_plane_create_alpha_property(&tplane->plane);
if (ret)
- return ERR_PTR(ret);
+ goto err;
ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
if (ret)
- return ERR_PTR(ret);
+ goto err;
return tplane;
+
+err:
+ kfree(tplane);
+ return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 0791a0200cc3..0d74a6443263 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -390,10 +390,9 @@ static int tilcdc_init(struct drm_driver *ddrv, struct device *dev)
ret = drm_dev_register(ddev, 0);
if (ret)
goto init_failed;
+ priv->is_registered = true;
drm_fbdev_generic_setup(ddev, bpp);
-
- priv->is_registered = true;
return 0;
init_failed:
@@ -478,26 +477,17 @@ static struct drm_info_list tilcdc_debugfs_list[] = {
{ "mm", tilcdc_mm_show, 0 },
};
-static int tilcdc_debugfs_init(struct drm_minor *minor)
+static void tilcdc_debugfs_init(struct drm_minor *minor)
{
- struct drm_device *dev = minor->dev;
struct tilcdc_module *mod;
- int ret;
- ret = drm_debugfs_create_files(tilcdc_debugfs_list,
- ARRAY_SIZE(tilcdc_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(tilcdc_debugfs_list,
+ ARRAY_SIZE(tilcdc_debugfs_list),
+ minor->debugfs_root, minor);
list_for_each_entry(mod, &module_list, list)
if (mod->funcs->debugfs_init)
mod->funcs->debugfs_init(mod, minor);
-
- if (ret) {
- dev_err(dev->dev, "could not install tilcdc_debugfs_list\n");
- return ret;
- }
-
- return ret;
}
#endif
@@ -506,18 +496,7 @@ DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver tilcdc_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.irq_handler = tilcdc_irq,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_print_info = drm_gem_cma_print_info,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
#endif
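[Editorial sketch: drm_debugfs_create_files() no longer reports failure, since debugfs registration is treated as best-effort, so .debugfs_init callbacks were switched to return void and the error plumbing above disappears. Illustrated with hypothetical entries:]

#include <linux/kernel.h>
#include <linux/seq_file.h>
#include <drm/drm_debugfs.h>

static int my_mm_show(struct seq_file *m, void *arg)
{
	return 0;	/* dump allocator state here */
}

static struct drm_info_list my_debugfs_list[] = {
	{ "mm", my_mm_show, 0 },
};

static void my_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(my_debugfs_list,
				 ARRAY_SIZE(my_debugfs_list),
				 minor->debugfs_root, minor);
}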
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 28b7f703236e..b177525588c1 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_external.h"
@@ -83,10 +84,6 @@ int tilcdc_add_component_encoder(struct drm_device *ddev)
return 0;
}
-static const struct drm_encoder_funcs tilcdc_external_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static
int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
{
@@ -131,9 +128,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev)
if (!priv->external_encoder)
return -ENOMEM;
- ret = drm_encoder_init(ddev, priv->external_encoder,
- &tilcdc_external_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(ddev, priv->external_encoder,
+ DRM_MODE_ENCODER_NONE);
if (ret) {
dev_err(ddev->dev, "drm_encoder_init() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_panel.c b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
index 5584e656b857..00efc30b47d8 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_panel.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_panel.c
@@ -16,6 +16,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "tilcdc_drv.h"
#include "tilcdc_panel.h"
@@ -74,10 +75,6 @@ static void panel_encoder_mode_set(struct drm_encoder *encoder,
/* nothing needed */
}
-static const struct drm_encoder_funcs panel_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static const struct drm_encoder_helper_funcs panel_encoder_helper_funcs = {
.dpms = panel_encoder_dpms,
.prepare = panel_encoder_prepare,
@@ -102,8 +99,7 @@ static struct drm_encoder *panel_encoder_create(struct drm_device *dev,
encoder = &panel_encoder->base;
encoder->possible_crtcs = 1;
- ret = drm_encoder_init(dev, encoder, &panel_encoder_funcs,
- DRM_MODE_ENCODER_LVDS, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_LVDS);
if (ret < 0)
goto fail;
@@ -143,12 +139,16 @@ static int panel_connector_get_modes(struct drm_connector *connector)
int i;
for (i = 0; i < timings->num_timings; i++) {
- struct drm_display_mode *mode = drm_mode_create(dev);
+ struct drm_display_mode *mode;
struct videomode vm;
if (videomode_from_timings(timings, &vm, i))
break;
+ mode = drm_mode_create(dev);
+ if (!mode)
+ break;
+
drm_display_mode_from_videomode(&vm, mode);
mode->type = DRM_MODE_TYPE_DRIVER;
@@ -303,7 +303,8 @@ put_node:
static int panel_probe(struct platform_device *pdev)
{
- struct device_node *bl_node, *node = pdev->dev.of_node;
+ struct device_node *node = pdev->dev.of_node;
+ struct backlight_device *backlight;
struct panel_module *panel_mod;
struct tilcdc_module *mod;
struct pinctrl *pinctrl;
@@ -319,16 +320,10 @@ static int panel_probe(struct platform_device *pdev)
if (!panel_mod)
return -ENOMEM;
- bl_node = of_parse_phandle(node, "backlight", 0);
- if (bl_node) {
- panel_mod->backlight = of_find_backlight_by_node(bl_node);
- of_node_put(bl_node);
-
- if (!panel_mod->backlight)
- return -EPROBE_DEFER;
-
- dev_info(&pdev->dev, "found backlight\n");
- }
+ backlight = devm_of_find_backlight(&pdev->dev);
+ if (IS_ERR(backlight))
+ return PTR_ERR(backlight);
+ panel_mod->backlight = backlight;
panel_mod->enable_gpio = devm_gpiod_get_optional(&pdev->dev, "enable",
GPIOD_OUT_LOW);
@@ -400,7 +395,7 @@ static const struct of_device_id panel_of_match[] = {
{ },
};
-struct platform_driver panel_driver = {
+static struct platform_driver panel_driver = {
.probe = panel_probe,
.remove = panel_remove,
.driver = {
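[Editorial sketch: devm_of_find_backlight() folds the whole of_parse_phandle()/of_find_backlight_by_node()/of_node_put() sequence into one call. It returns NULL when the "backlight" property is absent, ERR_PTR(-EPROBE_DEFER) when the phandle exists but the backlight has not probed yet, and drops the reference automatically on detach. Sketched usage with hypothetical names:]

#include <linux/backlight.h>
#include <linux/err.h>

static int my_probe_backlight(struct device *dev,
			      struct backlight_device **out)
{
	struct backlight_device *bl = devm_of_find_backlight(dev);

	if (IS_ERR(bl))
		return PTR_ERR(bl);	/* typically -EPROBE_DEFER */

	*out = bl;			/* NULL simply means "no backlight" */
	return 0;
}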
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
index e2090020b3a0..0d09b31ae759 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c
@@ -62,8 +62,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (state->fb && old_state->fb &&
- state->fb->format != old_state->fb->format) {
+ if (old_state->fb && state->fb->format != old_state->fb->format) {
dev_dbg(plane->dev->dev,
"%s(): pixel format change requires mode_change\n",
__func__);
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 4160e74e4751..2b6414f0fa75 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -1,5 +1,24 @@
# SPDX-License-Identifier: GPL-2.0-only
+config DRM_CIRRUS_QEMU
+ tristate "Cirrus driver for QEMU emulated device"
+ depends on DRM && PCI && MMU
+ select DRM_KMS_HELPER
+ select DRM_GEM_SHMEM_HELPER
+ help
+ This is a KMS driver for the emulated Cirrus device in QEMU.
+ It is *NOT* intended for real Cirrus hardware. It requires
+ the modesetting X.org userspace driver.
+
+ Cirrus is obsolete: the hardware was designed in the '90s
+ and can't keep up with today's needs. More background:
+ https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+ Better alternatives are:
+ - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+ - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+ - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
+
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
depends on DRM && USB
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index c96ceee71453..6ae4e9e5a35f 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index d2ff63ce8eaf..744a8e337e41 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -35,6 +35,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -58,6 +59,8 @@ struct cirrus_device {
void __iomem *mmio;
};
+#define to_cirrus(_dev) container_of(_dev, struct cirrus_device, dev)
+
/* ------------------------------------------------------------------ */
/*
* The meat of this driver. The core passes us a mode and we have to program
@@ -310,7 +313,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
struct drm_rect *rect)
{
- struct cirrus_device *cirrus = fb->dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(fb->dev);
void *vmap;
int idx, ret;
@@ -435,7 +438,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
cirrus_fb_blit_fullscreen(plane_state->fb);
@@ -444,7 +447,7 @@ static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
- struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct cirrus_device *cirrus = to_cirrus(pipe->crtc.dev);
struct drm_plane_state *state = pipe->plane.state;
struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
@@ -509,11 +512,15 @@ static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+static int cirrus_mode_config_init(struct cirrus_device *cirrus)
{
struct drm_device *dev = &cirrus->dev;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
- drm_mode_config_init(dev);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
@@ -521,18 +528,12 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
dev->mode_config.preferred_depth = 16;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.funcs = &cirrus_mode_config_funcs;
+
+ return 0;
}
/* ------------------------------------------------------------------ */
-static void cirrus_release(struct drm_device *dev)
-{
- struct cirrus_device *cirrus = dev->dev_private;
-
- drm_mode_config_cleanup(dev);
- kfree(cirrus);
-}
-
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -546,7 +547,6 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
- .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -560,7 +560,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
@@ -569,36 +569,34 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
return ret;
ret = -ENOMEM;
- cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
- if (cirrus == NULL)
- goto err_pci_release;
+ cirrus = devm_drm_dev_alloc(&pdev->dev, &cirrus_driver,
+ struct cirrus_device, dev);
+ if (IS_ERR(cirrus))
+ return PTR_ERR(cirrus);
dev = &cirrus->dev;
- ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
- if (ret)
- goto err_free_cirrus;
- dev->dev_private = cirrus;
- ret = -ENOMEM;
- cirrus->vram = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
+ cirrus->vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
if (cirrus->vram == NULL)
- goto err_dev_put;
+ return -ENOMEM;
- cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
- pci_resource_len(pdev, 1));
+ cirrus->mmio = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
if (cirrus->mmio == NULL)
- goto err_unmap_vram;
+ return -ENOMEM;
- cirrus_mode_config_init(cirrus);
+ ret = cirrus_mode_config_init(cirrus);
+ if (ret)
+ return ret;
ret = cirrus_conn_init(cirrus);
if (ret < 0)
- goto err_cleanup;
+ return ret;
ret = cirrus_pipe_init(cirrus);
if (ret < 0)
- goto err_cleanup;
+ return ret;
drm_mode_config_reset(dev);
@@ -606,36 +604,18 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_cleanup;
+ return ret;
drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
return 0;
-
-err_cleanup:
- drm_mode_config_cleanup(dev);
- iounmap(cirrus->mmio);
-err_unmap_vram:
- iounmap(cirrus->vram);
-err_dev_put:
- drm_dev_put(dev);
-err_free_cirrus:
- kfree(cirrus);
-err_pci_release:
- pci_release_regions(pdev);
- return ret;
}
static void cirrus_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct cirrus_device *cirrus = dev->dev_private;
drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- iounmap(cirrus->mmio);
- iounmap(cirrus->vram);
- drm_dev_put(dev);
- pci_release_regions(pdev);
}
static const struct pci_device_id pciidlist[] = {
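[Editorial sketch: the cirrus probe path now leans entirely on managed resources. pcim_enable_device() makes the enable state (and subsequent region claims) devres-managed, and devm_ioremap() unmaps on detach, which is why every iounmap()/pci_release_regions() error label could be deleted. A hypothetical probe, BAR numbers assumed:]

#include <linux/io.h>
#include <linux/pci.h>

static int my_pci_probe(struct pci_dev *pdev)
{
	void __iomem *vram;
	int ret;

	ret = pcim_enable_device(pdev);	/* devres: auto-disable on detach */
	if (ret)
		return ret;

	vram = devm_ioremap(&pdev->dev, pci_resource_start(pdev, 0),
			    pci_resource_len(pdev, 0));
	if (!vram)
		return -ENOMEM;		/* no iounmap() error label needed */

	return 0;
}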
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index a48173441ae0..cc397671f689 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -19,6 +19,7 @@
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@@ -87,18 +88,18 @@ struct gm12u320_device {
struct usb_device *udev;
unsigned char *cmd_buf;
unsigned char *data_buf[GM12U320_BLOCK_COUNT];
- bool pipe_enabled;
struct {
- bool run;
- struct workqueue_struct *workq;
- struct work_struct work;
- wait_queue_head_t waitq;
+ struct delayed_work work;
struct mutex lock;
struct drm_framebuffer *fb;
struct drm_rect rect;
+ int frame;
+ int draw_status_timeout;
} fb_update;
};
+#define to_gm12u320(__dev) container_of(__dev, struct gm12u320_device, dev)
+
static const char cmd_data[CMD_SIZE] = {
0x55, 0x53, 0x42, 0x43, 0x00, 0x00, 0x00, 0x00,
0x68, 0xfc, 0x00, 0x00, 0x00, 0x00, 0x10, 0xff,
@@ -159,7 +160,7 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
int i, block_size;
const char *hdr;
- gm12u320->cmd_buf = kmalloc(CMD_SIZE, GFP_KERNEL);
+ gm12u320->cmd_buf = drmm_kmalloc(&gm12u320->dev, CMD_SIZE, GFP_KERNEL);
if (!gm12u320->cmd_buf)
return -ENOMEM;
@@ -172,7 +173,8 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
hdr = data_block_header;
}
- gm12u320->data_buf[i] = kzalloc(block_size, GFP_KERNEL);
+ gm12u320->data_buf[i] = drmm_kzalloc(&gm12u320->dev,
+ block_size, GFP_KERNEL);
if (!gm12u320->data_buf[i])
return -ENOMEM;
@@ -182,26 +184,9 @@ static int gm12u320_usb_alloc(struct gm12u320_device *gm12u320)
data_block_footer, DATA_BLOCK_FOOTER_SIZE);
}
- gm12u320->fb_update.workq = create_singlethread_workqueue(DRIVER_NAME);
- if (!gm12u320->fb_update.workq)
- return -ENOMEM;
-
return 0;
}
-static void gm12u320_usb_free(struct gm12u320_device *gm12u320)
-{
- int i;
-
- if (gm12u320->fb_update.workq)
- destroy_workqueue(gm12u320->fb_update.workq);
-
- for (i = 0; i < GM12U320_BLOCK_COUNT; i++)
- kfree(gm12u320->data_buf[i]);
-
- kfree(gm12u320->cmd_buf);
-}
-
static int gm12u320_misc_request(struct gm12u320_device *gm12u320,
u8 req_a, u8 req_b,
u8 arg_a, u8 arg_b, u8 arg_c, u8 arg_d)
@@ -344,80 +329,77 @@ unlock:
static void gm12u320_fb_update_work(struct work_struct *work)
{
struct gm12u320_device *gm12u320 =
- container_of(work, struct gm12u320_device, fb_update.work);
- int draw_status_timeout = FIRST_FRAME_TIMEOUT;
+ container_of(to_delayed_work(work), struct gm12u320_device,
+ fb_update.work);
int block, block_size, len;
- int frame = 0;
int ret = 0;
- while (gm12u320->fb_update.run) {
- gm12u320_copy_fb_to_blocks(gm12u320);
-
- for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
- if (block == GM12U320_BLOCK_COUNT - 1)
- block_size = DATA_LAST_BLOCK_SIZE;
- else
- block_size = DATA_BLOCK_SIZE;
-
- /* Send data command to device */
- memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
- gm12u320->cmd_buf[8] = block_size & 0xff;
- gm12u320->cmd_buf[9] = block_size >> 8;
- gm12u320->cmd_buf[20] = 0xfc - block * 4;
- gm12u320->cmd_buf[21] = block | (frame << 7);
-
- ret = usb_bulk_msg(gm12u320->udev,
- usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->cmd_buf, CMD_SIZE, &len,
- CMD_TIMEOUT);
- if (ret || len != CMD_SIZE)
- goto err;
-
- /* Send data block to device */
- ret = usb_bulk_msg(gm12u320->udev,
- usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->data_buf[block], block_size,
- &len, DATA_TIMEOUT);
- if (ret || len != block_size)
- goto err;
-
- /* Read status */
- ret = usb_bulk_msg(gm12u320->udev,
- usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
- gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
- CMD_TIMEOUT);
- if (ret || len != READ_STATUS_SIZE)
- goto err;
- }
+ gm12u320_copy_fb_to_blocks(gm12u320);
+
+ for (block = 0; block < GM12U320_BLOCK_COUNT; block++) {
+ if (block == GM12U320_BLOCK_COUNT - 1)
+ block_size = DATA_LAST_BLOCK_SIZE;
+ else
+ block_size = DATA_BLOCK_SIZE;
+
+ /* Send data command to device */
+ memcpy(gm12u320->cmd_buf, cmd_data, CMD_SIZE);
+ gm12u320->cmd_buf[8] = block_size & 0xff;
+ gm12u320->cmd_buf[9] = block_size >> 8;
+ gm12u320->cmd_buf[20] = 0xfc - block * 4;
+ gm12u320->cmd_buf[21] =
+ block | (gm12u320->fb_update.frame << 7);
- /* Send draw command to device */
- memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
ret = usb_bulk_msg(gm12u320->udev,
usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
- gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ gm12u320->cmd_buf, CMD_SIZE, &len,
+ CMD_TIMEOUT);
if (ret || len != CMD_SIZE)
goto err;
+ /* Send data block to device */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->data_buf[block], block_size,
+ &len, DATA_TIMEOUT);
+ if (ret || len != block_size)
+ goto err;
+
/* Read status */
ret = usb_bulk_msg(gm12u320->udev,
usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
- draw_status_timeout);
+ CMD_TIMEOUT);
if (ret || len != READ_STATUS_SIZE)
goto err;
-
- draw_status_timeout = CMD_TIMEOUT;
- frame = !frame;
-
- /*
- * We must draw a frame every 2s otherwise the projector
- * switches back to showing its logo.
- */
- wait_event_timeout(gm12u320->fb_update.waitq,
- !gm12u320->fb_update.run ||
- gm12u320->fb_update.fb != NULL,
- IDLE_TIMEOUT);
}
+
+ /* Send draw command to device */
+ memcpy(gm12u320->cmd_buf, cmd_draw, CMD_SIZE);
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_sndbulkpipe(gm12u320->udev, DATA_SND_EPT),
+ gm12u320->cmd_buf, CMD_SIZE, &len, CMD_TIMEOUT);
+ if (ret || len != CMD_SIZE)
+ goto err;
+
+ /* Read status */
+ ret = usb_bulk_msg(gm12u320->udev,
+ usb_rcvbulkpipe(gm12u320->udev, DATA_RCV_EPT),
+ gm12u320->cmd_buf, READ_STATUS_SIZE, &len,
+ gm12u320->fb_update.draw_status_timeout);
+ if (ret || len != READ_STATUS_SIZE)
+ goto err;
+
+ gm12u320->fb_update.draw_status_timeout = CMD_TIMEOUT;
+ gm12u320->fb_update.frame = !gm12u320->fb_update.frame;
+
+ /*
+ * We must draw a frame every 2s otherwise the projector
+ * switches back to showing its logo.
+ */
+ queue_delayed_work(system_long_wq, &gm12u320->fb_update.work,
+ IDLE_TIMEOUT);
+
return;
err:
/* Do not log errors caused by module unload or device unplug */
@@ -428,7 +410,7 @@ err:
static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
struct drm_rect *dirty)
{
- struct gm12u320_device *gm12u320 = fb->dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(fb->dev);
struct drm_framebuffer *old_fb = NULL;
bool wakeup = false;
@@ -452,36 +434,24 @@ static void gm12u320_fb_mark_dirty(struct drm_framebuffer *fb,
mutex_unlock(&gm12u320->fb_update.lock);
if (wakeup)
- wake_up(&gm12u320->fb_update.waitq);
+ mod_delayed_work(system_long_wq, &gm12u320->fb_update.work, 0);
if (old_fb)
drm_framebuffer_put(old_fb);
}
-static void gm12u320_start_fb_update(struct gm12u320_device *gm12u320)
-{
- mutex_lock(&gm12u320->fb_update.lock);
- gm12u320->fb_update.run = true;
- mutex_unlock(&gm12u320->fb_update.lock);
-
- queue_work(gm12u320->fb_update.workq, &gm12u320->fb_update.work);
-}
-
static void gm12u320_stop_fb_update(struct gm12u320_device *gm12u320)
{
- mutex_lock(&gm12u320->fb_update.lock);
- gm12u320->fb_update.run = false;
- mutex_unlock(&gm12u320->fb_update.lock);
+ struct drm_framebuffer *old_fb;
- wake_up(&gm12u320->fb_update.waitq);
- cancel_work_sync(&gm12u320->fb_update.work);
+ cancel_delayed_work_sync(&gm12u320->fb_update.work);
mutex_lock(&gm12u320->fb_update.lock);
- if (gm12u320->fb_update.fb) {
- drm_framebuffer_put(gm12u320->fb_update.fb);
- gm12u320->fb_update.fb = NULL;
- }
+ old_fb = gm12u320->fb_update.fb;
+ gm12u320->fb_update.fb = NULL;
mutex_unlock(&gm12u320->fb_update.lock);
+
+ drm_framebuffer_put(old_fb);
}
static int gm12u320_set_ecomode(struct gm12u320_device *gm12u320)
@@ -589,20 +559,18 @@ static void gm12u320_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
struct drm_rect rect = { 0, 0, GM12U320_USER_WIDTH, GM12U320_HEIGHT };
+ struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
+ gm12u320->fb_update.draw_status_timeout = FIRST_FRAME_TIMEOUT;
gm12u320_fb_mark_dirty(plane_state->fb, &rect);
- gm12u320_start_fb_update(gm12u320);
- gm12u320->pipe_enabled = true;
}
static void gm12u320_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct gm12u320_device *gm12u320 = pipe->crtc.dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(pipe->crtc.dev);
gm12u320_stop_fb_update(gm12u320);
- gm12u320->pipe_enabled = false;
}
static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -630,16 +598,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = {
DRM_FORMAT_MOD_INVALID
};
-static void gm12u320_driver_release(struct drm_device *dev)
-{
- struct gm12u320_device *gm12u320 = dev->dev_private;
-
- gm12u320_usb_free(gm12u320);
- drm_mode_config_cleanup(dev);
- drm_dev_fini(dev);
- kfree(gm12u320);
-}
-
DEFINE_DRM_GEM_FOPS(gm12u320_fops);
static struct drm_driver gm12u320_drm_driver = {
@@ -651,7 +609,6 @@ static struct drm_driver gm12u320_drm_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
- .release = gm12u320_driver_release,
.fops = &gm12u320_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
};
@@ -676,24 +633,21 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
if (interface->cur_altsetting->desc.bInterfaceNumber != 0)
return -ENODEV;
- gm12u320 = kzalloc(sizeof(*gm12u320), GFP_KERNEL);
- if (gm12u320 == NULL)
- return -ENOMEM;
+ gm12u320 = devm_drm_dev_alloc(&interface->dev, &gm12u320_drm_driver,
+ struct gm12u320_device, dev);
+ if (IS_ERR(gm12u320))
+ return PTR_ERR(gm12u320);
gm12u320->udev = interface_to_usbdev(interface);
- INIT_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
+ INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work);
mutex_init(&gm12u320->fb_update.lock);
- init_waitqueue_head(&gm12u320->fb_update.waitq);
dev = &gm12u320->dev;
- ret = drm_dev_init(dev, &gm12u320_drm_driver, &interface->dev);
- if (ret) {
- kfree(gm12u320);
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
return ret;
- }
- dev->dev_private = gm12u320;
- drm_mode_config_init(dev);
dev->mode_config.min_width = GM12U320_USER_WIDTH;
dev->mode_config.max_width = GM12U320_USER_WIDTH;
dev->mode_config.min_height = GM12U320_HEIGHT;
@@ -702,15 +656,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
ret = gm12u320_usb_alloc(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = gm12u320_set_ecomode(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = gm12u320_conn_init(gm12u320);
if (ret)
- goto err_put;
+ return ret;
ret = drm_simple_display_pipe_init(&gm12u320->dev,
&gm12u320->pipe,
@@ -720,56 +674,44 @@ static int gm12u320_usb_probe(struct usb_interface *interface,
gm12u320_pipe_modifiers,
&gm12u320->conn);
if (ret)
- goto err_put;
+ return ret;
drm_mode_config_reset(dev);
usb_set_intfdata(interface, dev);
ret = drm_dev_register(dev, 0);
if (ret)
- goto err_put;
+ return ret;
drm_fbdev_generic_setup(dev, 0);
return 0;
-
-err_put:
- drm_dev_put(dev);
- return ret;
}
static void gm12u320_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
- gm12u320_stop_fb_update(gm12u320);
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ drm_atomic_helper_shutdown(dev);
}
static __maybe_unused int gm12u320_suspend(struct usb_interface *interface,
pm_message_t message)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
- if (gm12u320->pipe_enabled)
- gm12u320_stop_fb_update(gm12u320);
-
- return 0;
+ return drm_mode_config_helper_suspend(dev);
}
static __maybe_unused int gm12u320_resume(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- struct gm12u320_device *gm12u320 = dev->dev_private;
+ struct gm12u320_device *gm12u320 = to_gm12u320(dev);
gm12u320_set_ecomode(gm12u320);
- if (gm12u320->pipe_enabled)
- gm12u320_start_fb_update(gm12u320);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
static const struct usb_device_id id_table[] = {
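[Editorial sketch: the gm12u320 rework replaces a dedicated workqueue thread looping on a run flag and a waitqueue with a self-rearming delayed work item on the shared system_long_wq. The skeleton of that idiom, with hypothetical names and IDLE_DELAY standing in for the 2 s keep-alive:]

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define IDLE_DELAY	(2 * HZ)	/* keep-alive interval, assumed */

static struct delayed_work my_work;

static void my_update_work(struct work_struct *work)
{
	/* ...send one frame to the device here... */

	/* Re-arm so the device keeps showing the last frame. */
	queue_delayed_work(system_long_wq, &my_work, IDLE_DELAY);
}

static void my_mark_dirty(void)
{
	/* Runs the handler now, whether or not the idle timer is pending. */
	mod_delayed_work(system_long_wq, &my_work, 0);
}

static void my_init(void)
{
	INIT_DELAYED_WORK(&my_work, my_update_work);
}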
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 9af8ff84974f..0998309b0d95 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -21,6 +21,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -195,8 +196,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
.desc = "HX8357D",
@@ -226,18 +226,12 @@ static int hx8357d_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &hx8357d_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 802fb8dde1b6..16400064320f 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -24,6 +24,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -345,8 +346,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9225_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.name = "ili9225",
.desc = "Ilitek ILI9225",
.date = "20171106",
@@ -376,19 +376,13 @@ static int ili9225_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9225_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index 33b51dc7faa8..d39c39df56ad 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -20,6 +20,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -151,8 +152,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9341_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
.desc = "Ilitek ILI9341",
@@ -183,19 +183,13 @@ static int ili9341_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9341_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 532560aebb1e..403af68fa440 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -19,6 +19,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
@@ -164,8 +165,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
static struct drm_driver ili9486_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &ili9486_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9486",
.desc = "Ilitek ILI9486",
@@ -197,19 +197,13 @@ static int ili9486_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &ili9486_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index e2cfd9a17143..2131b4268c00 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -18,6 +18,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modeset_helper.h>
#include <video/mipi_display.h>
@@ -155,8 +156,7 @@ DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
.desc = "Multi-Inno MI0283QT",
@@ -187,19 +187,13 @@ static int mi0283qt_probe(struct spi_device *spi)
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &mi0283qt_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index f5ebcaf7ee3a..08164e2a2d13 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -31,6 +31,7 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
#include <drm/drm_probe_helper.h>
@@ -908,17 +909,6 @@ static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static void repaper_release(struct drm_device *drm)
-{
- struct repaper_epd *epd = drm_to_epd(drm);
-
- DRM_DEBUG_DRIVER("\n");
-
- drm_mode_config_cleanup(drm);
- drm_dev_fini(drm);
- kfree(epd);
-}
-
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
@@ -956,8 +946,7 @@ DEFINE_DRM_GEM_CMA_FOPS(repaper_fops);
static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &repaper_fops,
- .release = repaper_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
.date = "20170405",
@@ -1013,19 +1002,16 @@ static int repaper_probe(struct spi_device *spi)
}
}
- epd = kzalloc(sizeof(*epd), GFP_KERNEL);
- if (!epd)
- return -ENOMEM;
+ epd = devm_drm_dev_alloc(dev, &repaper_driver,
+ struct repaper_epd, drm);
+ if (IS_ERR(epd))
+ return PTR_ERR(epd);
drm = &epd->drm;
- ret = devm_drm_dev_init(dev, drm, &repaper_driver);
- if (ret) {
- kfree(epd);
+ ret = drmm_mode_config_init(drm);
+ if (ret)
return ret;
- }
-
- drm_mode_config_init(drm);
drm->mode_config.funcs = &repaper_mode_config_funcs;
epd->spi = spi;
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 9ef559dd3191..1311e5df8721 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -21,6 +21,7 @@
#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
@@ -284,8 +285,7 @@ DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7586_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
.desc = "Sitronix ST7586",
@@ -317,19 +317,13 @@ static int st7586_probe(struct spi_device *spi)
size_t bufsize;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
- return -ENOMEM;
+ dbidev = devm_drm_dev_alloc(dev, &st7586_driver,
+ struct mipi_dbi_dev, drm);
+ if (IS_ERR(dbidev))
+ return PTR_ERR(dbidev);
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &st7586_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3cd9b8d9888d..c0bc2a18edde 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -21,6 +21,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_mipi_dbi.h>
#define ST7735R_FRMCTR1 0xb1
@@ -156,8 +157,7 @@ DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.fops = &st7735r_fops,
- .release = mipi_dbi_release,
- DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ DRM_GEM_CMA_DRIVER_OPS_VMAP,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
.desc = "Sitronix ST7735R",
@@ -195,22 +195,16 @@ static int st7735r_probe(struct spi_device *spi)
if (!cfg)
cfg = (void *)spi_get_device_id(spi)->driver_data;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ priv = devm_drm_dev_alloc(dev, &st7735r_driver,
+ struct st7735r_priv, dbidev.drm);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
dbidev = &priv->dbidev;
priv->cfg = cfg;
dbi = &dbidev->dbi;
drm = &dbidev->drm;
- ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
- if (ret) {
- kfree(dbidev);
- return ret;
- }
-
- drm_mode_config_init(drm);
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(dbi->reset)) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 9e07c3f75156..f73b81c2576e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -588,7 +588,8 @@ static void ttm_bo_release(struct kref *kref)
ttm_mem_io_unlock(man);
}
- if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
+ !dma_resv_trylock(bo->base.resv)) {
/* The BO is not idle, resurrect it for delayed destroy */
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
@@ -621,6 +622,7 @@ static void ttm_bo_release(struct kref *kref)
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
+ dma_resv_unlock(bo->base.resv);
BUG_ON(bo->mem.mm_node != NULL);
atomic_dec(&ttm_bo_glob.bo_count);
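The release path now insists on holding the reservation lock while tearing down the BO's memtype state; if the lock cannot be taken without blocking, the object is resurrected onto the delayed-destroy path just as if it were still busy. The control flow in outline (a reduced sketch of the idea, not the full ttm_bo_release()):

#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static void foo_bo_release(struct ttm_buffer_object *bo)
{
        /* Tear down immediately only if the BO is idle *and* its resv
         * can be trylocked; either failure routes the BO back through
         * the delayed-destroy worker, which retries later.
         */
        if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
            !dma_resv_trylock(bo->base.resv)) {
                bo->deleted = true;     /* resurrect for delayed destroy */
                return;
        }

        /* ... resource teardown runs under the reservation lock ... */
        dma_resv_unlock(bo->base.resv);
}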
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 52d2b71f1588..f09b096ba4fd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -257,54 +257,6 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-#ifdef CONFIG_X86
-#define __ttm_kmap_atomic_prot(__page, __prot) kmap_atomic_prot(__page, __prot)
-#define __ttm_kunmap_atomic(__addr) kunmap_atomic(__addr)
-#else
-#define __ttm_kmap_atomic_prot(__page, __prot) vmap(&__page, 1, 0, __prot)
-#define __ttm_kunmap_atomic(__addr) vunmap(__addr)
-#endif
-
-
-/**
- * ttm_kmap_atomic_prot - Efficient kernel map of a single page with
- * specified page protection.
- *
- * @page: The page to map.
- * @prot: The page protection.
- *
- * This function maps a TTM page using the kmap_atomic api if available,
- * otherwise falls back to vmap. The user must make sure that the
- * specified page does not have an aliased mapping with a different caching
- * policy unless the architecture explicitly allows it. Also mapping and
- * unmapping using this api must be correctly nested. Unmapping should
- * occur in the reverse order of mapping.
- */
-void *ttm_kmap_atomic_prot(struct page *page, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- return kmap_atomic(page);
- else
- return __ttm_kmap_atomic_prot(page, prot);
-}
-EXPORT_SYMBOL(ttm_kmap_atomic_prot);
-
-/**
- * ttm_kunmap_atomic_prot - Unmap a page that was mapped using
- * ttm_kmap_atomic_prot.
- *
- * @addr: The virtual address from the map.
- * @prot: The page protection.
- */
-void ttm_kunmap_atomic_prot(void *addr, pgprot_t prot)
-{
- if (pgprot_val(prot) == pgprot_val(PAGE_KERNEL))
- kunmap_atomic(addr);
- else
- __ttm_kunmap_atomic(addr);
-}
-EXPORT_SYMBOL(ttm_kunmap_atomic_prot);
-
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
@@ -316,13 +268,13 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = ttm_kmap_atomic_prot(d, prot);
+ dst = kmap_atomic_prot(d, prot);
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(dst, prot);
+ kunmap_atomic(dst);
return 0;
}
@@ -338,13 +290,13 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = ttm_kmap_atomic_prot(s, prot);
+ src = kmap_atomic_prot(s, prot);
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
- ttm_kunmap_atomic_prot(src, prot);
+ kunmap_atomic(src);
return 0;
}
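With the TTM-local wrappers removed, the copy helpers call kmap_atomic_prot() directly; by this point the helper is available on all architectures (on non-highmem configurations it degrades to the ordinary kernel mapping and the protection is moot). A sketch of the pattern, with a hypothetical foo_ helper name:

#include <linux/highmem.h>
#include <linux/io.h>

/* Copy one page of I/O memory into a kernel page with an explicit
 * protection. kmap_atomic_prot() disables preemption, so the map and
 * unmap must nest strictly and the copy must not sleep.
 */
static int foo_copy_io_page(struct page *d, const void __iomem *src)
{
        void *dst = kmap_atomic_prot(d, PAGE_KERNEL);

        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap_atomic(dst);
        return 0;
}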
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 0ad30b112982..a43aa7275f12 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -58,7 +58,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_clear;
/*
- * If possible, avoid waiting for GPU with mmap_sem
+ * If possible, avoid waiting for GPU with mmap_lock
* held. We only do this if the fault allows retry and this
* is the first attempt.
*/
@@ -68,7 +68,7 @@ static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
goto out_unlock;
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
(void) dma_fence_wait(bo->moving, true);
dma_resv_unlock(bo->base.resv);
ttm_bo_put(bo);
@@ -131,20 +131,20 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
{
/*
* Work around locking order reversal in fault / nopfn
- * between mmap_sem and bo_reserve: Perform a trylock operation
+ * between mmap_lock and bo_reserve: Perform a trylock operation
* for reserve, and if it fails, retry the fault after waiting
* for the buffer to become unreserved.
*/
if (unlikely(!dma_resv_trylock(bo->base.resv))) {
/*
* If the fault allows retry and this is the first
- * fault attempt, we try to release the mmap_sem
+ * fault attempt, we try to release the mmap_lock
* before waiting
*/
if (fault_flag_allow_retry_first(vmf->flags)) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
ttm_bo_get(bo);
- up_read(&vmf->vma->vm_mm->mmap_sem);
+ mmap_read_unlock(vmf->vma->vm_mm);
if (!dma_resv_lock_interruptible(bo->base.resv,
NULL))
dma_resv_unlock(bo->base.resv);
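The mmap_sem renames above come from the mmap locking API conversion referenced in the merge description: open-coded rwsem calls on mm->mmap_sem become wrappers that hide the lock's implementation from callers. The correspondence in a minimal sketch:

#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

static void foo_inspect_vmas(struct mm_struct *mm)
{
        mmap_read_lock(mm);     /* was: down_read(&mm->mmap_sem) */
        /* ... walk the VMAs safely for reading ... */
        mmap_read_unlock(mm);   /* was: up_read(&mm->mmap_sem) */
}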
diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c
index 00ba9e5ce130..c3aa39bd38ec 100644
--- a/drivers/gpu/drm/tve200/tve200_drv.c
+++ b/drivers/gpu/drm/tve200/tve200_drv.c
@@ -147,17 +147,7 @@ static struct drm_driver tve200_drm_driver = {
.major = 1,
.minor = 0,
.patchlevel = 0,
- .dumb_create = drm_gem_cma_dumb_create,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
};
static int tve200_probe(struct platform_device *pdev)
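DRM_GEM_CMA_DRIVER_OPS bundles the dumb-buffer, GEM object and PRIME hooks that the deleted block spelled out by hand, so a CMA-backed driver declaration reduces to a few lines. A sketch (foo_fops and the name strings are placeholders, not from this patch):

#include <drm/drm_drv.h>
#include <drm/drm_gem_cma_helper.h>

DEFINE_DRM_GEM_CMA_FOPS(foo_fops);

static struct drm_driver foo_drm_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops = &foo_fops,
        DRM_GEM_CMA_DRIVER_OPS, /* dumb_create + GEM/PRIME plumbing */
        .name = "foo",
        .desc = "example",
};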
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index b50179bb4de0..24d61f61d7db 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o udl_gem.o
+udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o
obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 0afdfb0d1fe1..cdc1c42e1669 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -59,7 +59,7 @@ static int udl_get_modes(struct drm_connector *connector)
static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct udl_device *udl = connector->dev->dev_private;
+ struct udl_device *udl = to_udl(connector->dev);
if (!udl->sku_pixel_limit)
return 0;
@@ -72,7 +72,7 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
- struct udl_device *udl = connector->dev->dev_private;
+ struct udl_device *udl = to_udl(connector->dev);
struct udl_drm_connector *udl_connector =
container_of(connector,
struct udl_drm_connector,
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index e6c1cd77d4d4..96d4317a2c1b 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -10,6 +10,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
@@ -33,20 +34,11 @@ static int udl_usb_resume(struct usb_interface *interface)
DEFINE_DRM_GEM_FOPS(udl_driver_fops);
-static void udl_driver_release(struct drm_device *dev)
-{
- udl_fini(dev);
- udl_modeset_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
-}
-
static struct drm_driver driver = {
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
- .release = udl_driver_release,
- /* gem hooks */
- .gem_create_object = udl_driver_gem_create_object,
+ /* GEM hooks */
+ .gem_create_object = drm_gem_shmem_create_object_cached,
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
@@ -65,27 +57,19 @@ static struct udl_device *udl_driver_create(struct usb_interface *interface)
struct udl_device *udl;
int r;
- udl = kzalloc(sizeof(*udl), GFP_KERNEL);
- if (!udl)
- return ERR_PTR(-ENOMEM);
-
- r = drm_dev_init(&udl->drm, &driver, &interface->dev);
- if (r) {
- kfree(udl);
- return ERR_PTR(r);
- }
+ udl = devm_drm_dev_alloc(&interface->dev, &driver,
+ struct udl_device, drm);
+ if (IS_ERR(udl))
+ return udl;
udl->udev = udev;
- udl->drm.dev_private = udl;
r = udl_init(udl);
- if (r) {
- drm_dev_fini(&udl->drm);
- kfree(udl);
+ if (r)
return ERR_PTR(r);
- }
usb_set_intfdata(interface, udl);
+
return udl;
}
@@ -101,31 +85,22 @@ static int udl_usb_probe(struct usb_interface *interface,
r = drm_dev_register(&udl->drm, 0);
if (r)
- goto err_free;
+ return r;
DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
- r = drm_fbdev_generic_setup(&udl->drm, 0);
- if (r)
- goto err_drm_dev_unregister;
+ drm_fbdev_generic_setup(&udl->drm, 0);
return 0;
-
-err_drm_dev_unregister:
- drm_dev_unregister(&udl->drm);
-err_free:
- drm_dev_put(&udl->drm);
- return r;
}
static void udl_usb_disconnect(struct usb_interface *interface)
{
struct drm_device *dev = usb_get_intfdata(interface);
- drm_kms_helper_poll_disable(dev);
+ drm_kms_helper_poll_fini(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
- drm_dev_put(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index e67227c44cc4..b1461f30780b 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -68,7 +68,6 @@ struct udl_device {
/* modeset */
int udl_modeset_init(struct drm_device *dev);
-void udl_modeset_cleanup(struct drm_device *dev);
struct drm_connector *udl_connector_init(struct drm_device *dev);
struct urb *udl_get_urb(struct drm_device *dev);
@@ -77,15 +76,11 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
int udl_init(struct udl_device *udl);
-void udl_fini(struct drm_device *dev);
int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width);
-struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
- size_t size);
-
int udl_drop_usb(struct drm_device *dev);
#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
deleted file mode 100644
index b6e26f98aa0a..000000000000
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ /dev/null
@@ -1,106 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2012 Red Hat
- */
-
-#include <linux/dma-buf.h>
-#include <linux/vmalloc.h>
-
-#include <drm/drm_drv.h>
-#include <drm/drm_gem_shmem_helper.h>
-#include <drm/drm_mode.h>
-#include <drm/drm_prime.h>
-
-#include "udl_drv.h"
-
-/*
- * GEM object funcs
- */
-
-static int udl_gem_object_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- ret = drm_gem_shmem_mmap(obj, vma);
- if (ret)
- return ret;
-
- vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- if (obj->import_attach)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
-
- return 0;
-}
-
-static void *udl_gem_object_vmap(struct drm_gem_object *obj)
-{
- struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
- int ret;
-
- ret = mutex_lock_interruptible(&shmem->vmap_lock);
- if (ret)
- return ERR_PTR(ret);
-
- if (shmem->vmap_use_count++ > 0)
- goto out;
-
- ret = drm_gem_shmem_get_pages(shmem);
- if (ret)
- goto err_zero_use;
-
- if (obj->import_attach)
- shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
- else
- shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
- VM_MAP, PAGE_KERNEL);
-
- if (!shmem->vaddr) {
- DRM_DEBUG_KMS("Failed to vmap pages\n");
- ret = -ENOMEM;
- goto err_put_pages;
- }
-
-out:
- mutex_unlock(&shmem->vmap_lock);
- return shmem->vaddr;
-
-err_put_pages:
- drm_gem_shmem_put_pages(shmem);
-err_zero_use:
- shmem->vmap_use_count = 0;
- mutex_unlock(&shmem->vmap_lock);
- return ERR_PTR(ret);
-}
-
-static const struct drm_gem_object_funcs udl_gem_object_funcs = {
- .free = drm_gem_shmem_free_object,
- .print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
- .unpin = drm_gem_shmem_unpin,
- .get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = udl_gem_object_vmap,
- .vunmap = drm_gem_shmem_vunmap,
- .mmap = udl_gem_object_mmap,
-};
-
-/*
- * Helpers for struct drm_driver
- */
-
-struct drm_gem_object *udl_driver_gem_create_object(struct drm_device *dev,
- size_t size)
-{
- struct drm_gem_shmem_object *shmem;
- struct drm_gem_object *obj;
-
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return NULL;
-
- obj = &shmem->base;
- obj->funcs = &udl_gem_object_funcs;
-
- return obj;
-}
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 538718919916..f5d27f2a5654 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -351,13 +351,3 @@ int udl_drop_usb(struct drm_device *dev)
udl_free_urb_list(dev);
return 0;
}
-
-void udl_fini(struct drm_device *dev)
-{
- struct udl_device *udl = to_udl(dev);
-
- drm_kms_helper_poll_fini(dev);
-
- if (udl->urbs.count)
- udl_free_urb_list(dev);
-}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index d59ebac70b15..fef43f4e3bac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -215,7 +215,7 @@ static char *udl_dummy_render(char *wrptr)
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int retval;
@@ -266,8 +266,8 @@ static int udl_aligned_damage_clip(struct drm_rect *clip, int x, int y,
return 0;
}
-int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
- int width, int height)
+static int udl_handle_damage(struct drm_framebuffer *fb, int x, int y,
+ int width, int height)
{
struct drm_device *dev = fb->dev;
struct dma_buf_attachment *import_attach = fb->obj[0]->import_attach;
@@ -369,7 +369,7 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = plane_state->fb;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
char *buf;
char *wrptr;
@@ -464,11 +464,13 @@ static const struct drm_mode_config_funcs udl_mode_funcs = {
int udl_modeset_init(struct drm_device *dev)
{
size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct drm_connector *connector;
int ret;
- drm_mode_config_init(dev);
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
dev->mode_config.min_width = 640;
dev->mode_config.min_height = 480;
@@ -482,10 +484,8 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &udl_mode_funcs;
connector = udl_connector_init(dev);
- if (IS_ERR(connector)) {
- ret = PTR_ERR(connector);
- goto err_drm_mode_config_cleanup;
- }
+ if (IS_ERR(connector))
+ return PTR_ERR(connector);
format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
@@ -494,18 +494,9 @@ int udl_modeset_init(struct drm_device *dev)
udl_simple_display_pipe_formats,
format_count, NULL, connector);
if (ret)
- goto err_drm_mode_config_cleanup;
+ return ret;
drm_mode_config_reset(dev);
return 0;
-
-err_drm_mode_config_cleanup:
- drm_mode_config_cleanup(dev);
- return ret;
-}
-
-void udl_modeset_cleanup(struct drm_device *dev)
-{
- drm_mode_config_cleanup(dev);
}
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index edd299ab53d8..8b52cb25877c 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -185,7 +185,7 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
args->offset = bo->node.start << PAGE_SHIFT;
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
@@ -208,7 +208,7 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
}
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return 0;
}
@@ -229,6 +229,6 @@ int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
args->offset = bo->node.start << PAGE_SHIFT;
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 9e953ce64ef7..e76b24bb8828 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -132,7 +132,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
u32 ident0, ident1, ident2, ident3, cores;
int ret, core;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -187,8 +187,8 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused)
(misccfg & V3D_MISCCFG_OVRTMUOUT) != 0);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -219,7 +219,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
int measure_ms = 1000;
int ret;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -245,8 +245,8 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -258,10 +258,10 @@ static const struct drm_info_list v3d_debugfs_list[] = {
{"bo_stats", v3d_debugfs_bo_stats, 0},
};
-int
+void
v3d_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(v3d_debugfs_list,
- ARRAY_SIZE(v3d_debugfs_list),
- minor->debugfs_root, minor);
+ drm_debugfs_create_files(v3d_debugfs_list,
+ ARRAY_SIZE(v3d_debugfs_list),
+ minor->debugfs_root, minor);
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index eaa8e9682373..82a7dfdd14c2 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -25,6 +25,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_managed.h>
#include <uapi/drm/v3d_drm.h>
#include "v3d_drv.h"
@@ -104,7 +105,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
if (args->value != 0)
return -EINVAL;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
@@ -113,8 +114,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
} else {
args->value = V3D_READ(offset);
}
- pm_runtime_mark_last_busy(v3d->dev);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_mark_last_busy(v3d->drm.dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return 0;
}
@@ -234,9 +235,9 @@ static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
struct resource *res =
- platform_get_resource_byname(v3d->pdev, IORESOURCE_MEM, name);
+ platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
- *regs = devm_ioremap_resource(v3d->dev, res);
+ *regs = devm_ioremap_resource(v3d->drm.dev, res);
return PTR_ERR_OR_ZERO(*regs);
}
@@ -250,20 +251,21 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
u32 ident1;
- v3d = kzalloc(sizeof(*v3d), GFP_KERNEL);
- if (!v3d)
- return -ENOMEM;
- v3d->dev = dev;
- v3d->pdev = pdev;
+ v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
+ if (IS_ERR(v3d))
+ return PTR_ERR(v3d);
+
drm = &v3d->drm;
+ platform_set_drvdata(pdev, drm);
+
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
- goto dev_free;
+ return ret;
ret = map_regs(v3d, &v3d->core_regs[0], "core0");
if (ret)
- goto dev_free;
+ return ret;
mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO);
dev->coherent_dma_mask =
@@ -281,45 +283,37 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
ret = PTR_ERR(v3d->reset);
if (ret == -EPROBE_DEFER)
- goto dev_free;
+ return ret;
v3d->reset = NULL;
ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
if (ret) {
dev_err(dev,
"Failed to get reset control or bridge regs\n");
- goto dev_free;
+ return ret;
}
}
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
- goto dev_free;
+ return ret;
}
v3d->mmu_scratch = dma_alloc_wc(dev, 4096, &v3d->mmu_scratch_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->mmu_scratch) {
dev_err(dev, "Failed to allocate MMU scratch page\n");
- ret = -ENOMEM;
- goto dev_free;
+ return -ENOMEM;
}
pm_runtime_use_autosuspend(dev);
pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
- ret = drm_dev_init(&v3d->drm, &v3d_drm_driver, dev);
- if (ret)
- goto dma_free;
-
- platform_set_drvdata(pdev, drm);
- drm->dev_private = v3d;
-
ret = v3d_gem_init(drm);
if (ret)
- goto dev_destroy;
+ goto dma_free;
ret = v3d_irq_init(v3d);
if (ret)
@@ -335,12 +329,8 @@ irq_disable:
v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
-dev_destroy:
- drm_dev_put(drm);
dma_free:
dma_free_wc(dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
-dev_free:
- kfree(v3d);
return ret;
}
@@ -353,9 +343,8 @@ static int v3d_platform_drm_remove(struct platform_device *pdev)
v3d_gem_destroy(drm);
- drm_dev_put(drm);
-
- dma_free_wc(v3d->dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr);
+ dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch,
+ v3d->mmu_scratch_paddr);
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index ac2603334587..8a390738d65b 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -14,7 +14,6 @@
#include "uapi/drm/v3d_drm.h"
struct clk;
-struct device;
struct platform_device;
struct reset_control;
@@ -47,8 +46,6 @@ struct v3d_dev {
int ver;
bool single_irq_line;
- struct device *dev;
- struct platform_device *pdev;
void __iomem *hub_regs;
void __iomem *core_regs[3];
void __iomem *bridge_regs;
@@ -121,7 +118,7 @@ struct v3d_dev {
static inline struct v3d_dev *
to_v3d_dev(struct drm_device *dev)
{
- return (struct v3d_dev *)dev->dev_private;
+ return container_of(dev, struct v3d_dev, drm);
}
static inline bool
@@ -130,6 +127,8 @@ v3d_has_csd(struct v3d_dev *v3d)
return v3d->ver >= 41;
}
+#define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev)
+
/* The per-fd struct, which tracks the MMU mappings. */
struct v3d_file_priv {
struct v3d_dev *v3d;
@@ -316,7 +315,7 @@ struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
/* v3d_debugfs.c */
-int v3d_debugfs_init(struct drm_minor *minor);
+void v3d_debugfs_init(struct drm_minor *minor);
/* v3d_fence.c */
extern const struct dma_fence_ops v3d_fence_ops;
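Dropping dev_private in favour of container_of() works because struct drm_device is embedded in struct v3d_dev; the same one-liner recovers the wrapper from the embedded member, and to_platform_device() plays the identical trick for the parent device. A generic sketch, with foo_device as a stand-in:

#include <linux/kernel.h>       /* container_of() */
#include <drm/drm_device.h>

struct foo_device {
        struct drm_device drm;  /* embedded, so the upcast is pointer math */
        int ver;
};

static inline struct foo_device *to_foo_dev(struct drm_device *dev)
{
        /* Recover the wrapper from the embedded member: type-checked
         * at compile time, with no dev_private pointer to keep in sync.
         */
        return container_of(dev, struct foo_device, drm);
}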
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 549dde83408b..915f8bfdb58c 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -358,7 +358,7 @@ v3d_job_free(struct kref *ref)
for (i = 0; i < job->bo_count; i++) {
if (job->bo[i])
- drm_gem_object_put_unlocked(job->bo[i]);
+ drm_gem_object_put(job->bo[i]);
}
kvfree(job->bo);
@@ -370,8 +370,8 @@ v3d_job_free(struct kref *ref)
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
- pm_runtime_mark_last_busy(job->v3d->dev);
- pm_runtime_put_autosuspend(job->v3d->dev);
+ pm_runtime_mark_last_busy(job->v3d->drm.dev);
+ pm_runtime_put_autosuspend(job->v3d->drm.dev);
kfree(job);
}
@@ -384,7 +384,7 @@ v3d_render_job_free(struct kref *ref)
struct v3d_bo *bo, *save;
list_for_each_entry_safe(bo, save, &job->unref_list, unref_head) {
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
}
v3d_job_free(ref);
@@ -439,7 +439,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
job->v3d = v3d;
job->free = free;
- ret = pm_runtime_get_sync(v3d->dev);
+ ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
return ret;
@@ -458,7 +458,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
return 0;
fail:
xa_destroy(&job->deps);
- pm_runtime_put_autosuspend(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->drm.dev);
return ret;
}
@@ -886,12 +886,12 @@ v3d_gem_init(struct drm_device *dev)
*/
drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);
- v3d->pt = dma_alloc_wc(v3d->dev, pt_size,
+ v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
&v3d->pt_paddr,
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
- dev_err(v3d->dev,
+ dev_err(v3d->drm.dev,
"Failed to allocate page tables. "
"Please ensure you have CMA enabled.\n");
return -ENOMEM;
@@ -903,7 +903,7 @@ v3d_gem_init(struct drm_device *dev)
ret = v3d_sched_init(v3d);
if (ret) {
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt,
+ dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
v3d->pt_paddr);
}
@@ -925,5 +925,6 @@ v3d_gem_destroy(struct drm_device *dev)
drm_mm_takedown(&v3d->mm);
- dma_free_coherent(v3d->dev, 4096 * 1024, (void *)v3d->pt, v3d->pt_paddr);
+ dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
+ v3d->pt_paddr);
}
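The pm_runtime calls now operate on v3d->drm.dev, but the bracket itself is the standard autosuspend pattern: take a runtime-PM reference before touching registers, then mark the device busy and drop the reference asynchronously so back-to-back jobs do not bounce the power state. A sketch (note that careful callers also drop the reference pm_runtime_get_sync() takes even when it fails):

#include <linux/pm_runtime.h>

static int foo_with_hw(struct device *dev)
{
        int ret;

        ret = pm_runtime_get_sync(dev); /* resumes the device if idle */
        if (ret < 0) {
                pm_runtime_put_noidle(dev);     /* ref taken even on error */
                return ret;
        }

        /* ... access hardware registers here ... */

        pm_runtime_mark_last_busy(dev);         /* restart autosuspend timer */
        pm_runtime_put_autosuspend(dev);        /* suspend after the delay */
        return 0;
}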
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 662e67279a7b..c88686489b88 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -72,7 +72,7 @@ v3d_overflow_mem_work(struct work_struct *work)
V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
}
static irqreturn_t
@@ -128,7 +128,7 @@ v3d_irq(int irq, void *arg)
* always-allowed mode.
*/
if (intsts & V3D_INT_GMPV)
- dev_err(v3d->dev, "GMP violation\n");
+ dev_err(v3d->drm.dev, "GMP violation\n");
/* V3D 4.2 wires the hub and core IRQs together, so if we &
* didn't see the common one then check hub for MMU IRQs.
@@ -189,7 +189,7 @@ v3d_hub_irq(int irq, void *arg)
client = v3d41_axi_ids[axi_id];
}
- dev_err(v3d->dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
+ dev_err(v3d->drm.dev, "MMU error from client %s (%d) at 0x%llx%s%s%s\n",
client, axi_id, (long long)vio_addr,
((intsts & V3D_HUB_INT_MMU_WRV) ?
", write violation" : ""),
@@ -217,16 +217,17 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- irq1 = platform_get_irq(v3d->pdev, 1);
+ irq1 = platform_get_irq(v3d_to_pdev(v3d), 1);
if (irq1 == -EPROBE_DEFER)
return irq1;
if (irq1 > 0) {
- ret = devm_request_irq(v3d->dev, irq1,
+ ret = devm_request_irq(v3d->drm.dev, irq1,
v3d_irq, IRQF_SHARED,
"v3d_core0", v3d);
if (ret)
goto fail;
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ ret = devm_request_irq(v3d->drm.dev,
+ platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_hub_irq, IRQF_SHARED,
"v3d_hub", v3d);
if (ret)
@@ -234,7 +235,8 @@ v3d_irq_init(struct v3d_dev *v3d)
} else {
v3d->single_irq_line = true;
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ ret = devm_request_irq(v3d->drm.dev,
+ platform_get_irq(v3d_to_pdev(v3d), 0),
v3d_irq, IRQF_SHARED,
"v3d", v3d);
if (ret)
@@ -246,7 +248,7 @@ v3d_irq_init(struct v3d_dev *v3d)
fail:
if (ret != -EPROBE_DEFER)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ dev_err(v3d->drm.dev, "IRQ setup failed: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index 395e81d97163..3b81ea28c0bb 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -40,7 +40,7 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret)
- dev_err(v3d->dev, "TLB clear wait idle pre-wait failed\n");
+ dev_err(v3d->drm.dev, "TLB clear wait idle pre-wait failed\n");
V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL) |
V3D_MMU_CTL_TLB_CLEAR);
@@ -52,14 +52,14 @@ static int v3d_mmu_flush_all(struct v3d_dev *v3d)
ret = wait_for(!(V3D_READ(V3D_MMU_CTL) &
V3D_MMU_CTL_TLB_CLEARING), 100);
if (ret) {
- dev_err(v3d->dev, "TLB clear wait idle failed\n");
+ dev_err(v3d->drm.dev, "TLB clear wait idle failed\n");
return ret;
}
ret = wait_for(!(V3D_READ(V3D_MMUC_CONTROL) &
V3D_MMUC_CONTROL_FLUSHING), 100);
if (ret)
- dev_err(v3d->dev, "MMUC flush wait idle failed\n");
+ dev_err(v3d->drm.dev, "MMUC flush wait idle failed\n");
return ret;
}
@@ -109,7 +109,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
- dev_err(v3d->dev, "MMU flush timeout\n");
+ dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
@@ -122,5 +122,5 @@ void v3d_mmu_remove_ptes(struct v3d_bo *bo)
v3d->pt[page] = 0;
if (v3d_mmu_flush_all(v3d))
- dev_err(v3d->dev, "MMU flush timeout\n");
+ dev_err(v3d->drm.dev, "MMU flush timeout\n");
}
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 8c2df6d95283..0747614a78f0 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -403,7 +403,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_bin");
if (ret) {
- dev_err(v3d->dev, "Failed to create bin scheduler: %d.", ret);
+ dev_err(v3d->drm.dev, "Failed to create bin scheduler: %d.", ret);
return ret;
}
@@ -413,7 +413,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_render");
if (ret) {
- dev_err(v3d->dev, "Failed to create render scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create render scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -425,7 +425,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_tfu");
if (ret) {
- dev_err(v3d->dev, "Failed to create TFU scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create TFU scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -438,7 +438,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_csd");
if (ret) {
- dev_err(v3d->dev, "Failed to create CSD scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create CSD scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
@@ -450,7 +450,7 @@ v3d_sched_init(struct v3d_dev *v3d)
msecs_to_jiffies(hang_limit_ms),
"v3d_cache_clean");
if (ret) {
- dev_err(v3d->dev, "Failed to create CACHE_CLEAN scheduler: %d.",
+ dev_err(v3d->drm.dev, "Failed to create CACHE_CLEAN scheduler: %d.",
ret);
v3d_sched_fini(v3d);
return ret;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index ac8f75db2ecd..cf2e3e6a2388 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -17,6 +17,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include "vbox_drv.h"
@@ -45,28 +46,22 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
- if (!vbox)
- return -ENOMEM;
-
- ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
- if (ret) {
- kfree(vbox);
- return ret;
- }
+ vbox = devm_drm_dev_alloc(&pdev->dev, &driver,
+ struct vbox_private, ddev);
+ if (IS_ERR(vbox))
+ return PTR_ERR(vbox);
vbox->ddev.pdev = pdev;
- vbox->ddev.dev_private = vbox;
pci_set_drvdata(pdev, vbox);
mutex_init(&vbox->hw_mutex);
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
- goto err_dev_put;
+ return ret;
ret = vbox_hw_init(vbox);
if (ret)
- goto err_pci_disable;
+ return ret;
ret = vbox_mm_init(vbox);
if (ret)
@@ -80,14 +75,12 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto err_mode_fini;
- ret = drm_fbdev_generic_setup(&vbox->ddev, 32);
- if (ret)
- goto err_irq_fini;
-
ret = drm_dev_register(&vbox->ddev, 0);
if (ret)
goto err_irq_fini;
+ drm_fbdev_generic_setup(&vbox->ddev, 32);
+
return 0;
err_irq_fini:
@@ -98,10 +91,6 @@ err_mm_fini:
vbox_mm_fini(vbox);
err_hw_fini:
vbox_hw_fini(vbox);
-err_pci_disable:
- pci_disable_device(pdev);
-err_dev_put:
- drm_dev_put(&vbox->ddev);
return ret;
}
@@ -114,7 +103,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
vbox_mode_fini(vbox);
vbox_mm_fini(vbox);
vbox_hw_fini(vbox);
- drm_dev_put(&vbox->ddev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 87421903816c..ac7c2effc46f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -127,6 +127,7 @@ struct vbox_encoder {
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_dev(x) container_of(x, struct vbox_private, ddev)
bool vbox_check_supported(u16 id);
int vbox_hw_init(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
index 16a1e29f5292..631657fa554f 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_irq.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -34,7 +34,7 @@ void vbox_report_hotplug(struct vbox_private *vbox)
irqreturn_t vbox_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *)arg;
- struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(dev);
u32 host_flags = vbox_get_flags(vbox);
if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
index 9dcab115a261..d68d9bad7674 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_main.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -71,8 +71,6 @@ static void vbox_accel_fini(struct vbox_private *vbox)
for (i = 0; i < vbox->num_crtcs; ++i)
vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
-
- pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
}
/* Do we support the 4.3 plus mode hint reporting interface? */
@@ -123,21 +121,22 @@ int vbox_hw_init(struct vbox_private *vbox)
return -ENOMEM;
/* Create guest-heap mem-pool use 2^4 = 16 byte chunks */
- vbox->guest_pool = gen_pool_create(4, -1);
+ vbox->guest_pool = devm_gen_pool_create(vbox->ddev.dev, 4, -1,
+ "vboxvideo-accel");
if (!vbox->guest_pool)
- goto err_unmap_guest_heap;
+ return -ENOMEM;
ret = gen_pool_add_virt(vbox->guest_pool,
(unsigned long)vbox->guest_heap,
GUEST_HEAP_OFFSET(vbox),
GUEST_HEAP_USABLE_SIZE, -1);
if (ret)
- goto err_destroy_guest_pool;
+ return ret;
ret = hgsmi_test_query_conf(vbox->guest_pool);
if (ret) {
DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
- goto err_destroy_guest_pool;
+ return ret;
}
/* Reduce available VRAM size to reflect the guest heap. */
@@ -149,33 +148,23 @@ int vbox_hw_init(struct vbox_private *vbox)
if (!have_hgsmi_mode_hints(vbox)) {
ret = -ENOTSUPP;
- goto err_destroy_guest_pool;
+ return ret;
}
vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
sizeof(struct vbva_modehint),
GFP_KERNEL);
- if (!vbox->last_mode_hints) {
- ret = -ENOMEM;
- goto err_destroy_guest_pool;
- }
+ if (!vbox->last_mode_hints)
+ return -ENOMEM;
ret = vbox_accel_init(vbox);
if (ret)
- goto err_destroy_guest_pool;
+ return ret;
return 0;
-
-err_destroy_guest_pool:
- gen_pool_destroy(vbox->guest_pool);
-err_unmap_guest_heap:
- pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
- return ret;
}
void vbox_hw_fini(struct vbox_private *vbox)
{
vbox_accel_fini(vbox);
- gen_pool_destroy(vbox->guest_pool);
- pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
}
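devm_gen_pool_create() is what lets the error-path gen_pool_destroy() and pci_iounmap() unwinding disappear above: the pool becomes a devres resource released on unbind, and pcim_enable_device() in the probe path makes the PCI side managed as well. The managed pool call in isolation (a sketch; the name string is arbitrary):

#include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/numa.h>

static int foo_pool_init(struct device *dev, struct gen_pool **pool)
{
        /* 2^4 = 16-byte chunks, any NUMA node; devres destroys the
         * pool on unbind, so no gen_pool_destroy() in error paths.
         */
        *pool = devm_gen_pool_create(dev, 4, NUMA_NO_NODE, "foo-pool");
        return *pool ? 0 : -ENOMEM;
}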
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 0883a435e62b..d9a5af62af89 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -36,7 +36,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
u16 flags;
s32 x_offset, y_offset;
- vbox = crtc->dev->dev_private;
+ vbox = to_vbox_dev(crtc->dev);
width = vbox_crtc->width ? vbox_crtc->width : 640;
height = vbox_crtc->height ? vbox_crtc->height : 480;
bpp = fb ? fb->format->cpp[0] * 8 : 32;
@@ -77,7 +77,7 @@ static void vbox_do_modeset(struct drm_crtc *crtc)
static int vbox_set_view(struct drm_crtc *crtc)
{
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(crtc->dev);
struct vbva_infoview *p;
/*
@@ -174,7 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
int x, int y)
{
struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]);
- struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(crtc->dev);
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
@@ -272,7 +272,7 @@ static void vbox_primary_atomic_update(struct drm_plane *plane,
{
struct drm_crtc *crtc = plane->state->crtc;
struct drm_framebuffer *fb = plane->state->fb;
- struct vbox_private *vbox = fb->dev->dev_private;
+ struct vbox_private *vbox = to_vbox_dev(fb->dev);
struct drm_mode_rect *clips;
uint32_t num_clips, i;
@@ -704,7 +704,7 @@ static int vbox_get_modes(struct drm_connector *connector)
int preferred_width, preferred_height;
vbox_connector = to_vbox_connector(connector);
- vbox = connector->dev->dev_private;
+ vbox = to_vbox_dev(connector->dev);
hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
HOST_FLAGS_OFFSET);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index 976423d0c3cc..f5a06675da43 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -24,25 +24,13 @@ int vbox_mm_init(struct vbox_private *vbox)
return ret;
}
-#ifdef DRM_MTRR_WC
- vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
- pci_resource_len(dev->pdev, 0),
- DRM_MTRR_WC);
-#else
vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
pci_resource_len(dev->pdev, 0));
-#endif
return 0;
}
void vbox_mm_fini(struct vbox_private *vbox)
{
-#ifdef DRM_MTRR_WC
- drm_mtrr_del(vbox->fb_mtrr,
- pci_resource_start(vbox->ddev.pdev, 0),
- pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
-#else
arch_phys_wc_del(vbox->fb_mtrr);
-#endif
drm_vram_helper_release_mm(&vbox->ddev);
}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 72d30d90b856..74ceebd62fbc 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -490,7 +490,7 @@ int vc4_dumb_create(struct drm_file *file_priv,
bo->madv = VC4_MADV_WILLNEED;
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
@@ -834,7 +834,7 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
bo->madv = VC4_MADV_WILLNEED;
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
@@ -854,7 +854,7 @@ int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
/* The mmap offset was set up at BO allocation time. */
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return 0;
}
@@ -918,7 +918,7 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
fail:
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
return ret;
}
@@ -965,7 +965,7 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
bo = to_vc4_bo(gem_obj);
bo->t_format = t_format;
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return 0;
}
@@ -1000,7 +1000,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
else
args->modifier = DRM_FORMAT_MOD_NONE;
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return 0;
}
@@ -1091,7 +1091,7 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
ret = -ENOMEM;
mutex_unlock(&vc4->bo_lock);
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 1208258ad3b2..29131409a4de 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -44,26 +44,7 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-struct vc4_crtc_state {
- struct drm_crtc_state base;
- /* Dlist area for this CRTC configuration. */
- struct drm_mm_node mm;
- bool feed_txp;
- bool txp_armed;
-
- struct {
- unsigned int left;
- unsigned int right;
- unsigned int top;
- unsigned int bottom;
- } margins;
-};
-
-static inline struct vc4_crtc_state *
-to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
-{
- return (struct vc4_crtc_state *)crtc_state;
-}
+#define HVS_FIFO_LATENCY_PIX 6
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
@@ -252,18 +233,17 @@ vc4_crtc_update_gamma_lut(struct drm_crtc *crtc)
static u32 vc4_get_fifo_full_level(u32 format)
{
static const u32 fifo_len_bytes = 64;
- static const u32 hvs_latency_pix = 6;
switch (format) {
case PV_CONTROL_FORMAT_DSIV_16:
case PV_CONTROL_FORMAT_DSIC_16:
- return fifo_len_bytes - 2 * hvs_latency_pix;
+ return fifo_len_bytes - 2 * HVS_FIFO_LATENCY_PIX;
case PV_CONTROL_FORMAT_DSIV_18:
return fifo_len_bytes - 14;
case PV_CONTROL_FORMAT_24:
case PV_CONTROL_FORMAT_DSIV_24:
default:
- return fifo_len_bytes - 3 * hvs_latency_pix;
+ return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
}
}
@@ -364,7 +344,8 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc)
(is_dsi ? PV_VCONTROL_DSI : 0));
}
- CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
+ if (is_dsi)
+ CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
CRTC_WRITE(PV_CONTROL,
VC4_SET_FIELD(format, PV_CONTROL_FORMAT) |
@@ -1044,7 +1025,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.get_scanout_position = vc4_crtc_get_scanout_position,
};
-static const struct vc4_crtc_data pv0_data = {
+static const struct vc4_crtc_data bcm2835_pv0_data = {
.hvs_channel = 0,
.debugfs_name = "crtc0_regs",
.encoder_types = {
@@ -1053,7 +1034,7 @@ static const struct vc4_crtc_data pv0_data = {
},
};
-static const struct vc4_crtc_data pv1_data = {
+static const struct vc4_crtc_data bcm2835_pv1_data = {
.hvs_channel = 2,
.debugfs_name = "crtc1_regs",
.encoder_types = {
@@ -1062,7 +1043,7 @@ static const struct vc4_crtc_data pv1_data = {
},
};
-static const struct vc4_crtc_data pv2_data = {
+static const struct vc4_crtc_data bcm2835_pv2_data = {
.hvs_channel = 1,
.debugfs_name = "crtc2_regs",
.encoder_types = {
@@ -1072,9 +1053,9 @@ static const struct vc4_crtc_data pv2_data = {
};
static const struct of_device_id vc4_crtc_dt_match[] = {
- { .compatible = "brcm,bcm2835-pixelvalve0", .data = &pv0_data },
- { .compatible = "brcm,bcm2835-pixelvalve1", .data = &pv1_data },
- { .compatible = "brcm,bcm2835-pixelvalve2", .data = &pv2_data },
+ { .compatible = "brcm,bcm2835-pixelvalve0", .data = &bcm2835_pv0_data },
+ { .compatible = "brcm,bcm2835-pixelvalve1", .data = &bcm2835_pv1_data },
+ { .compatible = "brcm,bcm2835-pixelvalve2", .data = &bcm2835_pv2_data },
{}
};
@@ -1128,10 +1109,10 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = dev_get_drvdata(master);
+ const struct vc4_crtc_data *pv_data;
struct vc4_crtc *vc4_crtc;
struct drm_crtc *crtc;
- struct drm_plane *primary_plane, *cursor_plane, *destroy_plane, *temp;
- const struct of_device_id *match;
+ struct drm_plane *primary_plane, *destroy_plane, *temp;
int ret, i;
vc4_crtc = devm_kzalloc(dev, sizeof(*vc4_crtc), GFP_KERNEL);
@@ -1139,10 +1120,10 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
crtc = &vc4_crtc->base;
- match = of_match_device(vc4_crtc_dt_match, dev);
- if (!match)
+ pv_data = of_device_get_match_data(dev);
+ if (!pv_data)
return -ENODEV;
- vc4_crtc->data = match->data;
+ vc4_crtc->data = pv_data;
vc4_crtc->pdev = pdev;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
@@ -1178,35 +1159,6 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
*/
drm_crtc_enable_color_mgmt(crtc, 0, true, crtc->gamma_size);
- /* Set up some arbitrary number of planes. We're not limited
- * by a set number of physical registers, just the space in
- * the HVS (16k) and how small an plane can be (28 bytes).
- * However, each plane we set up takes up some memory, and
- * increases the cost of looping over planes, which atomic
- * modesetting does quite a bit. As a result, we pick a
- * modest number of planes to expose, that should hopefully
- * still cover any sane usecase.
- */
- for (i = 0; i < 8; i++) {
- struct drm_plane *plane =
- vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
-
- if (IS_ERR(plane))
- continue;
-
- plane->possible_crtcs = drm_crtc_mask(crtc);
- }
-
- /* Set up the legacy cursor after overlay initialization,
- * since we overlay planes on the CRTC in the order they were
- * initialized.
- */
- cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
- if (!IS_ERR(cursor_plane)) {
- cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
- crtc->cursor = cursor_plane;
- }
-
vc4_crtc_get_cob_allocation(vc4_crtc);
CRTC_WRITE(PV_INTEN, 0);
@@ -1226,7 +1178,7 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, vc4_crtc);
- vc4_debugfs_add_regset32(drm, vc4_crtc->data->debugfs_name,
+ vc4_debugfs_add_regset32(drm, pv_data->debugfs_name,
&vc4_crtc->regset);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index b61b2d3407b5..4fbbf980a299 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -20,7 +20,7 @@ struct vc4_debugfs_info_entry {
* Called at drm_dev_register() time on each of the minors registered
* by the DRM device, to attach the debugfs files.
*/
-int
+void
vc4_debugfs_init(struct drm_minor *minor)
{
struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
@@ -30,14 +30,9 @@ vc4_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, &vc4->load_tracker_enabled);
list_for_each_entry(entry, &vc4->debugfs_list, link) {
- int ret = drm_debugfs_create_files(&entry->info, 1,
- minor->debugfs_root, minor);
-
- if (ret)
- return ret;
+ drm_debugfs_create_files(&entry->info, 1,
+ minor->debugfs_root, minor);
}
-
- return 0;
}
static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 6dfede03396e..a90f2545baee 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -17,6 +17,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
@@ -114,10 +115,6 @@ static const struct debugfs_reg32 dpi_regs[] = {
VC4_REG32(DPI_ID),
};
-static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void vc4_dpi_encoder_disable(struct drm_encoder *encoder)
{
struct vc4_dpi_encoder *vc4_encoder = to_vc4_dpi_encoder(encoder);
@@ -309,8 +306,7 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
if (ret)
DRM_ERROR("Failed to turn on core clock: %d\n", ret);
- drm_encoder_init(drm, dpi->encoder, &vc4_dpi_encoder_funcs,
- DRM_MODE_ENCODER_DPI, NULL);
+ drm_simple_encoder_init(drm, dpi->encoder, DRM_MODE_ENCODER_DPI);
drm_encoder_helper_add(dpi->encoder, &vc4_dpi_encoder_helper_funcs);
ret = vc4_dpi_init_bridge(dpi);
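drm_simple_encoder_init() exists for encoders whose only funcs callback would be drm_encoder_cleanup(), which is why the vc4_*_encoder_funcs tables can be deleted wholesale here and in the DSI, HDMI and VEC changes below. Minimal usage sketch:

#include <drm/drm_simple_kms_helper.h>

static int foo_encoder_init(struct drm_device *drm,
                            struct drm_encoder *encoder)
{
        /* Supplies an internal funcs table whose .destroy is
         * drm_encoder_cleanup(); no per-driver table required.
         */
        return drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DPI);
}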
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 76f93b662766..7792c97d4303 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -288,6 +288,10 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
goto gem_destroy;
+ ret = vc4_plane_create_additional_planes(drm);
+ if (ret)
+ goto unbind_all;
+
drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
ret = vc4_kms_load(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 139d25a8328e..9866d61bfa88 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -2,6 +2,8 @@
/*
* Copyright (C) 2015 Broadcom
*/
+#ifndef _VC4_DRV_H_
+#define _VC4_DRV_H_
#include <linux/delay.h>
#include <linux/refcount.h>
@@ -475,6 +477,27 @@ to_vc4_crtc(struct drm_crtc *crtc)
return (struct vc4_crtc *)crtc;
}
+struct vc4_crtc_state {
+ struct drm_crtc_state base;
+ /* Dlist area for this CRTC configuration. */
+ struct drm_mm_node mm;
+ bool feed_txp;
+ bool txp_armed;
+
+ struct {
+ unsigned int left;
+ unsigned int right;
+ unsigned int top;
+ unsigned int bottom;
+ } margins;
+};
+
+static inline struct vc4_crtc_state *
+to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
+{
+ return (struct vc4_crtc_state *)crtc_state;
+}
+
#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
@@ -759,7 +782,7 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
unsigned int *top, unsigned int *bottom);
/* vc4_debugfs.c */
-int vc4_debugfs_init(struct drm_minor *minor);
+void vc4_debugfs_init(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void vc4_debugfs_add_file(struct drm_device *drm,
const char *filename,
@@ -844,6 +867,7 @@ int vc4_kms_load(struct drm_device *dev);
/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
enum drm_plane_type type);
+int vc4_plane_create_additional_planes(struct drm_device *dev);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
@@ -897,3 +921,5 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+#endif /* _VC4_DRV_H_ */
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index d99b1d526651..eaf276978ee7 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -37,6 +37,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -652,15 +653,6 @@ static const struct debugfs_reg32 dsi1_regs[] = {
VC4_REG32(DSI1_ID),
};
-static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_dsi_encoder_funcs = {
- .destroy = vc4_dsi_encoder_destroy,
-};
-
static void vc4_dsi_latch_ulps(struct vc4_dsi *dsi, bool latch)
{
u32 afec0 = DSI_PORT_READ(PHY_AFEC0);
@@ -1615,8 +1607,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (dsi->port == 1)
vc4->dsi1 = dsi;
- drm_encoder_init(drm, dsi->encoder, &vc4_dsi_encoder_funcs,
- DRM_MODE_ENCODER_DSI, NULL);
+ drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
@@ -1656,7 +1647,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
* normally.
*/
list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain);
- vc4_dsi_encoder_destroy(dsi->encoder);
+ drm_encoder_cleanup(dsi->encoder);
if (dsi->port == 1)
vc4->dsi1 = NULL;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index e1cfc3ccd05a..9f01ddd5b932 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -58,7 +58,7 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
unsigned int i;
for (i = 0; i < state->user_state.bo_count; i++)
- drm_gem_object_put_unlocked(state->bo[i]);
+ drm_gem_object_put(state->bo[i]);
kfree(state);
}
@@ -808,7 +808,7 @@ fail_dec_usecnt:
fail_put_bo:
/* Release any reference to acquired objects. */
for (i = 0; i < exec->bo_count && exec->bo[i]; i++)
- drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put(&exec->bo[i]->base);
fail:
kvfree(handles);
@@ -957,7 +957,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
vc4_bo_dec_usecnt(bo);
- drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put(&exec->bo[i]->base);
}
kvfree(exec->bo);
}
@@ -966,7 +966,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
struct vc4_bo *bo = list_first_entry(&exec->unref_list,
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
}
/* Free up the allocation of any bin slots we used. */
@@ -1107,7 +1107,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
ret = vc4_wait_for_seqno_ioctl_helper(dev, bo->seqno,
&args->timeout_ns);
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return ret;
}
@@ -1301,7 +1301,7 @@ vc4_gem_destroy(struct drm_device *dev)
* the overflow allocation registers. Now free the object.
*/
if (vc4->bin_bo) {
- drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
+ drm_gem_object_put(&vc4->bin_bo->base.base);
vc4->bin_bo = NULL;
}
@@ -1382,7 +1382,7 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
ret = 0;
out_put_gem:
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
return ret;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 340719238753..15a11cd4de25 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
@@ -306,15 +307,6 @@ static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
return connector;
}
-static void vc4_hdmi_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs vc4_hdmi_encoder_funcs = {
- .destroy = vc4_hdmi_encoder_destroy,
-};
-
static int vc4_hdmi_stop_packet(struct drm_encoder *encoder,
enum hdmi_infoframe_type type)
{
@@ -1338,8 +1330,10 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
- DRM_ERROR("Failed to get pixel clock\n");
- return PTR_ERR(hdmi->pixel_clock);
+ ret = PTR_ERR(hdmi->pixel_clock);
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get pixel clock\n");
+ return ret;
}
hdmi->hsm_clock = devm_clk_get(dev, "hdmi");
if (IS_ERR(hdmi->hsm_clock)) {
@@ -1406,8 +1400,7 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
}
pm_runtime_enable(dev);
- drm_encoder_init(drm, hdmi->encoder, &vc4_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, hdmi->encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(hdmi->encoder, &vc4_hdmi_encoder_helper_funcs);
hdmi->connector =
@@ -1465,7 +1458,7 @@ err_destroy_conn:
vc4_hdmi_connector_destroy(hdmi->connector);
#endif
err_destroy_encoder:
- vc4_hdmi_encoder_destroy(hdmi->encoder);
+ drm_encoder_cleanup(hdmi->encoder);
err_unprepare_hsm:
clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
@@ -1484,7 +1477,7 @@ static void vc4_hdmi_unbind(struct device *dev, struct device *master,
cec_unregister_adapter(hdmi->cec_adap);
vc4_hdmi_connector_destroy(hdmi->connector);
- vc4_hdmi_encoder_destroy(hdmi->encoder);
+ drm_encoder_cleanup(hdmi->encoder);
clk_disable_unprepare(hdmi->hsm_clock);
pm_runtime_disable(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 78d4fb0499e3..08318e69061b 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -330,7 +330,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev,
mode_cmd_local.modifier[0] = DRM_FORMAT_MOD_NONE;
}
- drm_gem_object_put_unlocked(gem_obj);
+ drm_gem_object_put(gem_obj);
mode_cmd = &mode_cmd_local;
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 91e408f7a56e..d040d9f12c6d 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -1267,3 +1267,44 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev,
return plane;
}
+
+int vc4_plane_create_additional_planes(struct drm_device *drm)
+{
+ struct drm_plane *cursor_plane;
+ struct drm_crtc *crtc;
+ unsigned int i;
+
+ /* Set up some arbitrary number of planes. We're not limited
+ * by a set number of physical registers, just the space in
+ * the HVS (16k) and how small a plane can be (28 bytes).
+ * However, each plane we set up takes up some memory, and
+ * increases the cost of looping over planes, which atomic
+ * modesetting does quite a bit. As a result, we pick a
+ * modest number of planes to expose, which should hopefully
+ * still cover any sane use case.
+ */
+ for (i = 0; i < 8; i++) {
+ struct drm_plane *plane =
+ vc4_plane_init(drm, DRM_PLANE_TYPE_OVERLAY);
+
+ if (IS_ERR(plane))
+ continue;
+
+ plane->possible_crtcs =
+ GENMASK(drm->mode_config.num_crtc - 1, 0);
+ }
+
+ drm_for_each_crtc(crtc, drm) {
+ /* Set up the legacy cursor after overlay initialization,
+ * since we overlay planes on the CRTC in the order they were
+ * initialized.
+ */
+ cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
+ if (!IS_ERR(cursor_plane)) {
+ cursor_plane->possible_crtcs = drm_crtc_mask(crtc);
+ crtc->cursor = cursor_plane;
+ }
+ }
+
+ return 0;
+}
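
Editor's note: the possible_crtcs computation above relies on GENMASK(): with n CRTCs registered, GENMASK(n - 1, 0) sets the low n bits, so every overlay plane may be bound to any CRTC, while each cursor plane is tied to exactly one via drm_crtc_mask(). A tiny illustrative helper (hypothetical name; callers must guarantee num_crtc > 0):

#include <linux/bits.h>

/* With num_crtc == 3 this returns 0x7: the plane can be assigned to
 * any of the three CRTCs. */
static u32 example_possible_crtcs(unsigned int num_crtc)
{
	return GENMASK(num_crtc - 1, 0);
}
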
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index cea77a21b205..f7ab979721b3 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -308,7 +308,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
struct vc4_bo, unref_head);
list_del(&bo->unref_head);
- drm_gem_object_put_unlocked(&bo->base.base);
+ drm_gem_object_put(&bo->base.base);
}
return ret;
@@ -344,7 +344,7 @@ static void bin_bo_release(struct kref *ref)
if (WARN_ON_ONCE(!vc4->bin_bo))
return;
- drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
+ drm_gem_object_put(&vc4->bin_bo->base.base);
vc4->bin_bo = NULL;
}
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 7402bc768664..bd5b8eb58b18 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -17,6 +17,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_graph.h>
@@ -374,10 +375,6 @@ static struct drm_connector *vc4_vec_connector_init(struct drm_device *dev,
return connector;
}
-static const struct drm_encoder_funcs vc4_vec_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static void vc4_vec_encoder_disable(struct drm_encoder *encoder)
{
struct vc4_vec_encoder *vc4_vec_encoder = to_vc4_vec_encoder(encoder);
@@ -566,8 +563,7 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
pm_runtime_enable(dev);
- drm_encoder_init(drm, vec->encoder, &vc4_vec_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_simple_encoder_init(drm, vec->encoder, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(vec->encoder, &vc4_vec_encoder_helper_funcs);
vec->connector = vc4_vec_connector_init(drm, vec);
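
Editor's note: vc4_hdmi, vc4_vec, and several drivers below make the same conversion. An encoder whose only funcs callback is drm_encoder_cleanup() can use drm_simple_encoder_init(), which supplies that funcs table internally. The before/after shape, with illustrative names:

/* Before: per-driver boilerplate just to call drm_encoder_cleanup(). */
static const struct drm_encoder_funcs example_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};

static int example_encoder_register(struct drm_device *drm,
				    struct drm_encoder *encoder)
{
	return drm_encoder_init(drm, encoder, &example_encoder_funcs,
				DRM_MODE_ENCODER_TMDS, NULL);
}

/* After: one call, no funcs struct; needs <drm/drm_simple_kms_helper.h>. */
static int example_encoder_register_simple(struct drm_device *drm,
					   struct drm_encoder *encoder)
{
	return drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
}
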
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 909eba43664a..e4dc7b267a0b 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -39,6 +39,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
#include "vgem_drv.h"
@@ -197,7 +198,7 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
ret = drm_gem_handle_create(file, &obj->base, handle);
if (ret) {
- drm_gem_object_put_unlocked(&obj->base);
+ drm_gem_object_put(&obj->base);
return ERR_PTR(ret);
}
@@ -222,7 +223,7 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- drm_gem_object_put_unlocked(gem_object);
+ drm_gem_object_put(gem_object);
DRM_DEBUG("Created object of size %llu\n", args->size);
@@ -250,7 +251,7 @@ static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
*offset = drm_vma_node_offset_addr(&obj->vma_node);
unref:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
@@ -431,9 +432,6 @@ static void vgem_release(struct drm_device *dev)
struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
platform_device_unregister(vgem->platform);
- drm_dev_fini(&vgem->drm);
-
- kfree(vgem);
}
static struct drm_driver vgem_driver = {
@@ -489,16 +487,19 @@ static int __init vgem_init(void)
&vgem_device->platform->dev);
if (ret)
goto out_unregister;
+ drmm_add_final_kfree(&vgem_device->drm, vgem_device);
/* Final step: expose the device/driver to userspace */
- ret = drm_dev_register(&vgem_device->drm, 0);
+ ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_fini;
+ goto out_put;
return 0;
-out_fini:
- drm_dev_fini(&vgem_device->drm);
+out_put:
+ drm_dev_put(&vgem_device->drm);
+ return ret;
+
out_unregister:
platform_device_unregister(vgem_device->platform);
out_free:
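
Editor's note: vgem (and vkms below) move to managed device lifetime. drmm_add_final_kfree() records which allocation embeds the drm_device; the final drm_dev_put() then kfree()s it, so release callbacks stop calling drm_dev_fini() and kfree() themselves, and error paths after registration become a single put. A condensed sketch under those assumptions, with illustrative names (needs <drm/drm_drv.h> and <drm/drm_managed.h>; example_driver is assumed to exist):

struct example_device {
	struct drm_device drm;
	/* driver-private state ... */
};

static int example_device_create(struct device *parent,
				 struct example_device **out)
{
	struct example_device *edev;
	int ret;

	edev = kzalloc(sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	ret = drm_dev_init(&edev->drm, &example_driver, parent);
	if (ret) {
		kfree(edev);		/* not yet managed: free by hand */
		return ret;
	}
	drmm_add_final_kfree(&edev->drm, edev);

	ret = drm_dev_register(&edev->drm, 0);
	if (ret) {
		drm_dev_put(&edev->drm);	/* frees edev via drmm */
		return ret;
	}

	*out = edev;
	return 0;
}
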
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index 9268f6fc3f66..17f32f550dd9 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -182,7 +182,7 @@ err_fence:
dma_fence_put(fence);
}
err:
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index e27120d512b0..3221520f61f0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -72,11 +72,10 @@ static struct drm_info_list virtio_gpu_debugfs_list[] = {
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
-int
+void
virtio_gpu_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(virtio_gpu_debugfs_list,
VIRTIO_GPU_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
- return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 2b7e6ae65546..f3ce49c5a34c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,6 +30,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "virtgpu_drv.h"
@@ -240,10 +241,6 @@ static const struct drm_connector_funcs virtio_gpu_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs virtio_gpu_enc_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
{
struct drm_device *dev = vgdev->ddev;
@@ -276,8 +273,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
if (vgdev->has_edid)
drm_connector_attach_edid_property(connector);
- drm_encoder_init(dev, encoder, &virtio_gpu_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs);
encoder->possible_crtcs = 1 << index;
@@ -311,7 +307,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
if (ret) {
kfree(virtio_gpu_fb);
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return NULL;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index c1824bdf2418..9ff9f4ac0522 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -218,26 +218,19 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
-/* virtio_kms.c */
+/* virtgpu_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
-/* virtio_gem.c */
-void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj);
-int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
-void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p);
+/* virtgpu_gem.c */
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
@@ -263,7 +256,7 @@ void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs);
void virtio_gpu_array_put_free_work(struct work_struct *work);
-/* virtio vg */
+/* virtgpu_vq.c */
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
@@ -287,10 +280,10 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t scanout_id, uint32_t resource_id,
uint32_t width, uint32_t height,
uint32_t x, uint32_t y);
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_mem_entry *ents,
- unsigned int nents);
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -343,17 +336,17 @@ void virtio_gpu_dequeue_fence_func(struct work_struct *work);
void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
-/* virtio_gpu_display.c */
+/* virtgpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
-/* virtio_gpu_plane.c */
+/* virtgpu_plane.c */
uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc);
struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
enum drm_plane_type type,
int index);
-/* virtio_gpu_fence.c */
+/* virtgpu_fence.c */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -362,7 +355,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
-/* virtio_gpu_object */
+/* virtgpu_object.c */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
@@ -378,7 +371,7 @@ struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
-/* virgl debugfs */
-int virtio_gpu_debugfs_init(struct drm_minor *minor);
+/* virtgpu_debugfs.c */
+void virtio_gpu_debugfs_init(struct drm_minor *minor);
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0d6152c99a27..24ffacac99e4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -28,17 +28,20 @@
#include "virtgpu_drv.h"
-int virtio_gpu_gem_create(struct drm_file *file,
- struct drm_device *dev,
- struct virtio_gpu_object_params *params,
- struct drm_gem_object **obj_p,
- uint32_t *handle_p)
+static int virtio_gpu_gem_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct drm_gem_object **obj_p,
+ uint32_t *handle_p)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
u32 handle;
+ if (vgdev->has_virgl_3d)
+ virtio_gpu_create_context(dev, file);
+
ret = virtio_gpu_object_create(vgdev, params, &obj, NULL);
if (ret < 0)
return ret;
@@ -52,7 +55,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
*obj_p = &obj->base.base;
/* drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(&obj->base.base);
+ drm_gem_object_put(&obj->base.base);
*handle_p = handle;
return 0;
@@ -102,7 +105,7 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv,
if (gobj == NULL)
return -ENOENT;
*offset_p = drm_vma_node_offset_addr(&gobj->vma_node);
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
@@ -114,7 +117,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct virtio_gpu_object_array *objs;
if (!vgdev->has_virgl_3d)
- return 0;
+ goto out_notify;
objs = virtio_gpu_array_alloc(1);
if (!objs)
@@ -123,6 +126,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+out_notify:
virtio_gpu_notify(vgdev);
return 0;
}
@@ -236,7 +240,7 @@ void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs)
u32 i;
for (i = 0; i < objs->nents; i++)
- drm_gem_object_put_unlocked(objs->objs[i]);
+ drm_gem_object_put(objs->objs[i]);
virtio_gpu_array_free(objs);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 336cc9143205..7a2430e34e00 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -27,14 +27,14 @@
#include <linux/file.h>
#include <linux/sync_file.h>
+#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include <drm/virtgpu_drm.h>
#include "virtgpu_drv.h"
-static void virtio_gpu_create_context(struct drm_device *dev,
- struct drm_file *file)
+void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
@@ -47,7 +47,6 @@ static void virtio_gpu_create_context(struct drm_device *dev,
get_task_comm(dbgname, current);
virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
strlen(dbgname), dbgname);
- virtio_gpu_notify(vgdev);
vfpriv->context_created = true;
out_unlock:
@@ -279,7 +278,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
drm_gem_object_release(obj);
return ret;
}
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
@@ -301,7 +300,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
ri->size = qobj->base.base.size;
ri->res_handle = qobj->hw_res_handle;
- drm_gem_object_put_unlocked(gobj);
+ drm_gem_object_put(gobj);
return 0;
}
@@ -418,7 +417,7 @@ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
else if (ret > 0)
ret = 0;
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 023a030ca7b9..0a5c8cf409fb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -25,6 +25,7 @@
#include <linux/virtio.h>
#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
#include <drm/drm_file.h>
@@ -52,14 +53,6 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
- uint32_t ctx_id)
-{
- virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
- virtio_gpu_notify(vgdev);
- ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
-}
-
static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq,
void (*work_func)(struct work_struct *work))
{
@@ -274,14 +267,17 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
if (!vgdev->has_virgl_3d)
return;
- vfpriv = file->driver_priv;
+ if (vfpriv->context_created) {
+ virtio_gpu_cmd_context_destroy(vgdev, vfpriv->ctx_id);
+ virtio_gpu_notify(vgdev);
+ }
- virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+ ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1);
mutex_destroy(&vfpriv->context_lock);
kfree(vfpriv);
file->driver_priv = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index d9039bb7c5e3..346cef5ce251 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -150,7 +150,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
if (ret < 0)
return -EINVAL;
- shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ shmem->pages = drm_gem_shmem_get_pages_sgt(&bo->base.base);
if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
@@ -235,13 +235,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
return ret;
}
- ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
- if (ret != 0) {
- virtio_gpu_free_object(&shmem_obj->base);
- return ret;
- }
+ virtio_gpu_object_attach(vgdev, bo, ents, nents);
- virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 73854915ec34..9e663a5d9952 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1087,14 +1087,13 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
-int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj,
- struct virtio_gpu_mem_entry *ents,
- unsigned int nents)
+void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj,
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
ents, nents, NULL);
- return 0;
}
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
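
Editor's note: several virtio hunks above share one theme: dropping virtio_gpu_notify() from context creation, the out_notify label in gem_object_open, and virtio_gpu_object_attach() returning void all queue commands first and kick the host once per batch. A condensed sketch of that shape, not the literal driver code; the wrapper name and parameters are illustrative:

/* Queue whatever commands are needed, then issue a single
 * virtio_gpu_notify() kick for the whole batch instead of one kick
 * per queued command. */
static int example_object_open(struct virtio_gpu_device *vgdev,
			       struct virtio_gpu_object_array *objs,
			       u32 ctx_id, bool has_virgl_3d)
{
	if (has_virgl_3d)
		virtio_gpu_cmd_context_attach_resource(vgdev, ctx_id, objs);

	virtio_gpu_notify(vgdev);	/* one kick, queued work or not */
	return 0;
}
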
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 860de052e820..1e8b2169d834 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -21,6 +21,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
@@ -34,7 +35,7 @@
static struct vkms_device *vkms_device;
-bool enable_cursor;
+bool enable_cursor = true;
module_param_named(enable_cursor, enable_cursor, bool, 0444);
MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support");
@@ -63,7 +64,6 @@ static void vkms_release(struct drm_device *dev)
platform_device_unregister(vkms->platform);
drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
- drm_dev_fini(&vkms->drm);
destroy_workqueue(vkms->output.composer_workq);
}
@@ -158,13 +158,14 @@ static int __init vkms_init(void)
&vkms_device->platform->dev);
if (ret)
goto out_unregister;
+ drmm_add_final_kfree(&vkms_device->drm, vkms_device);
ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev,
DMA_BIT_MASK(64));
if (ret) {
DRM_ERROR("Could not initialize DMA support\n");
- goto out_fini;
+ goto out_put;
}
vkms_device->drm.irq_enabled = true;
@@ -172,25 +173,25 @@ static int __init vkms_init(void)
ret = drm_vblank_init(&vkms_device->drm, 1);
if (ret) {
DRM_ERROR("Failed to vblank\n");
- goto out_fini;
+ goto out_put;
}
ret = vkms_modeset_init(vkms_device);
if (ret)
- goto out_fini;
+ goto out_put;
ret = drm_dev_register(&vkms_device->drm, 0);
if (ret)
- goto out_fini;
+ goto out_put;
return 0;
-out_fini:
- drm_dev_fini(&vkms_device->drm);
+out_put:
+ drm_dev_put(&vkms_device->drm);
+ return ret;
out_unregister:
platform_device_unregister(vkms_device->platform);
-
out_free:
kfree(vkms_device);
return ret;
@@ -205,8 +206,6 @@ static void __exit vkms_exit(void)
drm_dev_unregister(&vkms_device->drm);
drm_dev_put(&vkms_device->drm);
-
- kfree(vkms_device);
}
module_init(vkms_init);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index eda04ffba7b1..f4036bb0b9a8 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -117,11 +117,6 @@ struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
enum drm_plane_type type, int index);
/* Gem stuff */
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size);
-
vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 2e01186fb943..a017fc59905e 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -97,10 +97,10 @@ vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
return ret;
}
-struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
- struct drm_file *file,
- u32 *handle,
- u64 size)
+static struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
+ struct drm_file *file,
+ u32 *handle,
+ u64 size)
{
struct vkms_gem_object *obj;
int ret;
@@ -113,7 +113,6 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->gem, handle);
- drm_gem_object_put_unlocked(&obj->gem);
if (ret)
return ERR_PTR(ret);
@@ -142,6 +141,8 @@ int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_obj->size;
args->pitch = pitch;
+ drm_gem_object_put(gem_obj);
+
DRM_DEBUG_DRIVER("Created object of size %lld\n", size);
return 0;
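
Editor's note: the two vkms_gem.c hunks work together. The drm_gem_object_put() moves out of vkms_gem_create() and into vkms_dumb_create(), after the last read of gem_obj->size; this appears to close a window where a concurrent GEM_CLOSE on the freshly created handle could free the object before dumb_create was done with it. The resulting shape, condensed and with hypothetical names:

static int example_dumb_create(struct drm_file *file, struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gem_obj;

	/* example_gem_create() now returns with our reference still held. */
	gem_obj = example_gem_create(dev, file, &args->handle, args->size);
	if (IS_ERR(gem_obj))
		return PTR_ERR(gem_obj);

	args->size = gem_obj->size;	/* safe: reference still held */

	drm_gem_object_put(gem_obj);	/* drop only after the last use */
	return 0;
}
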
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index fb1941a6522c..85afb77e97f0 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -3,6 +3,7 @@
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
static void vkms_connector_destroy(struct drm_connector *connector)
{
@@ -17,10 +18,6 @@ static const struct drm_connector_funcs vkms_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static const struct drm_encoder_funcs vkms_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int vkms_conn_get_modes(struct drm_connector *connector)
{
int count;
@@ -70,8 +67,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
drm_connector_helper_add(connector, &vkms_conn_helper_funcs);
- ret = drm_encoder_init(dev, encoder, &vkms_encoder_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
if (ret) {
DRM_ERROR("Failed to init encoder\n");
goto err_encoder;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index bb46ca0c458f..1629427d5734 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,7 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+#include <linux/highmem.h>
/*
* Template that implements find_first_diff() for a generic
@@ -374,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
if (unmap_src) {
- ttm_kunmap_atomic_prot(d->src_addr, d->src_prot);
+ kunmap_atomic(d->src_addr);
d->src_addr = NULL;
}
if (unmap_dst) {
- ttm_kunmap_atomic_prot(d->dst_addr, d->dst_prot);
+ kunmap_atomic(d->dst_addr);
d->dst_addr = NULL;
}
@@ -388,8 +389,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->dst_addr =
- ttm_kmap_atomic_prot(d->dst_pages[dst_page],
- d->dst_prot);
+ kmap_atomic_prot(d->dst_pages[dst_page],
+ d->dst_prot);
if (!d->dst_addr)
return -ENOMEM;
@@ -401,8 +402,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return -EINVAL;
d->src_addr =
- ttm_kmap_atomic_prot(d->src_pages[src_page],
- d->src_prot);
+ kmap_atomic_prot(d->src_pages[src_page],
+ d->src_prot);
if (!d->src_addr)
return -ENOMEM;
@@ -499,9 +500,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
}
out:
if (d.src_addr)
- ttm_kunmap_atomic_prot(d.src_addr, d.src_prot);
+ kunmap_atomic(d.src_addr);
if (d.dst_addr)
- ttm_kunmap_atomic_prot(d.dst_addr, d.dst_prot);
+ kunmap_atomic(d.dst_addr);
return ret;
}
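
Editor's note: the blit conversion swaps the TTM-private ttm_kmap_atomic_prot()/ttm_kunmap_atomic_prot() wrappers for the now-generic highmem helpers: kmap_atomic_prot() maps a page with a caller-chosen pgprot_t, and a plain kunmap_atomic() tears the mapping down. A minimal sketch of the pairing, with a hypothetical helper:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy one page through temporary atomic mappings with explicit
 * protection bits, mirroring the pattern in the blit code above. */
static void example_copy_page_prot(struct page *dst, struct page *src,
				   pgprot_t dst_prot, pgprot_t src_prot)
{
	void *d = kmap_atomic_prot(dst, dst_prot);
	void *s = kmap_atomic_prot(src, src_prot);

	memcpy(d, s, PAGE_SIZE);

	/* Unmap in reverse order of mapping. */
	kunmap_atomic(s);
	kunmap_atomic(d);
}
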
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index c2247a893ed4..470428387878 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1129,9 +1129,9 @@ static long vmw_compat_ioctl(struct file *filp, unsigned int cmd,
}
#endif
-static int vmw_master_set(struct drm_device *dev,
- struct drm_file *file_priv,
- bool from_open)
+static void vmw_master_set(struct drm_device *dev,
+ struct drm_file *file_priv,
+ bool from_open)
{
/*
* Inform a new master that the layout may have changed while
@@ -1139,8 +1139,6 @@ static int vmw_master_set(struct drm_device *dev,
*/
if (!from_open)
drm_sysfs_hotplug_event(dev);
-
- return 0;
}
static void vmw_master_drop(struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 8cdcd6e5f9e1..3596f3923ea3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -850,7 +850,7 @@ extern void vmw_bo_bo_free(struct ttm_buffer_object *bo);
extern int vmw_bo_init(struct vmw_private *dev_priv,
struct vmw_buffer_object *vmw_bo,
size_t size, struct ttm_placement *placement,
- bool interuptable,
+ bool interruptible,
void (*bo_free)(struct ttm_buffer_object *bo));
extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
struct ttm_object_file *tfile);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 178a6cd1a06f..0f8d29397157 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -515,7 +515,7 @@ bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
struct vmw_fence_manager *fman = fman_from_fence(fence);
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
- return 1;
+ return true;
vmw_fences_update(fman);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 04d66592f605..3c97654b5a43 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2138,7 +2138,6 @@ void vmw_guess_mode_timing(struct drm_display_mode *mode)
mode->vtotal = mode->vsync_end + 50;
mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
- mode->vrefresh = drm_mode_vrefresh(mode);
}
@@ -2212,7 +2211,6 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
mode = drm_mode_duplicate(dev, bmode);
if (!mode)
return 0;
- mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_probed_add(connector, mode);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7ef51fa84b01..126f93c0b0b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1651,7 +1651,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev,
struct vmw_surface_metadata *metadata;
struct ttm_base_object *base;
uint32_t backup_handle;
- int ret = -EINVAL;
+ int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 374142018171..3e660fb111b3 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -419,7 +419,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
goto fail_handle;
/* Drop reference from allocate - handle holds it now */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
return 0;
fail_handle:
@@ -427,7 +427,7 @@ fail_handle:
xen_drm_front_dbuf_to_cookie(obj));
fail_backend:
/* drop reference from allocate */
- drm_gem_object_put_unlocked(obj);
+ drm_gem_object_put(obj);
fail:
DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
return ret;
@@ -460,9 +460,6 @@ static void xen_drm_drv_release(struct drm_device *dev)
drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
-
if (front_info->cfg.be_alloc)
xenbus_switch_state(front_info->xb_dev,
XenbusStateInitialising);
@@ -561,6 +558,7 @@ fail_register:
fail_modeset:
drm_kms_helper_poll_fini(drm_dev);
drm_mode_config_cleanup(drm_dev);
+ drm_dev_put(drm_dev);
fail:
kfree(drm_info);
return ret;
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
index 1141c1ed1ed0..31014a451f8b 100644
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ b/drivers/gpu/drm/zte/zx_drm_drv.c
@@ -36,16 +36,7 @@ DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
static struct drm_driver zx_drm_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- .gem_free_object_unlocked = drm_gem_cma_free_object,
- .gem_vm_ops = &drm_gem_cma_vm_ops,
- .dumb_create = drm_gem_cma_dumb_create,
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
- .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
- .gem_prime_vmap = drm_gem_cma_prime_vmap,
- .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
- .gem_prime_mmap = drm_gem_cma_prime_mmap,
+ DRM_GEM_CMA_DRIVER_OPS,
.fops = &zx_drm_fops,
.name = "zx-vou",
.desc = "ZTE VOU Controller DRM",
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
index b98a1420dcd3..76a16d997a23 100644
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ b/drivers/gpu/drm/zte/zx_hdmi.c
@@ -20,6 +20,7 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
+#include <drm/drm_simple_kms_helper.h>
#include <sound/hdmi-codec.h>
@@ -254,10 +255,6 @@ static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
.mode_set = zx_hdmi_encoder_mode_set,
};
-static const struct drm_encoder_funcs zx_hdmi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
{
struct zx_hdmi *hdmi = to_zx_hdmi(connector);
@@ -313,8 +310,7 @@ static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
encoder->possible_crtcs = VOU_CRTC_MASK;
- drm_encoder_init(drm, encoder, &zx_hdmi_encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
index c598b7daf1f1..d8a89ba383bc 100644
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ b/drivers/gpu/drm/zte/zx_tvenc.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "zx_drm_drv.h"
#include "zx_tvenc_regs.h"
@@ -218,10 +219,6 @@ static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
.mode_set = zx_tvenc_encoder_mode_set,
};
-static const struct drm_encoder_funcs zx_tvenc_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
{
struct zx_tvenc *tvenc = to_zx_tvenc(connector);
@@ -285,8 +282,7 @@ static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
*/
encoder->possible_crtcs = BIT(1);
- drm_encoder_init(drm, encoder, &zx_tvenc_encoder_funcs,
- DRM_MODE_ENCODER_TVDAC, NULL);
+ drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
connector->interlace_allowed = true;
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
index c4fa3bbaba78..a7ed7f5ca837 100644
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ b/drivers/gpu/drm/zte/zx_vga.c
@@ -14,6 +14,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "zx_drm_drv.h"
#include "zx_vga_regs.h"
@@ -72,10 +73,6 @@ static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
.disable = zx_vga_encoder_disable,
};
-static const struct drm_encoder_funcs zx_vga_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
static int zx_vga_connector_get_modes(struct drm_connector *connector)
{
struct zx_vga *vga = to_zx_vga(connector);
@@ -154,8 +151,7 @@ static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
encoder->possible_crtcs = VOU_CRTC_MASK;
- ret = drm_encoder_init(drm, encoder, &zx_vga_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
if (ret) {
DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
return ret;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 388bcc2889aa..d24344e91922 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -192,17 +192,55 @@ static void host1x_setup_sid_table(struct host1x *host)
}
}
+static bool host1x_wants_iommu(struct host1x *host1x)
+{
+ /*
+ * If we support addressing a maximum of 32 bits of physical memory
+ * and if the host1x firewall is enabled, there's no need to enable
+ * IOMMU support. This can happen for example on Tegra20, Tegra30
+ * and Tegra114.
+ *
+ * Tegra124 and later can address up to 34 bits of physical memory and
+ * many platforms come equipped with more than 2 GiB of system memory,
+ * which requires crossing the 4 GiB boundary. But there's a catch: on
+ * SoCs before Tegra186 (i.e. Tegra124 and Tegra210), the host1x can
+ * only address up to 32 bits of memory in GATHER opcodes, which means
+ * that command buffers need to either be in the first 2 GiB of system
+ * memory (which could quickly lead to memory exhaustion), or command
+ * buffers need to be treated differently from other buffers (which is
+ * not possible with the current ABI).
+ *
+ * A third option is to use the IOMMU in these cases to make sure all
+ * buffers will be mapped into a 32-bit IOVA space that host1x can
+ * address. This allows all of the system memory to be used and works
+ * within the limitations of the host1x on these SoCs.
+ *
+ * In summary, default to enabling the IOMMU on Tegra124 and later. For any
+ * of the earlier SoCs, only use the IOMMU for additional safety when
+ * the host1x firewall is disabled.
+ */
+ if (host1x->info->dma_mask <= DMA_BIT_MASK(32)) {
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ return false;
+ }
+
+ return true;
+}
+
static struct iommu_domain *host1x_iommu_attach(struct host1x *host)
{
struct iommu_domain *domain = iommu_get_domain_for_dev(host->dev);
int err;
/*
- * If the host1x firewall is enabled, there's no need to enable IOMMU
- * support. Similarly, if host1x is already attached to an IOMMU (via
- * the DMA API), don't try to attach again.
+ * We may not always want to enable IOMMU support (for example if the
+ * host1x firewall is already enabled and we don't support addressing
+ * more than 32 bits of physical memory), so check for that first.
+ *
+ * Similarly, if host1x is already attached to an IOMMU (via the DMA
+ * API), don't try to attach again.
*/
- if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) || domain)
+ if (!host1x_wants_iommu(host) || domain)
return domain;
host->group = iommu_group_get(host->dev);
@@ -502,6 +540,19 @@ static void __exit tegra_host1x_exit(void)
}
module_exit(tegra_host1x_exit);
+/**
+ * host1x_get_dma_mask() - query the supported DMA mask for host1x
+ * @host1x: host1x instance
+ *
+ * Note that this returns the supported DMA mask for host1x, which can be
+ * different from the applicable DMA mask under certain circumstances.
+ */
+u64 host1x_get_dma_mask(struct host1x *host1x)
+{
+ return host1x->info->dma_mask;
+}
+EXPORT_SYMBOL(host1x_get_dma_mask);
+
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
MODULE_DESCRIPTION("Host1x driver for Tegra products");
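
Editor's note: the new host1x_get_dma_mask() export lets host1x clients (Tegra DRM, for instance) discover what the underlying host1x instance can address and clamp their own DMA masks to match. A hypothetical client-side use, not code from this patch:

#include <linux/dma-mapping.h>
#include <linux/host1x.h>

/* Limit a client device's DMA masks to what host1x can address. */
static int example_client_dma_init(struct host1x *host1x, struct device *dev)
{
	u64 mask = host1x_get_dma_mask(host1x);

	return dma_coerce_mask_and_coherent(dev, mask);
}
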