Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 268
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 82
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 274
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 92
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 137
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 19
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vid.h | 82
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 12
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h | 50
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c | 324
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h | 28
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h | 57
-rw-r--r--  drivers/gpu/drm/arm/malidp_crtc.c | 341
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 307
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.h | 13
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 213
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 81
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 108
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 72
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 8
-rw-r--r--  drivers/gpu/drm/ast/ast_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 5
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 15
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 8
-rw-r--r--  drivers/gpu/drm/drm_property.c | 2
-rw-r--r--  drivers/gpu/drm/etnaviv/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.h | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 65
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 102
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 50
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 51
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gvt/firmware.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 427
-rw-r--r--  drivers/gpu/drm/i915/gvt/interrupt.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 49
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 29
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.c | 242
-rw-r--r--  drivers/gpu/drm/i915/gvt/sched_policy.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 128
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 50
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 58
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 98
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 73
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c | 18
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_request.c | 36
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 26
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c | 828
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_pci.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_breadcrumbs.c | 26
-rw-r--r--  drivers/gpu/drm/i915/intel_cdclk.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 100
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 460
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 15
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 27
-rw-r--r--  drivers/gpu/drm/i915/intel_engine_cs.c | 33
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_fwif.h | 71
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_loader.c | 49
-rw-r--r--  drivers/gpu/drm/i915/intel_guc_log.c | 386
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_huc.c | 18
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 131
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c | 63
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 88
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 29
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 65
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 286
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.c | 342
-rw-r--r--  drivers/gpu/drm/i915/intel_uc.h | 84
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c | 150
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_gem_request.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_drm.c | 45
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_engine.c | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_request.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/scatterlist.c | 11
-rw-r--r--  drivers/gpu/drm/imx/Kconfig | 7
-rw-r--r--  drivers/gpu/drm/imx/Makefile | 3
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 18
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 2
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 8
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 64
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 39
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 75
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 138
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | 69
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 54
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 572
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_mipi_tx.c | 38
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 28
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 126
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 70
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 2
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_audio.c | 7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 81
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 8
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c | 30
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 466
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 192
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 21
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 66
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 123
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 53
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c | 172
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h | 47
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c | 2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h | 4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 340
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp_kms.h | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 3
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 6
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c | 39
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c | 35
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 186
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 18
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 69
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_platform.c | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | 73
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c | 31
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c | 47
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c | 13
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c | 115
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c | 59
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c | 49
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c | 88
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c | 93
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c | 90
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 23
-rw-r--r--  drivers/gpu/drm/panel/Makefile | 3
-rw-r--r--  drivers/gpu/drm/panel/panel-lvds.c | 286
-rw-r--r--  drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c | 739
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 56
-rw-r--r--  drivers/gpu/drm/panel/panel-sitronix-st7789v.c | 449
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_prime.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/rcar-du/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/rcar-du/Makefile | 6
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 94
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 4
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.c | 32
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_drv.h | 8
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 187
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 14
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 134
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h | 35
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_kms.c | 143
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 68
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c | 11
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 13
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_regs.h | 23
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 82
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vgacon.h | 23
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.h | 2
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c | 100
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 12
-rw-r--r--  drivers/gpu/drm/sun4i/Makefile | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.c | 70
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_crtc.h | 8
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 47
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.h | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_framebuffer.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.c | 32
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.h | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.c | 30
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_rgb.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 36
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.h | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tv.c | 27
-rw-r--r--  drivers/gpu/drm/tegra/gem.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 49
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_object.c | 14
-rw-r--r--  drivers/gpu/drm/udl/udl_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c | 1
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 77
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_prime.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 31
323 files changed, 11021 insertions, 5139 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 262056778f52..6a8129949333 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -32,7 +32,7 @@
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
-#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>
@@ -122,14 +122,6 @@ extern int amdgpu_param_buf_per_se;
/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2
-/* max number of VMHUB */
-#define AMDGPU_MAX_VMHUBS 2
-#define AMDGPU_MMHUB 0
-#define AMDGPU_GFXHUB 1
-
-/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
-
/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b
@@ -312,12 +304,9 @@ struct amdgpu_gart_funcs {
/* set pte flags based per asic */
uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev,
uint32_t flags);
-};
-
-/* provided by the mc block */
-struct amdgpu_mc_funcs {
/* adjust mc addr in fb for APU case */
u64 (*adjust_mc_addr)(struct amdgpu_device *adev, u64 addr);
+ uint32_t (*get_invalidate_req)(unsigned int vm_id);
};
/* provided by the ih block */
@@ -379,7 +368,10 @@ struct amdgpu_bo_list_entry {
struct amdgpu_bo_va_mapping {
struct list_head list;
- struct interval_tree_node it;
+ struct rb_node rb;
+ uint64_t start;
+ uint64_t last;
+ uint64_t __subtree_last;
uint64_t offset;
uint64_t flags;
};
@@ -579,8 +571,6 @@ struct amdgpu_vmhub {
uint32_t vm_context0_cntl;
uint32_t vm_l2_pro_fault_status;
uint32_t vm_l2_pro_fault_cntl;
- uint32_t (*get_invalidate_req)(unsigned int vm_id);
- uint32_t (*get_vm_protection_bits)(void);
};
/*
@@ -618,7 +608,6 @@ struct amdgpu_mc {
u64 private_aperture_end;
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
- const struct amdgpu_mc_funcs *mc_funcs;
};
/*
@@ -1712,6 +1701,12 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);
#define WREG32_FIELD(reg, field, val) \
WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+#define WREG32_FIELD_OFFSET(reg, offset, field, val) \
+ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
+#define WREG32_FIELD15(ip, idx, reg, field, val) \
+ WREG32(SOC15_REG_OFFSET(ip, idx, mm##reg), (RREG32(SOC15_REG_OFFSET(ip, idx, mm##reg)) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+
/*
* BIOS helpers.
*/
@@ -1887,12 +1882,14 @@ void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
+bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
+static inline bool amdgpu_has_atpx(void) { return false; }
#endif
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index f52b1bf3d3d9..ad4329922f79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -754,6 +754,35 @@ union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};
+/*
+ * Return vram width from integrated system info table, if available,
+ * or 0 if not.
+ */
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ u16 data_offset, size;
+ union igp_info *igp_info;
+ u8 frev, crev;
+
+ /* get any igp specific overrides */
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)
+ (mode_info->atom_context->bios + data_offset);
+ switch (crev) {
+ case 8:
+ case 9:
+ return igp_info->info_8.ucUMAChannelNumber * 64;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
index 4e0f488487f3..38d0fe32e5cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h
@@ -148,6 +148,8 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev);
+int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev);
+
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
struct amdgpu_atom_ss *ss,
int id, u32 clock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 0218cea6be4d..a6649874e6ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -237,7 +237,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
- const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;
+ const void __user *uptr = (const void*)(uintptr_t)args->in.bo_info_ptr;
struct drm_amdgpu_bo_list_entry *info;
struct amdgpu_bo_list *list;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 97f661372a1c..ec71b9320561 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -161,7 +161,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
}
/* get chunks */
- chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
+ chunk_array_user = (uint64_t __user *)(uintptr_t)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
ret = -EFAULT;
@@ -181,7 +181,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;
- chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
+ chunk_ptr = (void __user *)(uintptr_t)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
ret = -EFAULT;
@@ -192,7 +192,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
size = p->chunks[i].length_dw;
- cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
+ cdata = (void __user *)(uintptr_t)user_chunk.chunk_data;
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
@@ -949,7 +949,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("IB va_start+ib_bytes is invalid\n");
return -EINVAL;
}
@@ -960,7 +960,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return r;
}
- offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += chunk_ib->va_start - offset;
r = amdgpu_ib_get(adev, vm, chunk_ib->ib_bytes, ib);
@@ -1242,6 +1242,7 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
continue;
r = dma_fence_wait_timeout(fence, true, timeout);
+ dma_fence_put(fence);
if (r < 0)
return r;
@@ -1339,7 +1340,7 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
if (fences == NULL)
return -ENOMEM;
- fences_user = (void __user *)(unsigned long)(wait->in.fences);
+ fences_user = (void __user *)(uintptr_t)(wait->in.fences);
if (copy_from_user(fences, fences_user,
sizeof(struct drm_amdgpu_fence) * fence_count)) {
r = -EFAULT;
@@ -1388,8 +1389,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
continue;
list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
- if (mapping->it.start > addr ||
- addr > mapping->it.last)
+ if (mapping->start > addr ||
+ addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
@@ -1397,8 +1398,8 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
}
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
- if (mapping->it.start > addr ||
- addr > mapping->it.last)
+ if (mapping->start > addr ||
+ addr > mapping->last)
continue;
*bo = lobj->bo_va->bo;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 83dda05325b8..483660742f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1040,43 +1040,60 @@ static bool amdgpu_check_pot_argument(int arg)
return (arg & (arg - 1)) == 0;
}
-static void amdgpu_get_block_size(struct amdgpu_device *adev)
-{
- /* from AI, asic starts to support multiple level VMPT */
- if (adev->asic_type >= CHIP_VEGA10) {
- if (amdgpu_vm_block_size != 9)
- dev_warn(adev->dev,
- "Multi-VMPT limits block size to one page!\n");
- amdgpu_vm_block_size = 9;
- return;
- }
+static void amdgpu_check_block_size(struct amdgpu_device *adev)
+{
/* defines number of bits in page table versus page directory,
* a page is 4KB so we have 12 bits offset, minimum 9 bits in the
* page table and the remaining bits are in the page directory */
- if (amdgpu_vm_block_size == -1) {
-
- /* Total bits covered by PD + PTs */
- unsigned bits = ilog2(amdgpu_vm_size) + 18;
-
- /* Make sure the PD is 4K in size up to 8GB address space.
- Above that split equal between PD and PTs */
- if (amdgpu_vm_size <= 8)
- amdgpu_vm_block_size = bits - 9;
- else
- amdgpu_vm_block_size = (bits + 3) / 2;
+ if (amdgpu_vm_block_size == -1)
+ return;
- } else if (amdgpu_vm_block_size < 9) {
+ if (amdgpu_vm_block_size < 9) {
dev_warn(adev->dev, "VM page table size (%d) too small\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
if (amdgpu_vm_block_size > 24 ||
(amdgpu_vm_size * 1024) < (1ull << amdgpu_vm_block_size)) {
dev_warn(adev->dev, "VM page table size (%d) too large\n",
amdgpu_vm_block_size);
- amdgpu_vm_block_size = 9;
+ goto def_value;
}
+
+ return;
+
+def_value:
+ amdgpu_vm_block_size = -1;
+}
+
+static void amdgpu_check_vm_size(struct amdgpu_device *adev)
+{
+ if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
+ dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ if (amdgpu_vm_size < 1) {
+ dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ /*
+ * Max GPUVM size for Cayman, SI, CI VI are 40 bits.
+ */
+ if (amdgpu_vm_size > 1024) {
+ dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
+ amdgpu_vm_size);
+ goto def_value;
+ }
+
+ return;
+
+def_value:
+ amdgpu_vm_size = -1;
}
/**
@@ -1108,28 +1125,9 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev)
}
}
- if (!amdgpu_check_pot_argument(amdgpu_vm_size)) {
- dev_warn(adev->dev, "VM size (%d) must be a power of 2\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
+ amdgpu_check_vm_size(adev);
- if (amdgpu_vm_size < 1) {
- dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
-
- /*
- * Max GPUVM size for Cayman, SI and CI are 40 bits.
- */
- if (amdgpu_vm_size > 1024) {
- dev_warn(adev->dev, "VM size (%d) too large, max is 1TB\n",
- amdgpu_vm_size);
- amdgpu_vm_size = 8;
- }
-
- amdgpu_get_block_size(adev);
+ amdgpu_check_block_size(adev);
if (amdgpu_vram_page_split != -1 && (amdgpu_vram_page_split < 16 ||
!amdgpu_check_pot_argument(amdgpu_vram_page_split))) {
@@ -2249,9 +2247,10 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_resume(adev);
- if (r)
+ if (r) {
DRM_ERROR("amdgpu_resume failed (%d).\n", r);
-
+ return r;
+ }
amdgpu_fence_driver_resume(adev);
if (resume) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index ce15721cadda..96926a221bd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -614,6 +614,12 @@ amdgpu_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
if (amdgpu_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 400917fd7486..4e0f7d2d87f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -86,7 +86,7 @@ int amdgpu_runtime_pm = -1;
unsigned amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color = 0;
-int amdgpu_vm_size = 64;
+int amdgpu_vm_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop = 0;
int amdgpu_vm_debug = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index f85520d4e711..03a9c5cad222 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -717,7 +717,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
switch (args->op) {
case AMDGPU_GEM_OP_GET_GEM_CREATE_INFO: {
struct drm_amdgpu_gem_create_in info;
- void __user *out = (void __user *)(long)args->value;
+ void __user *out = (void __user *)(uintptr_t)args->value;
info.bo_size = robj->gem_base.size;
info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
@@ -729,6 +729,11 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
+ if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ break;
+ }
if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
r = -EPERM;
amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 13b487235a8b..a6b7e367a860 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -316,9 +316,10 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev,
return -EINVAL;
if (!adev->irq.client[client_id].sources) {
- adev->irq.client[client_id].sources = kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
- sizeof(struct amdgpu_irq_src),
- GFP_KERNEL);
+ adev->irq.client[client_id].sources =
+ kcalloc(AMDGPU_MAX_IRQ_SRC_ID,
+ sizeof(struct amdgpu_irq_src *),
+ GFP_KERNEL);
if (!adev->irq.client[client_id].sources)
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index dfb029ab3448..832be632478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -36,12 +36,6 @@
#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx(void);
-#else
-static inline bool amdgpu_has_atpx(void) { return false; }
-#endif
-
/**
* amdgpu_driver_unload_kms - Main unload function for KMS.
*
@@ -243,7 +237,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
- void __user *out = (void __user *)(long)info->return_pointer;
+ void __user *out = (void __user *)(uintptr_t)info->return_pointer;
uint32_t size = info->return_size;
struct drm_crtc *crtc;
uint32_t ui32 = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 7ea3cacf9f9f..38f739fb727b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -31,6 +31,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
#include <drm/drmP.h>
#include <drm/drm.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5aac350b007f..cb89fff863c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -122,20 +122,19 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
unsigned visible_pfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
- unsigned lpfn = 0;
-
- /* This forces a reallocation if the flag wasn't set before */
- if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- lpfn = adev->mc.real_vram_size >> PAGE_SHIFT;
places[c].fpfn = 0;
- places[c].lpfn = lpfn;
+ places[c].lpfn = 0;
places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_VRAM;
+
if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
places[c].lpfn = visible_pfn;
else
places[c].flags |= TTM_PL_FLAG_TOPDOWN;
+
+ if (flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
+ places[c].flags |= TTM_PL_FLAG_CONTIGUOUS;
c++;
}
@@ -651,6 +650,10 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* A shared bo cannot be migrated to VRAM */
+ if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM))
+ return -EINVAL;
+
if (bo->pin_count) {
uint32_t mem_type = bo->tbo.mem.mem_type;
@@ -928,8 +931,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
/* TODO: figure out how to map scattered VRAM to the CPU */
- if ((offset + size) <= adev->mc.visible_vram_size &&
- (abo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS))
+ if ((offset + size) <= adev->mc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -937,7 +939,6 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
return -EINVAL;
/* hurrah the memory is not visible ! */
- abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM);
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < abo->placement.num_placement; i++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 4731015f6101..ed6e5799016e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -130,7 +130,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
while (*((unsigned int *)psp->fence_buf) != index) {
msleep(1);
- };
+ }
amdgpu_bo_free_kernel(&cmd_buf_bo,
&cmd_buf_mc_addr,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index a87de18160a8..ee9d0f346d75 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -226,8 +226,8 @@ TRACE_EVENT(amdgpu_vm_bo_map,
TP_fast_assign(
__entry->bo = bo_va ? bo_va->bo : NULL;
- __entry->start = mapping->it.start;
- __entry->last = mapping->it.last;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
@@ -250,8 +250,8 @@ TRACE_EVENT(amdgpu_vm_bo_unmap,
TP_fast_assign(
__entry->bo = bo_va->bo;
- __entry->start = mapping->it.start;
- __entry->last = mapping->it.last;
+ __entry->start = mapping->start;
+ __entry->last = mapping->last;
__entry->offset = mapping->offset;
__entry->flags = mapping->flags;
),
@@ -270,8 +270,8 @@ DECLARE_EVENT_CLASS(amdgpu_vm_mapping,
),
TP_fast_assign(
- __entry->soffset = mapping->it.start;
- __entry->eoffset = mapping->it.last + 1;
+ __entry->soffset = mapping->start;
+ __entry->eoffset = mapping->last + 1;
__entry->flags = mapping->flags;
),
TP_printk("soffs=%010llx, eoffs=%010llx, flags=%08x",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 244bb9aacf86..35d53a0d9ba6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -529,40 +529,12 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_
case TTM_PL_TT:
break;
case TTM_PL_VRAM:
- if (mem->start == AMDGPU_BO_INVALID_OFFSET)
- return -EINVAL;
-
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
return -EINVAL;
mem->bus.base = adev->mc.aper_base;
mem->bus.is_iomem = true;
-#ifdef __alpha__
- /*
- * Alpha: use bus.addr to hold the ioremap() return,
- * so we can modify bus.base below.
- */
- if (mem->placement & TTM_PL_FLAG_WC)
- mem->bus.addr =
- ioremap_wc(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- else
- mem->bus.addr =
- ioremap_nocache(mem->bus.base + mem->bus.offset,
- mem->bus.size);
- if (!mem->bus.addr)
- return -ENOMEM;
-
- /*
- * Alpha: Use just the bus offset plus
- * the hose/domain memory base for bus.base.
- * It then can be used to build PTEs for VRAM
- * access, as done in ttm_bo_vm_fault().
- */
- mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
- adev->ddev->hose->dense_mem_base;
-#endif
break;
default:
return -EINVAL;
@@ -574,6 +546,18 @@ static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
{
}
+static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ struct drm_mm_node *mm = bo->mem.mm_node;
+ uint64_t size = mm->size;
+ uint64_t offset = page_offset;
+
+ page_offset = do_div(offset, size);
+ mm += offset;
+ return (bo->mem.bus.base >> PAGE_SHIFT) + mm->start + page_offset;
+}
+
/*
* TTM backend functions.
*/
@@ -1089,6 +1073,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
.io_mem_free = &amdgpu_ttm_io_mem_free,
+ .io_mem_pfn = amdgpu_ttm_io_mem_pfn,
};
int amdgpu_ttm_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 0b92dd0c1d70..2ca09f111f08 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -741,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
start = amdgpu_bo_gpu_offset(bo);
- end = (mapping->it.last + 1 - mapping->it.start);
+ end = (mapping->last + 1 - mapping->start);
end = end * AMDGPU_GPU_PAGE_SIZE + start;
- addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
start += addr;
amdgpu_set_ib_value(ctx->parser, ctx->ib_idx, ctx->data0,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0184197eb000..c853400805d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -595,13 +595,13 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
}
if ((addr + (uint64_t)size) >
- ((uint64_t)mapping->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
+ (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n",
addr, lo, hi);
return -EINVAL;
}
- addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
+ addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
addr += amdgpu_bo_gpu_offset(bo);
addr -= ((uint64_t)size) * ((uint64_t)index);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ecef35a1fe33..ba8b8ae6234f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -122,9 +122,7 @@ uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_rreg(ring, reg);
- amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
@@ -150,9 +148,7 @@ void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
mutex_lock(&adev->virt.lock_kiq);
amdgpu_ring_alloc(ring, 32);
- amdgpu_ring_emit_hdp_flush(ring);
amdgpu_ring_emit_wreg(ring, reg, v);
- amdgpu_ring_emit_hdp_invalidate(ring);
amdgpu_fence_emit(ring, &f);
amdgpu_ring_commit(ring);
mutex_unlock(&adev->virt.lock_kiq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 0235d7933efd..7ed5302b511a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -26,6 +26,7 @@
* Jerome Glisse
*/
#include <linux/dma-fence-array.h>
+#include <linux/interval_tree_generic.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -51,6 +52,15 @@
* SI supports 16.
*/
+#define START(node) ((node)->start)
+#define LAST(node) ((node)->last)
+
+INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
+ START, LAST, static, amdgpu_vm_it)
+
+#undef START
+#undef LAST
+
/* Local structure. Encapsulate some VM table update parameters to reduce
* the number of function parameters
*/
@@ -90,13 +100,14 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
if (level == 0)
/* For the root directory */
return adev->vm_manager.max_pfn >>
- (amdgpu_vm_block_size * adev->vm_manager.num_level);
+ (adev->vm_manager.block_size *
+ adev->vm_manager.num_level);
else if (level == adev->vm_manager.num_level)
/* For the page tables on the leaves */
- return AMDGPU_VM_PTE_COUNT;
+ return AMDGPU_VM_PTE_COUNT(adev);
else
/* Everything in between */
- return 1 << amdgpu_vm_block_size;
+ return 1 << adev->vm_manager.block_size;
}
/**
@@ -261,7 +272,7 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
unsigned level)
{
unsigned shift = (adev->vm_manager.num_level - level) *
- amdgpu_vm_block_size;
+ adev->vm_manager.block_size;
unsigned pt_idx, from, to;
int r;
@@ -365,11 +376,19 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr, 0);
}
-static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
- struct amdgpu_vm_id *id)
+/**
+ * amdgpu_vm_had_gpu_reset - check if reset occured since last use
+ *
+ * @adev: amdgpu_device pointer
+ * @id: VMID structure
+ *
+ * Check if GPU reset occured since last use of the VMID.
+ */
+static bool amdgpu_vm_had_gpu_reset(struct amdgpu_device *adev,
+ struct amdgpu_vm_id *id)
{
return id->current_gpu_reset_count !=
- atomic_read(&adev->gpu_reset_counter) ? true : false;
+ atomic_read(&adev->gpu_reset_counter);
}
/**
@@ -455,7 +474,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
/* Check all the prerequisites to using this VMID */
if (!id)
continue;
- if (amdgpu_vm_is_gpu_reset(adev, id))
+ if (amdgpu_vm_had_gpu_reset(adev, id))
continue;
if (atomic64_read(&id->owner) != vm->client_id)
@@ -483,7 +502,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
vm->ids[ring->idx] = id;
@@ -504,9 +522,6 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
if (r)
goto error;
- dma_fence_put(id->first);
- id->first = dma_fence_get(fence);
-
dma_fence_put(id->last_flush);
id->last_flush = NULL;
@@ -557,8 +572,8 @@ static u64 amdgpu_vm_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
u64 addr = mc_addr;
- if (adev->mc.mc_funcs && adev->mc.mc_funcs->adjust_mc_addr)
- addr = adev->mc.mc_funcs->adjust_mc_addr(adev, addr);
+ if (adev->gart.gart_funcs->adjust_mc_addr)
+ addr = adev->gart.gart_funcs->adjust_mc_addr(adev, addr);
return addr;
}
@@ -583,60 +598,62 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
id->gws_size != job->gws_size ||
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
+ bool vm_flush_needed = job->vm_needs_flush ||
+ amdgpu_vm_ring_has_compute_vm_bug(ring);
+ unsigned patch_offset = 0;
int r;
- if (job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_is_gpu_reset(adev, id) ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)) {
- unsigned patch_offset = 0;
+ if (amdgpu_vm_had_gpu_reset(adev, id)) {
+ gds_switch_needed = true;
+ vm_flush_needed = true;
+ }
- if (ring->funcs->init_cond_exec)
- patch_offset = amdgpu_ring_init_cond_exec(ring);
+ if (!vm_flush_needed && !gds_switch_needed)
+ return 0;
- if (ring->funcs->emit_pipeline_sync &&
- (job->vm_needs_flush || gds_switch_needed ||
- amdgpu_vm_ring_has_compute_vm_bug(ring)))
- amdgpu_ring_emit_pipeline_sync(ring);
+ if (ring->funcs->init_cond_exec)
+ patch_offset = amdgpu_ring_init_cond_exec(ring);
- if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
- amdgpu_vm_is_gpu_reset(adev, id))) {
- struct dma_fence *fence;
- u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ if (ring->funcs->emit_pipeline_sync)
+ amdgpu_ring_emit_pipeline_sync(ring);
- trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
- amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
+ if (ring->funcs->emit_vm_flush && vm_flush_needed) {
+ u64 pd_addr = amdgpu_vm_adjust_mc_addr(adev, job->vm_pd_addr);
+ struct dma_fence *fence;
- r = amdgpu_fence_emit(ring, &fence);
- if (r)
- return r;
+ trace_amdgpu_vm_flush(pd_addr, ring->idx, job->vm_id);
+ amdgpu_ring_emit_vm_flush(ring, job->vm_id, pd_addr);
- mutex_lock(&adev->vm_manager.lock);
- dma_fence_put(id->last_flush);
- id->last_flush = fence;
- mutex_unlock(&adev->vm_manager.lock);
- }
+ r = amdgpu_fence_emit(ring, &fence);
+ if (r)
+ return r;
- if (gds_switch_needed) {
- id->gds_base = job->gds_base;
- id->gds_size = job->gds_size;
- id->gws_base = job->gws_base;
- id->gws_size = job->gws_size;
- id->oa_base = job->oa_base;
- id->oa_size = job->oa_size;
- amdgpu_ring_emit_gds_switch(ring, job->vm_id,
- job->gds_base, job->gds_size,
- job->gws_base, job->gws_size,
- job->oa_base, job->oa_size);
- }
+ mutex_lock(&adev->vm_manager.lock);
+ dma_fence_put(id->last_flush);
+ id->last_flush = fence;
+ mutex_unlock(&adev->vm_manager.lock);
+ }
- if (ring->funcs->patch_cond_exec)
- amdgpu_ring_patch_cond_exec(ring, patch_offset);
+ if (gds_switch_needed) {
+ id->gds_base = job->gds_base;
+ id->gds_size = job->gds_size;
+ id->gws_base = job->gws_base;
+ id->gws_size = job->gws_size;
+ id->oa_base = job->oa_base;
+ id->oa_size = job->oa_size;
+ amdgpu_ring_emit_gds_switch(ring, job->vm_id, job->gds_base,
+ job->gds_size, job->gws_base,
+ job->gws_size, job->oa_base,
+ job->oa_size);
+ }
- /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
- if (ring->funcs->emit_switch_buffer) {
- amdgpu_ring_emit_switch_buffer(ring);
- amdgpu_ring_emit_switch_buffer(ring);
- }
+ if (ring->funcs->patch_cond_exec)
+ amdgpu_ring_patch_cond_exec(ring, patch_offset);
+
+ /* the double SWITCH_BUFFER here *cannot* be skipped by COND_EXEC */
+ if (ring->funcs->emit_switch_buffer) {
+ amdgpu_ring_emit_switch_buffer(ring);
+ amdgpu_ring_emit_switch_buffer(ring);
}
return 0;
}
@@ -960,7 +977,7 @@ static struct amdgpu_bo *amdgpu_vm_get_pt(struct amdgpu_pte_update_params *p,
unsigned idx, level = p->adev->vm_manager.num_level;
while (entry->entries) {
- idx = addr >> (amdgpu_vm_block_size * level--);
+ idx = addr >> (p->adev->vm_manager.block_size * level--);
idx %= amdgpu_bo_size(entry->bo) / 8;
entry = &entry->entries[idx];
}
@@ -987,7 +1004,8 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
- const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
+ struct amdgpu_device *adev = params->adev;
+ const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
uint64_t cur_pe_start, cur_nptes, cur_dst;
uint64_t addr; /* next GPU address to be updated */
@@ -1011,7 +1029,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+ nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
cur_pe_start = amdgpu_bo_gpu_offset(pt);
cur_pe_start += (addr & mask) * 8;
@@ -1039,7 +1057,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
- nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
+ nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
next_pe_start = amdgpu_bo_gpu_offset(pt);
next_pe_start += (addr & mask) * 8;
@@ -1186,7 +1204,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
* reserve space for one command every (1 << BLOCK_SIZE)
* entries or 2k dwords (whatever is smaller)
*/
- ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
+ ncmds = (nptes >> min(adev->vm_manager.block_size, 11u)) + 1;
/* padding, etc. */
ndw = 64;
@@ -1301,7 +1319,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
- uint64_t pfn, src = 0, start = mapping->it.start;
+ uint64_t pfn, src = 0, start = mapping->start;
int r;
/* normally,bo_va->flags only contians READABLE and WIRTEABLE bit go here
@@ -1353,7 +1371,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
addr += pfn << PAGE_SHIFT;
- last = min((uint64_t)mapping->it.last, start + max_entries - 1);
+ last = min((uint64_t)mapping->last, start + max_entries - 1);
r = amdgpu_vm_bo_update_mapping(adev, exclusive,
src, pages_addr, vm,
start, last, flags, addr,
@@ -1368,7 +1386,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
start = last + 1;
- } while (unlikely(start != mapping->it.last + 1));
+ } while (unlikely(start != mapping->last + 1));
return 0;
}
@@ -1518,7 +1536,7 @@ static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
if (fence)
dma_fence_wait(fence, false);
- amdgpu_vm_prt_put(cb->adev);
+ amdgpu_vm_prt_put(adev);
} else {
cb->adev = adev;
if (!fence || dma_fence_add_callback(fence, &cb->cb,
@@ -1724,9 +1742,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
uint64_t saddr, uint64_t offset,
uint64_t size, uint64_t flags)
{
- struct amdgpu_bo_va_mapping *mapping;
+ struct amdgpu_bo_va_mapping *mapping, *tmp;
struct amdgpu_vm *vm = bo_va->vm;
- struct interval_tree_node *it;
uint64_t eaddr;
/* validate the parameters */
@@ -1743,14 +1760,12 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- if (it) {
- struct amdgpu_bo_va_mapping *tmp;
- tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ if (tmp) {
/* bo and tmp overlap, invalid addr */
dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
- "0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
- tmp->it.start, tmp->it.last + 1);
+ "0x%010Lx-0x%010Lx\n", bo_va->bo, saddr, eaddr,
+ tmp->start, tmp->last + 1);
return -EINVAL;
}
@@ -1759,13 +1774,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -ENOMEM;
INIT_LIST_HEAD(&mapping->list);
- mapping->it.start = saddr;
- mapping->it.last = eaddr;
+ mapping->start = saddr;
+ mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
- interval_tree_insert(&mapping->it, &vm->va);
+ amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
@@ -1823,13 +1838,13 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
- mapping->it.start = saddr;
- mapping->it.last = eaddr;
+ mapping->start = saddr;
+ mapping->last = eaddr;
mapping->offset = offset;
mapping->flags = flags;
list_add(&mapping->list, &bo_va->invalids);
- interval_tree_insert(&mapping->it, &vm->va);
+ amdgpu_vm_it_insert(mapping, &vm->va);
if (flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
@@ -1860,7 +1875,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
saddr /= AMDGPU_GPU_PAGE_SIZE;
list_for_each_entry(mapping, &bo_va->valids, list) {
- if (mapping->it.start == saddr)
+ if (mapping->start == saddr)
break;
}
@@ -1868,7 +1883,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
valid = false;
list_for_each_entry(mapping, &bo_va->invalids, list) {
- if (mapping->it.start == saddr)
+ if (mapping->start == saddr)
break;
}
@@ -1877,7 +1892,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
}
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
if (valid)
@@ -1905,7 +1920,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size)
{
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
- struct interval_tree_node *it;
LIST_HEAD(removed);
uint64_t eaddr;
@@ -1927,43 +1941,42 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
INIT_LIST_HEAD(&after->list);
/* Now gather all removed mappings */
- it = interval_tree_iter_first(&vm->va, saddr, eaddr);
- while (it) {
- tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
- it = interval_tree_iter_next(it, saddr, eaddr);
-
+ tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
+ while (tmp) {
/* Remember mapping split at the start */
- if (tmp->it.start < saddr) {
- before->it.start = tmp->it.start;
- before->it.last = saddr - 1;
+ if (tmp->start < saddr) {
+ before->start = tmp->start;
+ before->last = saddr - 1;
before->offset = tmp->offset;
before->flags = tmp->flags;
list_add(&before->list, &tmp->list);
}
/* Remember mapping split at the end */
- if (tmp->it.last > eaddr) {
- after->it.start = eaddr + 1;
- after->it.last = tmp->it.last;
+ if (tmp->last > eaddr) {
+ after->start = eaddr + 1;
+ after->last = tmp->last;
after->offset = tmp->offset;
- after->offset += after->it.start - tmp->it.start;
+ after->offset += after->start - tmp->start;
after->flags = tmp->flags;
list_add(&after->list, &tmp->list);
}
list_del(&tmp->list);
list_add(&tmp->list, &removed);
+
+ tmp = amdgpu_vm_it_iter_next(tmp, saddr, eaddr);
}
/* And free them up */
list_for_each_entry_safe(tmp, next, &removed, list) {
- interval_tree_remove(&tmp->it, &vm->va);
+ amdgpu_vm_it_remove(tmp, &vm->va);
list_del(&tmp->list);
- if (tmp->it.start < saddr)
- tmp->it.start = saddr;
- if (tmp->it.last > eaddr)
- tmp->it.last = eaddr;
+ if (tmp->start < saddr)
+ tmp->start = saddr;
+ if (tmp->last > eaddr)
+ tmp->last = eaddr;
list_add(&tmp->list, &vm->freed);
trace_amdgpu_vm_bo_unmap(NULL, tmp);
@@ -1971,7 +1984,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping before the range */
if (!list_empty(&before->list)) {
- interval_tree_insert(&before->it, &vm->va);
+ amdgpu_vm_it_insert(before, &vm->va);
if (before->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
@@ -1980,7 +1993,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
/* Insert partial mapping after the range */
if (!list_empty(&after->list)) {
- interval_tree_insert(&after->it, &vm->va);
+ amdgpu_vm_it_insert(after, &vm->va);
if (after->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
} else {
@@ -2014,13 +2027,13 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
trace_amdgpu_vm_bo_unmap(bo_va, mapping);
list_add(&mapping->list, &vm->freed);
}
list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
amdgpu_vm_free_mapping(adev, vm, mapping,
bo_va->last_pt_update);
}
@@ -2051,6 +2064,44 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
}
}
+static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
+{
+ /* Total bits covered by PD + PTs */
+ unsigned bits = ilog2(vm_size) + 18;
+
+ /* Make sure the PD is 4K in size up to 8GB address space.
+ Above that, split equally between PD and PTs */
+ if (vm_size <= 8)
+ return (bits - 9);
+ else
+ return ((bits + 3) / 2);
+}
+
+/**
+ * amdgpu_vm_adjust_size - adjust vm size and block size
+ *
+ * @adev: amdgpu_device pointer
+ * @vm_size: the default vm size if it's set to auto
+ */
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size)
+{
+ /* adjust vm size first */
+ if (amdgpu_vm_size == -1)
+ adev->vm_manager.vm_size = vm_size;
+ else
+ adev->vm_manager.vm_size = amdgpu_vm_size;
+
+ /* block size depends on vm size */
+ if (amdgpu_vm_block_size == -1)
+ adev->vm_manager.block_size =
+ amdgpu_vm_get_block_size(adev->vm_manager.vm_size);
+ else
+ adev->vm_manager.block_size = amdgpu_vm_block_size;
+
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size, adev->vm_manager.block_size);
+}
+
/**
* amdgpu_vm_init - initialize a vm instance
*
@@ -2062,7 +2113,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
- AMDGPU_VM_PTE_COUNT * 8);
+ AMDGPU_VM_PTE_COUNT(adev) * 8);
unsigned ring_instance;
struct amdgpu_ring *ring;
struct amd_sched_rq *rq;
@@ -2162,9 +2213,9 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (!RB_EMPTY_ROOT(&vm->va)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
- rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
+ rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, rb) {
list_del(&mapping->list);
- interval_tree_remove(&mapping->it, &vm->va);
+ amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
@@ -2227,7 +2278,6 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_NUM_VM; ++i) {
struct amdgpu_vm_id *id = &adev->vm_manager.ids[i];
- dma_fence_put(adev->vm_manager.ids[i].first);
amdgpu_sync_free(&adev->vm_manager.ids[i].active);
dma_fence_put(id->flushed_updates);
dma_fence_put(id->last_flush);
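For reference, a minimal user-space sketch of the block-size heuristic added above; ilog2_u64() is a stand-in for the kernel's ilog2() and the printed cases are illustrative only. With the 64GB default this gives the 13-bit block size reported by the new DRM_INFO message.

	#include <stdio.h>
	#include <stdint.h>

	/* stand-in for the kernel's ilog2() */
	static unsigned ilog2_u64(uint64_t v)
	{
		unsigned r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	static uint32_t vm_get_block_size(uint64_t vm_size_gb)
	{
		/* total bits covered by PD + PTs: GB -> 4K pages needs 18 more bits */
		unsigned bits = ilog2_u64(vm_size_gb) + 18;

		/* keep the PD at 4K (9 bits of PTEs) up to an 8GB address space,
		 * above that split the bits roughly equally between PD and PTs */
		if (vm_size_gb <= 8)
			return bits - 9;
		return (bits + 3) / 2;
	}

	int main(void)
	{
		printf("64GB -> %u-bit block size\n", vm_get_block_size(64)); /* 6 + 18 = 24, (24 + 3) / 2 = 13 */
		printf("8GB  -> %u-bit block size\n", vm_get_block_size(8));  /* 3 + 18 = 21, 21 - 9 = 12 */
		return 0;
	}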
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index fbe17bf73a00..d9e57290dc71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -45,7 +45,7 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF
/* number of entries in page table */
-#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)
+#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)
/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768
@@ -76,6 +76,14 @@ struct amdgpu_bo_list_entry;
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2
+/* max number of VMHUB */
+#define AMDGPU_MAX_VMHUBS 2
+#define AMDGPU_GFXHUB 0
+#define AMDGPU_MMHUB 1
+
+/* hardcode that limit for now */
+#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+
struct amdgpu_vm_pt {
struct amdgpu_bo *bo;
uint64_t addr;
@@ -123,7 +131,6 @@ struct amdgpu_vm {
struct amdgpu_vm_id {
struct list_head list;
- struct dma_fence *first;
struct amdgpu_sync active;
struct dma_fence *last_flush;
atomic64_t owner;
@@ -155,6 +162,8 @@ struct amdgpu_vm_manager {
uint64_t max_pfn;
uint32_t num_level;
+ uint64_t vm_size;
+ uint32_t block_size;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
@@ -225,5 +234,6 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint64_t vm_size);
#endif
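A sketch (assumed values, not driver code) of how the now per-device block size flows into the PTE-count macro and the page-table alignment computed in amdgpu_vm_init() above; the 8 bytes per PTE and the 32K PTB alignment are taken from the hunks themselves.

	#include <stdio.h>

	#define VM_PTB_ALIGN_SIZE 32768           /* mirrors AMDGPU_VM_PTB_ALIGN_SIZE */
	#define VM_PTE_COUNT(bs)  (1u << (bs))    /* mirrors AMDGPU_VM_PTE_COUNT(adev) */

	int main(void)
	{
		unsigned block_size = 13;         /* e.g. the 64GB default from the sketch above */
		unsigned pte_count = VM_PTE_COUNT(block_size);
		unsigned pt_bytes = pte_count * 8;            /* 8 bytes per PTE */
		unsigned align = pt_bytes < VM_PTB_ALIGN_SIZE ? pt_bytes : VM_PTB_ALIGN_SIZE;

		printf("%u PTEs, %u byte page table, %u byte alignment\n",
		       pte_count, pt_bytes, align);
		return 0;
	}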
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9e577e3d3147..a4831fe0223b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -93,7 +93,6 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
const struct ttm_place *place,
struct ttm_mem_reg *mem)
{
- struct amdgpu_bo *bo = container_of(tbo, struct amdgpu_bo, tbo);
struct amdgpu_vram_mgr *mgr = man->priv;
struct drm_mm *mm = &mgr->mm;
struct drm_mm_node *nodes;
@@ -106,8 +105,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (!lpfn)
lpfn = man->size;
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS ||
- place->lpfn || amdgpu_vram_page_split == -1) {
+ if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
+ amdgpu_vram_page_split == -1) {
pages_per_node = ~0ul;
num_nodes = 1;
} else {
@@ -124,12 +123,14 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (place->flags & TTM_PL_FLAG_TOPDOWN)
mode = DRM_MM_INSERT_HIGH;
+ mem->start = 0;
pages_left = mem->num_pages;
spin_lock(&mgr->lock);
for (i = 0; i < num_nodes; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
uint32_t alignment = mem->page_alignment;
+ unsigned long start;
if (pages == pages_per_node)
alignment = pages_per_node;
@@ -141,11 +142,19 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
if (unlikely(r))
goto error;
+ /* Calculate a virtual BO start address to easily check if
+ * everything is CPU accessible.
+ */
+ start = nodes[i].start + nodes[i].size;
+ if (start > mem->num_pages)
+ start -= mem->num_pages;
+ else
+ start = 0;
+ mem->start = max(mem->start, start);
pages_left -= pages;
}
spin_unlock(&mgr->lock);
- mem->start = num_nodes == 1 ? nodes[0].start : AMDGPU_BO_INVALID_OFFSET;
mem->mm_node = nodes;
return 0;
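The max() over per-node end addresses above can be checked with a small stand-alone sketch; the node placement and sizes here are made up, only the formula matches the hunk.

	#include <stdio.h>
	#include <stdint.h>

	struct node { uint64_t start, size; };    /* minimal stand-in for drm_mm_node */

	int main(void)
	{
		struct node nodes[] = { { 100, 64 }, { 4000, 64 } };   /* in pages */
		uint64_t num_pages = 128;
		uint64_t virt_start = 0;

		for (unsigned i = 0; i < 2; i++) {
			uint64_t start = nodes[i].start + nodes[i].size;

			/* same clamp as the hunk: end of node minus total BO size */
			start = start > num_pages ? start - num_pages : 0;
			if (start > virt_start)
				virt_start = start;
		}

		/* 4064 - 128 = 3936: the BO "starts" as high as its highest piece
		 * demands, so checking virt_start against the CPU-visible limit
		 * covers every piece */
		printf("virtual BO start: %llu pages\n", (unsigned long long)virt_start);
		return 0;
	}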
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index daf003dd2351..ba98d35340a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1090,23 +1090,10 @@ static u32 dce_v10_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1214,14 +1201,14 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1236,7 +1223,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1275,7 +1262,7 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
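The same pixel_period-to-active_time conversion is repeated below for dce_v11_0, dce_v6_0 and dce_v8_0. As a worked example (mode values assumed, not from the patch): mode->clock is in kHz, so 1000000 * pixels / clock is a time in nanoseconds, and doing the division last avoids the truncation of the old per-pixel period (1000000 / 148500 = 6 ns instead of ~6.73 ns).

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t clock = 148500;                  /* kHz, e.g. a 1080p60 mode */
		uint32_t hdisplay = 1920, htotal = 2200;

		uint32_t active_time = 1000000UL * hdisplay / clock;   /* ~12929 ns */
		uint32_t line_time = 1000000UL * htotal / clock;       /* ~14814 ns */

		if (line_time > 65535)
			line_time = 65535;

		/* old code: hdisplay * (1000000 / clock) = 1920 * 6 = 11520 ns */
		printf("active %u ns, blank %u ns\n", active_time, line_time - active_time);
		return 0;
	}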
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 3a7296724457..e59bc42df18c 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1059,23 +1059,10 @@ static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1183,14 +1170,14 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1205,7 +1192,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1244,7 +1231,7 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index 8ccada5d6f39..307269bda4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -861,23 +861,10 @@ static u32 dce_v6_0_latency_watermark(struct dce6_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -986,7 +973,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce6_wm_params wm_low, wm_high;
u32 dram_channels;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
@@ -996,8 +983,8 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
@@ -1016,7 +1003,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1043,7 +1030,7 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 6943f2641c90..6df7a28e8aac 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -974,23 +974,10 @@ static u32 dce_v8_0_latency_watermark(struct dce8_wm_params *wm)
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1098,14 +1085,14 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1120,7 +1107,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1159,7 +1146,7 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e0fa0d30e162..dad8a4cd1b37 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4565,6 +4565,7 @@ static void gfx_v8_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
for (i = 0; i < adev->gfx.num_compute_rings; i++)
adev->gfx.compute_ring[i].ready = false;
+ adev->gfx.kiq.ring.ready = false;
}
udelay(50);
}
@@ -4721,14 +4722,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_control = tmp;
/* enable doorbell? */
- tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
-
- if (ring->use_doorbell)
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 1);
- else
- tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
- DOORBELL_EN, 0);
+ tmp = REG_SET_FIELD(RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL),
+ CP_HQD_PQ_DOORBELL_CONTROL,
+ DOORBELL_EN,
+ ring->use_doorbell ? 1 : 0);
mqd->cp_hqd_pq_doorbell_control = tmp;
@@ -4816,13 +4813,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct vi_mqd *mqd = ring->mqd_ptr;
- uint32_t tmp;
int j;
/* disable wptr polling */
- tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);
+ WREG32_FIELD(CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo);
WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi);
@@ -4834,10 +4828,10 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control);
/* disable the queue if it's active */
- if (RREG32(mmCP_HQD_ACTIVE) & 1) {
+ if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
for (j = 0; j < adev->usec_timeout; j++) {
- if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
+ if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
@@ -4894,11 +4888,8 @@ static int gfx_v8_0_kiq_init_register(struct amdgpu_ring *ring)
/* activate the queue */
WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active);
- if (ring->use_doorbell) {
- tmp = RREG32(mmCP_PQ_STATUS);
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(mmCP_PQ_STATUS, tmp);
- }
+ if (ring->use_doorbell)
+ WREG32_FIELD(CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
@@ -5471,19 +5462,18 @@ static void gfx_v8_0_inactive_hqd(struct amdgpu_device *adev,
{
int i;
+ mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
if (RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK) {
- u32 tmp;
- tmp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
- tmp = REG_SET_FIELD(tmp, CP_HQD_DEQUEUE_REQUEST,
- DEQUEUE_REQ, 2);
- WREG32(mmCP_HQD_DEQUEUE_REQUEST, tmp);
+ WREG32_FIELD(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQ, 2);
for (i = 0; i < adev->usec_timeout; i++) {
if (!(RREG32(mmCP_HQD_ACTIVE) & CP_HQD_ACTIVE__ACTIVE_MASK))
break;
udelay(1);
}
}
+ vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_pre_soft_reset(void *handle)
@@ -5589,11 +5579,13 @@ static int gfx_v8_0_soft_reset(void *handle)
static void gfx_v8_0_init_hqd(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
+ mutex_lock(&adev->srbm_mutex);
vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
WREG32(mmCP_HQD_PQ_RPTR, 0);
WREG32(mmCP_HQD_PQ_WPTR, 0);
vi_srbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v8_0_post_soft_reset(void *handle)
@@ -6986,40 +6978,24 @@ static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
unsigned int type,
enum amdgpu_interrupt_state state)
{
- uint32_t tmp, target;
struct amdgpu_ring *ring = &(adev->gfx.kiq.ring);
BUG_ON(ring->funcs->type != AMDGPU_RING_TYPE_KIQ);
- if (ring->me == 1)
- target = mmCP_ME1_PIPE0_INT_CNTL;
- else
- target = mmCP_ME2_PIPE0_INT_CNTL;
- target += ring->pipe;
-
switch (type) {
case AMDGPU_CP_KIQ_IRQ_DRIVER0:
- if (state == AMDGPU_IRQ_STATE_DISABLE) {
- tmp = RREG32(mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 0);
- WREG32(target, tmp);
- } else {
- tmp = RREG32(mmCPC_INT_CNTL);
- tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(mmCPC_INT_CNTL, tmp);
-
- tmp = RREG32(target);
- tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
- GENERIC2_INT_ENABLE, 1);
- WREG32(target, tmp);
- }
+ WREG32_FIELD(CPC_INT_CNTL, GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+ if (ring->me == 1)
+ WREG32_FIELD_OFFSET(CP_ME1_PIPE0_INT_CNTL,
+ ring->pipe,
+ GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
+ else
+ WREG32_FIELD_OFFSET(CP_ME2_PIPE0_INT_CNTL,
+ ring->pipe,
+ GENERIC2_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_DISABLE ? 0 : 1);
break;
default:
BUG(); /* kiq only support GENERIC2_INT now */
@@ -7159,8 +7135,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = {
.emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */
.emit_ib = gfx_v8_0_ring_emit_ib_compute,
.emit_fence = gfx_v8_0_ring_emit_fence_kiq,
- .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v8_0_ring_test_ring,
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
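The hunks above replace open-coded RREG32/REG_SET_FIELD/WREG32 sequences with the WREG32_FIELD()/WREG32_FIELD_OFFSET() helpers. A rough, self-contained sketch of the idea follows; the real macros are defined in the amdgpu headers and derive mask and shift from the register definitions, and the bit position used below is an assumption.

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t regs[16];                                   /* stand-in register file */
	static uint32_t rreg32(unsigned reg) { return regs[reg]; }
	static void wreg32(unsigned reg, uint32_t v) { regs[reg] = v; }

	/* one read-modify-write instead of three open-coded statements */
	static void wreg32_field(unsigned reg, uint32_t mask, unsigned shift, uint32_t val)
	{
		wreg32(reg, (rreg32(reg) & ~mask) | ((val << shift) & mask));
	}

	int main(void)
	{
		enum { CP_PQ_WPTR_POLL_CNTL = 3 };

		regs[CP_PQ_WPTR_POLL_CNTL] = 0x80000001;               /* EN assumed to be bit 31 */
		wreg32_field(CP_PQ_WPTR_POLL_CNTL, 1u << 31, 31, 0);   /* "disable wptr polling" */
		printf("0x%08x\n", regs[CP_PQ_WPTR_POLL_CNTL]);        /* -> 0x00000001 */
		return 0;
	}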
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 669bb98fc45d..a447b70841c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1288,9 +1288,7 @@ static void gfx_v9_0_gpu_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL));
- tmp = REG_SET_FIELD(tmp, GRBM_CNTL, READ_TIMEOUT, 0xff);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v9_0_tiling_mode_table_init(adev);
@@ -1395,13 +1393,9 @@ void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
static void gfx_v9_0_rlc_reset(struct amdgpu_device *adev)
{
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET));
-
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
udelay(50);
- tmp = REG_SET_FIELD(tmp, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_SOFT_RESET), tmp);
+ WREG32_FIELD15(GC, 0, GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
udelay(50);
}
@@ -1410,10 +1404,8 @@ static void gfx_v9_0_rlc_start(struct amdgpu_device *adev)
#ifdef AMDGPU_RLC_DEBUG_RETRY
u32 rlc_ucode_ver;
#endif
- u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL));
- tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 1);
/* carrizo do enable cp interrupt after cp inited */
if (!(adev->flags & AMD_IS_APU))
@@ -1497,14 +1489,10 @@ static void gfx_v9_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
int i;
u32 tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL));
- if (enable) {
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 0);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 0);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 0);
- } else {
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, 1);
- tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, ME_HALT, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
+ tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
+ if (!enable) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].ready = false;
}
@@ -2020,13 +2008,10 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
struct v9_mqd *mqd = ring->mqd_ptr;
- uint32_t tmp;
int j;
/* disable wptr polling */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_BASE_ADDR),
mqd->cp_hqd_eop_base_addr_lo);
@@ -2118,11 +2103,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE),
mqd->cp_hqd_active);
- if (ring->use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
- }
+ if (ring->use_doorbell)
+ WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
@@ -2366,177 +2348,6 @@ static int gfx_v9_0_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static void gfx_v9_0_print_status(void *handle)
-{
- int i;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- dev_info(adev->dev, "GFX 9.x registers\n");
- dev_info(adev->dev, " GRBM_STATUS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS)));
- dev_info(adev->dev, " GRBM_STATUS2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS2)));
- dev_info(adev->dev, " GRBM_STATUS_SE0=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE0)));
- dev_info(adev->dev, " GRBM_STATUS_SE1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE1)));
- dev_info(adev->dev, " GRBM_STATUS_SE2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE2)));
- dev_info(adev->dev, " GRBM_STATUS_SE3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_STATUS_SE3)));
- dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STAT)));
- dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT1)));
- dev_info(adev->dev, " CP_STALLED_STAT2 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT2)));
- dev_info(adev->dev, " CP_STALLED_STAT3 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_STALLED_STAT3)));
- dev_info(adev->dev, " CP_CPF_BUSY_STAT = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_BUSY_STAT)));
- dev_info(adev->dev, " CP_CPF_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STALLED_STAT1)));
- dev_info(adev->dev, " CP_CPF_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPF_STATUS)));
- dev_info(adev->dev, " CP_CPC_BUSY_STAT = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_BUSY_STAT)));
- dev_info(adev->dev, " CP_CPC_STALLED_STAT1 = 0x%08x\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STALLED_STAT1)));
- dev_info(adev->dev, " CP_CPC_STATUS = 0x%08x\n", RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_CPC_STATUS)));
-
- for (i = 0; i < 32; i++) {
- dev_info(adev->dev, " GB_TILE_MODE%d=0x%08X\n",
- i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_TILE_MODE0 ) + i*4));
- }
- for (i = 0; i < 16; i++) {
- dev_info(adev->dev, " GB_MACROTILE_MODE%d=0x%08X\n",
- i, RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_MACROTILE_MODE0) + i*4));
- }
- for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
- dev_info(adev->dev, " se: %d\n", i);
- gfx_v9_0_select_se_sh(adev, i, 0xffffffff, 0xffffffff);
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG)));
- dev_info(adev->dev, " PA_SC_RASTER_CONFIG_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_RASTER_CONFIG_1)));
- }
- gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-
- dev_info(adev->dev, " GB_ADDR_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)));
-
- dev_info(adev->dev, " CP_MEQ_THRESHOLDS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEQ_THRESHOLDS)));
- dev_info(adev->dev, " SX_DEBUG_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSX_DEBUG_1)));
- dev_info(adev->dev, " TA_CNTL_AUX=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmTA_CNTL_AUX)));
- dev_info(adev->dev, " SPI_CONFIG_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL)));
- dev_info(adev->dev, " SQ_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CONFIG)));
- dev_info(adev->dev, " DB_DEBUG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG)));
- dev_info(adev->dev, " DB_DEBUG2=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)));
- dev_info(adev->dev, " DB_DEBUG3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG3)));
- dev_info(adev->dev, " CB_HW_CONTROL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCB_HW_CONTROL)));
- dev_info(adev->dev, " SPI_CONFIG_CNTL_1=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CONFIG_CNTL_1)));
- dev_info(adev->dev, " PA_SC_FIFO_SIZE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FIFO_SIZE)));
- dev_info(adev->dev, " VGT_NUM_INSTANCES=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_NUM_INSTANCES)));
- dev_info(adev->dev, " CP_PERFMON_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PERFMON_CNTL)));
- dev_info(adev->dev, " PA_SC_FORCE_EOV_MAX_CNTS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_FORCE_EOV_MAX_CNTS)));
- dev_info(adev->dev, " VGT_CACHE_INVALIDATION=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_CACHE_INVALIDATION)));
- dev_info(adev->dev, " VGT_GS_VERTEX_REUSE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmVGT_GS_VERTEX_REUSE)));
- dev_info(adev->dev, " PA_SC_LINE_STIPPLE_STATE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_LINE_STIPPLE_STATE)));
- dev_info(adev->dev, " PA_CL_ENHANCE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_CL_ENHANCE)));
- dev_info(adev->dev, " PA_SC_ENHANCE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE)));
-
- dev_info(adev->dev, " CP_ME_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)));
- dev_info(adev->dev, " CP_MAX_CONTEXT=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MAX_CONTEXT)));
- dev_info(adev->dev, " CP_DEVICE_ID=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_DEVICE_ID)));
-
- dev_info(adev->dev, " CP_SEM_WAIT_TIMER=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_SEM_WAIT_TIMER)));
-
- dev_info(adev->dev, " CP_RB_WPTR_DELAY=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_WPTR_DELAY)));
- dev_info(adev->dev, " CP_RB_VMID=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB_VMID)));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
- dev_info(adev->dev, " CP_RB0_WPTR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_WPTR)));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR)));
- dev_info(adev->dev, " CP_RB0_RPTR_ADDR_HI=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_RPTR_ADDR_HI)));
- dev_info(adev->dev, " CP_RB0_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_CNTL)));
- dev_info(adev->dev, " CP_RB0_BASE=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE)));
- dev_info(adev->dev, " CP_RB0_BASE_HI=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_RB0_BASE_HI)));
- dev_info(adev->dev, " CP_MEC_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_MEC_CNTL)));
-
- dev_info(adev->dev, " SCRATCH_ADDR=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_ADDR)));
- dev_info(adev->dev, " SCRATCH_UMSK=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSCRATCH_UMSK)));
-
- dev_info(adev->dev, " CP_INT_CNTL_RING0=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0)));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
- dev_info(adev->dev, " RLC_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CNTL)));
- dev_info(adev->dev, " RLC_CGCG_CGLS_CTRL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)));
- dev_info(adev->dev, " RLC_LB_CNTR_INIT=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_INIT)));
- dev_info(adev->dev, " RLC_LB_CNTR_MAX=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTR_MAX)));
- dev_info(adev->dev, " RLC_LB_INIT_CU_MASK=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_INIT_CU_MASK)));
- dev_info(adev->dev, " RLC_LB_PARAMS=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_PARAMS)));
- dev_info(adev->dev, " RLC_LB_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_LB_CNTL)));
- dev_info(adev->dev, " RLC_UCODE_CNTL=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_UCODE_CNTL)));
-
- dev_info(adev->dev, " RLC_GPM_GENERAL_6=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_6)));
- dev_info(adev->dev, " RLC_GPM_GENERAL_12=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_GENERAL_12)));
- dev_info(adev->dev, " RLC_GPM_TIMER_INT_3=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_GPM_TIMER_INT_3)));
- mutex_lock(&adev->srbm_mutex);
- for (i = 0; i < 16; i++) {
- soc15_grbm_select(adev, 0, 0, 0, i);
- dev_info(adev->dev, " VM %d:\n", i);
- dev_info(adev->dev, " SH_MEM_CONFIG=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)));
- dev_info(adev->dev, " SH_MEM_BASES=0x%08X\n",
- RREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES)));
- }
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
static int gfx_v9_0_soft_reset(void *handle)
{
u32 grbm_soft_reset = 0;
@@ -2569,8 +2380,7 @@ static int gfx_v9_0_soft_reset(void *handle)
GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
- if (grbm_soft_reset ) {
- gfx_v9_0_print_status((void *)adev);
+ if (grbm_soft_reset) {
/* stop the rlc */
gfx_v9_0_rlc_stop(adev);
@@ -2596,7 +2406,6 @@ static int gfx_v9_0_soft_reset(void *handle)
/* Wait a little for things to settle down */
udelay(50);
- gfx_v9_0_print_status((void *)adev);
}
return 0;
}
@@ -3148,6 +2957,7 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -3157,7 +2967,6 @@ static void gfx_v9_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
gfx_v9_0_write_data_to_reg(ring, usepfp, true,
hub->ctx0_ptb_addr_lo32
@@ -3376,21 +3185,12 @@ static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
static void gfx_v9_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl =
- REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- TIME_STAMP_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ TIME_STAMP_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
@@ -3446,20 +3246,12 @@ static int gfx_v9_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ PRIV_REG_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
break;
default:
break;
@@ -3473,21 +3265,12 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- u32 cp_int_cntl;
-
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
case AMDGPU_IRQ_STATE_ENABLE:
- cp_int_cntl = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0));
- cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_INSTR_INT_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_INT_CNTL_RING0), cp_int_cntl);
- break;
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ PRIV_INSTR_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
default:
break;
}
@@ -3759,8 +3542,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
.emit_ib_size = 4, /* gfx_v9_0_ring_emit_ib_compute */
.emit_ib = gfx_v9_0_ring_emit_ib_compute,
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,
- .emit_hdp_flush = gfx_v9_0_ring_emit_hdp_flush,
- .emit_hdp_invalidate = gfx_v9_0_ring_emit_hdp_invalidate,
.test_ring = gfx_v9_0_ring_test_ring,
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
@@ -3975,9 +3756,7 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
ring->pipe,
ring->queue, 0);
/* disable wptr polling */
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL));
- tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL), tmp);
+ WREG32_FIELD15(GC, 0, CP_PQ_WPTR_POLL_CNTL, EN, 0);
/* write the EOP addr */
BUG_ON(ring->me != 1 || ring->pipe != 0); /* can't handle other cases eop address */
@@ -4121,11 +3900,8 @@ static int gfx_v9_0_init_queue(struct amdgpu_ring *ring)
amdgpu_bo_kunmap(ring->mqd_obj);
amdgpu_bo_unreserve(ring->mqd_obj);
- if (use_doorbell) {
- tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS));
- tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
- WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_STATUS), tmp);
- }
+ if (use_doorbell)
+ WREG32_FIELD15(GC, 0, CP_PQ_STATUS, DOORBELL_ENABLE, 1);
return 0;
}
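Several of the gfx_v9_0 interrupt handlers above collapse the ENABLE and DISABLE cases into a single WREG32_FIELD15() write whose value is derived from the state. A small sketch of that pattern, with an assumed register layout and a made-up bit position:

	#include <stdio.h>

	enum irq_state { IRQ_STATE_DISABLE, IRQ_STATE_ENABLE };

	static unsigned int cp_int_cntl_ring0;          /* stand-in for the SOC15 register */

	static void set_time_stamp_int(enum irq_state state)
	{
		switch (state) {
		case IRQ_STATE_DISABLE:
		case IRQ_STATE_ENABLE:
			/* both states fall through to the same field update */
			cp_int_cntl_ring0 = (cp_int_cntl_ring0 & ~(1u << 26)) |
					    ((state == IRQ_STATE_ENABLE ? 1u : 0u) << 26);
			break;
		default:
			break;
		}
	}

	int main(void)
	{
		set_time_stamp_int(IRQ_STATE_ENABLE);
		printf("0x%08x\n", cp_int_cntl_ring0);      /* 0x04000000 */
		set_time_stamp_int(IRQ_STATE_DISABLE);
		printf("0x%08x\n", cp_int_cntl_ring0);      /* 0x00000000 */
		return 0;
	}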
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index 30ef3126c8a9..005075ff00f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -222,7 +222,7 @@ int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@@ -299,36 +299,6 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
WREG32(SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
-static uint32_t gfxhub_v1_0_get_invalidate_req(unsigned int vm_id)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vm_id*/
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
-}
-
-static uint32_t gfxhub_v1_0_get_vm_protection_bits(void)
-{
- return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
-}
-
static int gfxhub_v1_0_early_init(void *handle)
{
return 0;
@@ -361,9 +331,6 @@ static int gfxhub_v1_0_sw_init(void *handle)
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
- hub->get_invalidate_req = gfxhub_v1_0_get_invalidate_req;
- hub->get_vm_protection_bits = gfxhub_v1_0_get_vm_protection_bits;
-
return 0;
}
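With get_invalidate_req() and get_vm_protection_bits() removed here (and from mmhub_v1_0 below), the hub descriptor is left holding only register offsets shared by both hubs. Purely as an illustration, the fields actually touched in this series look roughly like this; the real struct amdgpu_vmhub lives in the amdgpu headers and has more members:

	struct vmhub_sketch {
		unsigned int ctx0_ptb_addr_lo32;        /* per-context page-table base */
		unsigned int vm_context0_cntl;          /* per-context control (fault enables) */
		unsigned int vm_inv_eng0_req;           /* invalidation engine request */
		unsigned int vm_l2_pro_fault_status;    /* protection-fault status */
		unsigned int vm_l2_pro_fault_cntl;      /* protection-fault clear */
	};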
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index d9586601a437..631aef38126d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -543,7 +543,8 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
- ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+ ((adev->vm_manager.block_size - 9)
+ << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
@@ -848,7 +849,8 @@ static int gmc_v6_0_sw_init(void *handle)
if (r)
return r;
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
adev->mc.mc_mask = 0xffffffffffULL;
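The same pattern is repeated below for gmc_v7_0 and gmc_v8_0: the VM size now comes from amdgpu_vm_adjust_size() and max_pfn is derived from it. The shift is just a GB-to-4K-pages conversion (2^30 / 2^12 = 2^18); a quick sketch with the 64GB default:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t vm_size_gb = 64;               /* default passed to amdgpu_vm_adjust_size() */
		uint64_t max_pfn = vm_size_gb << 18;    /* GB -> 4K pages */

		printf("%llu pages = %llu GB\n",
		       (unsigned long long)max_pfn,
		       (unsigned long long)(max_pfn * 4096 >> 30));   /* 16777216 pages = 64 GB */
		return 0;
	}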
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0c0a6015cca5..92abe12d92bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -37,6 +37,8 @@
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
+#include "amdgpu_atombios.h"
+
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);
@@ -325,48 +327,51 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
- int chansize, numchan;
-
- /* Get VRAM informations */
- tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
- chansize = 64;
- } else {
- chansize = 32;
- }
- tmp = RREG32(mmMC_SHARED_CHMAP);
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 4;
- break;
- case 3:
- numchan = 8;
- break;
- case 4:
- numchan = 3;
- break;
- case 5:
- numchan = 6;
- break;
- case 6:
- numchan = 10;
- break;
- case 7:
- numchan = 12;
- break;
- case 8:
- numchan = 16;
- break;
+ adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@@ -639,7 +644,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v7_0_set_fault_enable_default(adev, false);
@@ -998,7 +1003,8 @@ static int gmc_v7_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
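A sketch of the new probing order in gmc_v7_0_mc_init() (and gmc_v8_0_mc_init() below): ask the vbios via amdgpu_atombios_get_vram_width() first and only fall back to decoding MC_ARB_RAMCFG/MC_SHARED_CHMAP when that returns 0. The stub values here are assumptions for illustration:

	#include <stdio.h>

	static int vram_width_from_vbios(void) { return 0; }     /* pretend the atombios query failed */
	static int chansize_from_ramcfg(void)  { return 64; }    /* bits per channel (CHANSIZE set) */

	static int numchan_from_chmap(void)
	{
		/* same NOOFCHAN decode table as the hunk above */
		static const int chans[] = { 1, 2, 4, 8, 3, 6, 10, 12, 16 };
		int noofchan = 3;                                     /* register field value, assumed */

		return chans[noofchan];
	}

	int main(void)
	{
		int width = vram_width_from_vbios();

		if (!width)
			width = numchan_from_chmap() * chansize_from_ramcfg();
		printf("vram width: %d bits\n", width);               /* 8 * 64 = 512 */
		return 0;
	}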
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index d19d1c5e2847..f2ccefc66fd4 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -38,6 +38,8 @@
#include "vid.h"
#include "vi.h"
+#include "amdgpu_atombios.h"
+
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -487,48 +489,51 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
*/
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
- u32 tmp;
- int chansize, numchan;
-
- /* Get VRAM informations */
- tmp = RREG32(mmMC_ARB_RAMCFG);
- if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
- chansize = 64;
- } else {
- chansize = 32;
- }
- tmp = RREG32(mmMC_SHARED_CHMAP);
- switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
- case 0:
- default:
- numchan = 1;
- break;
- case 1:
- numchan = 2;
- break;
- case 2:
- numchan = 4;
- break;
- case 3:
- numchan = 8;
- break;
- case 4:
- numchan = 3;
- break;
- case 5:
- numchan = 6;
- break;
- case 6:
- numchan = 10;
- break;
- case 7:
- numchan = 12;
- break;
- case 8:
- numchan = 16;
- break;
+ adev->mc.vram_width = amdgpu_atombios_get_vram_width(adev);
+ if (!adev->mc.vram_width) {
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ tmp = RREG32(mmMC_ARB_RAMCFG);
+ if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(mmMC_SHARED_CHMAP);
+ switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ case 4:
+ numchan = 3;
+ break;
+ case 5:
+ numchan = 6;
+ break;
+ case 6:
+ numchan = 10;
+ break;
+ case 7:
+ numchan = 12;
+ break;
+ case 8:
+ numchan = 16;
+ break;
+ }
+ adev->mc.vram_width = numchan * chansize;
}
- adev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
@@ -848,7 +853,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(mmVM_CONTEXT1_CNTL, tmp);
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v8_0_set_fault_enable_default(adev, false);
@@ -1082,7 +1087,8 @@ static int gmc_v8_0_sw_init(void *handle)
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
- adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
+ amdgpu_vm_adjust_size(adev, 64);
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index df69aae99df4..3b045e0b114e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -75,11 +75,18 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_vmhub *hub;
u32 tmp, reg, bits, i;
+ bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
+ VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
+
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i< 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -89,7 +96,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -100,7 +106,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
case AMDGPU_IRQ_STATE_ENABLE:
/* MM HUB */
hub = &adev->vmhub[AMDGPU_MMHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i< 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -110,7 +115,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
/* GFX HUB */
hub = &adev->vmhub[AMDGPU_GFXHUB];
- bits = hub->get_vm_protection_bits();
for (i = 0; i < 16; i++) {
reg = hub->vm_context0_cntl + i;
tmp = RREG32(reg);
@@ -129,8 +133,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct amdgpu_vmhub *gfxhub = &adev->vmhub[AMDGPU_GFXHUB];
- struct amdgpu_vmhub *mmhub = &adev->vmhub[AMDGPU_MMHUB];
+ struct amdgpu_vmhub *hub = &adev->vmhub[entry->vm_id_src];
uint32_t status = 0;
u64 addr;
@@ -138,13 +141,8 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
if (!amdgpu_sriov_vf(adev)) {
- if (entry->vm_id_src) {
- status = RREG32(mmhub->vm_l2_pro_fault_status);
- WREG32_P(mmhub->vm_l2_pro_fault_cntl, 1, ~1);
- } else {
- status = RREG32(gfxhub->vm_l2_pro_fault_status);
- WREG32_P(gfxhub->vm_l2_pro_fault_cntl, 1, ~1);
- }
+ status = RREG32(hub->vm_l2_pro_fault_status);
+ WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
}
if (printk_ratelimit()) {
@@ -175,6 +173,25 @@ static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->mc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
}
+static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vm_id)
+{
+ u32 req = 0;
+
+ /* invalidate using legacy mode on vm_id*/
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ PER_VMID_INVALIDATE_REQ, 1 << vm_id);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
+ req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
+ CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
+
+ return req;
+}
+
/*
* GART
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -204,7 +221,7 @@ static void gmc_v9_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &adev->vmhub[i];
- u32 tmp = hub->get_invalidate_req(vmid);
+ u32 tmp = gmc_v9_0_get_invalidate_req(vmid);
WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
@@ -337,30 +354,23 @@ static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
return pte_flag;
}
-static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
- .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
- .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
- .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags
-};
-
-static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
-{
- if (adev->gart.gart_funcs == NULL)
- adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
-}
-
static u64 gmc_v9_0_adjust_mc_addr(struct amdgpu_device *adev, u64 mc_addr)
{
return adev->vm_manager.vram_base_offset + mc_addr - adev->mc.vram_start;
}
-static const struct amdgpu_mc_funcs gmc_v9_0_mc_funcs = {
+static const struct amdgpu_gart_funcs gmc_v9_0_gart_funcs = {
+ .flush_gpu_tlb = gmc_v9_0_gart_flush_gpu_tlb,
+ .set_pte_pde = gmc_v9_0_gart_set_pte_pde,
+ .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.adjust_mc_addr = gmc_v9_0_adjust_mc_addr,
+ .get_invalidate_req = gmc_v9_0_get_invalidate_req,
};
-static void gmc_v9_0_set_mc_funcs(struct amdgpu_device *adev)
+static void gmc_v9_0_set_gart_funcs(struct amdgpu_device *adev)
{
- adev->mc.mc_funcs = &gmc_v9_0_mc_funcs;
+ if (adev->gart.gart_funcs == NULL)
+ adev->gart.gart_funcs = &gmc_v9_0_gart_funcs;
}
static int gmc_v9_0_early_init(void *handle)
@@ -368,7 +378,6 @@ static int gmc_v9_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
gmc_v9_0_set_gart_funcs(adev);
- gmc_v9_0_set_mc_funcs(adev);
gmc_v9_0_set_irq_funcs(adev);
return 0;
@@ -511,7 +520,12 @@ static int gmc_v9_0_vm_init(struct amdgpu_device *adev)
* amdkfd will use VMIDs 8-15
*/
adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
- adev->vm_manager.num_level = 3;
+
+ /* TODO: fix num_level for APU when updating vm size and block size */
+ if (adev->flags & AMD_IS_APU)
+ adev->vm_manager.num_level = 1;
+ else
+ adev->vm_manager.num_level = 3;
amdgpu_vm_manager_init(adev);
/* base offset of vram pages */
@@ -543,9 +557,20 @@ static int gmc_v9_0_sw_init(void *handle)
if (adev->flags & AMD_IS_APU) {
adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
+ amdgpu_vm_adjust_size(adev, 64);
} else {
/* XXX Don't know how to get VRAM type yet. */
adev->mc.vram_type = AMDGPU_VRAM_TYPE_HBM;
+ /*
+ * To fulfill 4-level page table support,
+ * vm size is 256TB (48 bits), the maximum size of Vega10,
+ * block size is 512 (9 bits)
+ */
+ adev->vm_manager.vm_size = 1U << 18;
+ adev->vm_manager.block_size = 9;
+ DRM_INFO("vm size is %llu GB, block size is %u-bit\n",
+ adev->vm_manager.vm_size,
+ adev->vm_manager.block_size);
}
/* This interrupt is VMC page fault.*/
@@ -557,14 +582,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- /* Because of four level VMPTs, vm size is at least 512GB.
- * The maximum size is 256TB (48bit).
- */
- if (amdgpu_vm_size < 512) {
- DRM_WARN("VM size is at least 512GB!\n");
- amdgpu_vm_size = 512;
- }
- adev->vm_manager.max_pfn = (uint64_t)amdgpu_vm_size << 18;
+ adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
/* Set the internal MC address mask
* This is the max address of the GPU's
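For the discrete-GPU path above, a quick check of the numbers (sketch only): vm_size is kept in GB, so 1 << 18 GB is 256TB, and shifting by 18 again turns GB into 4K pages, giving a 48-bit address space (2^36 pages) for max_pfn. The APU path instead goes through amdgpu_vm_adjust_size() with the 64GB default.

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t vm_size_gb = 1ULL << 18;            /* 262144 GB = 256 TB */
		uint64_t max_pfn = vm_size_gb << 18;         /* 2^36 4K pages = 48-bit space */

		printf("%llu GB = %llu TB, max_pfn = 2^%d\n",
		       (unsigned long long)vm_size_gb,
		       (unsigned long long)(vm_size_gb >> 10),
		       __builtin_ctzll(max_pfn));
		return 0;
	}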
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 266a0f47a908..62684510ddcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -242,7 +242,7 @@ int mmhub_v1_0_gart_enable(struct amdgpu_device *adev)
EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
PAGE_TABLE_BLOCK_SIZE,
- amdgpu_vm_block_size - 9);
+ adev->vm_manager.block_size - 9);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL) + i, tmp);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32) + i*2, 0);
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32) + i*2, 0);
@@ -317,36 +317,6 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
WREG32(SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL), tmp);
}
-static uint32_t mmhub_v1_0_get_invalidate_req(unsigned int vm_id)
-{
- u32 req = 0;
-
- /* invalidate using legacy mode on vm_id*/
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- PER_VMID_INVALIDATE_REQ, 1 << vm_id);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
- req = REG_SET_FIELD(req, VM_INVALIDATE_ENG0_REQ,
- CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);
-
- return req;
-}
-
-static uint32_t mmhub_v1_0_get_vm_protection_bits(void)
-{
- return (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);
-}
-
static int mmhub_v1_0_early_init(void *handle)
{
return 0;
@@ -379,9 +349,6 @@ static int mmhub_v1_0_sw_init(void *handle)
hub->vm_l2_pro_fault_cntl =
SOC15_REG_OFFSET(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL);
- hub->get_invalidate_req = mmhub_v1_0_get_invalidate_req;
- hub->get_vm_protection_bits = mmhub_v1_0_get_vm_protection_bits;
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index cfd5e54777bb..1493301b6a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -28,6 +28,7 @@
#include "vega10/GC/gc_9_0_offset.h"
#include "vega10/GC/gc_9_0_sh_mask.h"
#include "soc15.h"
+#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"
@@ -133,7 +134,7 @@ static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
return r;
}
-static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
+static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
int r = 0, timeout = AI_MAILBOX_TIMEDOUT;
@@ -172,7 +173,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
if (req == IDH_REQ_GPU_INIT_ACCESS ||
req == IDH_REQ_GPU_FINI_ACCESS ||
req == IDH_REQ_GPU_RESET_ACCESS) {
- r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
+ r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
if (r)
return r;
}
@@ -180,6 +181,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return 0;
}
+static int xgpu_ai_request_reset(struct amdgpu_device *adev)
+{
+ return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+}
+
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
bool init)
{
@@ -201,7 +207,134 @@ static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
return r;
}
+static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ DRM_DEBUG("get ack intr and do nothing.\n");
+ return 0;
+}
+
+static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
+{
+ struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
+ struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
+
+ /* wait until RCV_MSG becomes 3 */
+ if (xgpu_ai_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
+ pr_err("failed to receive FLR_CMPL\n");
+ return;
+ }
+
+ /* Trigger recovery due to world switch failure */
+ amdgpu_sriov_gpu_reset(adev, false);
+}
+
+static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));
+
+ tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
+ (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
+ WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);
+
+ return 0;
+}
+
+static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ int r;
+
+ /* see what event we get */
+ r = xgpu_ai_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
+
+ /* only handle FLR_NOTIFICATION for now */
+ if (!r)
+ schedule_work(&adev->virt.flr_work);
+
+ return 0;
+}
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
+ .set = xgpu_ai_set_mailbox_ack_irq,
+ .process = xgpu_ai_mailbox_ack_irq,
+};
+
+static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
+ .set = xgpu_ai_set_mailbox_rcv_irq,
+ .process = xgpu_ai_mailbox_rcv_irq,
+};
+
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
+{
+ adev->virt.ack_irq.num_types = 1;
+ adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
+ adev->virt.rcv_irq.num_types = 1;
+ adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
+}
+
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
+ if (r)
+ return r;
+
+ r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ return 0;
+}
+
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
+ if (r)
+ return r;
+ r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+ return r;
+ }
+
+ INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);
+
+ return 0;
+}
+
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
+{
+ amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
+ amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
+ .reset_gpu = xgpu_ai_request_reset,
};
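The new .reset_gpu hook gives generic SR-IOV code a way to request a function-level reset from the host; xgpu_ai_request_reset() simply sends IDH_REQ_GPU_RESET_ACCESS through the mailbox. A minimal sketch of a caller is below; the adev->virt.ops field access and the wrapper name are assumptions for illustration, not taken from this patch:

    /* Hypothetical helper: ask the hypervisor for a GPU reset via the
     * virt ops table registered above (sketch only). */
    static int example_request_sriov_reset(struct amdgpu_device *adev)
    {
        if (!amdgpu_sriov_vf(adev) || !adev->virt.ops->reset_gpu)
            return -ENOENT;

        return adev->virt.ops->reset_gpu(adev);
    }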
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index bf8ab8fd4367..9aefc44d2c34 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -24,7 +24,7 @@
#ifndef __MXGPU_AI_H__
#define __MXGPU_AI_H__
-#define AI_MAILBOX_TIMEDOUT 150000
+#define AI_MAILBOX_TIMEDOUT 5000
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -44,4 +44,9 @@ enum idh_event {
extern const struct amdgpu_virt_ops xgpu_ai_virt_ops;
+void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev);
+int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev);
+void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 5191c45ffdf3..c3588d1c7cb0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -491,7 +491,7 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
ucode_size = ucode->ucode_size;
ucode_mem = (uint32_t *)ucode->kaddr;
- while (!ucode_size) {
+ while (ucode_size) {
fw_sram_reg_val = RREG32(fw_sram_data_reg_offset);
if (*ucode_mem != fw_sram_reg_val)
@@ -508,14 +508,10 @@ bool psp_v3_1_compare_sram_data(struct psp_context *psp,
bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- uint32_t reg, reg_val;
+ uint32_t reg;
- reg_val = (smnMP1_FIRMWARE_FLAGS & 0xffffffff) | 0x03b00000;
- WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg_val);
+ reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000;
+ WREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2), reg);
reg = RREG32(SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2));
- if ((reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
- MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
- return true;
-
- return false;
+ return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 2dd2b20d727e..21f38d882335 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1039,6 +1039,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -1048,7 +1049,6 @@ static void sdma_v4_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index bb14a45997b5..385de8617075 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -106,6 +106,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+ else
+ BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
@@ -125,6 +127,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
if (adev->asic_type == CHIP_VEGA10)
nbio_pcie_id = &nbio_v6_1_pcie_index_data;
+ else
+ BUG();
address = nbio_pcie_id->index_offset;
data = nbio_pcie_id->data_offset;
@@ -493,7 +497,8 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
amdgpu_ip_block_add(adev, &mmhub_v1_0_ip_block);
amdgpu_ip_block_add(adev, &gmc_v9_0_ip_block);
amdgpu_ip_block_add(adev, &vega10_ih_ip_block);
- amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
+ if (amdgpu_fw_load_type == 2 || amdgpu_fw_load_type == -1)
+ amdgpu_ip_block_add(adev, &psp_v3_1_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
@@ -558,6 +563,7 @@ static int soc15_common_early_init(void *handle)
if (amdgpu_sriov_vf(adev)) {
amdgpu_virt_init_setting(adev);
+ xgpu_ai_mailbox_set_irq_funcs(adev);
}
/*
@@ -610,8 +616,23 @@ static int soc15_common_early_init(void *handle)
return 0;
}
+static int soc15_common_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_get_irq(adev);
+
+ return 0;
+}
+
static int soc15_common_sw_init(void *handle)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_add_irq_id(adev);
+
return 0;
}
@@ -642,6 +663,8 @@ static int soc15_common_hw_fini(void *handle)
/* disable the doorbell aperture */
soc15_enable_doorbell_aperture(adev, false);
+ if (amdgpu_sriov_vf(adev))
+ xgpu_ai_mailbox_put_irq(adev);
return 0;
}
@@ -855,7 +878,7 @@ static int soc15_common_set_powergating_state(void *handle,
const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
- .late_init = NULL,
+ .late_init = soc15_common_late_init,
.sw_init = soc15_common_sw_init,
.sw_fini = soc15_common_sw_fini,
.hw_init = soc15_common_hw_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 9a4129d881aa..8ab0f78794a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -135,12 +135,9 @@ static int uvd_v4_2_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
+
static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
@@ -230,11 +227,7 @@ static int uvd_v4_2_suspend(void *handle)
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v4_2_resume(void *handle)
@@ -246,11 +239,7 @@ static int uvd_v4_2_resume(void *handle)
if (r)
return r;
- r = uvd_v4_2_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v4_2_hw_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e448f7d86bc0..bb6d46e168a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -131,11 +131,7 @@ static int uvd_v5_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
@@ -228,11 +224,7 @@ static int uvd_v5_0_suspend(void *handle)
return r;
uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_suspend(adev);
}
static int uvd_v5_0_resume(void *handle)
@@ -244,11 +236,7 @@ static int uvd_v5_0_resume(void *handle)
if (r)
return r;
- r = uvd_v5_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v5_0_hw_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 5679a4249bd9..31db356476f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -134,11 +134,7 @@ static int uvd_v6_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
@@ -230,11 +226,8 @@ static int uvd_v6_0_suspend(void *handle)
return r;
/* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
- }
return r;
}
@@ -250,11 +243,7 @@ static int uvd_v6_0_resume(void *handle)
if (r)
return r;
}
- r = uvd_v6_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v6_0_hw_init(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 13f52e0af9b8..9bcf01469282 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -438,11 +438,7 @@ static int uvd_v7_0_sw_fini(void *handle)
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
- r = amdgpu_uvd_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_uvd_sw_fini(adev);
}
/**
@@ -547,11 +543,8 @@ static int uvd_v7_0_suspend(void *handle)
return r;
/* Skip this for APU for now */
- if (!(adev->flags & AMD_IS_APU)) {
+ if (!(adev->flags & AMD_IS_APU))
r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
- }
return r;
}
@@ -567,11 +560,7 @@ static int uvd_v7_0_resume(void *handle)
if (r)
return r;
}
- r = uvd_v7_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return uvd_v7_0_hw_init(adev);
}
/**
@@ -1045,6 +1034,7 @@ static void uvd_v7_0_vm_reg_wait(struct amdgpu_ring *ring,
static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
uint32_t data0, data1, mask;
unsigned eng = ring->idx;
unsigned i;
@@ -1055,7 +1045,6 @@ static void uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
data0 = (hub->ctx0_ptb_addr_hi32 + vm_id * 2) << 2;
data1 = upper_32_bits(pd_addr);
@@ -1091,6 +1080,7 @@ static void uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring)
static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -1100,7 +1090,6 @@ static void uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, HEVC_ENC_CMD_REG_WRITE);
amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 49a6c45e65be..47f70827195b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -451,11 +451,7 @@ static int vce_v2_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v2_0_hw_init(void *handle)
@@ -495,11 +491,7 @@ static int vce_v2_0_suspend(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v2_0_resume(void *handle)
@@ -511,11 +503,7 @@ static int vce_v2_0_resume(void *handle)
if (r)
return r;
- r = vce_v2_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v2_0_hw_init(adev);
}
static int vce_v2_0_soft_reset(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index db0adac073c6..fb0819359909 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -417,11 +417,7 @@ static int vce_v3_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v3_0_hw_init(void *handle)
@@ -471,11 +467,7 @@ static int vce_v3_0_suspend(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v3_0_resume(void *handle)
@@ -487,11 +479,7 @@ static int vce_v3_0_resume(void *handle)
if (r)
return r;
- r = vce_v3_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v3_0_hw_init(adev);
}
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index becc5f744a98..edde5fe938d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -527,11 +527,7 @@ static int vce_v4_0_sw_fini(void *handle)
if (r)
return r;
- r = amdgpu_vce_sw_fini(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_sw_fini(adev);
}
static int vce_v4_0_hw_init(void *handle)
@@ -584,11 +580,7 @@ static int vce_v4_0_suspend(void *handle)
if (r)
return r;
- r = amdgpu_vce_suspend(adev);
- if (r)
- return r;
-
- return r;
+ return amdgpu_vce_suspend(adev);
}
static int vce_v4_0_resume(void *handle)
@@ -600,11 +592,7 @@ static int vce_v4_0_resume(void *handle)
if (r)
return r;
- r = vce_v4_0_hw_init(adev);
- if (r)
- return r;
-
- return r;
+ return vce_v4_0_hw_init(adev);
}
static void vce_v4_0_mc_resume(struct amdgpu_device *adev)
@@ -985,6 +973,7 @@ static void vce_v4_0_ring_insert_end(struct amdgpu_ring *ring)
static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
unsigned int vm_id, uint64_t pd_addr)
{
+ uint32_t req = ring->adev->gart.gart_funcs->get_invalidate_req(vm_id);
unsigned eng = ring->idx;
unsigned i;
@@ -994,7 +983,6 @@ static void vce_v4_0_emit_vm_flush(struct amdgpu_ring *ring,
for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
struct amdgpu_vmhub *hub = &ring->adev->vmhub[i];
- uint32_t req = hub->get_invalidate_req(vm_id);
amdgpu_ring_write(ring, VCE_CMD_REG_WRITE);
amdgpu_ring_write(ring,
diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h
index b3a86e0e96e6..5f2ab9c1609a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vid.h
+++ b/drivers/gpu/drm/amd/amdgpu/vid.h
@@ -362,7 +362,89 @@
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SWITCH_BUFFER 0x8B
#define PACKET3_SET_RESOURCES 0xA0
+/* 1. header
+ * 2. CONTROL
+ * 3. QUEUE_MASK_LO [31:0]
+ * 4. QUEUE_MASK_HI [31:0]
+ * 5. GWS_MASK_LO [31:0]
+ * 6. GWS_MASK_HI [31:0]
+ * 7. OAC_MASK [15:0]
+ * 8. GDS_HEAP_SIZE [16:11] | GDS_HEAP_BASE [5:0]
+ */
+# define PACKET3_SET_RESOURCES_VMID_MASK(x) ((x) << 0)
+# define PACKET3_SET_RESOURCES_UNMAP_LATENTY(x) ((x) << 16)
+# define PACKET3_SET_RESOURCES_QUEUE_TYPE(x) ((x) << 29)
#define PACKET3_MAP_QUEUES 0xA2
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. MQD_ADDR_LO [31:0]
+ * 5. MQD_ADDR_HI [31:0]
+ * 6. WPTR_ADDR_LO [31:0]
+ * 7. WPTR_ADDR_HI [31:0]
+ */
+/* CONTROL */
+# define PACKET3_MAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
+# define PACKET3_MAP_QUEUES_VMID(x) ((x) << 8)
+# define PACKET3_MAP_QUEUES_QUEUE_TYPE(x) ((x) << 21)
+# define PACKET3_MAP_QUEUES_ALLOC_FORMAT(x) ((x) << 24)
+# define PACKET3_MAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
+# define PACKET3_MAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
+/* CONTROL2 */
+# define PACKET3_MAP_QUEUES_CHECK_DISABLE(x) ((x) << 1)
+# define PACKET3_MAP_QUEUES_DOORBELL_OFFSET(x) ((x) << 2)
+# define PACKET3_MAP_QUEUES_QUEUE(x) ((x) << 26)
+# define PACKET3_MAP_QUEUES_PIPE(x) ((x) << 29)
+# define PACKET3_MAP_QUEUES_ME(x) ((x) << 31)
+#define PACKET3_UNMAP_QUEUES 0xA3
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. CONTROL3
+ * 5. CONTROL4
+ * 6. CONTROL5
+ */
+/* CONTROL */
+# define PACKET3_UNMAP_QUEUES_ACTION(x) ((x) << 0)
+ /* 0 - PREEMPT_QUEUES
+ * 1 - RESET_QUEUES
+ * 2 - DISABLE_PROCESS_QUEUES
+ * 3 - PREEMPT_QUEUES_NO_UNMAP
+ */
+# define PACKET3_UNMAP_QUEUES_QUEUE_SEL(x) ((x) << 4)
+# define PACKET3_UNMAP_QUEUES_ENGINE_SEL(x) ((x) << 26)
+# define PACKET3_UNMAP_QUEUES_NUM_QUEUES(x) ((x) << 29)
+/* CONTROL2a */
+# define PACKET3_UNMAP_QUEUES_PASID(x) ((x) << 0)
+/* CONTROL2b */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(x) ((x) << 2)
+/* CONTROL3a */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET1(x) ((x) << 2)
+/* CONTROL3b */
+# define PACKET3_UNMAP_QUEUES_RB_WPTR(x) ((x) << 0)
+/* CONTROL4 */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET2(x) ((x) << 2)
+/* CONTROL5 */
+# define PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET3(x) ((x) << 2)
+#define PACKET3_QUERY_STATUS 0xA4
+/* 1. header
+ * 2. CONTROL
+ * 3. CONTROL2
+ * 4. ADDR_LO [31:0]
+ * 5. ADDR_HI [31:0]
+ * 6. DATA_LO [31:0]
+ * 7. DATA_HI [31:0]
+ */
+/* CONTROL */
+# define PACKET3_QUERY_STATUS_CONTEXT_ID(x) ((x) << 0)
+# define PACKET3_QUERY_STATUS_INTERRUPT_SEL(x) ((x) << 28)
+# define PACKET3_QUERY_STATUS_COMMAND(x) ((x) << 30)
+/* CONTROL2a */
+# define PACKET3_QUERY_STATUS_PASID(x) ((x) << 0)
+/* CONTROL2b */
+# define PACKET3_QUERY_STATUS_DOORBELL_OFFSET(x) ((x) << 2)
+# define PACKET3_QUERY_STATUS_ENG_SEL(x) ((x) << 25)
+
#define VCE_CMD_NO_OP 0x00000000
#define VCE_CMD_END 0x00000001
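The MAP_QUEUES field macros above shift each value into its documented bit position within the CONTROL and CONTROL2 dwords, so a packet body is built by OR-ing them together. A sketch of assembling the CONTROL word (the numeric field values are placeholders, not taken from this header):

    /* Illustrative CONTROL dword for a PACKET3_MAP_QUEUES packet. */
    uint32_t map_queues_control =
        PACKET3_MAP_QUEUES_QUEUE_SEL(0)    |  /* queue selection mode  */
        PACKET3_MAP_QUEUES_VMID(0)         |  /* VMID owning the queue */
        PACKET3_MAP_QUEUES_QUEUE_TYPE(0)   |  /* e.g. a compute queue  */
        PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) |
        PACKET3_MAP_QUEUES_ENGINE_SEL(0)   |
        PACKET3_MAP_QUEUES_NUM_QUEUES(1);     /* map a single queue    */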
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index dfd4fe6f0578..9da5b0bb66d8 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -493,8 +493,10 @@ static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
{
enum amd_pm_state_type ps;
- if (input == NULL)
- return -EINVAL;
+ if (input == NULL) {
+ ret = -EINVAL;
+ break;
+ }
ps = *(unsigned long *)input;
data.requested_ui_label = power_state_convert(ps);
@@ -539,15 +541,19 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
switch (state->classification.ui_label) {
case PP_StateUILabel_Battery:
pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
case PP_StateUILabel_Balanced:
pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
case PP_StateUILabel_Performance:
pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
default:
if (state->classification.flags & PP_StateClassificationFlag_Boot)
pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
else
pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
}
mutex_unlock(&pp_handle->pp_lock);
@@ -894,7 +900,7 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
mutex_lock(&pp_handle->pp_lock);
ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_lock(&pp_handle->pp_lock);
+ mutex_unlock(&pp_handle->pp_lock);
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
index 8e53d3a5e725..6a907c93fd9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_pptable.h
@@ -250,6 +250,29 @@ typedef struct _ATOM_Vega10_Fan_Table {
USHORT usFanStartTemperature;
} ATOM_Vega10_Fan_Table;
+typedef struct _ATOM_Vega10_Fan_Table_V2 {
+ UCHAR ucRevId;
+ USHORT usFanOutputSensitivity;
+ USHORT usFanAcousticLimitRpm;
+ USHORT usThrottlingRPM;
+ USHORT usTargetTemperature;
+ USHORT usMinimumPWMLimit;
+ USHORT usTargetGfxClk;
+ USHORT usFanGainEdge;
+ USHORT usFanGainHotspot;
+ USHORT usFanGainLiquid;
+ USHORT usFanGainVrVddc;
+ USHORT usFanGainVrMvdd;
+ USHORT usFanGainPlx;
+ USHORT usFanGainHbm;
+ UCHAR ucEnableZeroRPM;
+ USHORT usFanStopTemperature;
+ USHORT usFanStartTemperature;
+ UCHAR ucFanParameters;
+ UCHAR ucFanMinRPM;
+ UCHAR ucFanMaxRPM;
+} ATOM_Vega10_Fan_Table_V2;
+
typedef struct _ATOM_Vega10_Thermal_Controller {
UCHAR ucRevId;
UCHAR ucType; /* one of ATOM_VEGA10_PP_THERMALCONTROLLER_*/
@@ -305,6 +328,33 @@ typedef struct _ATOM_Vega10_PowerTune_Table {
USHORT usTemperatureLimitTedge;
} ATOM_Vega10_PowerTune_Table;
+typedef struct _ATOM_Vega10_PowerTune_Table_V2 {
+ UCHAR ucRevId;
+ USHORT usSocketPowerLimit;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usTdcLimit;
+ USHORT usEdcLimit;
+ USHORT usSoftwareShutdownTemp;
+ USHORT usTemperatureLimitHotSpot;
+ USHORT usTemperatureLimitLiquid1;
+ USHORT usTemperatureLimitLiquid2;
+ USHORT usTemperatureLimitHBM;
+ USHORT usTemperatureLimitVrSoc;
+ USHORT usTemperatureLimitVrMem;
+ USHORT usTemperatureLimitPlx;
+ USHORT usLoadLineResistance;
+ UCHAR ucLiquid1_I2C_address;
+ UCHAR ucLiquid2_I2C_address;
+ UCHAR ucLiquid_I2C_Line;
+ UCHAR ucVr_I2C_address;
+ UCHAR ucVr_I2C_Line;
+ UCHAR ucPlx_I2C_address;
+ UCHAR ucPlx_I2C_Line;
+ USHORT usTemperatureLimitTedge;
+} ATOM_Vega10_PowerTune_Table_V2;
+
typedef struct _ATOM_Vega10_Hard_Limit_Record {
ULONG ulSOCCLKLimit;
ULONG ulGFXCLKLimit;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 518634f995e7..8b55ae01132d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -116,14 +116,16 @@ static int init_thermal_controller(
const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
{
const ATOM_Vega10_Thermal_Controller *thermal_controller;
- const ATOM_Vega10_Fan_Table *fan_table;
+ const Vega10_PPTable_Generic_SubTable_Header *header;
+ const ATOM_Vega10_Fan_Table *fan_table_v1;
+ const ATOM_Vega10_Fan_Table_V2 *fan_table_v2;
thermal_controller = (ATOM_Vega10_Thermal_Controller *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usThermalControllerOffset));
PP_ASSERT_WITH_CODE((powerplay_table->usThermalControllerOffset != 0),
- "Thermal controller table not set!", return -1);
+ "Thermal controller table not set!", return -EINVAL);
hwmgr->thermal_controller.ucType = thermal_controller->ucType;
hwmgr->thermal_controller.ucI2cLine = thermal_controller->ucI2cLine;
@@ -142,6 +144,9 @@ static int init_thermal_controller(
hwmgr->thermal_controller.fanInfo.ulMaxRPM =
thermal_controller->ucFanMaxRPM * 100UL;
+ hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
+ = 100000;
+
set_hw_cap(
hwmgr,
ATOM_VEGA10_PP_THERMALCONTROLLER_NONE != hwmgr->thermal_controller.ucType,
@@ -150,54 +155,101 @@ static int init_thermal_controller(
if (!powerplay_table->usFanTableOffset)
return 0;
- fan_table = (const ATOM_Vega10_Fan_Table *)
+ header = (const Vega10_PPTable_Generic_SubTable_Header *)
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usFanTableOffset));
- PP_ASSERT_WITH_CODE((fan_table->ucRevId >= 8),
- "Invalid Input Fan Table!", return -1);
+ if (header->ucRevId == 10) {
+ fan_table_v1 = (ATOM_Vega10_Fan_Table *)header;
- hwmgr->thermal_controller.advanceFanControlParameters.ulCycleDelay
- = 100000;
- phm_cap_set(hwmgr->platform_descriptor.platformCaps,
- PHM_PlatformCaps_MicrocodeFanControl);
-
- hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
- le16_to_cpu(fan_table->usFanOutputSensitivity);
- hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
- le16_to_cpu(fan_table->usFanRPMMax);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
- le16_to_cpu(fan_table->usThrottlingRPM);
- hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
- le32_to_cpu((uint32_t)(fan_table->usFanAcousticLimit));
- hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
- le16_to_cpu(fan_table->usTargetTemperature);
- hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
- le16_to_cpu(fan_table->usMinimumPWMLimit);
- hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
- le32_to_cpu((uint32_t)(fan_table->usTargetGfxClk));
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
- le16_to_cpu(fan_table->usFanGainEdge);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
- le16_to_cpu(fan_table->usFanGainHotspot);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
- le16_to_cpu(fan_table->usFanGainLiquid);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
- le16_to_cpu(fan_table->usFanGainVrVddc);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
- le16_to_cpu(fan_table->usFanGainVrMvdd);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
- le16_to_cpu(fan_table->usFanGainPlx);
- hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
- le16_to_cpu(fan_table->usFanGainHbm);
-
- hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
- fan_table->ucEnableZeroRPM;
- hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
- le16_to_cpu(fan_table->usFanStopTemperature);
- hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
- le16_to_cpu(fan_table->usFanStartTemperature);
+ PP_ASSERT_WITH_CODE((fan_table_v1->ucRevId >= 8),
+ "Invalid Input Fan Table!", return -EINVAL);
+
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table_v1->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ le16_to_cpu(fan_table_v1->usFanRPMMax);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
+ le16_to_cpu(fan_table_v1->usThrottlingRPM);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
+ le16_to_cpu(fan_table_v1->usFanAcousticLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table_v1->usTargetTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table_v1->usMinimumPWMLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
+ le16_to_cpu(fan_table_v1->usTargetGfxClk);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
+ le16_to_cpu(fan_table_v1->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
+ le16_to_cpu(fan_table_v1->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
+ le16_to_cpu(fan_table_v1->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
+ le16_to_cpu(fan_table_v1->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
+ le16_to_cpu(fan_table_v1->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
+ le16_to_cpu(fan_table_v1->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
+ le16_to_cpu(fan_table_v1->usFanGainHbm);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
+ fan_table_v1->ucEnableZeroRPM;
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
+ le16_to_cpu(fan_table_v1->usFanStopTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
+ le16_to_cpu(fan_table_v1->usFanStartTemperature);
+ } else if (header->ucRevId > 10) {
+ fan_table_v2 = (ATOM_Vega10_Fan_Table_V2 *)header;
+
+ hwmgr->thermal_controller.fanInfo.ucTachometerPulsesPerRevolution =
+ fan_table_v2->ucFanParameters & ATOM_VEGA10_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ hwmgr->thermal_controller.fanInfo.ulMinRPM = fan_table_v2->ucFanMinRPM * 100UL;
+ hwmgr->thermal_controller.fanInfo.ulMaxRPM = fan_table_v2->ucFanMaxRPM * 100UL;
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_MicrocodeFanControl);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanOutputSensitivity =
+ le16_to_cpu(fan_table_v2->usFanOutputSensitivity);
+ hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
+ fan_table_v2->ucFanMaxRPM * 100UL;
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit =
+ le16_to_cpu(fan_table_v2->usThrottlingRPM);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit =
+ le16_to_cpu(fan_table_v2->usFanAcousticLimitRpm);
+ hwmgr->thermal_controller.advanceFanControlParameters.usTMax =
+ le16_to_cpu(fan_table_v2->usTargetTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usPWMMin =
+ le16_to_cpu(fan_table_v2->usMinimumPWMLimit);
+ hwmgr->thermal_controller.advanceFanControlParameters.ulTargetGfxClk =
+ le16_to_cpu(fan_table_v2->usTargetGfxClk);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainEdge =
+ le16_to_cpu(fan_table_v2->usFanGainEdge);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHotspot =
+ le16_to_cpu(fan_table_v2->usFanGainHotspot);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainLiquid =
+ le16_to_cpu(fan_table_v2->usFanGainLiquid);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrVddc =
+ le16_to_cpu(fan_table_v2->usFanGainVrVddc);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainVrMvdd =
+ le16_to_cpu(fan_table_v2->usFanGainVrMvdd);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainPlx =
+ le16_to_cpu(fan_table_v2->usFanGainPlx);
+ hwmgr->thermal_controller.advanceFanControlParameters.usFanGainHbm =
+ le16_to_cpu(fan_table_v2->usFanGainHbm);
+
+ hwmgr->thermal_controller.advanceFanControlParameters.ucEnableZeroRPM =
+ fan_table_v2->ucEnableZeroRPM;
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStopTemperature =
+ le16_to_cpu(fan_table_v2->usFanStopTemperature);
+ hwmgr->thermal_controller.advanceFanControlParameters.usZeroRPMStartTemperature =
+ le16_to_cpu(fan_table_v2->usFanStartTemperature);
+ }
return 0;
}
@@ -261,6 +313,48 @@ static int get_mm_clock_voltage_table(
return 0;
}
+static void get_scl_sda_value(uint8_t line, uint8_t *scl, uint8_t *sda)
+{
+ switch (line) {
+ case Vega10_I2CLineID_DDC1:
+ *scl = Vega10_I2C_DDC1CLK;
+ *sda = Vega10_I2C_DDC1DATA;
+ break;
+ case Vega10_I2CLineID_DDC2:
+ *scl = Vega10_I2C_DDC2CLK;
+ *sda = Vega10_I2C_DDC2DATA;
+ break;
+ case Vega10_I2CLineID_DDC3:
+ *scl = Vega10_I2C_DDC3CLK;
+ *sda = Vega10_I2C_DDC3DATA;
+ break;
+ case Vega10_I2CLineID_DDC4:
+ *scl = Vega10_I2C_DDC4CLK;
+ *sda = Vega10_I2C_DDC4DATA;
+ break;
+ case Vega10_I2CLineID_DDC5:
+ *scl = Vega10_I2C_DDC5CLK;
+ *sda = Vega10_I2C_DDC5DATA;
+ break;
+ case Vega10_I2CLineID_DDC6:
+ *scl = Vega10_I2C_DDC6CLK;
+ *sda = Vega10_I2C_DDC6DATA;
+ break;
+ case Vega10_I2CLineID_SCLSDA:
+ *scl = Vega10_I2C_SCL;
+ *sda = Vega10_I2C_SDA;
+ break;
+ case Vega10_I2CLineID_DDCVGA:
+ *scl = Vega10_I2C_DDCVGACLK;
+ *sda = Vega10_I2C_DDCVGADATA;
+ break;
+ default:
+ *scl = 0;
+ *sda = 0;
+ break;
+ }
+}
+
static int get_tdp_table(
struct pp_hwmgr *hwmgr,
struct phm_tdp_table **info_tdp_table,
@@ -268,59 +362,99 @@ static int get_tdp_table(
{
uint32_t table_size;
struct phm_tdp_table *tdp_table;
-
- const ATOM_Vega10_PowerTune_Table *power_tune_table =
- (ATOM_Vega10_PowerTune_Table *)table;
-
- table_size = sizeof(uint32_t) + sizeof(struct phm_cac_tdp_table);
- hwmgr->dyn_state.cac_dtp_table = (struct phm_cac_tdp_table *)
- kzalloc(table_size, GFP_KERNEL);
-
- if (!hwmgr->dyn_state.cac_dtp_table)
- return -ENOMEM;
+ uint8_t scl;
+ uint8_t sda;
+ const ATOM_Vega10_PowerTune_Table *power_tune_table;
+ const ATOM_Vega10_PowerTune_Table_V2 *power_tune_table_v2;
table_size = sizeof(uint32_t) + sizeof(struct phm_tdp_table);
+
tdp_table = kzalloc(table_size, GFP_KERNEL);
- if (!tdp_table) {
- kfree(hwmgr->dyn_state.cac_dtp_table);
- hwmgr->dyn_state.cac_dtp_table = NULL;
+ if (!tdp_table)
return -ENOMEM;
- }
- tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
- tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
- tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
- tdp_table->usSoftwareShutdownTemp =
- le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
- tdp_table->usTemperatureLimitTedge =
- le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
- tdp_table->usTemperatureLimitHotspot =
- le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
- tdp_table->usTemperatureLimitLiquid1 =
- le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
- tdp_table->usTemperatureLimitLiquid2 =
- le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
- tdp_table->usTemperatureLimitHBM =
- le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
- tdp_table->usTemperatureLimitVrVddc =
- le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
- tdp_table->usTemperatureLimitVrMvdd =
- le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
- tdp_table->usTemperatureLimitPlx =
- le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
- tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
- tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
- tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
- tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
- tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
- tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
- tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
- tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
- tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
- tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
-
- hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ if (table->ucRevId == 5) {
+ power_tune_table = (ATOM_Vega10_PowerTune_Table *)table;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp =
+ le16_to_cpu(power_tune_table->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge =
+ le16_to_cpu(power_tune_table->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot =
+ le16_to_cpu(power_tune_table->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 =
+ le16_to_cpu(power_tune_table->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+ le16_to_cpu(power_tune_table->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM =
+ le16_to_cpu(power_tune_table->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc =
+ le16_to_cpu(power_tune_table->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd =
+ le16_to_cpu(power_tune_table->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx =
+ le16_to_cpu(power_tune_table->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table->ucLiquid2_I2C_address;
+ tdp_table->ucLiquid_I2C_Line = power_tune_table->ucLiquid_I2C_LineSCL;
+ tdp_table->ucLiquid_I2C_LineSDA = power_tune_table->ucLiquid_I2C_LineSDA;
+ tdp_table->ucVr_I2C_address = power_tune_table->ucVr_I2C_address;
+ tdp_table->ucVr_I2C_Line = power_tune_table->ucVr_I2C_LineSCL;
+ tdp_table->ucVr_I2C_LineSDA = power_tune_table->ucVr_I2C_LineSDA;
+ tdp_table->ucPlx_I2C_address = power_tune_table->ucPlx_I2C_address;
+ tdp_table->ucPlx_I2C_Line = power_tune_table->ucPlx_I2C_LineSCL;
+ tdp_table->ucPlx_I2C_LineSDA = power_tune_table->ucPlx_I2C_LineSDA;
+ hwmgr->platform_descriptor.LoadLineSlope = power_tune_table->usLoadLineResistance;
+ } else {
+ power_tune_table_v2 = (ATOM_Vega10_PowerTune_Table_V2 *)table;
+ tdp_table->usMaximumPowerDeliveryLimit = le16_to_cpu(power_tune_table_v2->usSocketPowerLimit);
+ tdp_table->usTDC = le16_to_cpu(power_tune_table_v2->usTdcLimit);
+ tdp_table->usEDCLimit = le16_to_cpu(power_tune_table_v2->usEdcLimit);
+ tdp_table->usSoftwareShutdownTemp =
+ le16_to_cpu(power_tune_table_v2->usSoftwareShutdownTemp);
+ tdp_table->usTemperatureLimitTedge =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitTedge);
+ tdp_table->usTemperatureLimitHotspot =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitHotSpot);
+ tdp_table->usTemperatureLimitLiquid1 =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid1);
+ tdp_table->usTemperatureLimitLiquid2 =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitLiquid2);
+ tdp_table->usTemperatureLimitHBM =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitHBM);
+ tdp_table->usTemperatureLimitVrVddc =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrSoc);
+ tdp_table->usTemperatureLimitVrMvdd =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitVrMem);
+ tdp_table->usTemperatureLimitPlx =
+ le16_to_cpu(power_tune_table_v2->usTemperatureLimitPlx);
+ tdp_table->ucLiquid1_I2C_address = power_tune_table_v2->ucLiquid1_I2C_address;
+ tdp_table->ucLiquid2_I2C_address = power_tune_table_v2->ucLiquid2_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucLiquid_I2C_Line, &scl, &sda);
+
+ tdp_table->ucLiquid_I2C_Line = scl;
+ tdp_table->ucLiquid_I2C_LineSDA = sda;
+
+ tdp_table->ucVr_I2C_address = power_tune_table_v2->ucVr_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucVr_I2C_Line, &scl, &sda);
+
+ tdp_table->ucVr_I2C_Line = scl;
+ tdp_table->ucVr_I2C_LineSDA = sda;
+ tdp_table->ucPlx_I2C_address = power_tune_table_v2->ucPlx_I2C_address;
+
+ get_scl_sda_value(power_tune_table_v2->ucPlx_I2C_Line, &scl, &sda);
+
+ tdp_table->ucPlx_I2C_Line = scl;
+ tdp_table->ucPlx_I2C_LineSDA = sda;
+
+ hwmgr->platform_descriptor.LoadLineSlope =
+ power_tune_table_v2->usLoadLineResistance;
+ }
*info_tdp_table = tdp_table;
@@ -836,7 +970,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddcLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddc_lookup_table, vddc_table, 16);
+ &pp_table_info->vddc_lookup_table, vddc_table, 8);
}
if (powerplay_table->usVddmemLookupTableOffset) {
@@ -845,7 +979,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddmemLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddmem_lookup_table, vdd_mem_table, 16);
+ &pp_table_info->vddmem_lookup_table, vdd_mem_table, 4);
}
if (powerplay_table->usVddciLookupTableOffset) {
@@ -854,7 +988,7 @@ static int init_dpm_2_parameters(
(((unsigned long)powerplay_table) +
le16_to_cpu(powerplay_table->usVddciLookupTableOffset));
result = get_vddc_lookup_table(hwmgr,
- &pp_table_info->vddci_lookup_table, vddci_table, 16);
+ &pp_table_info->vddci_lookup_table, vddci_table, 4);
}
return result;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
index 995d133ba6aa..d83ed2af7aa3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.h
@@ -26,6 +26,34 @@
#include "hwmgr.h"
+enum Vega10_I2CLineID {
+ Vega10_I2CLineID_DDC1 = 0x90,
+ Vega10_I2CLineID_DDC2 = 0x91,
+ Vega10_I2CLineID_DDC3 = 0x92,
+ Vega10_I2CLineID_DDC4 = 0x93,
+ Vega10_I2CLineID_DDC5 = 0x94,
+ Vega10_I2CLineID_DDC6 = 0x95,
+ Vega10_I2CLineID_SCLSDA = 0x96,
+ Vega10_I2CLineID_DDCVGA = 0x97
+};
+
+#define Vega10_I2C_DDC1DATA 0
+#define Vega10_I2C_DDC1CLK 1
+#define Vega10_I2C_DDC2DATA 2
+#define Vega10_I2C_DDC2CLK 3
+#define Vega10_I2C_DDC3DATA 4
+#define Vega10_I2C_DDC3CLK 5
+#define Vega10_I2C_SDA 40
+#define Vega10_I2C_SCL 41
+#define Vega10_I2C_DDC4DATA 65
+#define Vega10_I2C_DDC4CLK 66
+#define Vega10_I2C_DDC5DATA 0x48
+#define Vega10_I2C_DDC5CLK 0x49
+#define Vega10_I2C_DDC6DATA 0x4a
+#define Vega10_I2C_DDC6CLK 0x4b
+#define Vega10_I2C_DDCVGADATA 0x4c
+#define Vega10_I2C_DDCVGACLK 0x4d
+
extern const struct pp_table_func vega10_pptable_funcs;
extern int vega10_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr);
extern int vega10_get_powerplay_table_entry(struct pp_hwmgr *hwmgr, uint32_t entry_index,
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
index aee021451d35..2037910adcb1 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu9_driver_if.h
@@ -30,7 +30,9 @@
* SMU TEAM: Always increment the interface version if
* any structure is changed in this file
*/
-#define SMU9_DRIVER_IF_VERSION 0xa
+#define SMU9_DRIVER_IF_VERSION 0xB
+
+#define PPTABLE_V10_SMU_VERSION 1
#define NUM_GFXCLK_DPM_LEVELS 8
#define NUM_UVD_DPM_LEVELS 8
@@ -87,6 +89,11 @@ typedef struct {
int32_t a0;
int32_t a1;
int32_t a2;
+
+ uint8_t a0_shift;
+ uint8_t a1_shift;
+ uint8_t a2_shift;
+ uint8_t padding;
} GbVdroopTable_t;
typedef struct {
@@ -293,7 +300,9 @@ typedef struct {
uint16_t Platform_sigma;
uint16_t PSM_Age_CompFactor;
- uint32_t Reserved[20];
+ uint32_t DpmLevelPowerDelta;
+
+ uint32_t Reserved[19];
/* Padding - ignore */
uint32_t MmHubPadding[7]; /* SMU internal use */
@@ -350,8 +359,8 @@ typedef struct {
typedef struct {
uint16_t avgPsmCount[30];
uint16_t minPsmCount[30];
- uint16_t avgPsmVoltage[30]; /* in mV with 2 fractional bits */
- uint16_t minPsmVoltage[30]; /* in mV with 2 fractional bits */
+ float avgPsmVoltage[30];
+ float minPsmVoltage[30];
uint32_t MmHubPadding[7]; /* SMU internal use */
} AvfsDebugTable_t;
@@ -414,5 +423,45 @@ typedef struct {
#define UCLK_SWITCH_SLOW 0
#define UCLK_SWITCH_FAST 1
+/* GFX DIDT Configuration */
+#define SQ_Enable_MASK 0x1
+#define SQ_IR_MASK 0x2
+#define SQ_PCC_MASK 0x4
+#define SQ_EDC_MASK 0x8
+
+#define TCP_Enable_MASK 0x100
+#define TCP_IR_MASK 0x200
+#define TCP_PCC_MASK 0x400
+#define TCP_EDC_MASK 0x800
+
+#define TD_Enable_MASK 0x10000
+#define TD_IR_MASK 0x20000
+#define TD_PCC_MASK 0x40000
+#define TD_EDC_MASK 0x80000
+
+#define DB_Enable_MASK 0x1000000
+#define DB_IR_MASK 0x2000000
+#define DB_PCC_MASK 0x4000000
+#define DB_EDC_MASK 0x8000000
+
+#define SQ_Enable_SHIFT 0
+#define SQ_IR_SHIFT 1
+#define SQ_PCC_SHIFT 2
+#define SQ_EDC_SHIFT 3
+
+#define TCP_Enable_SHIFT 8
+#define TCP_IR_SHIFT 9
+#define TCP_PCC_SHIFT 10
+#define TCP_EDC_SHIFT 11
+
+#define TD_Enable_SHIFT 16
+#define TD_IR_SHIFT 17
+#define TD_PCC_SHIFT 18
+#define TD_EDC_SHIFT 19
+
+#define DB_Enable_SHIFT 24
+#define DB_IR_SHIFT 25
+#define DB_PCC_SHIFT 26
+#define DB_EDC_SHIFT 27
#endif
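Each DIDT block (SQ, TCP, TD, DB) gets a matching mask/shift pair, so a packed GFX DIDT configuration word can be decoded one field at a time. A small sketch (the didt_config variable and its value are hypothetical):

    /* Extract the SQ EDC enable bit from a packed DIDT configuration word. */
    uint32_t didt_config = 0x0000000A;  /* example: SQ_IR and SQ_EDC set */
    uint32_t sq_edc_on   = (didt_config & SQ_EDC_MASK) >> SQ_EDC_SHIFT;  /* == 1 */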
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index f9d665550d3e..9446a673d469 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -16,6 +16,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <video/videomode.h>
#include "malidp_drv.h"
@@ -35,13 +36,6 @@ static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
long rate, req_rate = mode->crtc_clock * 1000;
if (req_rate) {
- rate = clk_round_rate(hwdev->mclk, req_rate);
- if (rate < req_rate) {
- DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
- mode->crtc_clock);
- return false;
- }
-
rate = clk_round_rate(hwdev->pxlclk, req_rate);
if (rate != req_rate) {
DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
@@ -58,9 +52,14 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
struct videomode vm;
+ int err = pm_runtime_get_sync(crtc->dev->dev);
- drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
+ if (err < 0) {
+ DRM_DEBUG_DRIVER("Failed to enable runtime power management: %d\n", err);
+ return;
+ }
+ drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
clk_prepare_enable(hwdev->pxlclk);
/* We rely on firmware to set mclk to a sensible level. */
@@ -75,10 +74,254 @@ static void malidp_crtc_disable(struct drm_crtc *crtc)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
struct malidp_hw_device *hwdev = malidp->dev;
+ int err;
drm_crtc_vblank_off(crtc);
hwdev->enter_config_mode(hwdev);
clk_disable_unprepare(hwdev->pxlclk);
+
+ err = pm_runtime_put(crtc->dev->dev);
+ if (err < 0)
+ DRM_DEBUG_DRIVER("Failed to disable runtime power management: %d\n", err);
+}
+
+static const struct gamma_curve_segment {
+ u16 start;
+ u16 end;
+} segments[MALIDP_COEFFTAB_NUM_COEFFS] = {
+ /* sector 0 */
+ { 0, 0 }, { 1, 1 }, { 2, 2 }, { 3, 3 },
+ { 4, 4 }, { 5, 5 }, { 6, 6 }, { 7, 7 },
+ { 8, 8 }, { 9, 9 }, { 10, 10 }, { 11, 11 },
+ { 12, 12 }, { 13, 13 }, { 14, 14 }, { 15, 15 },
+ /* sector 1 */
+ { 16, 19 }, { 20, 23 }, { 24, 27 }, { 28, 31 },
+ /* sector 2 */
+ { 32, 39 }, { 40, 47 }, { 48, 55 }, { 56, 63 },
+ /* sector 3 */
+ { 64, 79 }, { 80, 95 }, { 96, 111 }, { 112, 127 },
+ /* sector 4 */
+ { 128, 159 }, { 160, 191 }, { 192, 223 }, { 224, 255 },
+ /* sector 5 */
+ { 256, 319 }, { 320, 383 }, { 384, 447 }, { 448, 511 },
+ /* sector 6 */
+ { 512, 639 }, { 640, 767 }, { 768, 895 }, { 896, 1023 },
+ { 1024, 1151 }, { 1152, 1279 }, { 1280, 1407 }, { 1408, 1535 },
+ { 1536, 1663 }, { 1664, 1791 }, { 1792, 1919 }, { 1920, 2047 },
+ { 2048, 2175 }, { 2176, 2303 }, { 2304, 2431 }, { 2432, 2559 },
+ { 2560, 2687 }, { 2688, 2815 }, { 2816, 2943 }, { 2944, 3071 },
+ { 3072, 3199 }, { 3200, 3327 }, { 3328, 3455 }, { 3456, 3583 },
+ { 3584, 3711 }, { 3712, 3839 }, { 3840, 3967 }, { 3968, 4095 },
+};
+
+#define DE_COEFTAB_DATA(a, b) ((((a) & 0xfff) << 16) | (((b) & 0xfff)))
+
+static void malidp_generate_gamma_table(struct drm_property_blob *lut_blob,
+ u32 coeffs[MALIDP_COEFFTAB_NUM_COEFFS])
+{
+ struct drm_color_lut *lut = (struct drm_color_lut *)lut_blob->data;
+ int i;
+
+ for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) {
+ u32 a, b, delta_in, out_start, out_end;
+
+ delta_in = segments[i].end - segments[i].start;
+ /* DP has 12-bit internal precision for its LUTs. */
+ out_start = drm_color_lut_extract(lut[segments[i].start].green,
+ 12);
+ out_end = drm_color_lut_extract(lut[segments[i].end].green, 12);
+ a = (delta_in == 0) ? 0 : ((out_end - out_start) * 256) / delta_in;
+ b = out_start;
+ coeffs[i] = DE_COEFTAB_DATA(a, b);
+ }
+}
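Each packed coefficient holds the segment's slope scaled by 256 (a) and its starting output value (b). As a worked example, assuming a roughly linear LUT whose 12-bit output for entry i is about i: for segment {16, 19}, delta_in = 3, out_start = 16, out_end = 19, so a = (19 - 16) * 256 / 3 = 256, b = 16, and DE_COEFTAB_DATA(256, 16) = (256 << 16) | 16 = 0x01000010.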
+
+/*
+ * Check if there is a new gamma LUT and if it is of an acceptable size. Also,
+ * reject any LUTs that use distinct red, green, and blue curves.
+ */
+static int malidp_crtc_atomic_check_gamma(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
+ struct drm_color_lut *lut;
+ size_t lut_size;
+ int i;
+
+ if (!state->color_mgmt_changed || !state->gamma_lut)
+ return 0;
+
+ if (crtc->state->gamma_lut &&
+ (crtc->state->gamma_lut->base.id == state->gamma_lut->base.id))
+ return 0;
+
+ if (state->gamma_lut->length % sizeof(struct drm_color_lut))
+ return -EINVAL;
+
+ lut_size = state->gamma_lut->length / sizeof(struct drm_color_lut);
+ if (lut_size != MALIDP_GAMMA_LUT_SIZE)
+ return -EINVAL;
+
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ for (i = 0; i < lut_size; ++i)
+ if (!((lut[i].red == lut[i].green) &&
+ (lut[i].red == lut[i].blue)))
+ return -EINVAL;
+
+ if (!state->mode_changed) {
+ int ret;
+
+ state->mode_changed = true;
+ /*
+ * Kerneldoc for drm_atomic_helper_check_modeset mandates that
+ * it be invoked when the driver sets ->mode_changed. Since
+ * changing the gamma LUT doesn't depend on any external
+ * resources, it is safe to call it only once.
+ */
+ ret = drm_atomic_helper_check_modeset(crtc->dev, state->state);
+ if (ret)
+ return ret;
+ }
+
+ malidp_generate_gamma_table(state->gamma_lut, mc->gamma_coeffs);
+ return 0;
+}
+
+/*
+ * Check if there is a new CTM and if it contains valid input. Valid here means
+ * that each value fits inside the representable range of a Q3.12 number,
+ * ignoring any truncation of the fractional part of the input data.
+ *
+ * The COLORADJ registers can be changed atomically.
+ */
+static int malidp_crtc_atomic_check_ctm(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_crtc_state *mc = to_malidp_crtc_state(state);
+ struct drm_color_ctm *ctm;
+ int i;
+
+ if (!state->color_mgmt_changed)
+ return 0;
+
+ if (!state->ctm)
+ return 0;
+
+ if (crtc->state->ctm && (crtc->state->ctm->base.id ==
+ state->ctm->base.id))
+ return 0;
+
+ /*
+ * The size of the ctm is checked in
+ * drm_atomic_replace_property_blob_from_id.
+ */
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+ for (i = 0; i < ARRAY_SIZE(ctm->matrix); ++i) {
+ /* Convert from S31.32 to Q3.12. */
+ s64 val = ctm->matrix[i];
+ u32 mag = ((((u64)val) & ~BIT_ULL(63)) >> 20) &
+ GENMASK_ULL(14, 0);
+
+ /*
+ * Convert to 2s complement and check the destination's top bit
+ * for overflow. NB: Can't check before converting or it'd
+ * incorrectly reject the case:
+ * sign == 1
+ * mag == 0x2000
+ */
+ if (val & BIT_ULL(63))
+ mag = ~mag + 1;
+ if (!!(val & BIT_ULL(63)) != !!(mag & BIT(14)))
+ return -EINVAL;
+ mc->coloradj_coeffs[i] = mag;
+ }
+
+ return 0;
+}
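The DRM uAPI stores drm_color_ctm entries in S31.32 sign-magnitude form, which the loop above reduces to a Q3.12 value. Worked example: +1.0 is 1ULL << 32; clearing bit 63, shifting right by 20 and masking 15 bits gives 0x1000, i.e. 1.0 in Q3.12. For -2.0 the sign bit is set and the magnitude reduces to 0x2000; after negation bit 14 of the result is set, matching the sign bit, so the overflow check passes, which is exactly the corner case called out in the comment above.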
+
+static int malidp_crtc_atomic_check_scaling(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_crtc_state *cs = to_malidp_crtc_state(state);
+ struct malidp_se_config *s = &cs->scaler_config;
+ struct drm_plane *plane;
+ struct videomode vm;
+ const struct drm_plane_state *pstate;
+ u32 h_upscale_factor = 0; /* U16.16 */
+ u32 v_upscale_factor = 0; /* U16.16 */
+ u8 scaling = cs->scaled_planes_mask;
+ int ret;
+
+ if (!scaling) {
+ s->scale_enable = false;
+ goto mclk_calc;
+ }
+
+ /* The scaling engine can only handle one plane at a time. */
+ if (scaling & (scaling - 1))
+ return -EINVAL;
+
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct malidp_plane *mp = to_malidp_plane(plane);
+ u32 phase;
+
+ if (!(mp->layer->id & scaling))
+ continue;
+
+ /*
+ * Convert crtc_[w|h] to U32.32, then divide by U16.16 src_[w|h]
+ * to get the U16.16 result.
+ */
+ h_upscale_factor = div_u64((u64)pstate->crtc_w << 32,
+ pstate->src_w);
+ v_upscale_factor = div_u64((u64)pstate->crtc_h << 32,
+ pstate->src_h);
+
+ s->enhancer_enable = ((h_upscale_factor >> 16) >= 2 ||
+ (v_upscale_factor >> 16) >= 2);
+
+ s->input_w = pstate->src_w >> 16;
+ s->input_h = pstate->src_h >> 16;
+ s->output_w = pstate->crtc_w;
+ s->output_h = pstate->crtc_h;
+
+#define SE_N_PHASE 4
+#define SE_SHIFT_N_PHASE 12
+ /* Calculate initial_phase and delta_phase for horizontal. */
+ phase = s->input_w;
+ s->h_init_phase =
+ ((phase << SE_N_PHASE) / s->output_w + 1) / 2;
+
+ phase = s->input_w;
+ phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
+ s->h_delta_phase = phase / s->output_w;
+
+ /* Same for vertical. */
+ phase = s->input_h;
+ s->v_init_phase =
+ ((phase << SE_N_PHASE) / s->output_h + 1) / 2;
+
+ phase = s->input_h;
+ phase <<= (SE_SHIFT_N_PHASE + SE_N_PHASE);
+ s->v_delta_phase = phase / s->output_h;
+#undef SE_N_PHASE
+#undef SE_SHIFT_N_PHASE
+ s->plane_src_id = mp->layer->id;
+ }
+
+ s->scale_enable = true;
+ s->hcoeff = malidp_se_select_coeffs(h_upscale_factor);
+ s->vcoeff = malidp_se_select_coeffs(v_upscale_factor);
+
+mclk_calc:
+ drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
+ ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+ if (ret < 0)
+ return -EINVAL;
+ return 0;
}
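With SE_N_PHASE = 4 and SE_SHIFT_N_PHASE = 12, the setup above reduces to init_phase = ((in << 4) / out + 1) / 2 and delta_phase = (in << 16) / out. A worked example for a 2x horizontal upscale; reading 32768 as half an input pixel per output step is an assumption based on the 16-bit (SE_SHIFT_N_PHASE + SE_N_PHASE) phase precision:

    /* input_w = 1920, output_w = 3840 (2x upscale):
     * h_upscale_factor = (3840ULL << 32) / (1920 << 16) = 0x20000 (2.0 in U16.16)
     * h_init_phase     = ((1920 << 4) / 3840 + 1) / 2   = (8 + 1) / 2 = 4
     * h_delta_phase    = (1920 << 16) / 3840            = 32768
     * i.e. the scaler advances half an input pixel per output pixel. */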
static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
@@ -90,6 +333,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
const struct drm_plane_state *pstate;
u32 rot_mem_free, rot_mem_usable;
int rotated_planes = 0;
+ int ret;
/*
* check if there is enough rotation memory available for planes
@@ -156,7 +400,11 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
}
}
- return 0;
+ ret = malidp_crtc_atomic_check_gamma(crtc, state);
+ ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
+ ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
+
+ return ret;
}
static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
@@ -166,6 +414,60 @@ static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
.atomic_check = malidp_crtc_atomic_check,
};
+static struct drm_crtc_state *malidp_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct malidp_crtc_state *state, *old_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ old_state = to_malidp_crtc_state(crtc->state);
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+ memcpy(state->gamma_coeffs, old_state->gamma_coeffs,
+ sizeof(state->gamma_coeffs));
+ memcpy(state->coloradj_coeffs, old_state->coloradj_coeffs,
+ sizeof(state->coloradj_coeffs));
+ memcpy(&state->scaler_config, &old_state->scaler_config,
+ sizeof(state->scaler_config));
+ state->scaled_planes_mask = 0;
+
+ return &state->base;
+}
+
+static void malidp_crtc_reset(struct drm_crtc *crtc)
+{
+ struct malidp_crtc_state *state = NULL;
+
+ if (crtc->state) {
+ state = to_malidp_crtc_state(crtc->state);
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ }
+
+ kfree(state);
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static void malidp_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct malidp_crtc_state *mali_state = NULL;
+
+ if (state) {
+ mali_state = to_malidp_crtc_state(state);
+ __drm_atomic_helper_crtc_destroy_state(state);
+ }
+
+ kfree(mali_state);
+}
+
static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
@@ -186,12 +488,13 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc)
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = malidp_crtc_reset,
+ .atomic_duplicate_state = malidp_crtc_duplicate_state,
+ .atomic_destroy_state = malidp_crtc_destroy_state,
.enable_vblank = malidp_crtc_enable_vblank,
.disable_vblank = malidp_crtc_disable_vblank,
};
@@ -223,11 +526,17 @@ int malidp_crtc_init(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
&malidp_crtc_funcs, NULL);
+ if (ret)
+ goto crtc_cleanup_planes;
- if (!ret) {
- drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
- return 0;
- }
+ drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
+ drm_mode_crtc_set_gamma_size(&malidp->crtc, MALIDP_GAMMA_LUT_SIZE);
+ /* No inverse-gamma: it is per-plane. */
+ drm_crtc_enable_color_mgmt(&malidp->crtc, 0, true, MALIDP_GAMMA_LUT_SIZE);
+
+ malidp_se_set_enh_coeffs(malidp->dev);
+
+ return 0;
crtc_cleanup_planes:
malidp_de_planes_destroy(drm);
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 898c2b58e73d..0d3eb537d08b 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -13,9 +13,11 @@
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/console.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -32,6 +34,131 @@
#define MALIDP_CONF_VALID_TIMEOUT 250
+static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
+ u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
+{
+ int i;
+ /* Update all channels with a single gamma curve. */
+ const u32 gamma_write_mask = GENMASK(18, 16);
+ /*
+ * Always write an entire table, so the address field in
+ * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
+ * directly.
+ */
+ malidp_hw_write(hwdev, gamma_write_mask,
+ hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+ for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
+ malidp_hw_write(hwdev, data[i],
+ hwdev->map.coeffs_base +
+ MALIDP_COEF_TABLE_DATA);
+}
+
+static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ if (!crtc->state->color_mgmt_changed)
+ return;
+
+ if (!crtc->state->gamma_lut) {
+ malidp_hw_clearbits(hwdev,
+ MALIDP_DISP_FUNC_GAMMA,
+ MALIDP_DE_DISPLAY_FUNC);
+ } else {
+ struct malidp_crtc_state *mc =
+ to_malidp_crtc_state(crtc->state);
+
+ if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
+ old_state->gamma_lut->base.id))
+ malidp_write_gamma_table(hwdev, mc->gamma_coeffs);
+
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
+ MALIDP_DE_DISPLAY_FUNC);
+ }
+}
+
+static
+void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ int i;
+
+ if (!crtc->state->color_mgmt_changed)
+ return;
+
+ if (!crtc->state->ctm) {
+ malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
+ MALIDP_DE_DISPLAY_FUNC);
+ } else {
+ struct malidp_crtc_state *mc =
+ to_malidp_crtc_state(crtc->state);
+
+ if (!old_state->ctm || (crtc->state->ctm->base.id !=
+ old_state->ctm->base.id))
+ for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
+ malidp_hw_write(hwdev,
+ mc->coloradj_coeffs[i],
+ hwdev->map.coeffs_base +
+ MALIDP_COLOR_ADJ_COEF + 4 * i);
+
+ malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
+ MALIDP_DE_DISPLAY_FUNC);
+ }
+}
+
+static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
+ struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
+ struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
+ struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_se_config *s = &cs->scaler_config;
+ struct malidp_se_config *old_s = &old_cs->scaler_config;
+ u32 se_control = hwdev->map.se_base +
+ ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ 0x10 : 0xC);
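+ /*
+ * SE_CONTROL sits at se_base + 0xc on DP500 and se_base + 0x10 on
+ * DP550/650 (see MALIDP500_SE_CONTROL and MALIDP550_SE_CONTROL).
+ */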
+ u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
+ u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
+ u32 val;
+
+ /* Set SE_CONTROL */
+ if (!s->scale_enable) {
+ val = malidp_hw_read(hwdev, se_control);
+ val &= ~MALIDP_SE_SCALING_EN;
+ malidp_hw_write(hwdev, val, se_control);
+ return;
+ }
+
+ hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+ val = malidp_hw_read(hwdev, se_control);
+ val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
+
+ val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
+ val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;
+
+ val |= MALIDP_SE_RGBO_IF_EN;
+ malidp_hw_write(hwdev, val, se_control);
+
+ /* Set IN_SIZE & OUT_SIZE. */
+ val = MALIDP_SE_SET_V_SIZE(s->input_h) |
+ MALIDP_SE_SET_H_SIZE(s->input_w);
+ malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
+ val = MALIDP_SE_SET_V_SIZE(s->output_h) |
+ MALIDP_SE_SET_H_SIZE(s->output_w);
+ malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);
+
+ /* Set phase regs. */
+ malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
+ malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
+ malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
+ malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
+}
+
/*
* set the "config valid" bit and wait until the hardware acts on it
*/
@@ -66,10 +193,12 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
struct drm_pending_vblank_event *event;
struct drm_device *drm = state->dev;
struct malidp_drm *malidp = drm->dev_private;
- int ret = malidp_set_and_wait_config_valid(drm);
- if (ret)
- DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ if (malidp->crtc.enabled) {
+ /* only set config_valid if the CRTC is enabled */
+ if (malidp_set_and_wait_config_valid(drm))
+ DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
+ }
event = malidp->crtc.state->event;
if (event) {
@@ -88,15 +217,30 @@ static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *drm = state->dev;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ pm_runtime_get_sync(drm->dev);
drm_atomic_helper_commit_modeset_disables(drm, state);
- drm_atomic_helper_commit_modeset_enables(drm, state);
+
+ for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
+ malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
+ malidp_atomic_commit_se_config(crtc, old_crtc_state);
+ }
+
drm_atomic_helper_commit_planes(drm, state, 0);
+ drm_atomic_helper_commit_modeset_enables(drm, state);
+
malidp_atomic_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(drm, state);
+ pm_runtime_put(drm->dev);
+
drm_atomic_helper_cleanup_planes(drm, state);
}
@@ -277,8 +421,65 @@ static bool malidp_has_sufficient_address_space(const struct resource *res,
return true;
}
+static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
+}
+
+DEVICE_ATTR_RO(core_id);
+
+static int malidp_init_sysfs(struct device *dev)
+{
+ int ret = device_create_file(dev, &dev_attr_core_id);
+
+ if (ret)
+ DRM_ERROR("failed to create device file for core_id\n");
+
+ return ret;
+}
+
+static void malidp_fini_sysfs(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_core_id);
+}
+
#define MAX_OUTPUT_CHANNELS 3
+static int malidp_runtime_pm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ /* we can only suspend if the hardware is in config mode */
+ WARN_ON(!hwdev->in_config_mode(hwdev));
+
+ hwdev->pm_suspended = true;
+ clk_disable_unprepare(hwdev->mclk);
+ clk_disable_unprepare(hwdev->aclk);
+ clk_disable_unprepare(hwdev->pclk);
+
+ return 0;
+}
+
+static int malidp_runtime_pm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+ struct malidp_hw_device *hwdev = malidp->dev;
+
+ clk_prepare_enable(hwdev->pclk);
+ clk_prepare_enable(hwdev->aclk);
+ clk_prepare_enable(hwdev->mclk);
+ hwdev->pm_suspended = false;
+
+ return 0;
+}
+
static int malidp_bind(struct device *dev)
{
struct resource *res;
@@ -307,7 +508,6 @@ static int malidp_bind(struct device *dev)
memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
malidp->dev = hwdev;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
hwdev->regs = devm_ioremap_resource(dev, res);
if (IS_ERR(hwdev->regs))
@@ -340,14 +540,17 @@ static int malidp_bind(struct device *dev)
goto alloc_fail;
}
- /* Enable APB clock in order to get access to the registers */
- clk_prepare_enable(hwdev->pclk);
- /*
- * Enable AXI clock and main clock so that prefetch can start once
- * the registers are set
- */
- clk_prepare_enable(hwdev->aclk);
- clk_prepare_enable(hwdev->mclk);
+ drm->dev_private = malidp;
+ dev_set_drvdata(dev, drm);
+
+ /* Enable power management */
+ pm_runtime_enable(dev);
+
+ /* Resume device to enable the clocks */
+ if (pm_runtime_enabled(dev))
+ pm_runtime_get_sync(dev);
+ else
+ malidp_runtime_pm_resume(dev);
dev_id = of_match_device(malidp_drm_of_match, dev);
if (!dev_id) {
@@ -376,6 +579,8 @@ static int malidp_bind(struct device *dev)
DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
(version >> 12) & 0xf, (version >> 8) & 0xf);
+ malidp->core_id = version;
+
/* set the number of lines used for output of RGB data */
ret = of_property_read_u8_array(dev->of_node,
"arm,malidp-output-port-lines",
@@ -387,13 +592,15 @@ static int malidp_bind(struct device *dev)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
- drm->dev_private = malidp;
- dev_set_drvdata(dev, drm);
atomic_set(&malidp->config_valid, 0);
init_waitqueue_head(&malidp->wq);
ret = malidp_init(drm);
if (ret < 0)
+ goto query_hw_fail;
+
+ ret = malidp_init_sysfs(dev);
+ if (ret)
goto init_fail;
/* Set the CRTC's port so that the encoder component can find it */
@@ -416,6 +623,7 @@ static int malidp_bind(struct device *dev)
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
}
+ pm_runtime_put(dev);
drm_mode_config_reset(drm);
@@ -441,7 +649,9 @@ register_fail:
drm_fbdev_cma_fini(malidp->fbdev);
malidp->fbdev = NULL;
}
+ drm_kms_helper_poll_fini(drm);
fbdev_fail:
+ pm_runtime_get_sync(dev);
drm_vblank_cleanup(drm);
vblank_fail:
malidp_se_irq_fini(drm);
@@ -452,14 +662,17 @@ irq_init_fail:
bind_fail:
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
- malidp_fini(drm);
init_fail:
+ malidp_fini_sysfs(dev);
+ malidp_fini(drm);
+query_hw_fail:
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
-query_hw_fail:
- clk_disable_unprepare(hwdev->mclk);
- clk_disable_unprepare(hwdev->aclk);
- clk_disable_unprepare(hwdev->pclk);
drm_dev_unref(drm);
alloc_fail:
of_reserved_mem_device_release(dev);
@@ -471,7 +684,6 @@ static void malidp_unbind(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct malidp_drm *malidp = drm->dev_private;
- struct malidp_hw_device *hwdev = malidp->dev;
drm_dev_unregister(drm);
if (malidp->fbdev) {
@@ -479,18 +691,22 @@ static void malidp_unbind(struct device *dev)
malidp->fbdev = NULL;
}
drm_kms_helper_poll_fini(drm);
+ pm_runtime_get_sync(dev);
+ drm_vblank_cleanup(drm);
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
- drm_vblank_cleanup(drm);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
+ malidp_fini_sysfs(dev);
malidp_fini(drm);
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ else
+ malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- clk_disable_unprepare(hwdev->mclk);
- clk_disable_unprepare(hwdev->aclk);
- clk_disable_unprepare(hwdev->pclk);
drm_dev_unref(drm);
of_reserved_mem_device_release(dev);
}
@@ -533,11 +749,52 @@ static int malidp_platform_remove(struct platform_device *pdev)
return 0;
}
+static int __maybe_unused malidp_pm_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_kms_helper_poll_disable(drm);
+ console_lock();
+ drm_fbdev_cma_set_suspend(malidp->fbdev, 1);
+ console_unlock();
+ malidp->pm_state = drm_atomic_helper_suspend(drm);
+ if (IS_ERR(malidp->pm_state)) {
+ console_lock();
+ drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
+ console_unlock();
+ drm_kms_helper_poll_enable(drm);
+ return PTR_ERR(malidp->pm_state);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused malidp_pm_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct malidp_drm *malidp = drm->dev_private;
+
+ drm_atomic_helper_resume(drm, malidp->pm_state);
+ console_lock();
+ drm_fbdev_cma_set_suspend(malidp->fbdev, 0);
+ console_unlock();
+ drm_kms_helper_poll_enable(drm);
+
+ return 0;
+}
+
+static const struct dev_pm_ops malidp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume)
+ SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
+};
+
static struct platform_driver malidp_platform_driver = {
.probe = malidp_platform_probe,
.remove = malidp_platform_remove,
.driver = {
.name = "mali-dp",
+ .pm = &malidp_pm_ops,
.of_match_table = malidp_drm_of_match,
},
};
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index dbc617c6e4ef..040311ffcaec 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -24,6 +24,8 @@ struct malidp_drm {
struct drm_crtc crtc;
wait_queue_head_t wq;
atomic_t config_valid;
+ struct drm_atomic_state *pm_state;
+ u32 core_id;
};
#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
@@ -47,6 +49,17 @@ struct malidp_plane_state {
#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
+struct malidp_crtc_state {
+ struct drm_crtc_state base;
+ u32 gamma_coeffs[MALIDP_COEFFTAB_NUM_COEFFS];
+ u32 coloradj_coeffs[MALIDP_COLORADJ_NUM_COEFFS];
+ struct malidp_se_config scaler_config;
+ /* Bitfield of all the planes that have requested a scaled output. */
+ u8 scaled_planes_mask;
+};
+
+#define to_malidp_crtc_state(x) container_of(x, struct malidp_crtc_state, base)
+
int malidp_de_planes_init(struct drm_device *drm);
void malidp_de_planes_destroy(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 9f5513006eee..28360b8542f7 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -12,6 +12,7 @@
* in an attempt to provide to the rest of the driver code a unified view
*/
+#include <linux/clk.h>
#include <linux/types.h>
#include <linux/io.h>
#include <drm/drmP.h>
@@ -86,6 +87,80 @@ static const struct malidp_layer malidp550_layers[] = {
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};
+#define SE_N_SCALING_COEFFS 96
+static const u16 dp500_se_scaling_coeffs[][SE_N_SCALING_COEFFS] = {
+ [MALIDP_UPSCALING_COEFFS - 1] = {
+ 0x0000, 0x0001, 0x0007, 0x0011, 0x001e, 0x002e, 0x003f, 0x0052,
+ 0x0064, 0x0073, 0x007d, 0x0080, 0x007a, 0x006c, 0x0053, 0x002f,
+ 0x0000, 0x3fc6, 0x3f83, 0x3f39, 0x3eea, 0x3e9b, 0x3e4f, 0x3e0a,
+ 0x3dd4, 0x3db0, 0x3da2, 0x3db1, 0x3dde, 0x3e2f, 0x3ea5, 0x3f40,
+ 0x0000, 0x00e5, 0x01ee, 0x0315, 0x0456, 0x05aa, 0x0709, 0x086c,
+ 0x09c9, 0x0b15, 0x0c4a, 0x0d5d, 0x0e4a, 0x0f06, 0x0f91, 0x0fe5,
+ 0x1000, 0x0fe5, 0x0f91, 0x0f06, 0x0e4a, 0x0d5d, 0x0c4a, 0x0b15,
+ 0x09c9, 0x086c, 0x0709, 0x05aa, 0x0456, 0x0315, 0x01ee, 0x00e5,
+ 0x0000, 0x3f40, 0x3ea5, 0x3e2f, 0x3dde, 0x3db1, 0x3da2, 0x3db0,
+ 0x3dd4, 0x3e0a, 0x3e4f, 0x3e9b, 0x3eea, 0x3f39, 0x3f83, 0x3fc6,
+ 0x0000, 0x002f, 0x0053, 0x006c, 0x007a, 0x0080, 0x007d, 0x0073,
+ 0x0064, 0x0052, 0x003f, 0x002e, 0x001e, 0x0011, 0x0007, 0x0001
+ },
+ [MALIDP_DOWNSCALING_1_5_COEFFS - 1] = {
+ 0x0059, 0x004f, 0x0041, 0x002e, 0x0016, 0x3ffb, 0x3fd9, 0x3fb4,
+ 0x3f8c, 0x3f62, 0x3f36, 0x3f09, 0x3edd, 0x3eb3, 0x3e8d, 0x3e6c,
+ 0x3e52, 0x3e3f, 0x3e35, 0x3e37, 0x3e46, 0x3e61, 0x3e8c, 0x3ec5,
+ 0x3f0f, 0x3f68, 0x3fd1, 0x004a, 0x00d3, 0x0169, 0x020b, 0x02b8,
+ 0x036e, 0x042d, 0x04f2, 0x05b9, 0x0681, 0x0745, 0x0803, 0x08ba,
+ 0x0965, 0x0a03, 0x0a91, 0x0b0d, 0x0b75, 0x0bc6, 0x0c00, 0x0c20,
+ 0x0c28, 0x0c20, 0x0c00, 0x0bc6, 0x0b75, 0x0b0d, 0x0a91, 0x0a03,
+ 0x0965, 0x08ba, 0x0803, 0x0745, 0x0681, 0x05b9, 0x04f2, 0x042d,
+ 0x036e, 0x02b8, 0x020b, 0x0169, 0x00d3, 0x004a, 0x3fd1, 0x3f68,
+ 0x3f0f, 0x3ec5, 0x3e8c, 0x3e61, 0x3e46, 0x3e37, 0x3e35, 0x3e3f,
+ 0x3e52, 0x3e6c, 0x3e8d, 0x3eb3, 0x3edd, 0x3f09, 0x3f36, 0x3f62,
+ 0x3f8c, 0x3fb4, 0x3fd9, 0x3ffb, 0x0016, 0x002e, 0x0041, 0x004f
+ },
+ [MALIDP_DOWNSCALING_2_COEFFS - 1] = {
+ 0x3f19, 0x3f03, 0x3ef0, 0x3edf, 0x3ed0, 0x3ec5, 0x3ebd, 0x3eb9,
+ 0x3eb9, 0x3ebf, 0x3eca, 0x3ed9, 0x3eef, 0x3f0a, 0x3f2c, 0x3f52,
+ 0x3f7f, 0x3fb0, 0x3fe8, 0x0026, 0x006a, 0x00b4, 0x0103, 0x0158,
+ 0x01b1, 0x020d, 0x026c, 0x02cd, 0x032f, 0x0392, 0x03f4, 0x0455,
+ 0x04b4, 0x051e, 0x0585, 0x05eb, 0x064c, 0x06a8, 0x06fe, 0x074e,
+ 0x0796, 0x07d5, 0x080c, 0x0839, 0x085c, 0x0875, 0x0882, 0x0887,
+ 0x0881, 0x0887, 0x0882, 0x0875, 0x085c, 0x0839, 0x080c, 0x07d5,
+ 0x0796, 0x074e, 0x06fe, 0x06a8, 0x064c, 0x05eb, 0x0585, 0x051e,
+ 0x04b4, 0x0455, 0x03f4, 0x0392, 0x032f, 0x02cd, 0x026c, 0x020d,
+ 0x01b1, 0x0158, 0x0103, 0x00b4, 0x006a, 0x0026, 0x3fe8, 0x3fb0,
+ 0x3f7f, 0x3f52, 0x3f2c, 0x3f0a, 0x3eef, 0x3ed9, 0x3eca, 0x3ebf,
+ 0x3eb9, 0x3eb9, 0x3ebd, 0x3ec5, 0x3ed0, 0x3edf, 0x3ef0, 0x3f03
+ },
+ [MALIDP_DOWNSCALING_2_75_COEFFS - 1] = {
+ 0x3f51, 0x3f60, 0x3f71, 0x3f84, 0x3f98, 0x3faf, 0x3fc8, 0x3fe3,
+ 0x0000, 0x001f, 0x0040, 0x0064, 0x008a, 0x00b1, 0x00da, 0x0106,
+ 0x0133, 0x0160, 0x018e, 0x01bd, 0x01ec, 0x021d, 0x024e, 0x0280,
+ 0x02b2, 0x02e4, 0x0317, 0x0349, 0x037c, 0x03ad, 0x03df, 0x0410,
+ 0x0440, 0x0468, 0x048f, 0x04b3, 0x04d6, 0x04f8, 0x0516, 0x0533,
+ 0x054e, 0x0566, 0x057c, 0x0590, 0x05a0, 0x05ae, 0x05ba, 0x05c3,
+ 0x05c9, 0x05c3, 0x05ba, 0x05ae, 0x05a0, 0x0590, 0x057c, 0x0566,
+ 0x054e, 0x0533, 0x0516, 0x04f8, 0x04d6, 0x04b3, 0x048f, 0x0468,
+ 0x0440, 0x0410, 0x03df, 0x03ad, 0x037c, 0x0349, 0x0317, 0x02e4,
+ 0x02b2, 0x0280, 0x024e, 0x021d, 0x01ec, 0x01bd, 0x018e, 0x0160,
+ 0x0133, 0x0106, 0x00da, 0x00b1, 0x008a, 0x0064, 0x0040, 0x001f,
+ 0x0000, 0x3fe3, 0x3fc8, 0x3faf, 0x3f98, 0x3f84, 0x3f71, 0x3f60
+ },
+ [MALIDP_DOWNSCALING_4_COEFFS - 1] = {
+ 0x0094, 0x00a9, 0x00be, 0x00d4, 0x00ea, 0x0101, 0x0118, 0x012f,
+ 0x0148, 0x0160, 0x017a, 0x0193, 0x01ae, 0x01c8, 0x01e4, 0x01ff,
+ 0x021c, 0x0233, 0x024a, 0x0261, 0x0278, 0x028f, 0x02a6, 0x02bd,
+ 0x02d4, 0x02eb, 0x0302, 0x0319, 0x032f, 0x0346, 0x035d, 0x0374,
+ 0x038a, 0x0397, 0x03a3, 0x03af, 0x03bb, 0x03c6, 0x03d1, 0x03db,
+ 0x03e4, 0x03ed, 0x03f6, 0x03fe, 0x0406, 0x040d, 0x0414, 0x041a,
+ 0x0420, 0x041a, 0x0414, 0x040d, 0x0406, 0x03fe, 0x03f6, 0x03ed,
+ 0x03e4, 0x03db, 0x03d1, 0x03c6, 0x03bb, 0x03af, 0x03a3, 0x0397,
+ 0x038a, 0x0374, 0x035d, 0x0346, 0x032f, 0x0319, 0x0302, 0x02eb,
+ 0x02d4, 0x02bd, 0x02a6, 0x028f, 0x0278, 0x0261, 0x024a, 0x0233,
+ 0x021c, 0x01ff, 0x01e4, 0x01c8, 0x01ae, 0x0193, 0x017a, 0x0160,
+ 0x0148, 0x012f, 0x0118, 0x0101, 0x00ea, 0x00d4, 0x00be, 0x00a9
+ },
+};
+
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
static int malidp500_query_hw(struct malidp_hw_device *hwdev)
@@ -211,6 +286,88 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
return w * drm_format_plane_cpp(fmt, 0) * 8;
}
+static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
+ u32 direction,
+ u16 addr,
+ u8 coeffs_id)
+{
+ int i;
+ u16 scaling_control = MALIDP500_SE_CONTROL + MALIDP_SE_SCALING_CONTROL;
+
+ malidp_hw_write(hwdev,
+ direction | (addr & MALIDP_SE_COEFFTAB_ADDR_MASK),
+ scaling_control + MALIDP_SE_COEFFTAB_ADDR);
+ for (i = 0; i < SE_N_SCALING_COEFFS; ++i)
+ malidp_hw_write(hwdev, MALIDP_SE_SET_COEFFTAB_DATA(
+ dp500_se_scaling_coeffs[coeffs_id][i]),
+ scaling_control + MALIDP_SE_COEFFTAB_DATA);
+}
+
+static int malidp500_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct malidp_se_config *old_config)
+{
+ /* Get array indices into dp500_se_scaling_coeffs. */
+ u8 h = (u8)se_config->hcoeff - 1;
+ u8 v = (u8)se_config->vcoeff - 1;
+
+ if (WARN_ON(h >= ARRAY_SIZE(dp500_se_scaling_coeffs) ||
+ v >= ARRAY_SIZE(dp500_se_scaling_coeffs)))
+ return -EINVAL;
+
+ if ((h == v) && (se_config->hcoeff != old_config->hcoeff ||
+ se_config->vcoeff != old_config->vcoeff)) {
+ malidp500_se_write_pp_coefftab(hwdev,
+ (MALIDP_SE_V_COEFFTAB |
+ MALIDP_SE_H_COEFFTAB),
+ 0, v);
+ } else {
+ if (se_config->vcoeff != old_config->vcoeff)
+ malidp500_se_write_pp_coefftab(hwdev,
+ MALIDP_SE_V_COEFFTAB,
+ 0, v);
+ if (se_config->hcoeff != old_config->hcoeff)
+ malidp500_se_write_pp_coefftab(hwdev,
+ MALIDP_SE_H_COEFFTAB,
+ 0, h);
+ }
+
+ return 0;
+}
+
+static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct videomode *vm)
+{
+ unsigned long mclk;
+ unsigned long pxlclk = vm->pixelclock; /* Hz */
+ unsigned long htotal = vm->hactive + vm->hfront_porch +
+ vm->hback_porch + vm->hsync_len;
+ unsigned long input_size = se_config->input_w * se_config->input_h;
+ unsigned long a = 10;
+ long ret;
+
+ /*
+ * mclk = max(a, 1.5) * pxlclk
+ *
+ * To avoid floating-point arithmetic, compute with 15 instead of 1.5
+ * and divide by 10 at the end.
+ */
+ if (se_config->scale_enable) {
+ a = 15 * input_size / (htotal * se_config->output_h);
+ if (a < 15)
+ a = 15;
+ }
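+ /*
+ * Example: scaling a 1920x1080 source into a 1280x720 window on a
+ * mode with htotal = 2200 gives a = 15 * 2073600 / 1584000 = 19,
+ * i.e. mclk = 1.9 * pxlclk.
+ */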
+ mclk = a * pxlclk / 10;
+ ret = clk_get_rate(hwdev->mclk);
+ if (ret < mclk) {
+ DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
+ mclk / 1000);
+ return -EINVAL;
+ }
+ return ret;
+}
+
static int malidp550_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -384,6 +541,53 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
return w * bytes_per_col;
}
+static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct malidp_se_config *old_config)
+{
+ u32 mask = MALIDP550_SE_CTL_VCSEL(MALIDP550_SE_CTL_SEL_MASK) |
+ MALIDP550_SE_CTL_HCSEL(MALIDP550_SE_CTL_SEL_MASK);
+ u32 new_value = MALIDP550_SE_CTL_VCSEL(se_config->vcoeff) |
+ MALIDP550_SE_CTL_HCSEL(se_config->hcoeff);
+
+ malidp_hw_clearbits(hwdev, mask, MALIDP550_SE_CONTROL);
+ malidp_hw_setbits(hwdev, new_value, MALIDP550_SE_CONTROL);
+ return 0;
+}
+
+static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct videomode *vm)
+{
+ unsigned long mclk;
+ unsigned long pxlclk = vm->pixelclock;
+ unsigned long htotal = vm->hactive + vm->hfront_porch +
+ vm->hback_porch + vm->hsync_len;
+ unsigned long numerator = 1, denominator = 1;
+ long ret;
+
+ if (se_config->scale_enable) {
+ numerator = max(se_config->input_w, se_config->output_w) *
+ se_config->input_h;
+ numerator += se_config->output_w *
+ (se_config->output_h -
+ min(se_config->input_h, se_config->output_h));
+ denominator = (htotal - 2) * se_config->output_h;
+ }
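+ /*
+ * Example: scaling a 1920x1080 source into a 1280x720 window on a
+ * mode with htotal = 2200 gives mclk = pxlclk * 2073600 / 1582560,
+ * roughly 1.31 * pxlclk.
+ */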
+
+ /* mclk can't be slower than pxlclk. */
+ if (numerator < denominator)
+ numerator = denominator = 1;
+ mclk = (pxlclk * numerator) / denominator;
+ ret = clk_get_rate(hwdev->mclk);
+ if (ret < mclk) {
+ DRM_DEBUG_DRIVER("mclk requirement of %lu kHz can't be met.\n",
+ mclk / 1000);
+ return -EINVAL;
+ }
+ return ret;
+}
+
static int malidp650_query_hw(struct malidp_hw_device *hwdev)
{
u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
@@ -415,6 +619,7 @@ static int malidp650_query_hw(struct malidp_hw_device *hwdev)
const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
[MALIDP_500] = {
.map = {
+ .coeffs_base = MALIDP500_COEFFS_BASE,
.se_base = MALIDP500_SE_BASE,
.dc_base = MALIDP500_DC_BASE,
.out_depth_base = MALIDP500_OUTPUT_DEPTH,
@@ -447,10 +652,13 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.set_config_valid = malidp500_set_config_valid,
.modeset = malidp500_modeset,
.rotmem_required = malidp500_rotmem_required,
+ .se_set_scaling_coeffs = malidp500_se_set_scaling_coeffs,
+ .se_calc_mclk = malidp500_se_calc_mclk,
.features = MALIDP_DEVICE_LV_HAS_3_STRIDES,
},
[MALIDP_550] = {
.map = {
+ .coeffs_base = MALIDP550_COEFFS_BASE,
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
@@ -481,10 +689,13 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
+ .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
+ .se_calc_mclk = malidp550_se_calc_mclk,
.features = 0,
},
[MALIDP_650] = {
.map = {
+ .coeffs_base = MALIDP550_COEFFS_BASE,
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
@@ -516,6 +727,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
.rotmem_required = malidp550_rotmem_required,
+ .se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
+ .se_calc_mclk = malidp550_se_calc_mclk,
.features = 0,
},
};
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 00974b59407d..849ad9a30c3a 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -61,12 +61,34 @@ struct malidp_layer {
u16 stride_offset; /* Offset to the first stride register. */
};
+enum malidp_scaling_coeff_set {
+ MALIDP_UPSCALING_COEFFS = 1,
+ MALIDP_DOWNSCALING_1_5_COEFFS = 2,
+ MALIDP_DOWNSCALING_2_COEFFS = 3,
+ MALIDP_DOWNSCALING_2_75_COEFFS = 4,
+ MALIDP_DOWNSCALING_4_COEFFS = 5,
+};
+
+struct malidp_se_config {
+ u8 scale_enable : 1;
+ u8 enhancer_enable : 1;
+ u8 hcoeff : 3;
+ u8 vcoeff : 3;
+ u8 plane_src_id;
+ u16 input_w, input_h;
+ u16 output_w, output_h;
+ u32 h_init_phase, h_delta_phase;
+ u32 v_init_phase, v_delta_phase;
+};
+
/* regmap features */
#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
struct malidp_hw_regmap {
/* address offset of the DE register bank */
/* is always 0x0000 */
+ /* address offset of the DE coefficients registers */
+ const u16 coeffs_base;
/* address offset of the SE registers bank */
const u16 se_base;
/* address offset of the DC registers bank */
@@ -151,11 +173,22 @@ struct malidp_hw_device {
*/
int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+ int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct malidp_se_config *old_config);
+
+ long (*se_calc_mclk)(struct malidp_hw_device *hwdev,
+ struct malidp_se_config *se_config,
+ struct videomode *vm);
+
u8 features;
u8 min_line_size;
u16 max_line_size;
+ /* track the device PM state */
+ bool pm_suspended;
+
/* size of memory used for rotating layers, up to two banks available */
u32 rotation_memory[2];
};
@@ -173,12 +206,14 @@ extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
{
+ WARN_ON(hwdev->pm_suspended);
return readl(hwdev->regs + reg);
}
static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
u32 value, u32 reg)
{
+ WARN_ON(hwdev->pm_suspended);
writel(value, hwdev->regs + reg);
}
@@ -243,6 +278,47 @@ static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
return !(pitch & (hwdev->map.bus_align_bytes - 1));
}
+/* U16.16 */
+#define FP_1_00000 0x00010000 /* 1.0 */
+#define FP_0_66667 0x0000AAAA /* 0.6667 = 1/1.5 */
+#define FP_0_50000 0x00008000 /* 0.5 = 1/2 */
+#define FP_0_36363 0x00005D17 /* 0.36363 = 1/2.75 */
+#define FP_0_25000 0x00004000 /* 0.25 = 1/4 */
+
+static inline enum malidp_scaling_coeff_set
+malidp_se_select_coeffs(u32 upscale_factor)
+{
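+ /*
+ * Example: a 0.4x downscale (upscale_factor = 0x6666) lies between
+ * 1/2.75 and 1/2, so MALIDP_DOWNSCALING_2_75_COEFFS is selected.
+ */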
+ return (upscale_factor >= FP_1_00000) ? MALIDP_UPSCALING_COEFFS :
+ (upscale_factor >= FP_0_66667) ? MALIDP_DOWNSCALING_1_5_COEFFS :
+ (upscale_factor >= FP_0_50000) ? MALIDP_DOWNSCALING_2_COEFFS :
+ (upscale_factor >= FP_0_36363) ? MALIDP_DOWNSCALING_2_75_COEFFS :
+ MALIDP_DOWNSCALING_4_COEFFS;
+}
+
+#undef FP_0_25000
+#undef FP_0_36363
+#undef FP_0_50000
+#undef FP_0_66667
+#undef FP_1_00000
+
+static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
+{
+ static const s32 enhancer_coeffs[] = {
+ -8, -8, -8, -8, 128, -8, -8, -8, -8
+ };
+ u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
+ MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
+ u32 image_enh = hwdev->map.se_base +
+ ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
+ u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
+ int i;
+
+ malidp_hw_write(hwdev, val, image_enh);
+ for (i = 0; i < ARRAY_SIZE(enhancer_coeffs); ++i)
+ malidp_hw_write(hwdev, enhancer_coeffs[i], enh_coeffs + i * 4);
+}
+
/*
* background color components are defined as 12bits values,
* they will be shifted right when stored on hardware that
@@ -252,4 +328,9 @@ static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
#define MALIDP_BGND_COLOR_G 0x000
#define MALIDP_BGND_COLOR_B 0x000
+#define MALIDP_COLORADJ_NUM_COEFFS 12
+#define MALIDP_COEFFTAB_NUM_COEFFS 64
+
+#define MALIDP_GAMMA_LUT_SIZE 4096
+
#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index d5aec082294c..814fda23cead 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -16,6 +16,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "malidp_hw.h"
#include "malidp_drv.h"
@@ -24,6 +25,9 @@
#define MALIDP_LAYER_FORMAT 0x000
#define MALIDP_LAYER_CONTROL 0x004
#define LAYER_ENABLE (1 << 0)
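+/*
+ * The FLOWCFG field of LAYER_CONTROL is set to LAYER_FLOWCFG_SCALE_SE when
+ * the layer's output is routed through the scaling engine.
+ */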
+#define LAYER_FLOWCFG_MASK 7
+#define LAYER_FLOWCFG(x) (((x) & LAYER_FLOWCFG_MASK) << 1)
+#define LAYER_FLOWCFG_SCALE_SE 3
#define LAYER_ROT_OFFSET 8
#define LAYER_H_FLIP (1 << 10)
#define LAYER_V_FLIP (1 << 11)
@@ -60,6 +64,27 @@ static void malidp_de_plane_destroy(struct drm_plane *plane)
devm_kfree(plane->dev->dev, mp);
}
+/*
+ * Replicate what the default ->reset hook does: free the state pointer and
+ * allocate a new empty object. We just need enough space to store
+ * a malidp_plane_state instead of a drm_plane_state.
+ */
+static void malidp_plane_reset(struct drm_plane *plane)
+{
+ struct malidp_plane_state *state = to_malidp_plane_state(plane->state);
+
+ if (state)
+ __drm_atomic_helper_plane_destroy_state(&state->base);
+ kfree(state);
+ plane->state = NULL;
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.plane = plane;
+ state->base.rotation = DRM_ROTATE_0;
+ plane->state = &state->base;
+ }
+}
+
static struct
drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
{
@@ -90,26 +115,71 @@ static void malidp_destroy_plane_state(struct drm_plane *plane,
kfree(m_state);
}
+static void malidp_plane_atomic_print_state(struct drm_printer *p,
+ const struct drm_plane_state *state)
+{
+ struct malidp_plane_state *ms = to_malidp_plane_state(state);
+
+ drm_printf(p, "\trotmem_size=%u\n", ms->rotmem_size);
+ drm_printf(p, "\tformat_id=%u\n", ms->format);
+ drm_printf(p, "\tn_planes=%u\n", ms->n_planes);
+}
+
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.set_property = drm_atomic_helper_plane_set_property,
.destroy = malidp_de_plane_destroy,
- .reset = drm_atomic_helper_plane_reset,
+ .reset = malidp_plane_reset,
.atomic_duplicate_state = malidp_duplicate_plane_state,
.atomic_destroy_state = malidp_destroy_plane_state,
+ .atomic_print_state = malidp_plane_atomic_print_state,
};
+static int malidp_se_check_scaling(struct malidp_plane *mp,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state =
+ drm_atomic_get_existing_crtc_state(state->state, state->crtc);
+ struct malidp_crtc_state *mc;
+ struct drm_rect clip = { 0 };
+ u32 src_w, src_h;
+ int ret;
+
+ if (!crtc_state)
+ return -EINVAL;
+
+ mc = to_malidp_crtc_state(crtc_state);
+
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ ret = drm_plane_helper_check_state(state, &clip, 0, INT_MAX, true, true);
+ if (ret)
+ return ret;
+
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ if ((state->crtc_w == src_w) && (state->crtc_h == src_h)) {
+ /* Scaling not necessary for this plane. */
+ mc->scaled_planes_mask &= ~(mp->layer->id);
+ return 0;
+ }
+
+ if (mp->layer->id & (DE_SMART | DE_GRAPHICS2))
+ return -EINVAL;
+
+ mc->scaled_planes_mask |= mp->layer->id;
+ /* Defer scaling requirements calculation to the crtc check. */
+ return 0;
+}
+
static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
- struct drm_crtc_state *crtc_state;
struct drm_framebuffer *fb;
- struct drm_rect clip = { 0 };
int i, ret;
- u32 src_w, src_h;
if (!state->crtc || !state->fb)
return 0;
@@ -130,9 +200,6 @@ static int malidp_de_plane_check(struct drm_plane *plane,
}
}
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
-
if ((state->crtc_w > mp->hwdev->max_line_size) ||
(state->crtc_h > mp->hwdev->max_line_size) ||
(state->crtc_w < mp->hwdev->min_line_size) ||
@@ -149,22 +216,16 @@ static int malidp_de_plane_check(struct drm_plane *plane,
(state->fb->pitches[1] != state->fb->pitches[2]))
return -EINVAL;
+ ret = malidp_se_check_scaling(mp, state);
+ if (ret)
+ return ret;
+
/* packed RGB888 / BGR888 can't be rotated or flipped */
if (state->rotation != DRM_ROTATE_0 &&
(fb->format->format == DRM_FORMAT_RGB888 ||
fb->format->format == DRM_FORMAT_BGR888))
return -EINVAL;
- crtc_state = drm_atomic_get_existing_crtc_state(state->state, state->crtc);
- clip.x2 = crtc_state->adjusted_mode.hdisplay;
- clip.y2 = crtc_state->adjusted_mode.vdisplay;
- ret = drm_plane_helper_check_state(state, &clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- true, true);
- if (ret)
- return ret;
-
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
@@ -269,6 +330,16 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val &= ~LAYER_COMP_MASK;
val |= LAYER_COMP_PIXEL;
+ val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
+ if (plane->state->crtc) {
+ struct malidp_crtc_state *m =
+ to_malidp_crtc_state(plane->state->crtc->state);
+
+ if (m->scaler_config.scale_enable &&
+ m->scaler_config.plane_src_id == mp->layer->id)
+ val |= LAYER_FLOWCFG(LAYER_FLOWCFG_SCALE_SE);
+ }
+
/* set the 'enable layer' bit */
val |= LAYER_ENABLE;
@@ -281,7 +352,8 @@ static void malidp_de_plane_disable(struct drm_plane *plane,
{
struct malidp_plane *mp = to_malidp_plane(plane);
- malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
+ malidp_hw_clearbits(mp->hwdev,
+ LAYER_ENABLE | LAYER_FLOWCFG(LAYER_FLOWCFG_MASK),
mp->layer->base + MALIDP_LAYER_CONTROL);
}
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index b816067a65c5..2039f857f77d 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -63,6 +63,8 @@
/* bit masks that are common between products */
#define MALIDP_CFG_VALID (1 << 0)
+#define MALIDP_DISP_FUNC_GAMMA (1 << 0)
+#define MALIDP_DISP_FUNC_CADJ (1 << 4)
#define MALIDP_DISP_FUNC_ILACED (1 << 8)
/* register offsets for IRQ management */
@@ -99,6 +101,58 @@
#define MALIDP_PRODUCT_ID(__core_id) ((u32)(__core_id) >> 16)
+/* register offsets relative to MALIDP5x0_COEFFS_BASE */
+#define MALIDP_COLOR_ADJ_COEF 0x00000
+#define MALIDP_COEF_TABLE_ADDR 0x00030
+#define MALIDP_COEF_TABLE_DATA 0x00034
+
+/* Scaling engine registers and masks. */
+#define MALIDP_SE_SCALING_EN (1 << 0)
+#define MALIDP_SE_ALPHA_EN (1 << 1)
+#define MALIDP_SE_ENH_MASK 3
+#define MALIDP_SE_ENH(x) (((x) & MALIDP_SE_ENH_MASK) << 2)
+#define MALIDP_SE_RGBO_IF_EN (1 << 4)
+#define MALIDP550_SE_CTL_SEL_MASK 7
+#define MALIDP550_SE_CTL_VCSEL(x) \
+ (((x) & MALIDP550_SE_CTL_SEL_MASK) << 20)
+#define MALIDP550_SE_CTL_HCSEL(x) \
+ (((x) & MALIDP550_SE_CTL_SEL_MASK) << 16)
+
+/* Blocks with offsets from SE_CONTROL register. */
+#define MALIDP_SE_LAYER_CONTROL 0x14
+#define MALIDP_SE_L0_IN_SIZE 0x00
+#define MALIDP_SE_L0_OUT_SIZE 0x04
+#define MALIDP_SE_SET_V_SIZE(x) (((x) & 0x1fff) << 16)
+#define MALIDP_SE_SET_H_SIZE(x) (((x) & 0x1fff) << 0)
+#define MALIDP_SE_SCALING_CONTROL 0x24
+#define MALIDP_SE_H_INIT_PH 0x00
+#define MALIDP_SE_H_DELTA_PH 0x04
+#define MALIDP_SE_V_INIT_PH 0x08
+#define MALIDP_SE_V_DELTA_PH 0x0c
+#define MALIDP_SE_COEFFTAB_ADDR 0x10
+#define MALIDP_SE_COEFFTAB_ADDR_MASK 0x7f
+#define MALIDP_SE_V_COEFFTAB (1 << 8)
+#define MALIDP_SE_H_COEFFTAB (1 << 9)
+#define MALIDP_SE_SET_V_COEFFTAB_ADDR(x) \
+ (MALIDP_SE_V_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
+#define MALIDP_SE_SET_H_COEFFTAB_ADDR(x) \
+ (MALIDP_SE_H_COEFFTAB | ((x) & MALIDP_SE_COEFFTAB_ADDR_MASK))
+#define MALIDP_SE_COEFFTAB_DATA 0x14
+#define MALIDP_SE_COEFFTAB_DATA_MASK 0x3fff
+#define MALIDP_SE_SET_COEFFTAB_DATA(x) \
+ ((x) & MALIDP_SE_COEFFTAB_DATA_MASK)
+/* Enhancement coefficients register offset */
+#define MALIDP_SE_IMAGE_ENH 0x3C
+/* ENH_LIMITS offset 0x0 */
+#define MALIDP_SE_ENH_LOW_LEVEL 24
+#define MALIDP_SE_ENH_HIGH_LEVEL 63
+#define MALIDP_SE_ENH_LIMIT_MASK 0xfff
+#define MALIDP_SE_SET_ENH_LIMIT_LOW(x) \
+ ((x) & MALIDP_SE_ENH_LIMIT_MASK)
+#define MALIDP_SE_SET_ENH_LIMIT_HIGH(x) \
+ (((x) & MALIDP_SE_ENH_LIMIT_MASK) << 16)
+#define MALIDP_SE_ENH_COEFF0 0x04
+
/* register offsets and bits specific to DP500 */
#define MALIDP500_ADDR_SPACE_SIZE 0x01000
#define MALIDP500_DC_BASE 0x00000
@@ -120,6 +174,18 @@
#define MALIDP500_COLOR_ADJ_COEF 0x00078
#define MALIDP500_COEF_TABLE_ADDR 0x000a8
#define MALIDP500_COEF_TABLE_DATA 0x000ac
+
+/*
+ * The YUV2RGB coefficients on the DP500 are not in the video layer's register
+ * block. They belong in a separate block above the layer's registers, hence
+ * the negative offset.
+ */
+#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
+/*
+ * To match DP550/650, the start of the coeffs registers is
+ * at COLORADJ_COEFF0 instead of at YUV_RGB_COEF1.
+ */
+#define MALIDP500_COEFFS_BASE 0x00078
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
#define MALIDP500_DE_LG1_BASE 0x00200
@@ -127,6 +193,7 @@
#define MALIDP500_DE_LG2_BASE 0x00300
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
#define MALIDP500_SE_BASE 0x00c00
+#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_PTR_BASE 0x00e0c
#define MALIDP500_DC_IRQ_BASE 0x00f00
#define MALIDP500_CONFIG_VALID 0x00f00
@@ -145,9 +212,7 @@
#define MALIDP550_DE_DISP_SIDEBAND 0x00040
#define MALIDP550_DE_BGND_COLOR 0x00044
#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
-#define MALIDP550_DE_COLOR_COEF 0x00050
-#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
-#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
+#define MALIDP550_COEFFS_BASE 0x00050
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
#define MALIDP550_DE_LV2_BASE 0x00200
@@ -158,6 +223,7 @@
#define MALIDP550_DE_LS_PTR_BASE 0x0042c
#define MALIDP550_DE_PERF_BASE 0x00500
#define MALIDP550_SE_BASE 0x08000
+#define MALIDP550_SE_CONTROL 0x08010
#define MALIDP550_DC_BASE 0x0c000
#define MALIDP550_DC_CONTROL 0x0c010
#define MALIDP550_DC_CONFIG_REQ (1 << 16)
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 1597458d884e..d6c2a5d190eb 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -529,10 +529,10 @@ static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
.map_dma_buf = armada_gem_prime_map_dma_buf,
.unmap_dma_buf = armada_gem_prime_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .kmap_atomic = armada_gem_dmabuf_no_kmap,
- .kunmap_atomic = armada_gem_dmabuf_no_kunmap,
- .kmap = armada_gem_dmabuf_no_kmap,
- .kunmap = armada_gem_dmabuf_no_kunmap,
+ .map_atomic = armada_gem_dmabuf_no_kmap,
+ .unmap_atomic = armada_gem_dmabuf_no_kunmap,
+ .map = armada_gem_dmabuf_no_kmap,
+ .unmap = armada_gem_dmabuf_no_kunmap,
.mmap = armada_gem_dmabuf_mmap,
};
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 50c910efa13d..e879496b8a42 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver ast_bo_driver = {
.verify_access = ast_bo_verify_access,
.io_mem_reserve = &ast_ttm_io_mem_reserve,
.io_mem_free = &ast_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int ast_mm_init(struct ast_private *ast)
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index f987b4572d4a..65a3bd7a0c00 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -221,7 +221,8 @@ err_encoder_cleanup:
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
struct device_node *remote;
- int ret, endpoint = 0;
+ int ret = -ENODEV;
+ int endpoint = 0;
while (true) {
/* Loop thru possible multiple connections to the output */
@@ -236,7 +237,5 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
return ret;
}
- if (!endpoint)
- return -ENODEV;
return ret;
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 857755ac2d70..c4cadb638460 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -205,6 +205,7 @@ struct ttm_bo_driver bochs_bo_driver = {
.verify_access = bochs_bo_verify_access,
.io_mem_reserve = &bochs_ttm_io_mem_reserve,
.io_mem_free = &bochs_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int bochs_mm_init(struct bochs_device *bochs)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 80a6d09cf73d..8737de8c1c52 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1973,6 +1973,20 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
return 0;
}
+static bool dw_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *orig_mode,
+ struct drm_display_mode *mode)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+ struct drm_connector *connector = &hdmi->connector;
+ enum drm_mode_status status;
+
+ status = dw_hdmi_connector_mode_valid(connector, mode);
+ if (status != MODE_OK)
+ return false;
+ return true;
+}
+
static void dw_hdmi_bridge_mode_set(struct drm_bridge *bridge,
struct drm_display_mode *orig_mode,
struct drm_display_mode *mode)
@@ -2014,6 +2028,7 @@ static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
+ .mode_fixup = dw_hdmi_bridge_mode_fixup,
};
static irqreturn_t dw_hdmi_i2c_irq(struct dw_hdmi *hdmi)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index f53aa8f4a143..93dbcd38355d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
.verify_access = cirrus_bo_verify_access,
.io_mem_reserve = &cirrus_ttm_io_mem_reserve,
.io_mem_free = &cirrus_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int cirrus_mm_init(struct cirrus_device *cirrus)
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9fb65b736a90..954eb848b5e2 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -403,10 +403,10 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
.map_dma_buf = drm_gem_map_dma_buf,
.unmap_dma_buf = drm_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .kmap = drm_gem_dmabuf_kmap,
- .kmap_atomic = drm_gem_dmabuf_kmap_atomic,
- .kunmap = drm_gem_dmabuf_kunmap,
- .kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+ .map = drm_gem_dmabuf_kmap,
+ .map_atomic = drm_gem_dmabuf_kmap_atomic,
+ .unmap = drm_gem_dmabuf_kunmap,
+ .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
.vunmap = drm_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 3feef0659940..3e88fa24eab3 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -476,7 +476,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
list_for_each_entry(prop_enum, &property->enum_list, head) {
enum_count++;
- if (out_resp->count_enum_blobs <= enum_count)
+ if (out_resp->count_enum_blobs < enum_count)
continue;
if (copy_to_user(&enum_ptr[copied].value,
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index cc1731c5289c..71cee4e9fefb 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -5,6 +5,7 @@ config DRM_ETNAVIV
depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
depends on MMU
select SHMEM
+ select SYNC_FILE
select TMPFS
select IOMMU_API
select IOMMU_SUPPORT
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 587e45043542..5255278dde56 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx = file->driver_priv;
@@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = {
DRIVER_PRIME |
DRIVER_RENDER,
.open = etnaviv_open,
- .preclose = etnaviv_preclose,
+ .postclose = etnaviv_postclose,
.gem_free_object_unlocked = etnaviv_gem_free_object,
.gem_vm_ops = &vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = {
.desc = "etnaviv DRM",
.date = "20151214",
.major = 1,
- .minor = 0,
+ .minor = 1,
};
/*
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index e63ff116a3b3..c4a091e87426 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -20,6 +20,7 @@
#include <linux/reservation.h>
#include "etnaviv_drv.h"
+struct dma_fence;
struct etnaviv_gem_ops;
struct etnaviv_gem_object;
@@ -104,9 +105,10 @@ struct etnaviv_gem_submit {
struct drm_device *dev;
struct etnaviv_gpu *gpu;
struct ww_acquire_ctx ticket;
- u32 fence;
+ struct dma_fence *fence;
unsigned int nr_bos;
struct etnaviv_gem_submit_bo bos[0];
+ u32 flags;
};
int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 726090d7a6ac..e1909429837e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -14,7 +14,9 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
+#include <linux/sync_file.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
@@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+ bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
- ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+ ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+ explicit);
if (ret)
break;
}
@@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
}
ww_acquire_fini(&submit->ticket);
+ dma_fence_put(submit->fence);
kfree(submit);
}
@@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct etnaviv_gem_submit *submit;
struct etnaviv_cmdbuf *cmdbuf;
struct etnaviv_gpu *gpu;
+ struct dma_fence *in_fence = NULL;
+ struct sync_file *sync_file = NULL;
+ int out_fence_fd = -1;
void *stream;
int ret;
@@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
+ if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+ DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+ return -EINVAL;
+ }
+
/*
* Copy the command submission and bo array to kernel space in
* one go, and do this outside of any locks.
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_cmds;
}
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto err_submit_cmds;
+ }
+ }
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto err_submit_cmds;
}
+ submit->flags = args->flags;
+
ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
if (ret)
goto err_submit_objects;
@@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
}
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+ in_fence = sync_file_get_fence(args->fence_fd);
+ if (!in_fence) {
+ ret = -EINVAL;
+ goto err_submit_objects;
+ }
+
+ /*
+ * Wait if the fence is from a foreign context, or if the fence
+ * array contains any fence from a foreign context.
+ */
+ if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+ ret = dma_fence_wait(in_fence, true);
+ if (ret)
+ goto err_submit_objects;
+ }
+ }
+
ret = submit_fence_sync(submit);
if (ret)
goto err_submit_objects;
@@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret == 0)
cmdbuf = NULL;
- args->fence = submit->fence;
+ if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+ /*
+ * This can be improved: ideally we want to allocate the sync
+ * file before kicking off the GPU job and just attach the
+ * fence to the sync file here, eliminating the ENOMEM
+ * possibility at this stage.
+ */
+ sync_file = sync_file_create(submit->fence);
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ fd_install(out_fence_fd, sync_file->file);
+ }
+
+ args->fence_fd = out_fence_fd;
+ args->fence = submit->fence->seqno;
out:
submit_unpin_objects(submit);
@@ -419,9 +476,13 @@ out:
flush_workqueue(priv->wq);
err_submit_objects:
+ if (in_fence)
+ dma_fence_put(in_fence);
submit_cleanup(submit);
err_submit_cmds:
+ if (ret && (out_fence_fd >= 0))
+ put_unused_fd(out_fence_fd);
/* if we still own the cmdbuf */
if (cmdbuf)
etnaviv_cmdbuf_free(cmdbuf);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index da48819ff2e6..9a9c40717801 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
#include <linux/dma-fence.h>
#include <linux/moduleparam.h>
#include <linux/of_device.h>
+#include <linux/thermal.h>
#include "etnaviv_cmdbuf.h"
#include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
}
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+ unsigned int fscale = 1 << (6 - gpu->freq_scale);
+ u32 clock;
+
+ clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+ VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+ etnaviv_gpu_load_clock(gpu, clock);
+}
+
static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
{
u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
timeout = jiffies + msecs_to_jiffies(1000);
while (time_is_after_jiffies(timeout)) {
- control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
/* enable clock */
- etnaviv_gpu_load_clock(gpu, control);
+ etnaviv_gpu_update_clock(gpu);
+
+ control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
/* Wait for stable clock. Vivante's code waited for 1ms */
usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
}
/* We rely on the GPU running, so program the clock */
- control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
- /* enable clock */
- etnaviv_gpu_load_clock(gpu, control);
+ etnaviv_gpu_update_clock(gpu);
return 0;
}
@@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
{
struct etnaviv_fence *f;
+ /*
+ * GPU lock must already be held, otherwise fence completion order might
+ * not match the seqno order assigned here.
+ */
+ lockdep_assert_held(&gpu->lock);
+
f = kzalloc(sizeof(*f), GFP_KERNEL);
if (!f)
return NULL;
@@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
}
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive)
+ unsigned int context, bool exclusive, bool explicit)
{
struct reservation_object *robj = etnaviv_obj->resv;
struct reservation_object_list *fobj;
@@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
return ret;
}
+ if (explicit)
+ return 0;
+
/*
* If we have any shared fences, then the exclusive fence
* should be ignored as it will already have been signalled.
@@ -1317,12 +1333,12 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
if (!fence) {
event_free(gpu, event);
ret = -ENOMEM;
- goto out_pm_put;
+ goto out_unlock;
}
gpu->event[event].fence = fence;
- submit->fence = fence->seqno;
- gpu->active_fence = submit->fence;
+ submit->fence = dma_fence_get(fence);
+ gpu->active_fence = submit->fence->seqno;
if (gpu->lastctx != cmdbuf->ctx) {
gpu->mmu->need_flush = true;
@@ -1357,6 +1373,7 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
hangcheck_timer_reset(gpu);
ret = 0;
+out_unlock:
mutex_unlock(&gpu->lock);
out_pm_put:
@@ -1526,17 +1543,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
#ifdef CONFIG_PM
static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
{
- u32 clock;
int ret;
ret = mutex_lock_killable(&gpu->lock);
if (ret)
return ret;
- clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
- VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
- etnaviv_gpu_load_clock(gpu, clock);
+ etnaviv_gpu_update_clock(gpu);
etnaviv_gpu_hw_init(gpu);
gpu->switch_context = true;
@@ -1548,6 +1561,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
}
#endif
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
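+ /* states 0-6: state N halves the FSCALE value N times in
+ * etnaviv_gpu_update_clock(), from 0x40 down to 0x01.
+ */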
+ *state = 6;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long *state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ *state = gpu->freq_scale;
+
+ return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+ unsigned long state)
+{
+ struct etnaviv_gpu *gpu = cdev->devdata;
+
+ mutex_lock(&gpu->lock);
+ gpu->freq_scale = state;
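+ /* if the GPU is runtime-suspended, the new scale is applied on
+ * resume via etnaviv_gpu_hw_resume() -> etnaviv_gpu_update_clock().
+ */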
+ if (!pm_runtime_suspended(gpu->dev))
+ etnaviv_gpu_update_clock(gpu);
+ mutex_unlock(&gpu->lock);
+
+ return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+ .get_max_state = etnaviv_gpu_cooling_get_max_state,
+ .get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+ .set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
static int etnaviv_gpu_bind(struct device *dev, struct device *master,
void *data)
{
@@ -1556,13 +1610,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
int ret;
+ gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+ (char *)dev_name(dev), gpu, &cooling_ops);
+ if (IS_ERR(gpu->cooling))
+ return PTR_ERR(gpu->cooling);
+
#ifdef CONFIG_PM
ret = pm_runtime_get_sync(gpu->dev);
#else
ret = etnaviv_gpu_clk_enable(gpu);
#endif
- if (ret < 0)
+ if (ret < 0) {
+ thermal_cooling_device_unregister(gpu->cooling);
return ret;
+ }
gpu->drm = drm;
gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1677,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
}
gpu->drm = NULL;
+
+ thermal_cooling_device_unregister(gpu->cooling);
+ gpu->cooling = NULL;
}
static const struct component_ops gpu_ops = {
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 1c0606ea7d5e..9227a9740447 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
struct etnaviv_gpu {
struct drm_device *drm;
+ struct thermal_cooling_device *cooling;
struct device *dev;
struct mutex lock;
struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
u32 hangcheck_fence;
u32 hangcheck_dma_addr;
struct work_struct recover_work;
+ unsigned int freq_scale;
};
static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
@@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
- unsigned int context, bool exclusive);
+ unsigned int context, bool exclusive, bool explicit);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index b7d7721e72fa..40af17ec6312 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -285,9 +285,6 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
int ret;
- if (vgpu->failsafe)
- return 0;
-
if (WARN_ON(bytes > 4))
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 6f972afbdbc3..41b2c3aaa04a 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -616,9 +616,6 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
{
struct decode_info *d_info;
- if (ring_id >= I915_NUM_ENGINES)
- return INVALID_OP;
-
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
@@ -661,9 +658,6 @@ static inline void print_opcode(u32 cmd, int ring_id)
struct decode_info *d_info;
int i;
- if (ring_id >= I915_NUM_ENGINES)
- return;
-
d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -1215,7 +1209,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
if (!info->async_flip)
return 0;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1243,7 +1237,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
info->surf_val << 12);
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
info->stride_val);
set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1267,7 +1261,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
if (IS_BROADWELL(dev_priv))
return gen8_decode_mi_display_flip(s, info);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1278,7 +1272,9 @@ static int check_mi_display_flip(struct parser_exec_state *s,
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_check_mi_display_flip(s, info);
return -ENODEV;
}
@@ -1289,7 +1285,9 @@ static int update_plane_mmio_from_mi_display_flip(
{
struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
- if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv)
+ || IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv))
return gen8_update_plane_mmio_from_mi_display_flip(s, info);
return -ENODEV;
}
@@ -1569,7 +1567,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
return 0;
@@ -2478,7 +2477,7 @@ static int cmd_parser_exec(struct parser_exec_state *s)
t1 = get_cycles();
- memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));
+ s_before_advance_custom = *s;
if (info->handler) {
ret = info->handler(s);
@@ -2604,6 +2603,9 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
struct parser_exec_state s;
int ret = 0;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
/* ring base is page aligned */
if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
@@ -2618,14 +2620,14 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
- s.vgpu = wa_ctx->workload->vgpu;
- s.ring_id = wa_ctx->workload->ring_id;
+ s.vgpu = workload->vgpu;
+ s.ring_id = workload->ring_id;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
s.ring_tail = gma_tail;
s.rb_va = wa_ctx->indirect_ctx.shadow_va;
- s.workload = wa_ctx->workload;
+ s.workload = workload;
ret = ip_gma_set(&s, gma_head);
if (ret)
@@ -2708,12 +2710,15 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ctx_size = wa_ctx->indirect_ctx.size;
unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
struct drm_i915_gem_object *obj;
int ret = 0;
void *map;
- obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -2733,8 +2738,8 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
goto unmap_src;
}
- ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
- wa_ctx->workload->vgpu->gtt.ggtt_mm,
+ ret = copy_gma_to_hva(workload->vgpu,
+ workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
if (ret < 0) {
@@ -2772,7 +2777,10 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ret;
- struct intel_vgpu *vgpu = wa_ctx->workload->vgpu;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ struct intel_vgpu *vgpu = workload->vgpu;
if (wa_ctx->indirect_ctx.size == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 5419ae6ec633..e0261fcc5b50 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -161,8 +161,9 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
#define DPCD_HEADER_SIZE 0xb
+/* let the virtual display support DP 1.2 */
static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
- 0x11, 0x0a, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
@@ -172,26 +173,64 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
+ vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+ SKL_FUSE_DOWNLOAD_STATUS |
+ SKL_FUSE_PG0_DIST_STATUS |
+ SKL_FUSE_PG1_DIST_STATUS |
+ SKL_FUSE_PG2_DIST_STATUS;
+ vgpu_vreg(vgpu, LCPLL1_CTL) |=
+ LCPLL_PLL_ENABLE |
+ LCPLL_PLL_LOCK;
+ vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+
+ }
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
- vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_B << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+ vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_C << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+ ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
+ TRANS_DDI_PORT_MASK);
+ vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (PORT_D << TRANS_DDI_PORT_SHIFT) |
+ TRANS_DDI_FUNC_ENABLE);
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
}
- if (IS_SKYLAKE(dev_priv) &&
+ if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
}
@@ -353,7 +392,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
clean_virtual_dp_monitor(vgpu, PORT_D);
else
clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -375,7 +414,7 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
intel_vgpu_init_i2c_edid(vgpu);
- if (IS_SKYLAKE(dev_priv))
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
resolution);
else
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f1f426a97aa9..dca989eb2d42 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -56,8 +56,8 @@ static int context_switch_events[] = {
static int ring_id_to_context_switch_event(int ring_id)
{
- if (WARN_ON(ring_id < RCS && ring_id >
- ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id < RCS ||
+ ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@@ -394,9 +394,11 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- int ring_id = wa_ctx->workload->ring_id;
- struct i915_gem_context *shadow_ctx =
- wa_ctx->workload->vgpu->shadow_ctx;
+ struct intel_vgpu_workload *workload = container_of(wa_ctx,
+ struct intel_vgpu_workload,
+ wa_ctx);
+ int ring_id = workload->ring_id;
+ struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -680,15 +682,12 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
- workload->wa_ctx.workload = workload;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
if (emulate_schedule_in)
- memcpy(&workload->elsp_dwords,
- &vgpu->execlist[ring_id].elsp_dwords,
- sizeof(workload->elsp_dwords));
+ workload->elsp_dwords = vgpu->execlist[ring_id].elsp_dwords;
gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
workload, ring_id, head, tail, start, ctl);
@@ -775,7 +774,8 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
_EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
- ctx_status_ptr.read_ptr = ctx_status_ptr.write_ptr = 0x7;
+ ctx_status_ptr.read_ptr = 0;
+ ctx_status_ptr.write_ptr = 0x7;
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 933a7c211a1c..dce8d15f706f 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -75,11 +75,11 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
struct gvt_firmware_header *h;
void *firmware;
void *p;
- unsigned long size;
+ unsigned long size, crc32_start;
int i;
int ret;
- size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
+ size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
@@ -112,6 +112,9 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
memcpy(gvt->firmware.mmio, p, info->mmio_size);
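+ /* the checksum covers everything after the crc32 field itself;
+ * the +4 skips the u32 crc32 member.
+ */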
+ crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
+ h->crc32 = crc32_le(0, firmware + crc32_start, size - crc32_start);
+
firmware_attr.size = size;
firmware_attr.private = firmware;
@@ -234,7 +237,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->mmio = mem;
- sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%04x.golden_hw_state",
+ sprintf(path, "%s/vid_0x%04x_did_0x%04x_rid_0x%02x.golden_hw_state",
GVT_FIRMWARE_PATH, pdev->vendor, pdev->device,
pdev->revision);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index b832bea64e03..c6f0077f590d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2224,7 +2224,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
gvt_dbg_core("init gtt\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
@@ -2293,12 +2294,15 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
u32 index;
u32 offset;
u32 num_entries;
struct intel_gvt_gtt_entry e;
+ intel_runtime_pm_get(dev_priv);
+
memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
e.type = GTT_TYPE_GGTT_PTE;
ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
@@ -2313,6 +2317,8 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+ intel_runtime_pm_put(dev_priv);
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 3b9d59e457ba..7dea5e5d5567 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -52,6 +52,8 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
+ .vgpu_activate = intel_gvt_activate_vgpu,
+ .vgpu_deactivate = intel_gvt_deactivate_vgpu,
};
/**
@@ -106,7 +108,8 @@ static void init_device_info(struct intel_gvt *gvt)
struct intel_gvt_device_info *info = &gvt->device_info;
struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
info->max_support_vgpus = 8;
info->cfg_space_size = 256;
info->mmio_size = 2 * 1024 * 1024;
@@ -143,6 +146,11 @@ static int gvt_service_thread(void *data)
intel_gvt_emulate_vblank(gvt);
mutex_unlock(&gvt->lock);
}
+
+ if (test_and_clear_bit(INTEL_GVT_REQUEST_SCHED,
+ (void *)&gvt->service_request)) {
+ intel_gvt_schedule(gvt);
+ }
}
return 0;
@@ -196,6 +204,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
idr_destroy(&gvt->vgpu_idr);
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -214,6 +224,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
{
struct intel_gvt *gvt;
+ struct intel_vgpu *vgpu;
int ret;
/*
@@ -286,6 +297,14 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
goto out_clean_types;
}
+ vgpu = intel_gvt_create_idle_vgpu(gvt);
+ if (IS_ERR(vgpu)) {
+ ret = PTR_ERR(vgpu);
+ gvt_err("failed to create idle vgpu\n");
+ goto out_clean_types;
+ }
+ gvt->idle_vgpu = vgpu;
+
gvt_dbg_core("gvt device initialization is done\n");
dev_priv->gvt = gvt;
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 6dfc48b63b71..930732e5c780 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -138,6 +138,10 @@ struct intel_vgpu_display {
struct intel_vgpu_sbi sbi;
};
+struct vgpu_sched_ctl {
+ int weight;
+};
+
struct intel_vgpu {
struct intel_gvt *gvt;
int id;
@@ -147,6 +151,7 @@ struct intel_vgpu {
bool failsafe;
bool resetting;
void *sched_data;
+ struct vgpu_sched_ctl sched_ctl;
struct intel_vgpu_fence fence;
struct intel_vgpu_gm gm;
@@ -160,6 +165,7 @@ struct intel_vgpu {
struct list_head workload_q_head[I915_NUM_ENGINES];
struct kmem_cache *workloads;
atomic_t running_workload_num;
+ ktime_t last_ctx_submit_time;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
@@ -215,6 +221,7 @@ struct intel_vgpu_type {
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
+ unsigned int weight;
enum intel_vgpu_edid resolution;
};
@@ -236,6 +243,7 @@ struct intel_gvt {
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
+ struct intel_vgpu *idle_vgpu;
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
@@ -249,6 +257,7 @@ static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
enum {
INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
+ INTEL_GVT_REQUEST_SCHED = 1,
};
static inline void intel_gvt_request_service(struct intel_gvt *gvt,
@@ -322,6 +331,8 @@ struct intel_vgpu_creation_params {
__u64 resolution;
__s32 primary;
__u64 vgpu_id;
+
+ __u32 weight;
};
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
@@ -376,13 +387,16 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
-
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
@@ -449,6 +463,8 @@ struct intel_gvt_ops {
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *);
void (*vgpu_reset)(struct intel_vgpu *);
+ void (*vgpu_activate)(struct intel_vgpu *);
+ void (*vgpu_deactivate)(struct intel_vgpu *);
};
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6da9ae1618e3..0ad1a508e2af 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -68,6 +68,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
return D_BDW;
else if (IS_SKYLAKE(gvt->dev_priv))
return D_SKL;
+ else if (IS_KABYLAKE(gvt->dev_priv))
+ return D_KBL;
return 0;
}
@@ -234,7 +236,8 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -823,8 +826,9 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv) &&
- offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
+ if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv))
+ && offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
} else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
@@ -1311,7 +1315,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1324,7 +1329,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1418,6 +1424,7 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
execlist->elsp_dwords.data[execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
+ vgpu->last_ctx_submit_time = ktime_get();
ret = intel_vgpu_submit_execlist(vgpu, ring_id);
if(ret)
gvt_vgpu_err("fail submit workload on ring %d\n",
@@ -2592,219 +2599,232 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL);
- MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
- MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL, NULL, dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPB_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPC_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
+ MMIO_F(_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL,
+ dp_aux_ch_ctl_mmio_write);
- MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
- MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
+ MMIO_D(HSW_PWR_WELL_BIOS, D_SKL_PLUS);
+ MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL_PLUS, NULL,
+ skl_power_well_ctl_write);
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL_PLUS, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL, NULL, skl_misc_ctl_write);
- MMIO_D(0x45504, D_SKL);
- MMIO_D(0x45520, D_SKL);
- MMIO_D(0x46000, D_SKL);
- MMIO_DH(0x46010, D_SKL, NULL, skl_lcpll_write);
- MMIO_DH(0x46014, D_SKL, NULL, skl_lcpll_write);
- MMIO_D(0x6C040, D_SKL);
- MMIO_D(0x6C048, D_SKL);
- MMIO_D(0x6C050, D_SKL);
- MMIO_D(0x6C044, D_SKL);
- MMIO_D(0x6C04C, D_SKL);
- MMIO_D(0x6C054, D_SKL);
- MMIO_D(0x6c058, D_SKL);
- MMIO_D(0x6c05c, D_SKL);
- MMIO_DH(0X6c060, D_SKL, dpll_status_read, NULL);
-
- MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL, NULL, pf_write);
- MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL, NULL, pf_write);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL, NULL, NULL);
- MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL, NULL, NULL);
- MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL, NULL, NULL);
-
- MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL, NULL, NULL);
- MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL, NULL, NULL);
- MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL, NULL, NULL);
-
- MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL, NULL, NULL);
- MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL, NULL, NULL);
-
- MMIO_D(0x70380, D_SKL);
- MMIO_D(0x71380, D_SKL);
- MMIO_D(0x72380, D_SKL);
- MMIO_D(0x7039c, D_SKL);
-
- MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL, NULL, NULL);
- MMIO_D(0x8f074, D_SKL);
- MMIO_D(0x8f004, D_SKL);
- MMIO_D(0x8f034, D_SKL);
-
- MMIO_D(0xb11c, D_SKL);
-
- MMIO_D(0x51000, D_SKL);
- MMIO_D(0x6c00c, D_SKL);
-
- MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
- MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
-
- MMIO_D(0xd08, D_SKL);
- MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_D(0x45504, D_SKL_PLUS);
+ MMIO_D(0x45520, D_SKL_PLUS);
+ MMIO_D(0x46000, D_SKL_PLUS);
+ MMIO_DH(0x46010, D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_DH(0x46014, D_SKL | D_KBL, NULL, skl_lcpll_write);
+ MMIO_D(0x6C040, D_SKL | D_KBL);
+ MMIO_D(0x6C048, D_SKL | D_KBL);
+ MMIO_D(0x6C050, D_SKL | D_KBL);
+ MMIO_D(0x6C044, D_SKL | D_KBL);
+ MMIO_D(0x6C04C, D_SKL | D_KBL);
+ MMIO_D(0x6C054, D_SKL | D_KBL);
+ MMIO_D(0x6c058, D_SKL | D_KBL);
+ MMIO_D(0x6c05c, D_SKL | D_KBL);
+ MMIO_DH(0X6c060, D_SKL | D_KBL, dpll_status_read, NULL);
+
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_POS(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_WIN_SZ(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_B, 1), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 0), D_SKL_PLUS, NULL, pf_write);
+ MMIO_DH(SKL_PS_CTRL(PIPE_C, 1), D_SKL_PLUS, NULL, pf_write);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(CUR_BUF_CFG(PIPE_A), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_B), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_BUF_CFG(PIPE_C), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_A, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_A, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_B, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_B, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(PLANE_WM(PIPE_C, 0, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 1, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(PLANE_WM(PIPE_C, 2, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_F(CUR_WM(PIPE_A, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_B, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_F(CUR_WM(PIPE_C, 0), 4 * 8, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_WM_TRANS(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(CUR_WM_TRANS(PIPE_A), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_B), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CUR_WM_TRANS(PIPE_C), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 0), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(PLANE_NV12_BUF_CFG(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C0(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C0(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_A, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_A, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_B, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_B, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_DH(_REG_701C4(PIPE_C, 1), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 2), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 3), D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(_REG_701C4(PIPE_C, 4), D_SKL_PLUS, NULL, NULL);
+
+ MMIO_D(0x70380, D_SKL_PLUS);
+ MMIO_D(0x71380, D_SKL_PLUS);
+ MMIO_D(0x72380, D_SKL_PLUS);
+ MMIO_D(0x7039c, D_SKL_PLUS);
+
+ MMIO_F(0x80000, 0x3000, 0, 0, 0, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(0x8f074, D_SKL | D_KBL);
+ MMIO_D(0x8f004, D_SKL | D_KBL);
+ MMIO_D(0x8f034, D_SKL | D_KBL);
+
+ MMIO_D(0xb11c, D_SKL | D_KBL);
+
+ MMIO_D(0x51000, D_SKL | D_KBL);
+ MMIO_D(0x6c00c, D_SKL_PLUS);
+
+ MMIO_F(0xc800, 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+ MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL | D_KBL, NULL, NULL);
+
+ MMIO_D(0xd08, D_SKL_PLUS);
+ MMIO_DFH(0x20e0, D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20ec, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
- MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(0x4de0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de8, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4dec, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df0, D_SKL | D_KBL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df4, D_SKL | D_KBL, F_CMD_ACCESS, NULL, gen9_trtte_write);
+ MMIO_DH(0x4dfc, D_SKL | D_KBL, NULL, gen9_trtt_chicken_write);
- MMIO_D(0x45008, D_SKL);
+ MMIO_D(0x45008, D_SKL | D_KBL);
- MMIO_D(0x46430, D_SKL);
+ MMIO_D(0x46430, D_SKL | D_KBL);
- MMIO_D(0x46520, D_SKL);
+ MMIO_D(0x46520, D_SKL | D_KBL);
- MMIO_D(0xc403c, D_SKL);
- MMIO_D(0xb004, D_SKL);
+ MMIO_D(0xc403c, D_SKL | D_KBL);
+ MMIO_D(0xb004, D_SKL_PLUS);
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
- MMIO_D(0x65900, D_SKL);
- MMIO_D(0x1082c0, D_SKL);
- MMIO_D(0x4068, D_SKL);
- MMIO_D(0x67054, D_SKL);
- MMIO_D(0x6e560, D_SKL);
- MMIO_D(0x6e554, D_SKL);
- MMIO_D(0x2b20, D_SKL);
- MMIO_D(0x65f00, D_SKL);
- MMIO_D(0x65f08, D_SKL);
- MMIO_D(0x320f0, D_SKL);
-
- MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x70034, D_SKL);
- MMIO_D(0x71034, D_SKL);
- MMIO_D(0x72034, D_SKL);
-
- MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL);
- MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL);
- MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
-
- MMIO_D(0x44500, D_SKL);
+ MMIO_D(0x65900, D_SKL_PLUS);
+ MMIO_D(0x1082c0, D_SKL | D_KBL);
+ MMIO_D(0x4068, D_SKL | D_KBL);
+ MMIO_D(0x67054, D_SKL | D_KBL);
+ MMIO_D(0x6e560, D_SKL | D_KBL);
+ MMIO_D(0x6e554, D_SKL | D_KBL);
+ MMIO_D(0x2b20, D_SKL | D_KBL);
+ MMIO_D(0x65f00, D_SKL | D_KBL);
+ MMIO_D(0x65f08, D_SKL | D_KBL);
+ MMIO_D(0x320f0, D_SKL | D_KBL);
+
+ MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_REG_VECS_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_D(0x70034, D_SKL_PLUS);
+ MMIO_D(0x71034, D_SKL_PLUS);
+ MMIO_D(0x72034, D_SKL_PLUS);
+
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_A), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_B), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYVAL_1(PIPE_C), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_A), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_B), D_SKL_PLUS);
+ MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL_PLUS);
+
+ MMIO_D(0x44500, D_SKL_PLUS);
MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL | D_KBL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
+
+ MMIO_D(0x4ab8, D_KBL);
+ MMIO_D(0x940c, D_SKL_PLUS);
+ MMIO_D(0x2248, D_SKL_PLUS | D_KBL);
+ MMIO_D(0x4ab0, D_SKL | D_KBL);
+ MMIO_D(0x20d4, D_SKL | D_KBL);
+
return 0;
}
@@ -2881,7 +2901,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(dev_priv)
+ || IS_KABYLAKE(dev_priv)) {
ret = init_broadwell_mmio_info(gvt);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 92bb247e3478..9d6812f0957f 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -580,7 +580,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (IS_SKYLAKE(gvt->dev_priv)) {
+ } else if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -690,7 +690,8 @@ int intel_gvt_init_irq(struct intel_gvt *gvt)
gvt_dbg_core("init irq framework\n");
- if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv)) {
irq->ops = &gen8_irq_ops;
irq->irq_map = gen8_irq_map;
} else {
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d641214578a7..1ae0b4083ce1 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -295,10 +295,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\nresolution: %s\n",
+ "fence: %d\nresolution: %s\n"
+ "weight: %d\n",
BYTES_TO_MB(type->low_gm_size),
BYTES_TO_MB(type->high_gm_size),
- type->fence, vgpu_edid_str(type->resolution));
+ type->fence, vgpu_edid_str(type->resolution),
+ type->weight);
}
static MDEV_TYPE_ATTR_RO(available_instances);
@@ -544,6 +546,8 @@ static int intel_vgpu_open(struct mdev_device *mdev)
if (ret)
goto undo_group;
+ intel_gvt_ops->vgpu_activate(vgpu);
+
atomic_set(&vgpu->vdev.released, 0);
return ret;
@@ -569,6 +573,8 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
+ intel_gvt_ops->vgpu_deactivate(vgpu);
+
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
@@ -1146,8 +1152,40 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
return 0;
}
+static ssize_t
+vgpu_id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mdev_device *mdev = mdev_from_dev(dev);
+
+ if (mdev) {
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)
+ mdev_get_drvdata(mdev);
+ return sprintf(buf, "%d\n", vgpu->id);
+ }
+ return sprintf(buf, "\n");
+}
+
+static DEVICE_ATTR_RO(vgpu_id);
+
+static struct attribute *intel_vgpu_attrs[] = {
+ &dev_attr_vgpu_id.attr,
+ NULL
+};
+
+static const struct attribute_group intel_vgpu_group = {
+ .name = "intel_vgpu",
+ .attrs = intel_vgpu_attrs,
+};
+
+static const struct attribute_group *intel_vgpu_groups[] = {
+ &intel_vgpu_group,
+ NULL,
+};
+
static const struct mdev_parent_ops intel_vgpu_ops = {
.supported_type_groups = intel_vgpu_type_groups,
+ .mdev_attr_groups = intel_vgpu_groups,
.create = intel_vgpu_create,
.remove = intel_vgpu_remove,
@@ -1340,13 +1378,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
{
- struct intel_vgpu *vgpu = info->vgpu;
-
- if (!info) {
- gvt_vgpu_err("kvmgt_guest_info invalid\n");
- return false;
- }
-
kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
kvm_put_kvm(info->kvm);
kvmgt_protect_table_destroy(info);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index a3a027025cd0..7edd66f38ef9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -44,20 +44,21 @@ struct intel_vgpu;
#define D_HSW (1 << 2)
#define D_BDW (1 << 3)
#define D_SKL (1 << 4)
+#define D_KBL (1 << 5)
-#define D_GEN9PLUS (D_SKL)
-#define D_GEN8PLUS (D_BDW | D_SKL)
-#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL)
-#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_GEN9PLUS (D_SKL | D_KBL)
+#define D_GEN8PLUS (D_BDW | D_SKL | D_KBL)
+#define D_GEN75PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_GEN7PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
-#define D_SKL_PLUS (D_SKL)
-#define D_BDW_PLUS (D_BDW | D_SKL)
-#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL)
-#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_SKL_PLUS (D_SKL | D_KBL)
+#define D_BDW_PLUS (D_BDW | D_SKL | D_KBL)
+#define D_HSW_PLUS (D_HSW | D_BDW | D_SKL | D_KBL)
+#define D_IVB_PLUS (D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
#define D_PRE_BDW (D_SNB | D_IVB | D_HSW)
#define D_PRE_SKL (D_SNB | D_IVB | D_HSW | D_BDW)
-#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL)
+#define D_ALL (D_SNB | D_IVB | D_HSW | D_BDW | D_SKL | D_KBL)
struct intel_gvt_mmio_info {
u32 offset;
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 0beb83563b08..c6e7972ac21d 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -44,7 +44,7 @@ struct render_mmio {
u32 value;
};
-static struct render_mmio gen8_render_mmio_list[] = {
+static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
@@ -75,7 +75,7 @@ static struct render_mmio gen8_render_mmio_list[] = {
{BCS, _MMIO(0x22028), 0x0, false},
};
-static struct render_mmio gen9_render_mmio_list[] = {
+static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
{RCS, _MMIO(0x229c), 0xffff, false},
{RCS, _MMIO(0x2248), 0x0, false},
{RCS, _MMIO(0x2098), 0x0, false},
@@ -126,6 +126,18 @@ static struct render_mmio gen9_render_mmio_list[] = {
{VCS2, _MMIO(0x1c028), 0xffff, false},
{VECS, _MMIO(0x1a028), 0xffff, false},
+
+ {RCS, _MMIO(0x7304), 0xffff, true},
+ {RCS, _MMIO(0x2248), 0x0, false},
+ {RCS, _MMIO(0x940c), 0x0, false},
+ {RCS, _MMIO(0x4ab8), 0x0, false},
+
+ {RCS, _MMIO(0x4ab0), 0x0, false},
+ {RCS, _MMIO(0x20d4), 0x0, false},
+
+ {RCS, _MMIO(0xb004), 0x0, false},
+ {RCS, _MMIO(0x20a0), 0x0, false},
+ {RCS, _MMIO(0x20e4), 0xffff, false},
};
static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
@@ -159,7 +171,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && IS_SKYLAKE(dev_priv))
+ if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(dev_priv, fw);
@@ -192,9 +204,6 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (!IS_SKYLAKE(dev_priv))
- return;
-
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
gen9_render_mocs[ring_id][i] = I915_READ(offset);
@@ -230,9 +239,6 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if (!IS_SKYLAKE(dev_priv))
- return;
-
offset.reg = regs[ring_id];
for (i = 0; i < 64; i++) {
vgpu_vreg(vgpu, offset) = I915_READ(offset);
@@ -265,7 +271,8 @@ void intel_gvt_load_render_mmio(struct intel_vgpu *vgpu, int ring_id)
u32 inhibit_mask =
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)
+ || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
load_mocs(vgpu, ring_id);
@@ -312,7 +319,7 @@ void intel_gvt_restore_render_mmio(struct intel_vgpu *vgpu, int ring_id)
u32 v;
int i, array_size;
- if (IS_SKYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
mmio = gen9_render_mmio_list;
array_size = ARRAY_SIZE(gen9_render_mmio_list);
restore_mocs(vgpu, ring_id);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 34b9acdf3479..79ba4b3440aa 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,19 +47,92 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
return false;
}
+struct vgpu_sched_data {
+ struct list_head lru_list;
+ struct intel_vgpu *vgpu;
+
+ ktime_t sched_in_time;
+ ktime_t sched_out_time;
+ ktime_t sched_time;
+ ktime_t left_ts;
+ ktime_t allocated_ts;
+
+ struct vgpu_sched_ctl sched_ctl;
+};
+
+struct gvt_sched_data {
+ struct intel_gvt *gvt;
+ struct hrtimer timer;
+ unsigned long period;
+ struct list_head lru_runq_head;
+};
+
+static void vgpu_update_timeslice(struct intel_vgpu *pre_vgpu)
+{
+ ktime_t delta_ts;
+ struct vgpu_sched_data *vgpu_data = pre_vgpu->sched_data;
+
+ delta_ts = vgpu_data->sched_out_time - vgpu_data->sched_in_time;
+
+ vgpu_data->sched_time += delta_ts;
+ vgpu_data->left_ts -= delta_ts;
+}
+
+#define GVT_TS_BALANCE_PERIOD_MS 100
+#define GVT_TS_BALANCE_STAGE_NUM 10
+
+static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
+{
+ struct vgpu_sched_data *vgpu_data;
+ struct list_head *pos;
+ static uint64_t stage_check;
+ int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;
+
+ /* At stage 0 the timeslice accumulation is reset: each vGPU gets a
+ * fresh allocation without carrying over any previous debt.
+ */
+ if (stage == 0) {
+ int total_weight = 0;
+ ktime_t fair_timeslice;
+
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+ total_weight += vgpu_data->sched_ctl.weight;
+ }
+
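+ /* e.g. two vGPUs with weights 2 and 1 get ~66.7ms and ~33.3ms
+ * of each 100ms balance period, respectively.
+ */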
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+ fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
+ vgpu_data->sched_ctl.weight /
+ total_weight;
+
+ vgpu_data->allocated_ts = fair_timeslice;
+ vgpu_data->left_ts = vgpu_data->allocated_ts;
+ }
+ } else {
+ list_for_each(pos, &sched_data->lru_runq_head) {
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
+
+ /* the timeslice for the next 100ms should include any left-over
+ * (or debt) slice from the previous stages.
+ */
+ vgpu_data->left_ts += vgpu_data->allocated_ts;
+ }
+ }
+}
+
static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id i;
struct intel_engine_cs *engine;
+ struct vgpu_sched_data *vgpu_data;
+ ktime_t cur_time;
/* no target to schedule */
if (!scheduler->next_vgpu)
return;
- gvt_dbg_sched("try to schedule next vgpu %d\n",
- scheduler->next_vgpu->id);
-
/*
* after the flag is set, workload dispatch thread will
* stop dispatching workload for current vgpu
@@ -68,14 +141,18 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
/* still have uncompleted workload? */
for_each_engine(engine, gvt->dev_priv, i) {
- if (scheduler->current_workload[i]) {
- gvt_dbg_sched("still have running workload\n");
+ if (scheduler->current_workload[i])
return;
- }
}
- gvt_dbg_sched("switch to next vgpu %d\n",
- scheduler->next_vgpu->id);
+ cur_time = ktime_get();
+ if (scheduler->current_vgpu) {
+ vgpu_data = scheduler->current_vgpu->sched_data;
+ vgpu_data->sched_out_time = cur_time;
+ vgpu_update_timeslice(scheduler->current_vgpu);
+ }
+ vgpu_data = scheduler->next_vgpu->sched_data;
+ vgpu_data->sched_in_time = cur_time;
/* switch current vgpu */
scheduler->current_vgpu = scheduler->next_vgpu;
@@ -88,97 +165,106 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
wake_up(&scheduler->waitq[i]);
}
-struct tbs_vgpu_data {
- struct list_head list;
- struct intel_vgpu *vgpu;
- /* put some per-vgpu sched stats here */
-};
-
-struct tbs_sched_data {
- struct intel_gvt *gvt;
- struct delayed_work work;
- unsigned long period;
- struct list_head runq_head;
-};
-
-#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
-
-static void tbs_sched_func(struct work_struct *work)
+static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
{
- struct tbs_sched_data *sched_data = container_of(work,
- struct tbs_sched_data, work.work);
- struct tbs_vgpu_data *vgpu_data;
-
- struct intel_gvt *gvt = sched_data->gvt;
- struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-
+ struct vgpu_sched_data *vgpu_data;
struct intel_vgpu *vgpu = NULL;
- struct list_head *pos, *head;
-
- mutex_lock(&gvt->lock);
-
- /* no vgpu or has already had a target */
- if (list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- goto out;
-
- if (scheduler->current_vgpu) {
- vgpu_data = scheduler->current_vgpu->sched_data;
- head = &vgpu_data->list;
- } else {
- head = &sched_data->runq_head;
- }
+ struct list_head *head = &sched_data->lru_runq_head;
+ struct list_head *pos;
/* search a vgpu with pending workload */
list_for_each(pos, head) {
- if (pos == &sched_data->runq_head)
- continue;
- vgpu_data = container_of(pos, struct tbs_vgpu_data, list);
+ vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);
if (!vgpu_has_pending_workload(vgpu_data->vgpu))
continue;
- vgpu = vgpu_data->vgpu;
- break;
+ /* Return the vGPU only if it has time slice left */
+ if (vgpu_data->left_ts > 0) {
+ vgpu = vgpu_data->vgpu;
+ break;
+ }
}
+ return vgpu;
+}
+
+/* in nanoseconds (1 ms) */
+#define GVT_DEFAULT_TIME_SLICE 1000000
+
+static void tbs_sched_func(struct gvt_sched_data *sched_data)
+{
+ struct intel_gvt *gvt = sched_data->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+ struct vgpu_sched_data *vgpu_data;
+ struct intel_vgpu *vgpu = NULL;
+ static uint64_t timer_check;
+
+ if (!(timer_check++ % GVT_TS_BALANCE_PERIOD_MS))
+ gvt_balance_timeslice(sched_data);
+
+ /* no active vgpu, or a target has already been chosen */
+ if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
+ goto out;
+
+ vgpu = find_busy_vgpu(sched_data);
if (vgpu) {
scheduler->next_vgpu = vgpu;
- gvt_dbg_sched("pick next vgpu %d\n", vgpu->id);
+
+ /* Move the last used vGPU to the tail of lru_list */
+ vgpu_data = vgpu->sched_data;
+ list_del_init(&vgpu_data->lru_list);
+ list_add_tail(&vgpu_data->lru_list,
+ &sched_data->lru_runq_head);
+ } else {
+ scheduler->next_vgpu = gvt->idle_vgpu;
}
out:
- if (scheduler->next_vgpu) {
- gvt_dbg_sched("try to schedule next vgpu %d\n",
- scheduler->next_vgpu->id);
+ if (scheduler->next_vgpu)
try_to_schedule_next_vgpu(gvt);
- }
+}
- /*
- * still have vgpu on runq
- * or last schedule haven't finished due to running workload
- */
- if (!list_empty(&sched_data->runq_head) || scheduler->next_vgpu)
- schedule_delayed_work(&sched_data->work, sched_data->period);
+void intel_gvt_schedule(struct intel_gvt *gvt)
+{
+ struct gvt_sched_data *sched_data = gvt->scheduler.sched_data;
+ mutex_lock(&gvt->lock);
+ tbs_sched_func(sched_data);
mutex_unlock(&gvt->lock);
}
+static enum hrtimer_restart tbs_timer_fn(struct hrtimer *timer_data)
+{
+ struct gvt_sched_data *data;
+
+ data = container_of(timer_data, struct gvt_sched_data, timer);
+
+ intel_gvt_request_service(data->gvt, INTEL_GVT_REQUEST_SCHED);
+
+ hrtimer_add_expires_ns(&data->timer, data->period);
+
+ return HRTIMER_RESTART;
+}
+
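
(For orientation only; the figures below are derived from the constants above, again assuming GVT_TS_BALANCE_PERIOD_MS is 100.)

	/*
	 * tick period       = GVT_DEFAULT_TIME_SLICE = 1,000,000 ns = 1 ms
	 * tbs_timer_fn()    re-arms itself every tick and requests
	 *                   INTEL_GVT_REQUEST_SCHED, which ends up calling
	 *                   intel_gvt_schedule()
	 * balancing cadence = every GVT_TS_BALANCE_PERIOD_MS ticks
	 *                   = 100 * 1 ms = 100 ms
	 */
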
static int tbs_sched_init(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
- struct tbs_sched_data *data;
+ struct gvt_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- INIT_LIST_HEAD(&data->runq_head);
- INIT_DELAYED_WORK(&data->work, tbs_sched_func);
+ INIT_LIST_HEAD(&data->lru_runq_head);
+ hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ data->timer.function = tbs_timer_fn;
data->period = GVT_DEFAULT_TIME_SLICE;
data->gvt = gvt;
scheduler->sched_data = data;
+
return 0;
}
@@ -186,25 +272,28 @@ static void tbs_sched_clean(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler =
&gvt->scheduler;
- struct tbs_sched_data *data = scheduler->sched_data;
+ struct gvt_sched_data *data = scheduler->sched_data;
+
+ hrtimer_cancel(&data->timer);
- cancel_delayed_work(&data->work);
kfree(data);
scheduler->sched_data = NULL;
}
static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu)
{
- struct tbs_vgpu_data *data;
+ struct vgpu_sched_data *data;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
+ data->sched_ctl.weight = vgpu->sched_ctl.weight;
data->vgpu = vgpu;
- INIT_LIST_HEAD(&data->list);
+ INIT_LIST_HEAD(&data->lru_list);
vgpu->sched_data = data;
+
return 0;
}
@@ -216,21 +305,24 @@ static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu)
static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
{
- struct tbs_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
- struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+ struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- if (!list_empty(&vgpu_data->list))
+ if (!list_empty(&vgpu_data->lru_list))
return;
- list_add_tail(&vgpu_data->list, &sched_data->runq_head);
- schedule_delayed_work(&sched_data->work, 0);
+ list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+
+ if (!hrtimer_active(&sched_data->timer))
+ hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
+ sched_data->period), HRTIMER_MODE_ABS);
}
static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
{
- struct tbs_vgpu_data *vgpu_data = vgpu->sched_data;
+ struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- list_del_init(&vgpu_data->list);
+ list_del_init(&vgpu_data->lru_list);
}
static struct intel_gvt_sched_policy_ops tbs_schedule_ops = {
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.h b/drivers/gpu/drm/i915/gvt/sched_policy.h
index bb8b9097e41a..ba00a5f7455f 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.h
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.h
@@ -43,6 +43,8 @@ struct intel_gvt_sched_policy_ops {
void (*stop_schedule)(struct intel_vgpu *vgpu);
};
+void intel_gvt_schedule(struct intel_gvt *gvt);
+
int intel_gvt_init_sched_policy(struct intel_gvt *gvt);
void intel_gvt_clean_sched_policy(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d9a735310e75..bada32b33237 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -279,11 +279,8 @@ static struct intel_vgpu_workload *pick_next_workload(
goto out;
}
- if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) {
- gvt_dbg_sched("ring id %d stop - no available workload\n",
- ring_id);
+ if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
goto out;
- }
/*
 * still have current workload, maybe the workload dispatcher
@@ -453,7 +450,8 @@ static int workload_thread(void *priv)
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
+ bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
+ || IS_KABYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
kfree(p);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2833dfa8c9ae..2cd725c0573e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -67,7 +67,6 @@ struct shadow_per_ctx {
};
struct intel_shadow_wa_ctx {
- struct intel_vgpu_workload *workload;
struct shadow_indirect_ctx indirect_ctx;
struct shadow_per_ctx per_ctx;
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 41cfa5ccae84..6e3cbd8caec2 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,18 +64,28 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+#define VGPU_MAX_WEIGHT 16
+#define VGPU_WEIGHT(vgpu_num) \
+ (VGPU_MAX_WEIGHT / (vgpu_num))
+
static struct {
unsigned int low_mm;
unsigned int high_mm;
unsigned int fence;
+
+ /* A vGPU with a weight of 8 will get twice as much GPU time as a
+ * vGPU with a weight of 4 on a contended host; each vGPU type is
+ * assigned a different weight. Legal weights range from 1 to 16.
+ */
+ unsigned int weight;
enum intel_vgpu_edid edid;
char *name;
} vgpu_types[] = {
/* Fixed vGPU type table */
- { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
- { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
- { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
- { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+ { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" },
+ { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, VGPU_WEIGHT(4), GVT_EDID_1920_1200, "4" },
+ { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, VGPU_WEIGHT(2), GVT_EDID_1920_1200, "2" },
+ { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, VGPU_WEIGHT(1), GVT_EDID_1920_1200, "1" },
};
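
(Worked out from the table above for illustration; the per-type weights follow from VGPU_WEIGHT(n) = VGPU_MAX_WEIGHT / n = 16 / n.)

	/*
	 *   GVTg_V5_8: VGPU_WEIGHT(8) = 16 / 8 =  2
	 *   GVTg_V5_4: VGPU_WEIGHT(4) = 16 / 4 =  4
	 *   GVTg_V5_2: VGPU_WEIGHT(2) = 16 / 2 =  8
	 *   GVTg_V5_1: VGPU_WEIGHT(1) = 16 / 1 = 16
	 *
	 * e.g. one GVTg_V5_2 instance (weight 8) running alongside one
	 * GVTg_V5_4 instance (weight 4) gets 8 / (8 + 4) = 2/3 of each
	 * balance period under contention.
	 */
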
/**
@@ -120,6 +130,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
gvt->types[i].fence = vgpu_types[i].fence;
+
+ if (vgpu_types[i].weight < 1 ||
+ vgpu_types[i].weight > VGPU_MAX_WEIGHT)
+ return -EINVAL;
+
+ gvt->types[i].weight = vgpu_types[i].weight;
gvt->types[i].resolution = vgpu_types[i].edid;
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
@@ -131,11 +147,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
sprintf(gvt->types[i].name, "GVTg_V5_%s",
vgpu_types[i].name);
- gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+ gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence,
+ gvt->types[i].weight,
vgpu_edid_str(gvt->types[i].resolution));
}
@@ -179,20 +196,34 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
}
/**
- * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * intel_gvt_activate_vgpu - activate a virtual GPU
* @vgpu: virtual GPU
*
- * This function is called when user wants to destroy a virtual GPU.
+ * This function is called when user wants to activate a virtual GPU.
*
*/
-void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
+{
+ mutex_lock(&vgpu->gvt->lock);
+ vgpu->active = true;
+ mutex_unlock(&vgpu->gvt->lock);
+}
+
+/**
+ * intel_gvt_deactivate_vgpu - deactivate a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to deactivate a virtual GPU.
+ * All virtual GPU runtime information will be destroyed.
+ *
+ */
+void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
mutex_lock(&gvt->lock);
vgpu->active = false;
- idr_remove(&gvt->vgpu_idr, vgpu->id);
if (atomic_read(&vgpu->running_workload_num)) {
mutex_unlock(&gvt->lock);
@@ -201,6 +232,26 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
}
intel_vgpu_stop_schedule(vgpu);
+
+ mutex_unlock(&gvt->lock);
+}
+
+/**
+ * intel_gvt_destroy_vgpu - destroy a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy a virtual GPU.
+ *
+ */
+void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+
+ mutex_lock(&gvt->lock);
+
+ WARN(vgpu->active, "vGPU is still active!\n");
+
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
intel_vgpu_clean_sched_policy(vgpu);
intel_vgpu_clean_gvt_context(vgpu);
intel_vgpu_clean_execlist(vgpu);
@@ -216,6 +267,59 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
mutex_unlock(&gvt->lock);
}
+#define IDLE_VGPU_IDR 0
+
+/**
+ * intel_gvt_create_idle_vgpu - create an idle virtual GPU
+ * @gvt: GVT device
+ *
+ * This function is called when user wants to create an idle virtual GPU.
+ *
+ * Returns:
+ * pointer to intel_vgpu, error pointer if failed.
+ */
+struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ enum intel_engine_id i;
+ int ret;
+
+ vgpu = vzalloc(sizeof(*vgpu));
+ if (!vgpu)
+ return ERR_PTR(-ENOMEM);
+
+ vgpu->id = IDLE_VGPU_IDR;
+ vgpu->gvt = gvt;
+
+ for (i = 0; i < I915_NUM_ENGINES; i++)
+ INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
+
+ ret = intel_vgpu_init_sched_policy(vgpu);
+ if (ret)
+ goto out_free_vgpu;
+
+ vgpu->active = false;
+
+ return vgpu;
+
+out_free_vgpu:
+ vfree(vgpu);
+ return ERR_PTR(ret);
+}
+
+/**
+ * intel_gvt_destroy_idle_vgpu - destroy an idle virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when user wants to destroy an idle virtual GPU.
+ *
+ */
+void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_vgpu_clean_sched_policy(vgpu);
+ vfree(vgpu);
+}
+
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_creation_params *param)
{
@@ -232,13 +336,15 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_lock(&gvt->lock);
- ret = idr_alloc(&gvt->vgpu_idr, vgpu, 1, GVT_MAX_VGPU, GFP_KERNEL);
+ ret = idr_alloc(&gvt->vgpu_idr, vgpu, IDLE_VGPU_IDR + 1, GVT_MAX_VGPU,
+ GFP_KERNEL);
if (ret < 0)
goto out_free_vgpu;
vgpu->id = ret;
vgpu->handle = param->handle;
vgpu->gvt = gvt;
+ vgpu->sched_ctl.weight = param->weight;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
intel_vgpu_init_cfg_space(vgpu, param->primary);
@@ -277,7 +383,6 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_shadow_ctx;
- vgpu->active = true;
mutex_unlock(&gvt->lock);
return vgpu;
@@ -325,6 +430,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
+ param.weight = type->weight;
param.resolution = type->resolution;
/* XXX current param based on MB */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47e707d83c4d..d689e511744e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1012,9 +1012,12 @@ static int gpu_state_release(struct inode *inode, struct file *file)
static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
+ struct drm_i915_private *i915 = inode->i_private;
struct i915_gpu_state *gpu;
- gpu = i915_capture_gpu_state(inode->i_private);
+ intel_runtime_pm_get(i915);
+ gpu = i915_capture_gpu_state(i915);
+ intel_runtime_pm_put(i915);
if (!gpu)
return -ENOMEM;
@@ -1459,16 +1462,14 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_i915_private *i915 = node_to_i915(m->private);
struct intel_uncore_forcewake_domain *fw_domain;
+ unsigned int tmp;
- spin_lock_irq(&dev_priv->uncore.lock);
- for_each_fw_domain(fw_domain, dev_priv) {
+ for_each_fw_domain(fw_domain, i915, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
- fw_domain->wake_count);
- }
- spin_unlock_irq(&dev_priv->uncore.lock);
+ READ_ONCE(fw_domain->wake_count));
return 0;
}
@@ -1938,9 +1939,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
- seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
- ring->space, ring->head, ring->tail,
- ring->last_retired_head);
+ seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u)",
+ ring->space, ring->head, ring->tail);
}
static int i915_context_status(struct seq_file *m, void *unused)
@@ -2474,9 +2474,9 @@ static void i915_guc_client_info(struct seq_file *m,
enum intel_engine_id id;
uint64_t tot = 0;
- seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n",
- client->priority, client->ctx_index, client->proc_desc_offset);
- seq_printf(m, "\tDoorbell id %d, offset: 0x%x, cookie 0x%x\n",
+ seq_printf(m, "\tPriority %d, GuC stage index: %u, PD offset 0x%x\n",
+ client->priority, client->stage_id, client->proc_desc_offset);
+ seq_printf(m, "\tDoorbell id %d, offset: 0x%lx, cookie 0x%x\n",
client->doorbell_id, client->doorbell_offset, client->doorbell_cookie);
seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
client->wq_size, client->wq_offset, client->wq_tail);
@@ -2511,7 +2511,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
}
seq_printf(m, "Doorbell map:\n");
- seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc->doorbell_bitmap);
+ seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap);
seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc->db_cacheline);
seq_printf(m, "GuC total action count: %llu\n", guc->action_count);
@@ -4129,7 +4129,9 @@ i915_wedged_get(void *data, u64 *val)
static int
i915_wedged_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct drm_i915_private *i915 = data;
+ struct intel_engine_cs *engine;
+ unsigned int tmp;
/*
* There is no safeguard against this debugfs entry colliding
@@ -4139,13 +4141,17 @@ i915_wedged_set(void *data, u64 val)
* while it is writing to 'i915_wedged'
*/
- if (i915_reset_backoff(&dev_priv->gpu_error))
+ if (i915_reset_backoff(&i915->gpu_error))
return -EAGAIN;
- i915_handle_error(dev_priv, val,
- "Manually setting wedged to %llu", val);
+ for_each_engine_masked(engine, i915, val, tmp) {
+ engine->hangcheck.seqno = intel_engine_get_seqno(engine);
+ engine->hangcheck.stalled = true;
+ }
+
+ i915_handle_error(i915, val, "Manually setting wedged to %llu", val);
- wait_on_bit(&dev_priv->gpu_error.flags,
+ wait_on_bit(&i915->gpu_error.flags,
I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE);
@@ -4173,10 +4179,6 @@ fault_irq_set(struct drm_i915_private *i915,
if (err)
goto err_unlock;
- /* Retire to kick idle work */
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
-
*irq = val;
mutex_unlock(&i915->drm.struct_mutex);
@@ -4280,7 +4282,7 @@ i915_drop_caches_set(void *data, u64 val)
goto unlock;
}
- if (val & (DROP_RETIRE | DROP_ACTIVE))
+ if (val & DROP_RETIRE)
i915_gem_retire_requests(dev_priv);
lockdep_set_current_reclaim_state(GFP_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 98b17070a123..3036d4835b0f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -549,6 +549,7 @@ static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
static void i915_gem_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->drm.struct_mutex);
+ intel_uc_fini_hw(dev_priv);
i915_gem_cleanup_engines(dev_priv);
i915_gem_context_fini(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -609,7 +610,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
ret = i915_gem_init(dev_priv);
if (ret)
- goto cleanup_irq;
+ goto cleanup_uc;
intel_modeset_gem_init(dev);
@@ -631,9 +632,9 @@ cleanup_gem:
if (i915_gem_suspend(dev_priv))
DRM_ERROR("failed to idle hardware; continuing to unload!\n");
i915_gem_fini(dev_priv);
+cleanup_uc:
+ intel_uc_fini_fw(dev_priv);
cleanup_irq:
- intel_guc_fini(dev_priv);
- intel_huc_fini(dev_priv);
drm_irq_uninstall(dev);
intel_teardown_gmbus(dev_priv);
cleanup_csr:
@@ -1351,9 +1352,8 @@ void i915_driver_unload(struct drm_device *dev)
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
- intel_guc_fini(dev_priv);
- intel_huc_fini(dev_priv);
i915_gem_fini(dev_priv);
+ intel_uc_fini_fw(dev_priv);
intel_fbc_cleanup_cfb(dev_priv);
intel_power_domains_fini(dev_priv);
@@ -1469,8 +1469,6 @@ static int i915_drm_suspend(struct drm_device *dev)
goto out;
}
- intel_guc_suspend(dev_priv);
-
intel_display_suspend(dev);
intel_dp_mst_suspend(dev);
@@ -2177,6 +2175,20 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
+static int vlv_wait_for_pw_status(struct drm_i915_private *dev_priv,
+ u32 mask, u32 val)
+{
+ /* The HW does not like us polling for PW_STATUS frequently, so
+ * use the sleeping loop rather than risk the busy spin within
+ * intel_wait_for_register().
+ *
+ * Transitioning between RC6 states should be at most 2ms (see
+ * valleyview_enable_rps) so use a 3ms timeout.
+ */
+ return wait_for((I915_READ_NOTRACE(VLV_GTLC_PW_STATUS) & mask) == val,
+ 3);
+}
+
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
u32 val;
@@ -2205,8 +2217,9 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
+ u32 mask;
u32 val;
- int err = 0;
+ int err;
val = I915_READ(VLV_GTLC_WAKE_CTRL);
val &= ~VLV_GTLC_ALLOWWAKEREQ;
@@ -2215,45 +2228,32 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
POSTING_READ(VLV_GTLC_WAKE_CTRL);
- err = intel_wait_for_register(dev_priv,
- VLV_GTLC_PW_STATUS,
- VLV_GTLC_ALLOWWAKEACK,
- allow,
- 1);
+ mask = VLV_GTLC_ALLOWWAKEACK;
+ val = allow ? mask : 0;
+
+ err = vlv_wait_for_pw_status(dev_priv, mask, val);
if (err)
DRM_ERROR("timeout disabling GT waking\n");
return err;
}
-static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
- bool wait_for_on)
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
{
u32 mask;
u32 val;
- int err;
mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
val = wait_for_on ? mask : 0;
- if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
- return 0;
-
- DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
- onoff(wait_for_on),
- I915_READ(VLV_GTLC_PW_STATUS));
/*
* RC6 transitioning can be delayed up to 2 msec (see
* valleyview_enable_rps), use 3 msec for safety.
*/
- err = intel_wait_for_register(dev_priv,
- VLV_GTLC_PW_STATUS, mask, val,
- 3);
- if (err)
+ if (vlv_wait_for_pw_status(dev_priv, mask, val))
DRM_ERROR("timeout waiting for GT wells to go %s\n",
onoff(wait_for_on));
-
- return err;
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -2274,7 +2274,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
* Bspec defines the following GT well on flags as debug only, so
* don't treat them as hard failures.
*/
- (void)vlv_wait_for_gt_wells(dev_priv, false);
+ vlv_wait_for_gt_wells(dev_priv, false);
mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a5947a496d0a..c9b0949f6c1a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,26 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170320"
-#define DRIVER_TIMESTAMP 1489994464
-
-#undef WARN_ON
-/* Many gcc seem to no see through this and fall over :( */
-#if 0
-#define WARN_ON(x) ({ \
- bool __i915_warn_cond = (x); \
- if (__builtin_constant_p(__i915_warn_cond)) \
- BUILD_BUG_ON(__i915_warn_cond); \
- WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
-#else
-#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
-#endif
-
-#undef WARN_ON_ONCE
-#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
-
-#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
- (long) (x), __func__);
+#define DRIVER_DATE "20170403"
+#define DRIVER_TIMESTAMP 1491198738
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -703,9 +685,9 @@ enum forcewake_domain_id {
};
enum forcewake_domains {
- FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
- FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
- FORCEWAKE_MEDIA = (1 << FW_DOMAIN_ID_MEDIA),
+ FORCEWAKE_RENDER = BIT(FW_DOMAIN_ID_RENDER),
+ FORCEWAKE_BLITTER = BIT(FW_DOMAIN_ID_BLITTER),
+ FORCEWAKE_MEDIA = BIT(FW_DOMAIN_ID_MEDIA),
FORCEWAKE_ALL = (FORCEWAKE_RENDER |
FORCEWAKE_BLITTER |
FORCEWAKE_MEDIA)
@@ -732,21 +714,25 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
+ enum forcewake_domains domains);
void (*force_wake_put)(struct drm_i915_private *dev_priv,
- enum forcewake_domains domains);
-
- uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
- uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, i915_reg_t r, bool trace);
-
- void (*mmio_writeb)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint8_t val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint16_t val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv, i915_reg_t r,
- uint32_t val, bool trace);
+ enum forcewake_domains domains);
+
+ uint8_t (*mmio_readb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+ uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, bool trace);
+
+ void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint8_t val, bool trace);
+ void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint16_t val, bool trace);
+ void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ i915_reg_t r, uint32_t val, bool trace);
};
struct intel_forcewake_range {
@@ -770,32 +756,35 @@ struct intel_uncore {
enum forcewake_domains fw_domains;
enum forcewake_domains fw_domains_active;
+ u32 fw_set;
+ u32 fw_clear;
+ u32 fw_reset;
+
struct intel_uncore_forcewake_domain {
- struct drm_i915_private *i915;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned wake_count;
struct hrtimer timer;
i915_reg_t reg_set;
- u32 val_set;
- u32 val_clear;
i915_reg_t reg_ack;
- i915_reg_t reg_post;
- u32 val_reset;
} fw_domain[FW_DOMAIN_ID_COUNT];
int unclaimed_mmio_check;
};
+#define __mask_next_bit(mask) ({ \
+ int __idx = ffs(mask) - 1; \
+ mask &= ~BIT(__idx); \
+ __idx; \
+})
+
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
- for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
- (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
- (domain__)++) \
- for_each_if ((mask__) & (domain__)->mask)
+#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+ for (tmp__ = (mask__); \
+ tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
-#define for_each_fw_domain(domain__, dev_priv__) \
- for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
+#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
+ for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
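
(An illustrative expansion of the new iterator; not from the patch. Domain IDs follow the forcewake_domains enum above, so FORCEWAKE_RENDER is bit 0 and FORCEWAKE_MEDIA is bit 2.)

	/*
	 * Iterating a mask with RENDER and MEDIA set; tmp__ is a scratch
	 * copy, so the caller's mask is left untouched:
	 *
	 *   tmp = FORCEWAKE_RENDER | FORCEWAKE_MEDIA;    binary 101
	 *   __mask_next_bit(tmp) -> 0, tmp becomes 100   (RENDER domain)
	 *   __mask_next_bit(tmp) -> 2, tmp becomes 000   (MEDIA domain)
	 *   the loop terminates once tmp reaches 0
	 */
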
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
@@ -846,6 +835,7 @@ struct intel_csr {
func(has_resource_streamer); \
func(has_runtime_pm); \
func(has_snoop); \
+ func(unfenced_needs_alignment); \
func(cursor_needs_physical); \
func(hws_needs_physical); \
func(overlay_needs_physical); \
@@ -2578,12 +2568,6 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
(id__)++) \
for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
-#define __mask_next_bit(mask) ({ \
- int __idx = ffs(mask) - 1; \
- mask &= ~BIT(__idx); \
- __idx; \
-})
-
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
for (tmp__ = mask__ & INTEL_INFO(dev_priv__)->ring_mask; \
@@ -3956,14 +3940,14 @@ u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(struct drm_i915_private *dev_priv, \
+static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg) \
{ \
return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
}
#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(struct drm_i915_private *dev_priv, \
+static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
i915_reg_t reg, uint##x##_t val) \
{ \
write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 58e1db77d70e..532a577ff7a1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2321,7 +2321,7 @@ rebuild_st:
st->nents = 0;
for (i = 0; i < page_count; i++) {
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (IS_ERR(page)) {
+ if (unlikely(IS_ERR(page))) {
i915_gem_shrink(dev_priv,
page_count,
I915_SHRINK_BOUND |
@@ -2329,12 +2329,21 @@ rebuild_st:
I915_SHRINK_PURGEABLE);
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
}
- if (IS_ERR(page)) {
+ if (unlikely(IS_ERR(page))) {
+ gfp_t reclaim;
+
/* We've tried hard to allocate the memory by reaping
* our own buffer, now let the real VM do its job and
* go down in flames if truly OOM.
+ *
+ * However, since graphics tend to be disposable,
+ * defer the oom here by reporting the ENOMEM back
+ * to userspace.
*/
- page = shmem_read_mapping_page(mapping, i);
+ reclaim = mapping_gfp_mask(mapping);
+ reclaim |= __GFP_NORETRY; /* reclaim, but no oom */
+
+ page = shmem_read_mapping_page_gfp(mapping, i, reclaim);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err_sg;
@@ -2989,10 +2998,15 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
+ /* Retire completed requests first so the list of inflight/incomplete
+ * requests is accurate and we don't try and mark successful requests
+ * as in error during __i915_gem_set_wedged_BKL().
+ */
+ i915_gem_retire_requests(dev_priv);
+
stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
i915_gem_context_lost(dev_priv);
- i915_gem_retire_requests(dev_priv);
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
@@ -3098,9 +3112,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Wait for last execlists context complete, but bail out in case a
* new request is submitted.
*/
- wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
- intel_engines_are_idle(dev_priv),
- 10);
+ wait_for(intel_engines_are_idle(dev_priv), 10);
if (READ_ONCE(dev_priv->gt.active_requests))
return;
@@ -3259,6 +3271,29 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
return 0;
}
+static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
+{
+ return wait_for(intel_engine_is_idle(engine), timeout_ms);
+}
+
+static int wait_for_engines(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id) {
+ if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
+ i915_gem_set_wedged(i915);
+ return -EIO;
+ }
+
+ GEM_BUG_ON(intel_engine_get_seqno(engine) !=
+ intel_engine_last_submit(engine));
+ }
+
+ return 0;
+}
+
int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
int ret;
@@ -3273,13 +3308,16 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
if (ret)
return ret;
}
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
+
+ ret = wait_for_engines(i915);
} else {
ret = wait_for_timeline(&i915->gt.global_timeline, flags);
- if (ret)
- return ret;
}
- return 0;
+ return ret;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3307,8 +3345,14 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
* system agents we cannot reproduce this behaviour).
*/
wmb();
- if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
- POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+ if (intel_runtime_pm_get_if_in_use(dev_priv)) {
+ spin_lock_irq(&dev_priv->uncore.lock);
+ POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_runtime_pm_put(dev_priv);
+ }
+ }
intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
@@ -4408,13 +4452,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
if (ret)
goto err_unlock;
- i915_gem_retire_requests(dev_priv);
- GEM_BUG_ON(dev_priv->gt.active_requests);
-
assert_kernel_context_is_current(dev_priv);
i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
+ intel_guc_suspend(dev_priv);
+
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
cancel_delayed_work_sync(&dev_priv->gt.retire_work);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index d925fb582ba7..ffd01e02fe94 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -168,7 +168,7 @@ void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
i915_sw_fence_await_reservation(&clflush->wait,
obj->resv, NULL,
- false, I915_FENCE_TIMEOUT,
+ true, I915_FENCE_TIMEOUT,
GFP_KERNEL);
reservation_object_lock(obj->resv, NULL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 486051ed681d..8bd0c4966913 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -576,25 +576,25 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
}
static inline int
-mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
+mi_set_context(struct drm_i915_gem_request *req, u32 flags)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
- u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
- /* Use an extended w/a on ivb+ if signalling from other rings */
- i915.semaphores ?
+ /* Use an extended w/a on gen7 if signalling from other rings */
+ (i915.semaphores && INTEL_GEN(dev_priv) == 7) ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
int len;
+ u32 *cs;
- /* These flags are for resource streamer on HSW+ */
+ flags |= MI_MM_SPACE_GTT;
if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
- flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
- else if (INTEL_GEN(dev_priv) < 8)
- flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
-
+ /* These flags are for resource streamer on HSW+ */
+ flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
+ else
+ flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
len = 4;
if (INTEL_GEN(dev_priv) >= 7)
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 11898cd97596..f225bf680b6d 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -200,10 +200,10 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
- .kmap = i915_gem_dmabuf_kmap,
- .kmap_atomic = i915_gem_dmabuf_kmap_atomic,
- .kunmap = i915_gem_dmabuf_kunmap,
- .kunmap_atomic = i915_gem_dmabuf_kunmap_atomic,
+ .map = i915_gem_dmabuf_kmap,
+ .map_atomic = i915_gem_dmabuf_kmap_atomic,
+ .unmap = i915_gem_dmabuf_kunmap,
+ .unmap_atomic = i915_gem_dmabuf_kunmap_atomic,
.mmap = i915_gem_dmabuf_mmap,
.vmap = i915_gem_dmabuf_vmap,
.vunmap = i915_gem_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 2da3a94fc9f3..51e365f70464 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -196,7 +196,6 @@ search_again:
if (ret)
return ret;
- i915_gem_retire_requests(dev_priv);
goto search_again;
found:
@@ -383,7 +382,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
if (ret)
return ret;
- i915_gem_retire_requests(dev_priv);
WARN_ON(!list_empty(&vm->active_list));
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dd7181ed5eca..a3e59c8ef27b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -890,6 +890,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
struct list_head ordered_vmas;
struct list_head pinned_vmas;
bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
+ bool needs_unfenced_map = INTEL_INFO(engine->i915)->unfenced_needs_alignment;
int retry;
vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
@@ -910,7 +911,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
if (!has_fenced_gpu_access)
entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ (entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
+ needs_unfenced_map) &&
i915_gem_object_is_tiled(obj);
need_mappable = need_fence || need_reloc_mappable(vma);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index cee9c4fec52a..8bab4aea63e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2364,7 +2364,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (unlikely(ggtt->do_idle_maps)) {
- if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
+ if (i915_gem_wait_for_idle(dev_priv, 0)) {
DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0e8d1010cecb..5ddbc9499775 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -37,6 +37,17 @@ static const char *i915_fence_get_driver_name(struct dma_fence *fence)
static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
+ /* The timeline struct (as part of the ppgtt underneath a context)
+ * may be freed when the request is no longer in use by the GPU.
+ * We could extend the life of a context to beyond that of all
+ * fences, possibly keeping the hw resource around indefinitely,
+ * or we just give them a false name. Since
+ * dma_fence_ops.get_timeline_name is a debug feature, the occasional
+ * lie seems justifiable.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return "signaled";
+
return to_request(fence)->timeline->common->name;
}
@@ -180,7 +191,6 @@ i915_priotree_init(struct i915_priotree *pt)
static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
{
- struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
struct intel_engine_cs *engine;
enum intel_engine_id id;
int ret;
@@ -192,15 +202,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
if (ret)
return ret;
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests > 1);
-
/* If the seqno wraps around, we need to clear the breadcrumb rbtree */
for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- if (wait_for(intel_engine_is_idle(engine), 50))
- return -EBUSY;
+ struct i915_gem_timeline *timeline;
+ struct intel_timeline *tl = engine->timeline;
if (!i915_seqno_passed(seqno, tl->seqno)) {
/* spin until threads are complete */
@@ -211,14 +216,10 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
/* Finally reset hw state */
tl->seqno = seqno;
intel_engine_init_global_seqno(engine, seqno);
- }
-
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
- for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
- memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
- }
+ list_for_each_entry(timeline, &i915->gt.timelines, link)
+ memset(timeline->engine[id].sync_seqno, 0,
+ sizeof(timeline->engine[id].sync_seqno));
}
return 0;
@@ -295,7 +296,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
* completion order.
*/
list_del(&request->ring_link);
- request->ring->last_retired_head = request->postfix;
+ request->ring->head = request->postfix;
if (!--request->i915->gt.active_requests) {
GEM_BUG_ON(!request->i915->gt.awake);
mod_delayed_work(request->i915->wq,
@@ -651,6 +652,9 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
GEM_BUG_ON(to == from);
+ if (i915_gem_request_completed(from))
+ return 0;
+
if (to->engine->schedule) {
ret = i915_priotree_add_dependency(to->i915,
&to->priotree,
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 2978acdd995e..129ed303a6c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,6 +53,17 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
BUG();
}
+static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock)
+{
+ if (!unlock)
+ return;
+
+ mutex_unlock(&dev->struct_mutex);
+
+ /* expedite the RCU grace period to free some request slabs */
+ synchronize_rcu_expedited();
+}
+
static bool any_vma_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -232,11 +243,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
intel_runtime_pm_put(dev_priv);
i915_gem_retire_requests(dev_priv);
- if (unlock)
- mutex_unlock(&dev_priv->drm.struct_mutex);
- /* expedite the RCU grace period to free some request slabs */
- synchronize_rcu_expedited();
+ i915_gem_shrinker_unlock(&dev_priv->drm, unlock);
return count;
}
@@ -296,8 +304,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
count += obj->base.size >> PAGE_SHIFT;
}
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_shrinker_unlock(dev, unlock);
return count;
}
@@ -324,8 +331,8 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
sc->nr_to_scan - freed,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND);
- if (unlock)
- mutex_unlock(&dev->struct_mutex);
+
+ i915_gem_shrinker_unlock(dev, unlock);
return freed;
}
@@ -367,8 +374,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
struct shrinker_lock_uninterruptible *slu)
{
dev_priv->mm.interruptible = slu->was_interruptible;
- if (slu->unlock)
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ i915_gem_shrinker_unlock(&dev_priv->drm, slu->unlock);
}
static int
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 832ac9e45801..1642fff9cf13 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -30,16 +30,25 @@
/**
* DOC: GuC-based command submission
*
- * i915_guc_client:
- * We use the term client to avoid confusion with contexts. A i915_guc_client is
- * equivalent to GuC object guc_context_desc. This context descriptor is
- * allocated from a pool of 1024 entries. Kernel driver will allocate doorbell
- * and workqueue for it. Also the process descriptor (guc_process_desc), which
- * is mapped to client space. So the client can write Work Item then ring the
- * doorbell.
+ * GuC client:
+ * An i915_guc_client refers to a submission path through GuC. Currently, there
+ * is only one of these (the execbuf_client) and this one is charged with all
+ * submissions to the GuC. This struct is the owner of a doorbell, a process
+ * descriptor and a workqueue (all of them inside a single gem object that
+ * contains all required pages for these elements).
*
- * To simplify the implementation, we allocate one gem object that contains all
- * pages for doorbell, process descriptor and workqueue.
+ * GuC stage descriptor:
+ * During initialization, the driver allocates a static pool of 1024 such
+ * descriptors, and shares them with the GuC.
+ * Currently, there exists a 1:1 mapping between an i915_guc_client and a
+ * guc_stage_desc (via the client's stage_id), so effectively only one
+ * gets used. This stage descriptor lets the GuC know about the doorbell,
+ * workqueue and process descriptor. Theoretically, it also lets the GuC
+ * know about our HW contexts (context ID, etc...), but we actually
+ * employ a kind of submission where the GuC uses the LRCA sent via the work
+ * item instead (the single guc_stage_desc associated to execbuf client
+ * contains information about the default kernel context only, but this is
+ * essentially unused). This is called a "proxy" submission.
*
* The Scratch registers:
* There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
@@ -62,34 +71,91 @@
* ELSP context descriptor dword into Work Item.
* See guc_wq_item_append()
*
+ * ADS:
+ * The Additional Data Struct (ADS) has pointers for different buffers used by
+ * the GuC. One single gem object contains the ADS struct itself (guc_ads), the
+ * scheduling policies (guc_policies), a structure describing a collection of
+ * register sets (guc_mmio_reg_state) and some extra pages for the GuC to save
+ * its internal state for sleep.
+ *
*/
+static inline bool is_high_priority(struct i915_guc_client* client)
+{
+ return client->priority <= GUC_CLIENT_PRIORITY_HIGH;
+}
+
+static int __reserve_doorbell(struct i915_guc_client *client)
+{
+ unsigned long offset;
+ unsigned long end;
+ u16 id;
+
+ GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
+
+ /*
+ * The bitmap tracks which doorbell registers are currently in use.
+ * It is split into two halves; the first half is used for normal
+ * priority contexts, the second half for high-priority ones.
+ */
+ offset = 0;
+ end = GUC_NUM_DOORBELLS/2;
+ if (is_high_priority(client)) {
+ offset = end;
+ end += offset;
+ }
+
+ id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
+ if (id == end)
+ return -ENOSPC;
+
+ __set_bit(id, client->guc->doorbell_bitmap);
+ client->doorbell_id = id;
+ DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
+ client->stage_id, yesno(is_high_priority(client)),
+ id);
+ return 0;
+}
+
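
(Illustrative only; GUC_NUM_DOORBELLS is assumed to be 256 here. The removed select_doorbell_register() comment below notes that numerically lower GuC priorities are logically higher, which is why is_high_priority() tests "priority <= GUC_CLIENT_PRIORITY_HIGH".)

	/*
	 *   normal-priority clients reserve ids in [0 .. GUC_NUM_DOORBELLS/2)
	 *   high-priority clients reserve ids in [GUC_NUM_DOORBELLS/2 ..
	 *                                         GUC_NUM_DOORBELLS)
	 *
	 * With 256 doorbells that is ids 0..127 vs 128..255.
	 */
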
+static void __unreserve_doorbell(struct i915_guc_client *client)
+{
+ GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
+
+ __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+ client->doorbell_id = GUC_DOORBELL_INVALID;
+}
+
/*
* Tell the GuC to allocate or deallocate a specific doorbell
*/
-static int guc_allocate_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
u32 action[] = {
INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
- client->ctx_index
+ stage_id
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-static int guc_release_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
{
u32 action[] = {
INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
- client->ctx_index
+ stage_id
};
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
+{
+ struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
+
+ return &base[client->stage_id];
+}
+
/*
* Initialise, update, or clear doorbell data shared with the GuC
*
@@ -97,107 +163,129 @@ static int guc_release_doorbell(struct intel_guc *guc,
* client object which contains the page being used for the doorbell
*/
-static int guc_update_doorbell_id(struct intel_guc *guc,
- struct i915_guc_client *client,
- u16 new_id)
+static void __update_doorbell_desc(struct i915_guc_client *client, u16 new_id)
{
- struct sg_table *sg = guc->ctx_pool_vma->pages;
- void *doorbell_bitmap = guc->doorbell_bitmap;
- struct guc_doorbell_info *doorbell;
- struct guc_context_desc desc;
- size_t len;
+ struct guc_stage_desc *desc;
- doorbell = client->vaddr + client->doorbell_offset;
+ /* Update the GuC's idea of the doorbell ID */
+ desc = __get_stage_desc(client);
+ desc->db_id = new_id;
+}
- if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
- test_bit(client->doorbell_id, doorbell_bitmap)) {
- /* Deactivate the old doorbell */
- doorbell->db_status = GUC_DOORBELL_DISABLED;
- (void)guc_release_doorbell(guc, client);
- __clear_bit(client->doorbell_id, doorbell_bitmap);
- }
+static struct guc_doorbell_info *__get_doorbell(struct i915_guc_client *client)
+{
+ return client->vaddr + client->doorbell_offset;
+}
- /* Update the GuC's idea of the doorbell ID */
- len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
- if (len != sizeof(desc))
- return -EFAULT;
- desc.db_id = new_id;
- len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
- if (len != sizeof(desc))
- return -EFAULT;
-
- client->doorbell_id = new_id;
- if (new_id == GUC_INVALID_DOORBELL_ID)
- return 0;
+static bool has_doorbell(struct i915_guc_client *client)
+{
+ if (client->doorbell_id == GUC_DOORBELL_INVALID)
+ return false;
- /* Activate the new doorbell */
- __set_bit(new_id, doorbell_bitmap);
+ return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
+}
+
+static int __create_doorbell(struct i915_guc_client *client)
+{
+ struct guc_doorbell_info *doorbell;
+ int err;
+
+ doorbell = __get_doorbell(client);
doorbell->db_status = GUC_DOORBELL_ENABLED;
doorbell->cookie = client->doorbell_cookie;
- return guc_allocate_doorbell(guc, client);
+
+ err = __guc_allocate_doorbell(client->guc, client->stage_id);
+ if (err) {
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ doorbell->cookie = 0;
+ }
+ return err;
}
-static void guc_disable_doorbell(struct intel_guc *guc,
- struct i915_guc_client *client)
+static int __destroy_doorbell(struct i915_guc_client *client)
{
- (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
+ struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
+ struct guc_doorbell_info *doorbell;
+ u16 db_id = client->doorbell_id;
- /* XXX: wait for any interrupts */
- /* XXX: wait for workqueue to drain */
+ GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
+
+ doorbell = __get_doorbell(client);
+ doorbell->db_status = GUC_DOORBELL_DISABLED;
+ doorbell->cookie = 0;
+
+ /* Doorbell release flow requires that we wait for GEN8_DRB_VALID bit
+ * to go to zero after updating db_status before we call the GuC to
+ * release the doorbell */
+ if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
+ WARN_ONCE(true, "Doorbell never became invalid after disable\n");
+
+ return __guc_deallocate_doorbell(client->guc, client->stage_id);
}
-static uint16_t
-select_doorbell_register(struct intel_guc *guc, uint32_t priority)
+static int create_doorbell(struct i915_guc_client *client)
{
- /*
- * The bitmap tracks which doorbell registers are currently in use.
- * It is split into two halves; the first half is used for normal
- * priority contexts, the second half for high-priority ones.
- * Note that logically higher priorities are numerically less than
- * normal ones, so the test below means "is it high-priority?"
- */
- const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
- const uint16_t half = GUC_MAX_DOORBELLS / 2;
- const uint16_t start = hi_pri ? half : 0;
- const uint16_t end = start + half;
- uint16_t id;
+ int ret;
- id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
- if (id == end)
- id = GUC_INVALID_DOORBELL_ID;
+ ret = __reserve_doorbell(client);
+ if (ret)
+ return ret;
- DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
- hi_pri ? "high" : "normal", id);
+ __update_doorbell_desc(client, client->doorbell_id);
+
+ ret = __create_doorbell(client);
+ if (ret)
+ goto err;
+
+ return 0;
- return id;
+err:
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+ __unreserve_doorbell(client);
+ return ret;
}
-/*
- * Select, assign and relase doorbell cachelines
- *
- * These functions track which doorbell cachelines are in use.
- * The data they manipulate is protected by the intel_guc_send lock.
- */
+static int destroy_doorbell(struct i915_guc_client *client)
+{
+ int err;
+
+ GEM_BUG_ON(!has_doorbell(client));
+
+ /* XXX: wait for any interrupts */
+ /* XXX: wait for workqueue to drain */
+
+ err = __destroy_doorbell(client);
+ if (err)
+ return err;
+
+ __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
+
+ __unreserve_doorbell(client);
-static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
+ return 0;
+}
+
+static unsigned long __select_cacheline(struct intel_guc* guc)
{
- const uint32_t cacheline_size = cache_line_size();
- uint32_t offset;
+ unsigned long offset;
/* Doorbell uses a single cache line within a page */
offset = offset_in_page(guc->db_cacheline);
/* Moving to next cache line to reduce contention */
- guc->db_cacheline += cacheline_size;
-
- DRM_DEBUG_DRIVER("selected doorbell cacheline 0x%x, next 0x%x, linesize %u\n",
- offset, guc->db_cacheline, cacheline_size);
+ guc->db_cacheline += cache_line_size();
+ DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
+ offset, guc->db_cacheline, cache_line_size());
return offset;
}
+static inline struct guc_process_desc *
+__get_process_desc(struct i915_guc_client *client)
+{
+ return client->vaddr + client->proc_desc_offset;
+}
+
/*
* Initialise the process descriptor shared with the GuC firmware.
*/
@@ -206,9 +294,7 @@ static void guc_proc_desc_init(struct intel_guc *guc,
{
struct guc_process_desc *desc;
- desc = client->vaddr + client->proc_desc_offset;
-
- memset(desc, 0, sizeof(*desc));
+ desc = memset(__get_process_desc(client), 0, sizeof(*desc));
/*
* XXX: pDoorbell and WQVBaseAddress are pointers in process address
@@ -219,42 +305,41 @@ static void guc_proc_desc_init(struct intel_guc *guc,
desc->wq_base_addr = 0;
desc->db_base_addr = 0;
- desc->context_id = client->ctx_index;
+ desc->stage_id = client->stage_id;
desc->wq_size_bytes = client->wq_size;
desc->wq_status = WQ_STATUS_ACTIVE;
desc->priority = client->priority;
}
/*
- * Initialise/clear the context descriptor shared with the GuC firmware.
+ * Initialise/clear the stage descriptor shared with the GuC firmware.
*
* This descriptor tells the GuC where (in GGTT space) to find the important
* data structures relating to this client (doorbell, process descriptor,
* write queue, etc).
*/
-
-static void guc_ctx_desc_init(struct intel_guc *guc,
- struct i915_guc_client *client)
+static void guc_stage_desc_init(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct intel_engine_cs *engine;
struct i915_gem_context *ctx = client->owner;
- struct guc_context_desc desc;
- struct sg_table *sg;
+ struct guc_stage_desc *desc;
unsigned int tmp;
u32 gfx_addr;
- memset(&desc, 0, sizeof(desc));
+ desc = __get_stage_desc(client);
+ memset(desc, 0, sizeof(*desc));
- desc.attribute = GUC_CTX_DESC_ATTR_ACTIVE | GUC_CTX_DESC_ATTR_KERNEL;
- desc.context_id = client->ctx_index;
- desc.priority = client->priority;
- desc.db_id = client->doorbell_id;
+ desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE | GUC_STAGE_DESC_ATTR_KERNEL;
+ desc->stage_id = client->stage_id;
+ desc->priority = client->priority;
+ desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
struct intel_context *ce = &ctx->engine[engine->id];
uint32_t guc_engine_id = engine->guc_id;
- struct guc_execlist_context *lrc = &desc.lrc[guc_engine_id];
+ struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
/* TODO: We have a design issue to be solved here. Only when we
* receive the first batch, we know which engine is used by the
@@ -266,12 +351,22 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
if (!ce->state)
break; /* XXX: continue? */
+ /*
+ * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
+ * submission or, in other words, not using a direct submission
+ * model) the KMD's LRCA is not used for any work submission.
+ * Instead, the GuC uses the LRCA of the user mode context (see
+ * guc_wq_item_append below).
+ */
lrc->context_desc = lower_32_bits(ce->lrc_desc);
/* The state page is after PPHWSP */
- lrc->ring_lcra =
+ lrc->ring_lrca =
guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
- lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
+
+ /* XXX: In direct submission, the GuC wants the HW context id
+ * here. In proxy submission, it wants the stage id */
+ lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
(guc_engine_id << GUC_ELC_ENGINE_OFFSET);
lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
@@ -279,50 +374,36 @@ static void guc_ctx_desc_init(struct intel_guc *guc,
lrc->ring_next_free_location = lrc->ring_begin;
lrc->ring_current_tail_pointer_value = 0;
- desc.engines_used |= (1 << guc_engine_id);
+ desc->engines_used |= (1 << guc_engine_id);
}
DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
- client->engines, desc.engines_used);
- WARN_ON(desc.engines_used == 0);
+ client->engines, desc->engines_used);
+ WARN_ON(desc->engines_used == 0);
/*
* The doorbell, process descriptor, and workqueue are all parts
* of the client object, which the GuC will reference via the GGTT
*/
gfx_addr = guc_ggtt_offset(client->vma);
- desc.db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
+ desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
client->doorbell_offset;
- desc.db_trigger_cpu =
- (uintptr_t)client->vaddr + client->doorbell_offset;
- desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
- desc.process_desc = gfx_addr + client->proc_desc_offset;
- desc.wq_addr = gfx_addr + client->wq_offset;
- desc.wq_size = client->wq_size;
-
- /*
- * XXX: Take LRCs from an existing context if this is not an
- * IsKMDCreatedContext client
- */
- desc.desc_private = (uintptr_t)client;
+ desc->db_trigger_cpu = (uintptr_t)__get_doorbell(client);
+ desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
+ desc->process_desc = gfx_addr + client->proc_desc_offset;
+ desc->wq_addr = gfx_addr + client->wq_offset;
+ desc->wq_size = client->wq_size;
- /* Pool context is pinned already */
- sg = guc->ctx_pool_vma->pages;
- sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
+ desc->desc_private = (uintptr_t)client;
}
-static void guc_ctx_desc_fini(struct intel_guc *guc,
- struct i915_guc_client *client)
+static void guc_stage_desc_fini(struct intel_guc *guc,
+ struct i915_guc_client *client)
{
- struct guc_context_desc desc;
- struct sg_table *sg;
-
- memset(&desc, 0, sizeof(desc));
+ struct guc_stage_desc *desc;
- sg = guc->ctx_pool_vma->pages;
- sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
- sizeof(desc) * client->ctx_index);
+ desc = __get_stage_desc(client);
+ memset(desc, 0, sizeof(*desc));
}
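
Both guc_stage_desc_init() and guc_stage_desc_fini() now write the descriptor in place instead of staging it on the stack and copying it through the scatterlist. A minimal sketch of the accessor this relies on follows; the real __get_stage_desc() is defined outside this hunk, so treat the body as an assumption about the new layout rather than a quote from the patch.

static struct guc_stage_desc *__get_stage_desc(struct i915_guc_client *client)
{
	/* stage_desc_pool_vaddr is the permanently pinned, WB-mapped
	 * descriptor pool set up in i915_guc_submission_init(); the
	 * client's stage_id simply indexes into it. */
	struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;

	return &base[client->stage_id];
}
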
/**
@@ -345,8 +426,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
{
const size_t wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
- struct guc_process_desc *desc = client->vaddr +
- client->proc_desc_offset;
+ struct guc_process_desc *desc = __get_process_desc(client);
u32 freespace;
int ret;
@@ -391,19 +471,17 @@ static void guc_wq_item_append(struct i915_guc_client *client,
const size_t wqi_size = sizeof(struct guc_wq_item);
const u32 wqi_len = wqi_size/sizeof(u32) - 1;
struct intel_engine_cs *engine = rq->engine;
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = __get_process_desc(client);
struct guc_wq_item *wqi;
u32 freespace, tail, wq_off;
- desc = client->vaddr + client->proc_desc_offset;
-
/* Free space is guaranteed, see i915_guc_wq_reserve() above */
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
GEM_BUG_ON(freespace < wqi_size);
/* The GuC firmware wants the tail index in QWords, not bytes */
tail = rq->tail;
- GEM_BUG_ON(tail & 7);
+ assert_ring_tail_valid(rq->ring, rq->tail);
tail >>= 3;
GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
@@ -436,19 +514,27 @@ static void guc_wq_item_append(struct i915_guc_client *client,
/* The GuC wants only the low-order word of the context descriptor */
wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, engine);
- wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
+ wqi->submit_element_info = tail << WQ_RING_TAIL_SHIFT;
wqi->fence_id = rq->global_seqno;
}
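
The work-queue item packing above mixes three units (bytes, dwords and qwords). A small, self-contained illustration with assumed sizes; the struct size and the example tail value are illustrative, not quoted from this patch:

	const size_t wqi_size = sizeof(struct guc_wq_item);	/* e.g. 16 bytes */
	const u32 wqi_len = wqi_size / sizeof(u32) - 1;		/* 4 dwords -> 3 */
	u32 tail = 0x140;					/* example byte tail */

	tail >>= 3;						/* 0x28 qwords for the firmware */
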
+static void guc_reset_wq(struct i915_guc_client *client)
+{
+ struct guc_process_desc *desc = __get_process_desc(client);
+
+ desc->head = 0;
+ desc->tail = 0;
+
+ client->wq_tail = 0;
+}
+
static int guc_ring_doorbell(struct i915_guc_client *client)
{
- struct guc_process_desc *desc;
+ struct guc_process_desc *desc = __get_process_desc(client);
union guc_doorbell_qw db_cmp, db_exc, db_ret;
union guc_doorbell_qw *db;
int attempt = 2, ret = -EAGAIN;
- desc = client->vaddr + client->proc_desc_offset;
-
/* Update the tail so it is visible to GuC */
desc->tail = client->wq_tail;
@@ -463,7 +549,7 @@ static int guc_ring_doorbell(struct i915_guc_client *client)
db_exc.cookie = 1;
/* pointer of current doorbell cacheline */
- db = client->vaddr + client->doorbell_offset;
+ db = (union guc_doorbell_qw *)__get_doorbell(client);
while (attempt--) {
/* lets ring the doorbell */
@@ -573,23 +659,10 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
{
struct execlist_port *port = engine->execlist_port;
struct drm_i915_gem_request *last = port[0].request;
- unsigned long flags;
struct rb_node *rb;
bool submit = false;
- /* After execlist_first is updated, the tasklet will be rescheduled.
- *
- * If we are currently running (inside the tasklet) and a third
- * party queues a request and so updates engine->execlist_first under
- * the spinlock (which we have elided), it will atomically set the
- * TASKLET_SCHED flag causing the us to be re-executed and pick up
- * the change in state (the update to TASKLET_SCHED incurs a memory
- * barrier making this cross-cpu checking safe).
- */
- if (!READ_ONCE(engine->execlist_first))
- return false;
-
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *rq =
@@ -609,8 +682,8 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
RB_CLEAR_NODE(&rq->priotree.node);
rq->priotree.priority = INT_MAX;
- trace_i915_gem_request_in(rq, port - engine->execlist_port);
i915_guc_submit(rq);
+ trace_i915_gem_request_in(rq, port - engine->execlist_port);
last = rq;
submit = true;
}
@@ -619,7 +692,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
nested_enable_signaling(last);
engine->execlist_first = rb;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irq(&engine->timeline->lock);
return submit;
}
@@ -695,93 +768,100 @@ err:
return vma;
}
-static void
-guc_client_free(struct drm_i915_private *dev_priv,
- struct i915_guc_client *client)
+/* Check that a doorbell register is in the expected state */
+static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
{
- struct intel_guc *guc = &dev_priv->guc;
-
- if (!client)
- return;
-
- /*
- * XXX: wait for any outstanding submissions before freeing memory.
- * Be sure to drop any locks
- */
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ u32 drbregl;
+ bool valid;
- if (client->vaddr) {
- /*
- * If we got as far as setting up a doorbell, make sure we
- * shut it down before unmapping & deallocating the memory.
- */
- guc_disable_doorbell(guc, client);
+ GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
- i915_gem_object_unpin_map(client->vma->obj);
- }
+ drbregl = I915_READ(GEN8_DRBREGL(db_id));
+ valid = drbregl & GEN8_DRB_VALID;
- i915_vma_unpin_and_release(&client->vma);
+ if (test_bit(db_id, guc->doorbell_bitmap) == valid)
+ return true;
- if (client->ctx_index != GUC_INVALID_CTX_ID) {
- guc_ctx_desc_fini(guc, client);
- ida_simple_remove(&guc->ctx_ids, client->ctx_index);
- }
+ DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
+ db_id, drbregl, yesno(valid));
- kfree(client);
+ return false;
}
-/* Check that a doorbell register is in the expected state */
-static bool guc_doorbell_check(struct intel_guc *guc, uint16_t db_id)
+/*
+ * If the GuC thinks that the doorbell is unassigned (e.g. because we reset and
+ * reloaded the GuC FW) we can use this function to tell the GuC to reassign the
+ * doorbell to the rightful owner.
+ */
+static int __reset_doorbell(struct i915_guc_client* client, u16 db_id)
{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- i915_reg_t drbreg = GEN8_DRBREGL(db_id);
- uint32_t value = I915_READ(drbreg);
- bool enabled = (value & GUC_DOORBELL_ENABLED) != 0;
- bool expected = test_bit(db_id, guc->doorbell_bitmap);
-
- if (enabled == expected)
- return true;
+ int err;
- DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) 0x%x, should be %s\n",
- db_id, drbreg.reg, value,
- expected ? "active" : "inactive");
+ __update_doorbell_desc(client, db_id);
+ err = __create_doorbell(client);
+ if (!err)
+ err = __destroy_doorbell(client);
- return false;
+ return err;
}
/*
- * Borrow the first client to set up & tear down each unused doorbell
- * in turn, to ensure that all doorbell h/w is (re)initialised.
+ * Set up & tear down each unused doorbell in turn, to ensure that all doorbell
+ * HW is (re)initialised. To that end, we might have to borrow the first
+ * client. Also, tell the GuC about all the doorbells in use by all clients.
+ * We do this because the KMD, the GuC and the doorbell HW can easily go out of
+ * sync (e.g. we can reset the GuC, but not the doorbell HW).
*/
-static void guc_init_doorbell_hw(struct intel_guc *guc)
+static int guc_init_doorbell_hw(struct intel_guc *guc)
{
struct i915_guc_client *client = guc->execbuf_client;
- uint16_t db_id;
- int i, err;
-
- guc_disable_doorbell(guc, client);
+ bool recreate_first_client = false;
+ u16 db_id;
+ int ret;
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
- /* Skip if doorbell is OK */
- if (guc_doorbell_check(guc, i))
+ /* For unused doorbells, make sure they are disabled */
+ for_each_clear_bit(db_id, guc->doorbell_bitmap, GUC_NUM_DOORBELLS) {
+ if (doorbell_ok(guc, db_id))
continue;
- err = guc_update_doorbell_id(guc, client, i);
- if (err)
- DRM_DEBUG_DRIVER("Doorbell %d update failed, err %d\n",
- i, err);
+ if (has_doorbell(client)) {
+ /* Borrow execbuf_client (we will recreate it later) */
+ destroy_doorbell(client);
+ recreate_first_client = true;
+ }
+
+ ret = __reset_doorbell(client, db_id);
+ WARN(ret, "Doorbell %u reset failed, err %d\n", db_id, ret);
}
- db_id = select_doorbell_register(guc, client->priority);
- WARN_ON(db_id == GUC_INVALID_DOORBELL_ID);
+ if (recreate_first_client) {
+ ret = __reserve_doorbell(client);
+ if (unlikely(ret)) {
+ DRM_ERROR("Couldn't re-reserve first client db: %d\n", ret);
+ return ret;
+ }
+
+ __update_doorbell_desc(client, client->doorbell_id);
+ }
- err = guc_update_doorbell_id(guc, client, db_id);
- if (err)
- DRM_WARN("Failed to restore doorbell to %d, err %d\n",
- db_id, err);
+ /* Now, for every client (and not only execbuf_client), make sure its
+ * doorbell is known to the GuC */
+ //for (client = client_list; client != NULL; client = client->next)
+ {
+ ret = __create_doorbell(client);
+ if (ret) {
+ DRM_ERROR("Couldn't recreate client %u doorbell: %d\n",
+ client->stage_id, ret);
+ return ret;
+ }
+ }
- /* Read back & verify all doorbell registers */
- for (i = 0; i < GUC_MAX_DOORBELLS; ++i)
- (void)guc_doorbell_check(guc, i);
+ /* Read back & verify all (used & unused) doorbell registers */
+ for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
+ WARN_ON(!doorbell_ok(guc, db_id));
+
+ return 0;
}
/**
@@ -807,49 +887,46 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
void *vaddr;
- uint16_t db_id;
+ int ret;
client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- client->owner = ctx;
client->guc = guc;
+ client->owner = ctx;
client->engines = engines;
client->priority = priority;
- client->doorbell_id = GUC_INVALID_DOORBELL_ID;
+ client->doorbell_id = GUC_DOORBELL_INVALID;
+ client->wq_offset = GUC_DB_SIZE;
+ client->wq_size = GUC_WQ_SIZE;
+ spin_lock_init(&client->wq_lock);
- client->ctx_index = (uint32_t)ida_simple_get(&guc->ctx_ids, 0,
- GUC_MAX_GPU_CONTEXTS, GFP_KERNEL);
- if (client->ctx_index >= GUC_MAX_GPU_CONTEXTS) {
- client->ctx_index = GUC_INVALID_CTX_ID;
- goto err;
- }
+ ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err_client;
+
+ client->stage_id = ret;
/* The first page is doorbell/proc_desc. Two followed pages are wq. */
vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
- if (IS_ERR(vma))
- goto err;
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_id;
+ }
/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
client->vma = vma;
vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
- if (IS_ERR(vaddr))
- goto err;
-
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_vma;
+ }
client->vaddr = vaddr;
- spin_lock_init(&client->wq_lock);
- client->wq_offset = GUC_DB_SIZE;
- client->wq_size = GUC_WQ_SIZE;
-
- db_id = select_doorbell_register(guc, client->priority);
- if (db_id == GUC_INVALID_DOORBELL_ID)
- /* XXX: evict a doorbell instead? */
- goto err;
-
- client->doorbell_offset = select_doorbell_cacheline(guc);
+ client->doorbell_offset = __select_cacheline(guc);
/*
* Since the doorbell only requires a single cacheline, we can save
@@ -862,28 +939,47 @@ guc_client_alloc(struct drm_i915_private *dev_priv,
client->proc_desc_offset = (GUC_DB_SIZE / 2);
guc_proc_desc_init(guc, client);
- guc_ctx_desc_init(guc, client);
+ guc_stage_desc_init(guc, client);
- /* For runtime client allocation we need to enable the doorbell. Not
- * required yet for the static execbuf_client as this special kernel
- * client is enabled from i915_guc_submission_enable().
- *
- * guc_update_doorbell_id(guc, client, db_id);
- */
+ ret = create_doorbell(client);
+ if (ret)
+ goto err_vaddr;
- DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: ctx_index %u\n",
- priority, client, client->engines, client->ctx_index);
- DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
- client->doorbell_id, client->doorbell_offset);
+ DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
+ priority, client, client->engines, client->stage_id);
+ DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
+ client->doorbell_id, client->doorbell_offset);
return client;
-err:
- guc_client_free(dev_priv, client);
- return NULL;
+err_vaddr:
+ i915_gem_object_unpin_map(client->vma->obj);
+err_vma:
+ i915_vma_unpin_and_release(&client->vma);
+err_id:
+ ida_simple_remove(&guc->stage_ids, client->stage_id);
+err_client:
+ kfree(client);
+ return ERR_PTR(ret);
}
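
guc_client_alloc() lays the client buffer out as one GUC_DB_SIZE page holding the doorbell cacheline (in the first half) and the process descriptor (at GUC_DB_SIZE / 2), followed by GUC_WQ_SIZE of work queue. A sketch of the accessors the rest of this file uses against that layout; the real helpers are defined outside this hunk, so the bodies here are an assumption:

static void *__get_doorbell(struct i915_guc_client *client)
{
	/* doorbell cacheline within the permanently kmap'd first page */
	return client->vaddr + client->doorbell_offset;
}

static struct guc_process_desc *__get_process_desc(struct i915_guc_client *client)
{
	/* process descriptor in the second half of the same page */
	return client->vaddr + client->proc_desc_offset;
}
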
+static void guc_client_free(struct i915_guc_client *client)
+{
+ /*
+ * XXX: wait for any outstanding submissions before freeing memory.
+ * Be sure to drop any locks
+ */
+ /* FIXME: in many cases, by the time we get here the GuC has been
+ * reset, so we cannot destroy the doorbell properly. Ignore the
+ * error message for now */
+ destroy_doorbell(client);
+ guc_stage_desc_fini(client->guc, client);
+ i915_gem_object_unpin_map(client->vma->obj);
+ i915_vma_unpin_and_release(&client->vma);
+ ida_simple_remove(&client->guc->stage_ids, client->stage_id);
+ kfree(client);
+}
static void guc_policies_init(struct guc_policies *policies)
{
@@ -893,7 +989,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->dpc_promote_time = 500000;
policies->max_num_work_items = POLICY_MAX_NUM_WI;
- for (p = 0; p < GUC_CTX_PRIORITY_NUM; p++) {
+ for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
for (i = GUC_RENDER_ENGINE; i < GUC_MAX_ENGINES_NUM; i++) {
policy = &policies->policy[p][i];
@@ -907,7 +1003,7 @@ static void guc_policies_init(struct guc_policies *policies)
policies->is_valid = 1;
}
-static void guc_addon_create(struct intel_guc *guc)
+static int guc_ads_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_vma *vma;
@@ -923,14 +1019,13 @@ static void guc_addon_create(struct intel_guc *guc)
enum intel_engine_id id;
u32 base;
- vma = guc->ads_vma;
- if (!vma) {
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
- if (IS_ERR(vma))
- return;
+ GEM_BUG_ON(guc->ads_vma);
- guc->ads_vma = vma;
- }
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ guc->ads_vma = vma;
page = i915_vma_first_page(vma);
blob = kmap(page);
@@ -940,11 +1035,11 @@ static void guc_addon_create(struct intel_guc *guc)
/* MMIO reg state */
for_each_engine(engine, dev_priv, id) {
- blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
+ blob->reg_state.white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
+ blob->reg_state.white_list[engine->guc_id].count = 0;
}
/*
@@ -967,67 +1062,75 @@ static void guc_addon_create(struct intel_guc *guc)
blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
kunmap(page);
+
+ return 0;
+}
+
+static void guc_ads_destroy(struct intel_guc *guc)
+{
+ i915_vma_unpin_and_release(&guc->ads_vma);
}
/*
- * Set up the memory resources to be shared with the GuC. At this point,
- * we require just one object that can be mapped through the GGTT.
+ * Set up the memory resources to be shared with the GuC (via the GGTT)
+ * at firmware loading time.
*/
int i915_guc_submission_init(struct drm_i915_private *dev_priv)
{
- const size_t ctxsize = sizeof(struct guc_context_desc);
- const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
- const size_t gemsize = round_up(poolsize, PAGE_SIZE);
struct intel_guc *guc = &dev_priv->guc;
struct i915_vma *vma;
+ void *vaddr;
+ int ret;
- if (!HAS_GUC_SCHED(dev_priv))
+ if (guc->stage_desc_pool)
return 0;
- /* Wipe bitmap & delete client in case of reinitialisation */
- bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
- i915_guc_submission_disable(dev_priv);
-
- if (!i915.enable_guc_submission)
- return 0; /* not enabled */
-
- if (guc->ctx_pool_vma)
- return 0; /* already allocated */
-
- vma = intel_guc_allocate_vma(guc, gemsize);
+ vma = intel_guc_allocate_vma(guc,
+ PAGE_ALIGN(sizeof(struct guc_stage_desc) *
+ GUC_MAX_STAGE_DESCRIPTORS));
if (IS_ERR(vma))
return PTR_ERR(vma);
- guc->ctx_pool_vma = vma;
- ida_init(&guc->ctx_ids);
- intel_guc_log_create(guc);
- guc_addon_create(guc);
-
- guc->execbuf_client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
- GUC_CTX_PRIORITY_KMD_NORMAL,
- dev_priv->kernel_context);
- if (!guc->execbuf_client) {
- DRM_ERROR("Failed to create GuC client for execbuf!\n");
- goto err;
+ guc->stage_desc_pool = vma;
+
+ vaddr = i915_gem_object_pin_map(guc->stage_desc_pool->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto err_vma;
}
+ guc->stage_desc_pool_vaddr = vaddr;
+
+ ret = intel_guc_log_create(guc);
+ if (ret < 0)
+ goto err_vaddr;
+
+ ret = guc_ads_create(guc);
+ if (ret < 0)
+ goto err_log;
+
+ ida_init(&guc->stage_ids);
+
return 0;
-err:
- i915_guc_submission_fini(dev_priv);
- return -ENOMEM;
+err_log:
+ intel_guc_log_destroy(guc);
+err_vaddr:
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+err_vma:
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
+ return ret;
}
-static void guc_reset_wq(struct i915_guc_client *client)
+void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
{
- struct guc_process_desc *desc = client->vaddr +
- client->proc_desc_offset;
-
- desc->head = 0;
- desc->tail = 0;
+ struct intel_guc *guc = &dev_priv->guc;
- client->wq_tail = 0;
+ ida_destroy(&guc->stage_ids);
+ guc_ads_destroy(guc);
+ intel_guc_log_destroy(guc);
+ i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
+ i915_vma_unpin_and_release(&guc->stage_desc_pool);
}
static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
@@ -1072,20 +1175,60 @@ static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
}
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int irqs;
+
+ /*
+ * tell all command streamers NOT to forward interrupts or vblank
+ * to GuC.
+ */
+ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+ irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+ /* route all GT interrupts to the host */
+ I915_WRITE(GUC_BCS_RCS_IER, 0);
+ I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+ I915_WRITE(GUC_WD_VECS_IER, 0);
+
+ dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+}
+
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
struct i915_guc_client *client = guc->execbuf_client;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err;
+
+ if (!client) {
+ client = guc_client_alloc(dev_priv,
+ INTEL_INFO(dev_priv)->ring_mask,
+ GUC_CLIENT_PRIORITY_KMD_NORMAL,
+ dev_priv->kernel_context);
+ if (IS_ERR(client)) {
+ DRM_ERROR("Failed to create GuC client for execbuf!\n");
+ return PTR_ERR(client);
+ }
- if (!client)
- return -ENODEV;
+ guc->execbuf_client = client;
+ }
- intel_guc_sample_forcewake(guc);
+ err = intel_guc_sample_forcewake(guc);
+ if (err)
+ goto err_execbuf_client;
guc_reset_wq(client);
- guc_init_doorbell_hw(guc);
+
+ err = guc_init_doorbell_hw(guc);
+ if (err)
+ goto err_execbuf_client;
/* Take over from manual control of ELSP (execlists) */
guc_interrupts_capture(dev_priv);
@@ -1112,30 +1255,11 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
}
return 0;
-}
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
-
- /*
- * tell all command streamers NOT to forward interrupts or vblank
- * to GuC.
- */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
- /* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
-
- dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
- dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+err_execbuf_client:
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
+ return err;
}
void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
@@ -1144,30 +1268,11 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
guc_interrupts_release(dev_priv);
- if (!guc->execbuf_client)
- return;
-
/* Revert back to manual ELSP submission */
intel_engines_reset_default_submission(dev_priv);
-}
-
-void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_guc *guc = &dev_priv->guc;
- struct i915_guc_client *client;
- client = fetch_and_zero(&guc->execbuf_client);
- if (!client)
- return;
-
- guc_client_free(dev_priv, client);
-
- i915_vma_unpin_and_release(&guc->ads_vma);
- i915_vma_unpin_and_release(&guc->log.vma);
-
- if (guc->ctx_pool_vma)
- ida_destroy(&guc->ctx_ids);
- i915_vma_unpin_and_release(&guc->ctx_pool_vma);
+ guc_client_free(guc->execbuf_client);
+ guc->execbuf_client = NULL;
}
/**
@@ -1196,7 +1301,6 @@ int intel_guc_suspend(struct drm_i915_private *dev_priv)
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-
/**
* intel_guc_resume() - notify GuC resuming from suspend state
* @dev_priv: i915 device private
@@ -1222,5 +1326,3 @@ int intel_guc_resume(struct drm_i915_private *dev_priv)
return intel_guc_send(guc, data, ARRAY_SIZE(data));
}
-
-
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 8163d5024ff8..fd97fe00cd0d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1742,8 +1742,8 @@ static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);
/* Handle flush interrupt in bottom half */
- queue_work(dev_priv->guc.log.flush_wq,
- &dev_priv->guc.log.flush_work);
+ queue_work(dev_priv->guc.log.runtime.flush_wq,
+ &dev_priv->guc.log.runtime.flush_work);
dev_priv->guc.log.flush_interrupt_count++;
} else {
@@ -4252,12 +4252,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev_priv->rps.pm_intrmsk_mbz = 0;
/*
- * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
+ * SNB,IVB,HSW can, while VLV,CHV may, hard hang on looping batchbuffer
* if GEN6_PM_UP_EI_EXPIRED is masked.
*
* TODO: verify if this can be reproduced on VLV,CHV.
*/
- if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ if (INTEL_INFO(dev_priv)->gen <= 7)
dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_INFO(dev_priv)->gen >= 8)
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 732101ed57fb..f87b0c4e564d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -61,6 +61,7 @@
.has_overlay = 1, .overlay_needs_physical = 1, \
.has_gmch_display = 1, \
.hws_needs_physical = 1, \
+ .unfenced_needs_alignment = 1, \
.ring_mask = RENDER_RING, \
GEN_DEFAULT_PIPEOFFSETS, \
CURSOR_OFFSETS
@@ -102,6 +103,7 @@ static const struct intel_device_info intel_i915g_info = {
.platform = INTEL_I915G, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i915gm_info = {
@@ -113,6 +115,7 @@ static const struct intel_device_info intel_i915gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945g_info = {
@@ -121,6 +124,7 @@ static const struct intel_device_info intel_i945g_info = {
.has_hotplug = 1, .cursor_needs_physical = 1,
.has_overlay = 1, .overlay_needs_physical = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_i945gm_info = {
@@ -131,6 +135,7 @@ static const struct intel_device_info intel_i945gm_info = {
.supports_tv = 1,
.has_fbc = 1,
.hws_needs_physical = 1,
+ .unfenced_needs_alignment = 1,
};
static const struct intel_device_info intel_g33_info = {
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 8c121187ff39..060b171480d5 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1705,7 +1705,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
*/
if (WARN_ON(stream->sample_flags != props->sample_flags)) {
ret = -ENODEV;
- goto err_alloc;
+ goto err_flags;
}
list_add(&stream->link, &dev_priv->perf.streams);
@@ -1728,6 +1728,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv,
err_open:
list_del(&stream->link);
+err_flags:
if (stream->ops->destroy)
stream->ops->destroy(stream);
err_alloc:
@@ -1793,6 +1794,11 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
if (ret)
return ret;
+ if (id == 0 || id >= DRM_I915_PERF_PROP_MAX) {
+ DRM_DEBUG("Unknown i915 perf property ID\n");
+ return -EINVAL;
+ }
+
switch ((enum drm_i915_perf_property_id)id) {
case DRM_I915_PERF_PROP_CTX_HANDLE:
props->single_context = 1;
@@ -1862,9 +1868,8 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv,
props->oa_periodic = true;
props->oa_period_exponent = value;
break;
- default:
+ case DRM_I915_PERF_PROP_MAX:
MISSING_CASE(id);
- DRM_DEBUG("Unknown i915 perf property ID\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 04c8f69fcc62..11b12f412492 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7829,7 +7829,14 @@ enum {
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1<<8)
+#define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1<<7)
+#define TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ (1<<6)
#define TRANS_DDI_BFI_ENABLE (1<<4)
+#define TRANS_DDI_HIGH_TMDS_CHAR_RATE (1<<4)
+#define TRANS_DDI_HDMI_SCRAMBLING (1<<0)
+#define TRANS_DDI_HDMI_SCRAMBLING_MASK (TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE \
+ | TRANS_DDI_HDMI_SCRAMBLER_RESET_FREQ \
+ | TRANS_DDI_HDMI_SCRAMBLING)
/* DisplayPort Transport Control */
#define _DP_TP_CTL_A 0x64040
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 94a3a3299910..c5455d36b617 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,24 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
+#undef WARN_ON
+/* Many gcc versions seem to not see through this and fall over :( */
+#if 0
+#define WARN_ON(x) ({ \
+ bool __i915_warn_cond = (x); \
+ if (__builtin_constant_p(__i915_warn_cond)) \
+ BUILD_BUG_ON(__i915_warn_cond); \
+ WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
+#else
+#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
+#endif
+
+#undef WARN_ON_ONCE
+#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")")
+
+#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
+ (long)(x), __func__)
+
#if GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
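
MISSING_CASE() (moved here along with the WARN_ON wrappers) is the driver's idiom for flagging an unexpected value in a switch without crashing. A typical use, mirroring the cursor code later in this same patch, would look like:

	switch (plane_state->base.crtc_w) {
	case 64:
	case 128:
	case 256:
		break;
	default:
		MISSING_CASE(plane_state->base.crtc_w);	/* warn once, keep going */
		return 0;
	}
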
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index ba986edee312..9ccbf26124c6 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -47,11 +47,12 @@ static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned long flags;
unsigned int result;
- spin_lock_irq(&b->irq_lock);
+ spin_lock_irqsave(&b->irq_lock, flags);
result = __intel_breadcrumbs_wakeup(b);
- spin_unlock_irq(&b->irq_lock);
+ spin_unlock_irqrestore(&b->irq_lock, flags);
return result;
}
@@ -579,6 +580,8 @@ static int intel_breadcrumbs_signaler(void *arg)
signaler_set_rtpriority();
do {
+ bool do_schedule = true;
+
set_current_state(TASK_INTERRUPTIBLE);
/* We are either woken up by the interrupt bottom-half,
@@ -625,9 +628,23 @@ static int intel_breadcrumbs_signaler(void *arg)
spin_unlock_irq(&b->rb_lock);
i915_gem_request_put(request);
- } else {
+
+ /* If the engine is saturated we may be continually
+ * processing completed requests. This angers the
+ * NMI watchdog if we never let anything else
+ * have access to the CPU. Let's pretend to be nice
+ * and relinquish the CPU if we burn through the
+ * entire RT timeslice!
+ */
+ do_schedule = need_resched();
+ }
+
+ if (unlikely(do_schedule)) {
DEFINE_WAIT(exec);
+ if (kthread_should_park())
+ kthread_parkme();
+
if (kthread_should_stop()) {
GEM_BUG_ON(request);
break;
@@ -640,9 +657,6 @@ static int intel_breadcrumbs_signaler(void *arg)
if (request)
remove_wait_queue(&request->execute, &exec);
-
- if (kthread_should_park())
- kthread_parkme();
}
i915_gem_request_put(request);
} while (1);
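
A simplified shape of the signaler loop after this change; signal_complete() stands in for the real completion check and the wait-queue handling is elided, so this is a sketch of the control flow rather than the actual function. The point is that a saturated engine no longer keeps the RT thread on the CPU unconditionally: it only goes back to sleep when the scheduler asks for the CPU.

	for (;;) {
		bool do_schedule = true;

		set_current_state(TASK_INTERRUPTIBLE);

		if (signal_complete(request)) {
			/* ... signal the fence, pick up the next request ... */

			/* keep draining, but yield if the scheduler wants us off */
			do_schedule = need_resched();
		}

		if (unlikely(do_schedule)) {
			if (kthread_should_park())
				kthread_parkme();
			if (kthread_should_stop())
				break;
			schedule();
		}
	}
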
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index c2cc33f3d888..dd3ad52b7dfe 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1442,16 +1442,33 @@ static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
- /* BSpec says "Do not use DisplayPort with CDCLK less than
- * 432 MHz, audio enabled, port width x4, and link rate
- * HBR2 (5.4 GHz), or else there may be audio corruption or
- * screen corruption."
+ /* BSpec says "Do not use DisplayPort with CDCLK less than 432 MHz,
+ * audio enabled, port width x4, and link rate HBR2 (5.4 GHz), or else
+ * there may be audio corruption or screen corruption." This cdclk
+ * restriction for GLK is 316.8 MHz and since GLK can output two
+ * pixels per clock, the pixel rate becomes 2 * 316.8 MHz.
*/
if (intel_crtc_has_dp_encoder(crtc_state) &&
crtc_state->has_audio &&
crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4)
- pixel_rate = max(432000, pixel_rate);
+ crtc_state->lane_count == 4) {
+ if (IS_GEMINILAKE(dev_priv))
+ pixel_rate = max(2 * 316800, pixel_rate);
+ else
+ pixel_rate = max(432000, pixel_rate);
+ }
+
+ /* According to BSpec, "The CD clock frequency must be at least twice
+ * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
+ * The check for GLK has to be adjusted as the platform can output
+ * two pixels per clock.
+ */
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9) {
+ if (IS_GEMINILAKE(dev_priv))
+ pixel_rate = max(2 * 2 * 96000, pixel_rate);
+ else
+ pixel_rate = max(2 * 96000, pixel_rate);
+ }
return pixel_rate;
}
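
Worked numbers for the two pixel-rate floors above (kHz), using only the values quoted in the comments:

	/*
	 * DP + audio + x4 lanes + HBR2:  GLK floor = 2 * 316800 = 633600
	 *                                others    =     432000
	 * Azalia BCLK (96 MHz) on gen9+: GLK floor = 2 * 2 * 96000 = 384000
	 *                                others    =     2 * 96000 = 192000
	 *
	 * pixel_rate is then clamped up to whichever floors apply.
	 */
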
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 36832257cc9b..1575bde0cf90 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -49,7 +49,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
MODULE_FIRMWARE(I915_CSR_BXT);
#define BXT_CSR_VERSION_REQUIRED CSR_VERSION(1, 7)
-#define FIRMWARE_URL "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
+#define FIRMWARE_URL "https://01.org/linuxgraphics/downloads/firmware"
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index d8214ba8da14..0914ad96a71b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -539,7 +539,7 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
* values in advance. This function programs the correct values for
* DP/eDP/FDI use cases.
*/
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
+static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
@@ -806,7 +806,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
DP_TP_CTL_ENABLE);
}
-void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
+static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
@@ -837,7 +837,8 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
return ret;
}
-static struct intel_encoder *
+/* Finds the only possible encoder associated with the given CRTC. */
+struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -1127,72 +1128,6 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
bxt_ddi_clock_get(encoder, pipe_config);
}
-static bool
-hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- struct intel_shared_dpll *pll;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state,
- encoder);
- if (!pll)
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
-
- return pll;
-}
-
-static bool
-skl_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- struct intel_shared_dpll *pll;
-
- pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
- if (pll == NULL) {
- DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
- pipe_name(intel_crtc->pipe));
- return false;
- }
-
- return true;
-}
-
-static bool
-bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
-{
- return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
-}
-
-/*
- * Tries to find a *shared* PLL for the CRTC and store it in
- * intel_crtc->ddi_pll_sel.
- *
- * For private DPLLs, compute_config() should do the selection for us. This
- * function should be folded into compute_config() eventually.
- */
-bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_encoder *encoder =
- intel_ddi_get_crtc_new_encoder(crtc_state);
-
- if (IS_GEN9_BC(dev_priv))
- return skl_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
- else if (IS_GEN9_LP(dev_priv))
- return bxt_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
- else
- return hsw_ddi_pll_select(intel_crtc, crtc_state,
- encoder);
-}
-
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -1309,6 +1244,11 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
+
+ if (crtc_state->hdmi_scrambling)
+ temp |= TRANS_DDI_HDMI_SCRAMBLING_MASK;
+ if (crtc_state->hdmi_high_tmds_clock_ratio)
+ temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (crtc_state->fdi_lanes - 1) << 1;
@@ -1676,8 +1616,8 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
return DDI_BUF_TRANS_SELECT(level);
}
-void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll)
+static void intel_ddi_clk_select(struct intel_encoder *encoder,
+ struct intel_shared_dpll *pll)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
@@ -1881,6 +1821,12 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
+ bool clock_ratio = pipe_config->hdmi_high_tmds_clock_ratio;
+ bool scrambling = pipe_config->hdmi_scrambling;
+
+ intel_hdmi_handle_sink_scrambling(intel_encoder,
+ conn_state->connector,
+ clock_ratio, scrambling);
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
@@ -1914,6 +1860,12 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
if (old_crtc_state->has_audio)
intel_audio_codec_disable(intel_encoder);
+ if (type == INTEL_OUTPUT_HDMI) {
+ intel_hdmi_handle_sink_scrambling(intel_encoder,
+ old_conn_state->connector,
+ false, false);
+ }
+
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2040,6 +1992,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
+
+ if ((temp & TRANS_DDI_HDMI_SCRAMBLING_MASK) ==
+ TRANS_DDI_HDMI_SCRAMBLING_MASK)
+ pipe_config->hdmi_scrambling = true;
+ if (temp & TRANS_DDI_HIGH_TMDS_CHAR_RATE)
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
/* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
pipe_config->lane_count = 4;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index e217d04133e6..3617927af269 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1997,7 +1997,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
unsigned int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
return cpp;
case I915_FORMAT_MOD_X_TILED:
if (IS_GEN2(dev_priv))
@@ -2033,7 +2033,7 @@ intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
static unsigned int
intel_tile_height(const struct drm_framebuffer *fb, int plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 1;
else
return intel_tile_size(to_i915(fb->dev)) /
@@ -2107,7 +2107,7 @@ static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
return 4096;
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
if (INTEL_GEN(dev_priv) >= 9)
@@ -2290,7 +2290,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
WARN_ON(new_offset > old_offset);
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_size, tile_width, tile_height;
unsigned int pitch_tiles;
@@ -2345,7 +2345,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
if (alignment)
alignment--;
- if (fb_modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb_modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_size, tile_width, tile_height;
unsigned int tile_rows, tiles, pitch_tiles;
@@ -2471,7 +2471,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
DRM_ROTATE_0, tile_size);
offset /= tile_size;
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
unsigned int tile_width, tile_height;
unsigned int pitch_tiles;
struct drm_rect r;
@@ -2803,7 +2803,7 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
int cpp = fb->format->cpp[plane];
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
switch (cpp) {
case 8:
@@ -2962,28 +2962,27 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
return 0;
}
-static void i9xx_update_primary_plane(struct drm_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(primary->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
- u32 linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- unsigned long irqflags;
+ u32 dspcntr;
- dspcntr = DISPPLANE_GAMMA_ENABLE;
+ dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPLAY_PLANE_ENABLE;
+ if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) ||
+ IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 4) {
- if (intel_crtc->pipe == PIPE_B)
+ if (crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
}
@@ -3010,7 +3009,8 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
if (INTEL_GEN(dev_priv) >= 4 &&
@@ -3023,25 +3023,66 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (rotation & DRM_REFLECT_X)
dspcntr |= DISPPLANE_MIRROR;
- if (IS_G4X(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+ return dspcntr;
+}
- intel_add_fb_offsets(&x, &y, plane_state, 0);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ int src_x = plane_state->base.src.x1 >> 16;
+ int src_y = plane_state->base.src.y1 >> 16;
+ u32 offset;
+
+ intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
if (INTEL_GEN(dev_priv) >= 4)
- intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, plane_state, 0);
+ offset = intel_compute_tile_offset(&src_x, &src_y,
+ plane_state, 0);
+ else
+ offset = 0;
- if (rotation & DRM_ROTATE_180) {
- x += crtc_state->pipe_src_w - 1;
- y += crtc_state->pipe_src_h - 1;
- } else if (rotation & DRM_REFLECT_X) {
- x += crtc_state->pipe_src_w - 1;
+ /* HSW/BDW do this automagically in hardware */
+ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
+ unsigned int rotation = plane_state->base.rotation;
+ int src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ int src_h = drm_rect_height(&plane_state->base.src) >> 16;
+
+ if (rotation & DRM_ROTATE_180) {
+ src_x += src_w - 1;
+ src_y += src_h - 1;
+ } else if (rotation & DRM_REFLECT_X) {
+ src_x += src_w - 1;
+ }
}
+ plane_state->main.offset = offset;
+ plane_state->main.x = src_x;
+ plane_state->main.y = src_y;
+
+ return 0;
+}
+
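
An example of the rotation adjustment above, with illustrative numbers: for a 1920x1080 source rotated by DRM_ROTATE_180 on pre-HSW/BDW hardware, src_x and src_y are advanced by (src_w - 1, src_h - 1) = (1919, 1079) so the plane scans out from the opposite corner; HSW/BDW apply the same correction in hardware, which is why they are skipped.

	/* hypothetical values, not taken from the patch */
	int src_w = 1920, src_h = 1080;
	src_x += src_w - 1;	/* += 1919 */
	src_y += src_h - 1;	/* += 1079 */
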
+static void i9xx_update_primary_plane(struct drm_plane *primary,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(primary->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int plane = intel_crtc->plane;
+ u32 linear_offset;
+ u32 dspcntr = plane_state->ctl;
+ i915_reg_t reg = DSPCNTR(plane);
+ int x = plane_state->main.x;
+ int y = plane_state->main.y;
+ unsigned long irqflags;
+
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (INTEL_GEN(dev_priv) < 4)
+ if (INTEL_GEN(dev_priv) >= 4)
+ intel_crtc->dspaddr_offset = plane_state->main.offset;
+ else
intel_crtc->dspaddr_offset = linear_offset;
intel_crtc->adjusted_x = x;
@@ -3068,7 +3109,12 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE_FW(reg, dspcntr);
I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
- if (INTEL_GEN(dev_priv) >= 4) {
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ I915_WRITE_FW(DSPSURF(plane),
+ intel_plane_ggtt_offset(plane_state) +
+ intel_crtc->dspaddr_offset);
+ I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
+ } else if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE_FW(DSPSURF(plane),
intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
@@ -3105,101 +3151,10 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void ironlake_update_primary_plane(struct drm_plane *primary,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_device *dev = primary->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int plane = intel_crtc->plane;
- u32 linear_offset;
- u32 dspcntr;
- i915_reg_t reg = DSPCNTR(plane);
- unsigned int rotation = plane_state->base.rotation;
- int x = plane_state->base.src.x1 >> 16;
- int y = plane_state->base.src.y1 >> 16;
- unsigned long irqflags;
-
- dspcntr = DISPPLANE_GAMMA_ENABLE;
- dspcntr |= DISPLAY_PLANE_ENABLE;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case DRM_FORMAT_RGB565:
- dspcntr |= DISPPLANE_BGRX565;
- break;
- case DRM_FORMAT_XRGB8888:
- dspcntr |= DISPPLANE_BGRX888;
- break;
- case DRM_FORMAT_XBGR8888:
- dspcntr |= DISPPLANE_RGBX888;
- break;
- case DRM_FORMAT_XRGB2101010:
- dspcntr |= DISPPLANE_BGRX101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- dspcntr |= DISPPLANE_RGBX101010;
- break;
- default:
- BUG();
- }
-
- if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- dspcntr |= DISPPLANE_TILED;
-
- if (rotation & DRM_ROTATE_180)
- dspcntr |= DISPPLANE_ROTATE_180;
-
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv))
- dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
-
- intel_add_fb_offsets(&x, &y, plane_state, 0);
-
- intel_crtc->dspaddr_offset =
- intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- /* HSW+ does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
- rotation & DRM_ROTATE_180) {
- x += crtc_state->pipe_src_w - 1;
- y += crtc_state->pipe_src_h - 1;
- }
-
- linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
-
- intel_crtc->adjusted_x = x;
- intel_crtc->adjusted_y = y;
-
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
-
- I915_WRITE_FW(reg, dspcntr);
-
- I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE_FW(DSPSURF(plane),
- intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
- } else {
- I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
- }
- POSTING_READ_FW(reg);
-
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
-}
-
static u32
intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
- if (fb->modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
return 64;
else
return intel_tile_width_bytes(fb, plane);
@@ -3254,7 +3209,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
return stride;
}
-u32 skl_plane_ctl_format(uint32_t pixel_format)
+static u32 skl_plane_ctl_format(uint32_t pixel_format)
{
switch (pixel_format) {
case DRM_FORMAT_C8:
@@ -3295,10 +3250,10 @@ u32 skl_plane_ctl_format(uint32_t pixel_format)
return 0;
}
-u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+static u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
switch (fb_modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
return PLANE_CTL_TILED_X;
@@ -3313,7 +3268,7 @@ u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
return 0;
}
-u32 skl_plane_ctl_rotation(unsigned int rotation)
+static u32 skl_plane_ctl_rotation(unsigned int rotation)
{
switch (rotation) {
case DRM_ROTATE_0:
@@ -3335,6 +3290,37 @@ u32 skl_plane_ctl_rotation(unsigned int rotation)
return 0;
}
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ unsigned int rotation = plane_state->base.rotation;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ u32 plane_ctl;
+
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (!IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |=
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE |
+ PLANE_CTL_PLANE_GAMMA_DISABLE;
+ }
+
+ plane_ctl |= skl_plane_ctl_format(fb->format->format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
+
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
+
+ return plane_ctl;
+}
+
static void skylake_update_primary_plane(struct drm_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
@@ -3345,7 +3331,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = to_intel_plane(plane)->id;
enum pipe pipe = to_intel_plane(plane)->pipe;
- u32 plane_ctl;
+ u32 plane_ctl = plane_state->ctl;
unsigned int rotation = plane_state->base.rotation;
u32 stride = skl_plane_stride(fb, 0, rotation);
u32 surf_addr = plane_state->main.offset;
@@ -3360,19 +3346,6 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
int dst_h = drm_rect_height(&plane_state->base.dst);
unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (!IS_GEMINILAKE(dev_priv)) {
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -6306,6 +6279,17 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
static void compute_m_n(unsigned int m, unsigned int n,
uint32_t *ret_m, uint32_t *ret_n)
{
+ /*
+ * Reduce M/N as much as possible without loss in precision. Several DP
+ * dongles in particular seem to be fussy about too large *link* M/N
+ * values. The passed in values are more likely to have the least
+ * significant bits zero than M after rounding below, so do this first.
+ */
+ while ((m & 1) == 0 && (n & 1) == 0) {
+ m >>= 1;
+ n >>= 1;
+ }
+
*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
*ret_m = div_u64((uint64_t) m * *ret_n, n);
intel_reduce_m_n_ratio(ret_m, ret_n);
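
An example of the pre-reduction above with made-up numbers: m = 321400 and n = 540000 are both divisible by 8, so the loop halves them three times to 40175 / 67500 (stopping once m becomes odd) before n is rounded up to a power of two and m is rescaled, which keeps more low-order precision in the final link M/N.

	/* illustrative values only */
	unsigned int m = 321400, n = 540000;
	while ((m & 1) == 0 && (n & 1) == 0) {
		m >>= 1;	/* 160700, 80350, 40175 */
		n >>= 1;	/* 270000, 135000, 67500 */
	}
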
@@ -8395,7 +8379,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
tiling = val & PLANE_CTL_TILED_MASK;
switch (tiling) {
case PLANE_CTL_TILED_LINEAR:
- fb->modifier = DRM_FORMAT_MOD_NONE;
+ fb->modifier = DRM_FORMAT_MOD_LINEAR;
break;
case PLANE_CTL_TILED_X:
plane_config->tiling = I915_TILING_X;
@@ -8851,8 +8835,14 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
- if (!intel_ddi_pll_select(crtc, crtc_state))
+ struct intel_encoder *encoder =
+ intel_ddi_get_crtc_new_encoder(crtc_state);
+
+ if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+ DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
+ }
}
crtc->lowfreq_avail = false;
@@ -9148,6 +9138,31 @@ out:
return active;
}
+static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int width = plane_state->base.crtc_w;
+ unsigned int stride = roundup_pow_of_two(width) * 4;
+
+ switch (stride) {
+ default:
+ WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
+ width, stride);
+ stride = 256;
+ /* fallthrough */
+ case 256:
+ case 512:
+ case 1024:
+ case 2048:
+ break;
+ }
+
+ return CURSOR_ENABLE |
+ CURSOR_GAMMA_ENABLE |
+ CURSOR_FORMAT_ARGB |
+ CURSOR_STRIDE(stride);
+}
+
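
An example of the stride rule in i845_cursor_ctl(), with a made-up width: a 100-pixel-wide ARGB cursor is padded to the next power of two, 128 pixels, giving a stride of 128 * 4 = 512 bytes, which is one of the values the i845 cursor hardware accepts.

	/* hypothetical width, not from the patch */
	unsigned int width = 100;
	unsigned int stride = roundup_pow_of_two(width) * 4;	/* 128 * 4 = 512 */
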
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
@@ -9159,26 +9174,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
if (plane_state && plane_state->base.visible) {
unsigned int width = plane_state->base.crtc_w;
unsigned int height = plane_state->base.crtc_h;
- unsigned int stride = roundup_pow_of_two(width) * 4;
-
- switch (stride) {
- default:
- WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
- width, stride);
- stride = 256;
- /* fallthrough */
- case 256:
- case 512:
- case 1024:
- case 2048:
- break;
- }
-
- cntl |= CURSOR_ENABLE |
- CURSOR_GAMMA_ENABLE |
- CURSOR_FORMAT_ARGB |
- CURSOR_STRIDE(stride);
+ cntl = plane_state->ctl;
size = (height << 12) | width;
}
@@ -9211,6 +9208,43 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
}
}
+static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ enum pipe pipe = crtc->pipe;
+ u32 cntl;
+
+ cntl = MCURSOR_GAMMA_ENABLE;
+
+ if (HAS_DDI(dev_priv))
+ cntl |= CURSOR_PIPE_CSC_ENABLE;
+
+ cntl |= pipe << 28; /* Connect to correct pipe */
+
+ switch (plane_state->base.crtc_w) {
+ case 64:
+ cntl |= CURSOR_MODE_64_ARGB_AX;
+ break;
+ case 128:
+ cntl |= CURSOR_MODE_128_ARGB_AX;
+ break;
+ case 256:
+ cntl |= CURSOR_MODE_256_ARGB_AX;
+ break;
+ default:
+ MISSING_CASE(plane_state->base.crtc_w);
+ return 0;
+ }
+
+ if (plane_state->base.rotation & DRM_ROTATE_180)
+ cntl |= CURSOR_ROTATE_180;
+
+ return cntl;
+}
+
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
const struct intel_plane_state *plane_state)
{
@@ -9220,30 +9254,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
int pipe = intel_crtc->pipe;
uint32_t cntl = 0;
- if (plane_state && plane_state->base.visible) {
- cntl = MCURSOR_GAMMA_ENABLE;
- switch (plane_state->base.crtc_w) {
- case 64:
- cntl |= CURSOR_MODE_64_ARGB_AX;
- break;
- case 128:
- cntl |= CURSOR_MODE_128_ARGB_AX;
- break;
- case 256:
- cntl |= CURSOR_MODE_256_ARGB_AX;
- break;
- default:
- MISSING_CASE(plane_state->base.crtc_w);
- return;
- }
- cntl |= pipe << 28; /* Connect to correct pipe */
-
- if (HAS_DDI(dev_priv))
- cntl |= CURSOR_PIPE_CSC_ENABLE;
-
- if (plane_state->base.rotation & DRM_ROTATE_180)
- cntl |= CURSOR_ROTATE_180;
- }
+ if (plane_state && plane_state->base.visible)
+ cntl = plane_state->ctl;
if (intel_crtc->cursor_cntl != cntl) {
I915_WRITE_FW(CURCNTR(pipe), cntl);
@@ -10338,7 +10350,7 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
ctl = I915_READ(PLANE_CTL(pipe, 0));
ctl &= ~PLANE_CTL_TILED_MASK;
switch (fb->modifier) {
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
break;
case I915_FORMAT_MOD_X_TILED:
ctl |= PLANE_CTL_TILED_X;
@@ -11692,6 +11704,9 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
PIPE_CONF_CHECK_I(limited_color_range);
+
+ PIPE_CONF_CHECK_I(hdmi_scrambling);
+ PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio);
PIPE_CONF_CHECK_I(has_infoframe);
PIPE_CONF_CHECK_I(has_audio);
@@ -12992,17 +13007,6 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
- /*
- * The intel_legacy_cursor_update() fast path takes care
- * of avoiding the vblank waits for simple cursor
- * movement and flips. For cursor on/off and size changes,
- * we want to perform the vblank waits so that watermark
- * updates happen during the correct frames. Gen9+ have
- * double buffered watermarks and so shouldn't need this.
- */
- if (INTEL_GEN(dev_priv) < 9)
- state->legacy_cursor_update = false;
-
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -13018,6 +13022,26 @@ static int intel_atomic_commit(struct drm_device *dev,
return ret;
}
+ /*
+ * The intel_legacy_cursor_update() fast path takes care
+ * of avoiding the vblank waits for simple cursor
+ * movement and flips. For cursor on/off and size changes,
+ * we want to perform the vblank waits so that watermark
+ * updates happen during the correct frames. Gen9+ have
+ * double buffered watermarks and so shouldn't need this.
+ *
+ * Do this after drm_atomic_helper_setup_commit() and
+ * intel_atomic_prepare_commit() because we still want
+ * to skip the flip and fb cleanup waits. Although that
+ * does risk yanking the mapping from under the display
+ * engine.
+ *
+ * FIXME doing watermarks and fb cleanup from a vblank worker
+ * (assuming we had any) would solve these problems.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ state->legacy_cursor_update = false;
+
drm_atomic_helper_swap_state(state, true);
dev_priv->wm.distrust_bios_wm = false;
intel_shared_dpll_swap_state(state);
@@ -13285,6 +13309,14 @@ intel_check_primary_plane(struct drm_plane *plane,
ret = skl_check_plane_surface(state);
if (ret)
return ret;
+
+ state->ctl = skl_plane_ctl(crtc_state, state);
+ } else {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = i9xx_plane_ctl(crtc_state, state);
}
return 0;
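
The hunk above captures the broader pattern of this patch: derive the plane/cursor control word once during atomic check and cache it in intel_plane_state, so the commit path only writes a precomputed value. A minimal editorial sketch of that split, using the cursor plane as the example (example_check()/example_commit() are invented names; the other identifiers mirror structures touched by this patch, and error handling is elided):

static int example_check(struct intel_crtc_state *crtc_state,
                         struct intel_plane_state *state)
{
        /* expensive decisions happen once, at check time */
        state->ctl = i9xx_cursor_ctl(crtc_state, state);
        return 0;
}

static void example_commit(struct intel_crtc *crtc,
                           const struct intel_plane_state *state)
{
        /* commit is reduced to a register write of the cached value */
        I915_WRITE_FW(CURCNTR(crtc->pipe),
                      state->base.visible ? state->ctl : 0);
}
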
@@ -13544,12 +13576,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
primary->update_plane = skylake_update_primary_plane;
primary->disable_plane = skylake_disable_primary_plane;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- intel_primary_formats = i965_primary_formats;
- num_formats = ARRAY_SIZE(i965_primary_formats);
-
- primary->update_plane = ironlake_update_primary_plane;
- primary->disable_plane = i9xx_disable_primary_plane;
} else if (INTEL_GEN(dev_priv) >= 4) {
intel_primary_formats = i965_primary_formats;
num_formats = ARRAY_SIZE(i965_primary_formats);
@@ -13621,6 +13647,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *state)
{
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
enum pipe pipe = to_intel_plane(plane)->pipe;
@@ -13640,7 +13667,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return 0;
/* Check for which cursor types we support */
- if (!cursor_size_ok(to_i915(plane->dev), state->base.crtc_w,
+ if (!cursor_size_ok(dev_priv, state->base.crtc_w,
state->base.crtc_h)) {
DRM_DEBUG("Cursor dimension %dx%d not supported\n",
state->base.crtc_w, state->base.crtc_h);
@@ -13653,7 +13680,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -ENOMEM;
}
- if (fb->modifier != DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier != DRM_FORMAT_MOD_LINEAR) {
DRM_DEBUG_KMS("cursor cannot be tiled\n");
return -EINVAL;
}
@@ -13668,12 +13695,17 @@ intel_check_cursor_plane(struct drm_plane *plane,
* display power well must be turned off and on again.
* Refuse to put the cursor into that compromised position.
*/
- if (IS_CHERRYVIEW(to_i915(plane->dev)) && pipe == PIPE_C &&
+ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
state->base.visible && state->base.crtc_x < 0) {
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
+ if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
+ state->ctl = i845_cursor_ctl(crtc_state, state);
+ else
+ state->ctl = i9xx_cursor_ctl(crtc_state, state);
+
return 0;
}
@@ -14309,7 +14341,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0]);
goto err;
}
- case DRM_FORMAT_MOD_NONE:
+ case DRM_FORMAT_MOD_LINEAR:
case I915_FORMAT_MOD_X_TILED:
break;
default:
@@ -14332,7 +14364,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], pitch_limit);
goto err;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6e04cb54e3ff..ee77b519835c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -4636,9 +4636,20 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
*/
status = connector_status_disconnected;
goto out;
- } else if (connector->status == connector_status_connected) {
+ } else {
+ /*
+	 * If the display is now connected, check the link status;
+	 * there have been known issues of link loss triggering a
+	 * long pulse.
+ *
+	 * Some sinks (e.g. the ASUS PB287Q) seem to perform some
+ * weird HPD ping pong during modesets. So we can apparently
+ * end up with HPD going low during a modeset, and then
+ * going back up soon after. And once that happens we must
+ * retrain the link to get a picture. That's in case no
+ * userspace component reacted to intermittent HPD dip.
+ */
intel_dp_check_link_status(intel_dp);
- goto out;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index fb7afcf9e8be..aaee3949a422 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -398,6 +398,9 @@ struct intel_plane_state {
int x, y;
} aux;
+ /* plane control register */
+ u32 ctl;
+
/*
* scaler_id
* = -1 : not using a scaler
@@ -729,6 +732,12 @@ struct intel_crtc_state {
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
+
+ /* HDMI scrambling status */
+ bool hdmi_scrambling;
+
+ /* HDMI High TMDS char rate ratio */
+ bool hdmi_high_tmds_clock_ratio;
};
struct intel_crtc {
@@ -1220,12 +1229,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv);
void intel_crt_reset(struct drm_encoder *encoder);
/* intel_ddi.c */
-void intel_ddi_clk_select(struct intel_encoder *encoder,
- struct intel_shared_dpll *pll);
void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state);
-void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
void hsw_fdi_link_train(struct intel_crtc *crtc,
const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
@@ -1236,8 +1242,8 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-bool intel_ddi_pll_select(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
+struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
@@ -1246,7 +1252,6 @@ bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
@@ -1445,12 +1450,12 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
return i915_ggtt_offset(state->vma);
}
-u32 skl_plane_ctl_format(uint32_t pixel_format);
-u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
-u32 skl_plane_ctl_rotation(unsigned int rotation);
+u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
unsigned int rotation);
int skl_check_plane_surface(struct intel_plane_state *plane_state);
+int i9xx_check_plane_surface(struct intel_plane_state *plane_state);
/* intel_csr.c */
void intel_csr_ucode_init(struct drm_i915_private *);
@@ -1620,6 +1625,10 @@ struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
+void intel_hdmi_handle_sink_scrambling(struct intel_encoder *intel_encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 4200faa520c7..854e8e0c836b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -36,45 +36,45 @@ static const struct engine_info {
int (*init_execlists)(struct intel_engine_cs *engine);
} intel_engines[] = {
[RCS] = {
- .name = "render ring",
- .exec_id = I915_EXEC_RENDER,
+ .name = "rcs",
.hw_id = RCS_HW,
+ .exec_id = I915_EXEC_RENDER,
.mmio_base = RENDER_RING_BASE,
.irq_shift = GEN8_RCS_IRQ_SHIFT,
.init_execlists = logical_render_ring_init,
.init_legacy = intel_init_render_ring_buffer,
},
[BCS] = {
- .name = "blitter ring",
- .exec_id = I915_EXEC_BLT,
+ .name = "bcs",
.hw_id = BCS_HW,
+ .exec_id = I915_EXEC_BLT,
.mmio_base = BLT_RING_BASE,
.irq_shift = GEN8_BCS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_blt_ring_buffer,
},
[VCS] = {
- .name = "bsd ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs",
.hw_id = VCS_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN6_BSD_RING_BASE,
.irq_shift = GEN8_VCS1_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd_ring_buffer,
},
[VCS2] = {
- .name = "bsd2 ring",
- .exec_id = I915_EXEC_BSD,
+ .name = "vcs2",
.hw_id = VCS2_HW,
+ .exec_id = I915_EXEC_BSD,
.mmio_base = GEN8_BSD2_RING_BASE,
.irq_shift = GEN8_VCS2_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
.init_legacy = intel_init_bsd2_ring_buffer,
},
[VECS] = {
- .name = "video enhancement ring",
- .exec_id = I915_EXEC_VEBOX,
+ .name = "vecs",
.hw_id = VECS_HW,
+ .exec_id = I915_EXEC_VEBOX,
.mmio_base = VEBOX_RING_BASE,
.irq_shift = GEN8_VECS_IRQ_SHIFT,
.init_execlists = logical_xcs_ring_init,
@@ -242,12 +242,12 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
void *semaphores;
/* Semaphores are in noncoherent memory, flush to be safe */
- semaphores = kmap(page);
+ semaphores = kmap_atomic(page);
memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
0, I915_NUM_ENGINES * gen8_semaphore_seqno_size);
drm_clflush_virt_range(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0),
I915_NUM_ENGINES * gen8_semaphore_seqno_size);
- kunmap(page);
+ kunmap_atomic(semaphores);
}
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
@@ -1111,6 +1111,15 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ if (READ_ONCE(dev_priv->gt.active_requests))
+ return false;
+
+ /* If the driver is wedged, HW state may be very inconsistent and
+ * report that it is still busy, even though we have stopped using it.
+ */
+ if (i915_terminally_wedged(&dev_priv->gpu_error))
+ return true;
+
for_each_engine(engine, dev_priv, id) {
if (!intel_engine_is_idle(engine))
return false;
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 25691f0e4c50..cb36cbf3818f 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -26,14 +26,14 @@
#define GFXCORE_FAMILY_GEN9 12
#define GFXCORE_FAMILY_UNKNOWN 0x7fffffff
-#define GUC_CTX_PRIORITY_KMD_HIGH 0
-#define GUC_CTX_PRIORITY_HIGH 1
-#define GUC_CTX_PRIORITY_KMD_NORMAL 2
-#define GUC_CTX_PRIORITY_NORMAL 3
-#define GUC_CTX_PRIORITY_NUM 4
+#define GUC_CLIENT_PRIORITY_KMD_HIGH 0
+#define GUC_CLIENT_PRIORITY_HIGH 1
+#define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
+#define GUC_CLIENT_PRIORITY_NORMAL 3
+#define GUC_CLIENT_PRIORITY_NUM 4
-#define GUC_MAX_GPU_CONTEXTS 1024
-#define GUC_INVALID_CTX_ID GUC_MAX_GPU_CONTEXTS
+#define GUC_MAX_STAGE_DESCRIPTORS 1024
+#define GUC_INVALID_STAGE_ID GUC_MAX_STAGE_DESCRIPTORS
#define GUC_RENDER_ENGINE 0
#define GUC_VIDEO_ENGINE 1
@@ -68,14 +68,14 @@
#define GUC_DOORBELL_ENABLED 1
#define GUC_DOORBELL_DISABLED 0
-#define GUC_CTX_DESC_ATTR_ACTIVE (1 << 0)
-#define GUC_CTX_DESC_ATTR_PENDING_DB (1 << 1)
-#define GUC_CTX_DESC_ATTR_KERNEL (1 << 2)
-#define GUC_CTX_DESC_ATTR_PREEMPT (1 << 3)
-#define GUC_CTX_DESC_ATTR_RESET (1 << 4)
-#define GUC_CTX_DESC_ATTR_WQLOCKED (1 << 5)
-#define GUC_CTX_DESC_ATTR_PCH (1 << 6)
-#define GUC_CTX_DESC_ATTR_TERMINATED (1 << 7)
+#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
+#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
+#define GUC_STAGE_DESC_ATTR_KERNEL BIT(2)
+#define GUC_STAGE_DESC_ATTR_PREEMPT BIT(3)
+#define GUC_STAGE_DESC_ATTR_RESET BIT(4)
+#define GUC_STAGE_DESC_ATTR_WQLOCKED BIT(5)
+#define GUC_STAGE_DESC_ATTR_PCH BIT(6)
+#define GUC_STAGE_DESC_ATTR_TERMINATED BIT(7)
/* The guc control data is 10 DWORDs */
#define GUC_CTL_CTXINFO 0
@@ -241,8 +241,8 @@ union guc_doorbell_qw {
u64 value_qw;
} __packed;
-#define GUC_MAX_DOORBELLS 256
-#define GUC_INVALID_DOORBELL_ID (GUC_MAX_DOORBELLS)
+#define GUC_NUM_DOORBELLS 256
+#define GUC_DOORBELL_INVALID (GUC_NUM_DOORBELLS)
#define GUC_DB_SIZE (PAGE_SIZE)
#define GUC_WQ_SIZE (PAGE_SIZE * 2)
@@ -251,12 +251,12 @@ union guc_doorbell_qw {
struct guc_wq_item {
u32 header;
u32 context_desc;
- u32 ring_tail;
+ u32 submit_element_info;
u32 fence_id;
} __packed;
struct guc_process_desc {
- u32 context_id;
+ u32 stage_id;
u64 db_base_addr;
u32 head;
u32 tail;
@@ -278,7 +278,7 @@ struct guc_execlist_context {
u32 context_desc;
u32 context_id;
u32 ring_status;
- u32 ring_lcra;
+ u32 ring_lrca;
u32 ring_begin;
u32 ring_end;
u32 ring_next_free_location;
@@ -289,10 +289,18 @@ struct guc_execlist_context {
u16 engine_submit_queue_count;
} __packed;
-/*Context descriptor for communicating between uKernel and Driver*/
-struct guc_context_desc {
+/*
+ * This structure describes a stage set arranged for a particular communication
+ * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
+ * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
+ * to avoid confusion with all the other things already named "context" in the
+ * driver. A static pool of these descriptors are stored inside a GEM object
+ * (stage_desc_pool) which is held for the entire lifetime of our interaction
+ * with the GuC, being allocated before the GuC is loaded with its firmware.
+ */
+struct guc_stage_desc {
u32 sched_common_area;
- u32 context_id;
+ u32 stage_id;
u32 pas_id;
u8 engines_used;
u64 db_trigger_cpu;
@@ -359,7 +367,7 @@ struct guc_policy {
} __packed;
struct guc_policies {
- struct guc_policy policy[GUC_CTX_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
+ struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINES_NUM];
/* In micro seconds. How much time to allow before DPC processing is
* called back via interrupt (to prevent DPC queue drain starving).
@@ -401,16 +409,17 @@ struct guc_mmio_regset {
u32 number_of_registers;
} __packed;
+/* MMIO registers that are set as non privileged */
+struct mmio_white_list {
+ u32 mmio_start;
+ u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
+ u32 count;
+} __packed;
+
struct guc_mmio_reg_state {
struct guc_mmio_regset global_reg;
struct guc_mmio_regset engine_reg[GUC_MAX_ENGINES_NUM];
-
- /* MMIO registers that are set as non privileged */
- struct __packed {
- u32 mmio_start;
- u32 offsets[GUC_MMIO_WHITE_LIST_MAX];
- u32 count;
- } mmio_white_list[GUC_MAX_ENGINES_NUM];
+ struct mmio_white_list white_list[GUC_MAX_ENGINES_NUM];
} __packed;
/* GuC Additional Data Struct */
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 2f270d02894c..8a1a023e48b2 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -73,22 +73,6 @@ MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
#define I915_KBL_GUC_UCODE GUC_FW_PATH(kbl, KBL_FW_MAJOR, KBL_FW_MINOR)
MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
-/* User-friendly representation of an enum */
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
-{
- switch (status) {
- case INTEL_UC_FIRMWARE_FAIL:
- return "FAIL";
- case INTEL_UC_FIRMWARE_NONE:
- return "NONE";
- case INTEL_UC_FIRMWARE_PENDING:
- return "PENDING";
- case INTEL_UC_FIRMWARE_SUCCESS:
- return "SUCCESS";
- default:
- return "UNKNOWN!";
- }
-};
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
@@ -148,16 +132,14 @@ static void guc_params_init(struct drm_i915_private *dev_priv)
} else
params[GUC_CTL_DEBUG] = GUC_LOG_DISABLED;
- if (guc->ads_vma) {
+ /* If GuC submission is enabled, set up additional parameters here */
+ if (i915.enable_guc_submission) {
u32 ads = guc_ggtt_offset(guc->ads_vma) >> PAGE_SHIFT;
+ u32 pgs = guc_ggtt_offset(dev_priv->guc.stage_desc_pool);
+ u32 ctx_in_16 = GUC_MAX_STAGE_DESCRIPTORS / 16;
+
params[GUC_CTL_DEBUG] |= ads << GUC_ADS_ADDR_SHIFT;
params[GUC_CTL_DEBUG] |= GUC_ADS_ENABLED;
- }
-
- /* If GuC submission is enabled, set up additional parameters here */
- if (i915.enable_guc_submission) {
- u32 pgs = guc_ggtt_offset(dev_priv->guc.ctx_pool_vma);
- u32 ctx_in_16 = GUC_MAX_GPU_CONTEXTS / 16;
pgs >>= PAGE_SHIFT;
params[GUC_CTL_CTXINFO] = (pgs << GUC_CTL_BASE_ADDR_SHIFT) |
@@ -430,24 +412,3 @@ int intel_guc_select_fw(struct intel_guc *guc)
return 0;
}
-
-/**
- * intel_guc_fini() - clean up all allocated resources
- * @dev_priv: i915 device private
- */
-void intel_guc_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- struct drm_i915_gem_object *obj;
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_guc_submission_disable(dev_priv);
- i915_guc_submission_fini(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- obj = fetch_and_zero(&guc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 5c0f9a49da0e..6fb63a3c65b0 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -66,7 +66,6 @@ static int guc_log_control(struct intel_guc *guc, u32 control_val)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-
/*
* Sub buffer switch callback. Called whenever relay has to switch to a new
* sub buffer, relay stays on the same sub buffer if 0 is returned.
@@ -139,45 +138,15 @@ static struct rchan_callbacks relay_callbacks = {
.remove_buf_file = remove_buf_file_callback,
};
-static void guc_log_remove_relay_file(struct intel_guc *guc)
-{
- relay_close(guc->log.relay_chan);
-}
-
-static int guc_log_create_relay_channel(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
- struct rchan *guc_log_relay_chan;
- size_t n_subbufs, subbuf_size;
-
- /* Keep the size of sub buffers same as shared log buffer */
- subbuf_size = guc->log.vma->obj->base.size;
-
- /* Store up to 8 snapshots, which is large enough to buffer sufficient
- * boot time logs and provides enough leeway to User, in terms of
- * latency, for consuming the logs from relay. Also doesn't take
- * up too much memory.
- */
- n_subbufs = 8;
-
- guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
- n_subbufs, &relay_callbacks, dev_priv);
- if (!guc_log_relay_chan) {
- DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- return -ENOMEM;
- }
-
- GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
- guc->log.relay_chan = guc_log_relay_chan;
- return 0;
-}
-
-static int guc_log_create_relay_file(struct intel_guc *guc)
+static int guc_log_relay_file_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct dentry *log_dir;
int ret;
+ if (i915.guc_log_level < 0)
+ return 0;
+
/* For now create the log file in /sys/kernel/debug/dri/0 dir */
log_dir = dev_priv->drm.primary->debugfs_root;
@@ -197,8 +166,8 @@ static int guc_log_create_relay_file(struct intel_guc *guc)
return -ENODEV;
}
- ret = relay_late_setup_files(guc->log.relay_chan, "guc_log", log_dir);
- if (ret) {
+ ret = relay_late_setup_files(guc->log.runtime.relay_chan, "guc_log", log_dir);
+ if (ret < 0 && ret != -EEXIST) {
DRM_ERROR("Couldn't associate relay chan with file %d\n", ret);
return ret;
}
@@ -214,15 +183,15 @@ static void guc_move_to_next_buf(struct intel_guc *guc)
smp_wmb();
/* All data has been written, so now move the offset of sub buffer. */
- relay_reserve(guc->log.relay_chan, guc->log.vma->obj->base.size);
+ relay_reserve(guc->log.runtime.relay_chan, guc->log.vma->obj->base.size);
/* Switch to the next sub buffer */
- relay_flush(guc->log.relay_chan);
+ relay_flush(guc->log.runtime.relay_chan);
}
static void *guc_get_write_buffer(struct intel_guc *guc)
{
- if (!guc->log.relay_chan)
+ if (!guc->log.runtime.relay_chan)
return NULL;
/* Just get the base address of a new sub buffer and copy data into it
@@ -233,7 +202,7 @@ static void *guc_get_write_buffer(struct intel_guc *guc)
* done without using relay_reserve() along with relay_write(). So it's
* better to use relay_reserve() alone.
*/
- return relay_reserve(guc->log.relay_chan, 0);
+ return relay_reserve(guc->log.runtime.relay_chan, 0);
}
static bool guc_check_log_buf_overflow(struct intel_guc *guc,
@@ -284,11 +253,11 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
void *src_data, *dst_data;
bool new_overflow;
- if (WARN_ON(!guc->log.buf_addr))
+ if (WARN_ON(!guc->log.runtime.buf_addr))
return;
/* Get the pointer to shared GuC log buffer */
- log_buf_state = src_data = guc->log.buf_addr;
+ log_buf_state = src_data = guc->log.runtime.buf_addr;
/* Get the pointer to local buffer to store the logs */
log_buf_snapshot_state = dst_data = guc_get_write_buffer(guc);
@@ -371,153 +340,113 @@ static void guc_read_update_log_buffer(struct intel_guc *guc)
}
}
-static void guc_log_cleanup(struct intel_guc *guc)
-{
- struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- /* First disable the flush interrupt */
- gen9_disable_guc_interrupts(dev_priv);
-
- if (guc->log.flush_wq)
- destroy_workqueue(guc->log.flush_wq);
-
- guc->log.flush_wq = NULL;
-
- if (guc->log.relay_chan)
- guc_log_remove_relay_file(guc);
-
- guc->log.relay_chan = NULL;
-
- if (guc->log.buf_addr)
- i915_gem_object_unpin_map(guc->log.vma->obj);
-
- guc->log.buf_addr = NULL;
-}
-
static void capture_logs_work(struct work_struct *work)
{
struct intel_guc *guc =
- container_of(work, struct intel_guc, log.flush_work);
+ container_of(work, struct intel_guc, log.runtime.flush_work);
guc_log_capture_logs(guc);
}
-static int guc_log_create_extras(struct intel_guc *guc)
+static bool guc_log_has_runtime(struct intel_guc *guc)
+{
+ return guc->log.runtime.buf_addr != NULL;
+}
+
+static int guc_log_runtime_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
void *vaddr;
- int ret;
+ struct rchan *guc_log_relay_chan;
+ size_t n_subbufs, subbuf_size;
+ int ret = 0;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- /* Nothing to do */
- if (i915.guc_log_level < 0)
- return 0;
-
- if (!guc->log.buf_addr) {
- /* Create a WC (Uncached for read) vmalloc mapping of log
- * buffer pages, so that we can directly get the data
- * (up-to-date) from memory.
- */
- vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
- return ret;
- }
+ GEM_BUG_ON(guc_log_has_runtime(guc));
- guc->log.buf_addr = vaddr;
+ /* Create a WC (Uncached for read) vmalloc mapping of log
+ * buffer pages, so that we can directly get the data
+ * (up-to-date) from memory.
+ */
+ vaddr = i915_gem_object_pin_map(guc->log.vma->obj, I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ DRM_ERROR("Couldn't map log buffer pages %d\n", ret);
+ return PTR_ERR(vaddr);
}
- if (!guc->log.relay_chan) {
- /* Create a relay channel, so that we have buffers for storing
- * the GuC firmware logs, the channel will be linked with a file
- * later on when debugfs is registered.
- */
- ret = guc_log_create_relay_channel(guc);
- if (ret)
- return ret;
- }
+ guc->log.runtime.buf_addr = vaddr;
- if (!guc->log.flush_wq) {
- INIT_WORK(&guc->log.flush_work, capture_logs_work);
-
- /*
- * GuC log buffer flush work item has to do register access to
- * send the ack to GuC and this work item, if not synced before
- * suspend, can potentially get executed after the GFX device is
- * suspended.
- * By marking the WQ as freezable, we don't have to bother about
- * flushing of this work item from the suspend hooks, the pending
- * work item if any will be either executed before the suspend
- * or scheduled later on resume. This way the handling of work
- * item can be kept same between system suspend & rpm suspend.
- */
- guc->log.flush_wq = alloc_ordered_workqueue("i915-guc_log",
- WQ_HIGHPRI | WQ_FREEZABLE);
- if (guc->log.flush_wq == NULL) {
- DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-
-void intel_guc_log_create(struct intel_guc *guc)
-{
- struct i915_vma *vma;
- unsigned long offset;
- uint32_t size, flags;
+ /* Keep the size of sub buffers same as shared log buffer */
+ subbuf_size = guc->log.vma->obj->base.size;
- if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
- i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+	/* Store up to 8 snapshots, which is large enough to buffer sufficient
+	 * boot time logs and gives userspace enough leeway, in terms of
+	 * latency, to consume the logs from relay. It also doesn't take
+	 * up too much memory.
+ */
+ n_subbufs = 8;
- /* The first page is to save log buffer state. Allocate one
- * extra page for others in case for overlap */
- size = (1 + GUC_LOG_DPC_PAGES + 1 +
- GUC_LOG_ISR_PAGES + 1 +
- GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+ /* Create a relay channel, so that we have buffers for storing
+ * the GuC firmware logs, the channel will be linked with a file
+ * later on when debugfs is registered.
+ */
+ guc_log_relay_chan = relay_open(NULL, NULL, subbuf_size,
+ n_subbufs, &relay_callbacks, dev_priv);
+ if (!guc_log_relay_chan) {
+ DRM_ERROR("Couldn't create relay chan for GuC logging\n");
- vma = guc->log.vma;
- if (!vma) {
- /* We require SSE 4.1 for fast reads from the GuC log buffer and
- * it should be present on the chipsets supporting GuC based
- * submisssions.
- */
- if (WARN_ON(!i915_has_memcpy_from_wc())) {
- /* logging will not be enabled */
- i915.guc_log_level = -1;
- return;
- }
+ ret = -ENOMEM;
+ goto err_vaddr;
+ }
- vma = intel_guc_allocate_vma(guc, size);
- if (IS_ERR(vma)) {
- /* logging will be off */
- i915.guc_log_level = -1;
- return;
- }
+ GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size);
+ guc->log.runtime.relay_chan = guc_log_relay_chan;
+
+ INIT_WORK(&guc->log.runtime.flush_work, capture_logs_work);
+
+ /*
+	 * The GuC log buffer flush work item has to do register access to
+	 * send the ack to GuC; if it is not synced before suspend, it can
+	 * potentially get executed after the GFX device is suspended.
+	 * By marking the WQ as freezable, we don't have to bother flushing
+	 * this work item from the suspend hooks: any pending work item will
+	 * either be executed before the suspend or scheduled later on
+	 * resume. This way the handling of the work item is kept the same
+	 * between system suspend & rpm suspend.
+ */
+ guc->log.runtime.flush_wq = alloc_ordered_workqueue("i915-guc_log",
+ WQ_HIGHPRI | WQ_FREEZABLE);
+ if (!guc->log.runtime.flush_wq) {
+ DRM_ERROR("Couldn't allocate the wq for GuC logging\n");
+ ret = -ENOMEM;
+ goto err_relaychan;
+ }
- guc->log.vma = vma;
+ return 0;
- if (guc_log_create_extras(guc)) {
- guc_log_cleanup(guc);
- i915_vma_unpin_and_release(&guc->log.vma);
- i915.guc_log_level = -1;
- return;
- }
- }
+err_relaychan:
+ relay_close(guc->log.runtime.relay_chan);
+err_vaddr:
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
+ return ret;
+}
- /* each allocated unit is a page */
- flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
- (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
- (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
- (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+static void guc_log_runtime_destroy(struct intel_guc *guc)
+{
+ /*
+ * It's possible that the runtime stuff was never allocated because
+	 * guc_log_level was < 0 at the time.
+	 */
+ if (!guc_log_has_runtime(guc))
+ return;
- offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
- guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+ destroy_workqueue(guc->log.runtime.flush_wq);
+ relay_close(guc->log.runtime.relay_chan);
+ i915_gem_object_unpin_map(guc->log.vma->obj);
+ guc->log.runtime.buf_addr = NULL;
}
static int guc_log_late_setup(struct intel_guc *guc)
@@ -527,24 +456,25 @@ static int guc_log_late_setup(struct intel_guc *guc)
lockdep_assert_held(&dev_priv->drm.struct_mutex);
- if (i915.guc_log_level < 0)
- return -EINVAL;
-
- /* If log_level was set as -1 at boot time, then setup needed to
- * handle log buffer flush interrupts would not have been done yet,
- * so do that now.
- */
- ret = guc_log_create_extras(guc);
- if (ret)
- goto err;
+ if (!guc_log_has_runtime(guc)) {
+ /* If log_level was set as -1 at boot time, then setup needed to
+ * handle log buffer flush interrupts would not have been done yet,
+ * so do that now.
+ */
+ ret = guc_log_runtime_create(guc);
+ if (ret)
+ goto err;
+ }
- ret = guc_log_create_relay_file(guc);
+ ret = guc_log_relay_file_create(guc);
if (ret)
- goto err;
+ goto err_runtime;
return 0;
+
+err_runtime:
+ guc_log_runtime_destroy(guc);
err:
- guc_log_cleanup(guc);
/* logging will remain off */
i915.guc_log_level = -1;
return ret;
@@ -577,7 +507,7 @@ static void guc_flush_logs(struct intel_guc *guc)
/* Before initiating the forceful flush, wait for any pending/ongoing
* flush to complete otherwise forceful flush may not actually happen.
*/
- flush_work(&guc->log.flush_work);
+ flush_work(&guc->log.runtime.flush_work);
/* Ask GuC to update the log buffer state */
guc_log_flush(guc);
@@ -586,6 +516,72 @@ static void guc_flush_logs(struct intel_guc *guc)
guc_log_capture_logs(guc);
}
+int intel_guc_log_create(struct intel_guc *guc)
+{
+ struct i915_vma *vma;
+ unsigned long offset;
+ uint32_t size, flags;
+ int ret;
+
+ GEM_BUG_ON(guc->log.vma);
+
+ if (i915.guc_log_level > GUC_LOG_VERBOSITY_MAX)
+ i915.guc_log_level = GUC_LOG_VERBOSITY_MAX;
+
+ /* The first page is to save log buffer state. Allocate one
+	 * extra page for others in case of overlap */
+ size = (1 + GUC_LOG_DPC_PAGES + 1 +
+ GUC_LOG_ISR_PAGES + 1 +
+ GUC_LOG_CRASH_PAGES + 1) << PAGE_SHIFT;
+
+ /* We require SSE 4.1 for fast reads from the GuC log buffer and
+ * it should be present on the chipsets supporting GuC based
+	 * submissions.
+ */
+ if (WARN_ON(!i915_has_memcpy_from_wc())) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ vma = intel_guc_allocate_vma(guc, size);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
+
+ guc->log.vma = vma;
+
+ if (i915.guc_log_level >= 0) {
+ ret = guc_log_runtime_create(guc);
+ if (ret < 0)
+ goto err_vma;
+ }
+
+ /* each allocated unit is a page */
+ flags = GUC_LOG_VALID | GUC_LOG_NOTIFY_ON_HALF_FULL |
+ (GUC_LOG_DPC_PAGES << GUC_LOG_DPC_SHIFT) |
+ (GUC_LOG_ISR_PAGES << GUC_LOG_ISR_SHIFT) |
+ (GUC_LOG_CRASH_PAGES << GUC_LOG_CRASH_SHIFT);
+
+ offset = guc_ggtt_offset(vma) >> PAGE_SHIFT; /* in pages */
+ guc->log.flags = (offset << GUC_LOG_BUF_ADDR_SHIFT) | flags;
+
+ return 0;
+
+err_vma:
+ i915_vma_unpin_and_release(&guc->log.vma);
+err:
+ /* logging will be off */
+ i915.guc_log_level = -1;
+ return ret;
+}
+
+void intel_guc_log_destroy(struct intel_guc *guc)
+{
+ guc_log_runtime_destroy(guc);
+ i915_vma_unpin_and_release(&guc->log.vma);
+}
+
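
For orientation, here is a rough editorial sketch (not part of the patch; the caller name is invented) of how the reworked create/runtime/destroy split is meant to be driven:

/*
 * intel_guc_log_create() always allocates guc->log.vma and, when
 * i915.guc_log_level >= 0, also sets up the relay/workqueue "runtime"
 * half; otherwise the runtime half can still be created later through
 * guc_log_late_setup() once logging is enabled at run time.
 */
static int example_guc_log_setup(struct intel_guc *guc)
{
        int err;

        err = intel_guc_log_create(guc);
        if (err)
                return err;     /* guc_log_level gets forced to -1 */

        /* ... GuC runs; logs are captured via log.runtime.flush_work ... */

        intel_guc_log_destroy(guc);     /* runtime half first, then the vma */
        return 0;
}
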
int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
{
struct intel_guc *guc = &dev_priv->guc;
@@ -609,17 +605,22 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
return ret;
}
- i915.guc_log_level = log_param.verbosity;
+ if (log_param.logging_enabled) {
+ i915.guc_log_level = log_param.verbosity;
- /* If log_level was set as -1 at boot time, then the relay channel file
- * wouldn't have been created by now and interrupts also would not have
- * been enabled.
- */
- if (!dev_priv->guc.log.relay_chan) {
+ /* If log_level was set as -1 at boot time, then the relay channel file
+ * wouldn't have been created by now and interrupts also would not have
+ * been enabled. Try again now, just in case.
+ */
ret = guc_log_late_setup(guc);
- if (!ret)
- gen9_enable_guc_interrupts(dev_priv);
- } else if (!log_param.logging_enabled) {
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("GuC log late setup failed %d\n", ret);
+ return ret;
+ }
+
+ /* GuC logging is currently the only user of Guc2Host interrupts */
+ gen9_enable_guc_interrupts(dev_priv);
+ } else {
/* Once logging is disabled, GuC won't generate logs & send an
* interrupt. But there could be some data in the log buffer
* which is yet to be captured. So request GuC to update the log
@@ -629,9 +630,6 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
/* As logging is disabled, update log level to reflect that */
i915.guc_log_level = -1;
- } else {
- /* In case interrupts were disabled, enable them now */
- gen9_enable_guc_interrupts(dev_priv);
}
return ret;
@@ -639,7 +637,7 @@ int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val)
void i915_guc_log_register(struct drm_i915_private *dev_priv)
{
- if (!i915.enable_guc_submission)
+ if (!i915.enable_guc_submission || i915.guc_log_level < 0)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
@@ -653,6 +651,8 @@ void i915_guc_log_unregister(struct drm_i915_private *dev_priv)
return;
mutex_lock(&dev_priv->drm.struct_mutex);
- guc_log_cleanup(&dev_priv->guc);
+ /* GuC logging is currently the only user of Guc2Host interrupts */
+ gen9_disable_guc_interrupts(dev_priv);
+ guc_log_runtime_destroy(&dev_priv->guc);
mutex_unlock(&dev_priv->drm.struct_mutex);
}
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 8c04eca84351..e1ab6432a914 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -45,6 +45,8 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
return true;
if (IS_SKYLAKE(dev_priv))
return true;
+ if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D)
+ return true;
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 3eec74ca5116..1d623b5e09d6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -34,6 +34,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_scdc_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
@@ -1208,6 +1209,8 @@ static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv))
return 165000;
+ else if (IS_GEMINILAKE(dev_priv))
+ return 594000;
else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
@@ -1334,6 +1337,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
+ struct drm_scdc *scdc = &conn_state->connector->display_info.hdmi.scdc;
int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock;
int clock_12bpc = clock_8bpc * 3 / 2;
int desired_bpp;
@@ -1403,6 +1407,16 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = 4;
+ if (scdc->scrambling.supported && IS_GEMINILAKE(dev_priv)) {
+ if (scdc->scrambling.low_rates)
+ pipe_config->hdmi_scrambling = true;
+
+ if (pipe_config->port_clock > 340000) {
+ pipe_config->hdmi_scrambling = true;
+ pipe_config->hdmi_high_tmds_clock_ratio = true;
+ }
+ }
+
return true;
}
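
To make the thresholds above concrete, a small editorial sketch of the same decision (the helper name is invented; per HDMI 2.0, character rates above 340 MHz require scrambling with a 1/40 TMDS clock ratio, e.g. 4k@60 8bpc at 594 MHz, which matches the new Geminilake source limit):

static void example_scrambling_decision(struct intel_crtc_state *config,
                                        const struct drm_scdc *scdc)
{
        if (!scdc->scrambling.supported)
                return;

        /* some sinks want scrambling even at low rates */
        if (scdc->scrambling.low_rates)
                config->hdmi_scrambling = true;

        /* port_clock is in kHz, so this is the 340 MHz boundary */
        if (config->port_clock > 340000) {
                config->hdmi_scrambling = true;
                config->hdmi_high_tmds_clock_ratio = true;
        }
}
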
@@ -1812,6 +1826,57 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
}
+/*
+ * intel_hdmi_handle_sink_scrambling: handle sink scrambling/clock ratio setup
+ * @encoder: intel_encoder
+ * @connector: drm_connector
+ * @high_tmds_clock_ratio: bool to indicate if the function needs to set
+ * or reset the high tmds clock ratio for scrambling
+ * @scrambling: bool to indicate if the function needs to set or reset
+ * sink scrambling
+ *
+ * This function handles scrambling on HDMI 2.0 capable sinks.
+ * If the required clock rate is > 340 MHz and scrambling is supported by the sink,
+ * it enables scrambling. This should be called before enabling the HDMI
+ * 2.0 port, as the sink can choose to disable the scrambling if it doesn't
+ * detect a scrambled clock within 100 ms.
+ */
+void intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling)
+{
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ struct drm_scrambling *sink_scrambling =
+ &connector->display_info.hdmi.scdc.scrambling;
+ struct i2c_adapter *adptr = intel_gmbus_get_adapter(dev_priv,
+ intel_hdmi->ddc_bus);
+ bool ret;
+
+ if (!sink_scrambling->supported)
+ return;
+
+ DRM_DEBUG_KMS("Setting sink scrambling for enc:%s connector:%s\n",
+ encoder->base.name, connector->name);
+
+ /* Set TMDS bit clock ratio to 1/40 or 1/10 */
+ ret = drm_scdc_set_high_tmds_clock_ratio(adptr, high_tmds_clock_ratio);
+ if (!ret) {
+ DRM_ERROR("Set TMDS ratio failed\n");
+ return;
+ }
+
+ /* Enable/disable sink scrambling */
+ ret = drm_scdc_set_scrambling(adptr, scrambling);
+ if (!ret) {
+ DRM_ERROR("Set sink scrambling failed\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("sink scrambling handled\n");
+}
+
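
A hedged sketch of how an encoder enable path might consume the new helper, assuming the crtc state carries the hdmi_scrambling/hdmi_high_tmds_clock_ratio flags computed in intel_hdmi_compute_config() above (the caller is invented; the real call site is not part of this hunk):

static void example_enable_hdmi_scrambling(struct intel_encoder *encoder,
                                           const struct intel_crtc_state *crtc_state,
                                           struct drm_connector *connector)
{
        /* must run before the port is enabled: the sink drops scrambling
         * if it sees no scrambled clock within ~100 ms */
        intel_hdmi_handle_sink_scrambling(encoder, connector,
                                          crtc_state->hdmi_high_tmds_clock_ratio,
                                          crtc_state->hdmi_scrambling);
}
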
static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
enum port port)
{
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 7af900bcdc05..9ee819666a4c 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -251,24 +251,6 @@ fail:
}
/**
- * intel_huc_fini() - clean up resources allocated for HuC
- * @dev_priv: the drm_i915_private device
- *
- * Cleans up by releasing the huc firmware GEM obj.
- */
-void intel_huc_fini(struct drm_i915_private *dev_priv)
-{
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
- struct drm_i915_gem_object *obj;
-
- obj = fetch_and_zero(&huc_fw->obj);
- if (obj)
- i915_gem_object_put(obj);
-
- huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
-}
-
-/**
* intel_guc_auth_huc() - authenticate ucode
* @dev_priv: the drm_i915_device
*
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 7a5b41b1c024..25d8e76489e4 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -131,8 +131,15 @@ err:
static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
{
+ /* XXX Note that platform_device_register_full() allocates a dma_mask
+ * and never frees it. We can't free it here as we cannot guarantee
+ * this is the last reference (i.e. that the dma_mask will not be
+	 * used after our unregister). So we choose to leak the sizeof(u64)
+	 * allocation here - it should be fixed in the platform_device code
+	 * rather than by us fiddling with its internals.
+ */
+
platform_device_unregister(dev_priv->lpe_audio.platdev);
- kfree(dev_priv->lpe_audio.platdev->dev.dma_mask);
}
static void lpe_audio_irq_unmask(struct irq_data *d)
@@ -331,6 +338,7 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
* audio driver and i915
* @dev_priv: the i915 drm device private data
* @eld : ELD data
+ * @pipe: pipe id
* @port: port id
* @tmds_clk_speed: tmds clock frequency in Hz
*
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 77168e673e0a..c8f7c631fc1f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -326,7 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
- GEM_BUG_ON(!IS_ALIGNED(rq->tail, 8));
+ assert_ring_tail_valid(rq->ring, rq->tail);
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
@@ -399,22 +399,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *last;
struct execlist_port *port = engine->execlist_port;
- unsigned long flags;
struct rb_node *rb;
bool submit = false;
- /* After execlist_first is updated, the tasklet will be rescheduled.
- *
- * If we are currently running (inside the tasklet) and a third
- * party queues a request and so updates engine->execlist_first under
- * the spinlock (which we have elided), it will atomically set the
- * TASKLET_SCHED flag causing the us to be re-executed and pick up
- * the change in state (the update to TASKLET_SCHED incurs a memory
- * barrier making this cross-cpu checking safe).
- */
- if (!READ_ONCE(engine->execlist_first))
- return;
-
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
@@ -448,7 +435,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock_irqsave(&engine->timeline->lock, flags);
+ spin_lock_irq(&engine->timeline->lock);
rb = engine->execlist_first;
while (rb) {
struct drm_i915_gem_request *cursor =
@@ -500,7 +487,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_gem_request_assign(&port->request, last);
engine->execlist_first = rb;
}
- spin_unlock_irqrestore(&engine->timeline->lock, flags);
+ spin_unlock_irq(&engine->timeline->lock);
if (submit)
execlists_submit_ports(engine);
@@ -530,24 +517,36 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
+ /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
+ * imposing the cost of a locked atomic transaction when submitting a
+ * new request (outside of the context-switch interrupt).
+ */
+ while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
u32 __iomem *csb_mmio =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
u32 __iomem *buf =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0));
- unsigned int csb, head, tail;
-
- csb = readl(csb_mmio);
- head = GEN8_CSB_READ_PTR(csb);
- tail = GEN8_CSB_WRITE_PTR(csb);
- if (head == tail)
- break;
+ unsigned int head, tail;
+
+ /* The write will be ordered by the uncached read (itself
+ * a memory barrier), so we do not need another in the form
+ * of a locked instruction. The race between the interrupt
+ * handler and the split test/clear is harmless as we order
+ * our clear before the CSB read. If the interrupt arrived
+ * first between the test and the clear, we read the updated
+ * CSB and clear the bit. If the interrupt arrives as we read
+ * the CSB or later (i.e. after we had cleared the bit) the bit
+ * is set and we do a new loop.
+ */
+ __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ head = readl(csb_mmio);
+ tail = GEN8_CSB_WRITE_PTR(head);
+ head = GEN8_CSB_READ_PTR(head);
+ while (head != tail) {
+ unsigned int status;
- if (tail < head)
- tail += GEN8_CSB_ENTRIES;
- do {
- unsigned int idx = ++head % GEN8_CSB_ENTRIES;
- unsigned int status = readl(buf + 2 * idx);
+ if (++head == GEN8_CSB_ENTRIES)
+ head = 0;
/* We are flying near dragons again.
*
@@ -566,11 +565,12 @@ static void intel_lrc_irq_handler(unsigned long data)
* status notifier.
*/
+ status = readl(buf + 2 * head);
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
/* Check the context/desc id for this event matches */
- GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
+ GEM_DEBUG_BUG_ON(readl(buf + 2 * head + 1) !=
port[0].context_id);
GEM_BUG_ON(port[0].count == 0);
@@ -588,10 +588,9 @@ static void intel_lrc_irq_handler(unsigned long data)
GEM_BUG_ON(port[0].count == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- } while (head < tail);
+ }
- writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
- GEN8_CSB_WRITE_PTR(csb) << 8),
+ writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
csb_mmio);
}
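
The locking comment above can be boiled down to an editorial sketch of the two-stage test/clear (example_process_csb() is a hypothetical helper; everything else uses the same primitives as the patch):

static void example_drain_csb(struct intel_engine_cs *engine,
                              u32 __iomem *csb_mmio)
{
        while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
                u32 csb;

                /* plain clear, no locked instruction needed */
                __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
                /* the uncached read orders the clear before the sample */
                csb = readl(csb_mmio);
                /* an interrupt after the clear re-sets the bit, so the
                 * outer loop picks up any event we might have raced with */
                example_process_csb(engine, csb);
        }
}
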
@@ -647,15 +646,14 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
static struct intel_engine_cs *
pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
{
- struct intel_engine_cs *engine;
+ struct intel_engine_cs *engine =
+ container_of(pt, struct drm_i915_gem_request, priotree)->engine;
+
+ GEM_BUG_ON(!locked);
- engine = container_of(pt,
- struct drm_i915_gem_request,
- priotree)->engine;
if (engine != locked) {
- if (locked)
- spin_unlock_irq(&locked->timeline->lock);
- spin_lock_irq(&engine->timeline->lock);
+ spin_unlock(&locked->timeline->lock);
+ spin_lock(&engine->timeline->lock);
}
return engine;
@@ -663,7 +661,7 @@ pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
{
- struct intel_engine_cs *engine = NULL;
+ struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
LIST_HEAD(dfs);
@@ -697,26 +695,23 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
list_for_each_entry_safe(dep, p, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
- list_for_each_entry(p, &pt->signalers_list, signal_link)
+ /* Within an engine, there can be no cycle, but we may
+ * refer to the same dependency chain multiple times
+ * (redundant dependencies are not eliminated) and across
+ * engines.
+ */
+ list_for_each_entry(p, &pt->signalers_list, signal_link) {
+ GEM_BUG_ON(p->signaler->priority < pt->priority);
if (prio > READ_ONCE(p->signaler->priority))
list_move_tail(&p->dfs_link, &dfs);
+ }
list_safe_reset_next(dep, p, dfs_link);
- if (!RB_EMPTY_NODE(&pt->node))
- continue;
-
- engine = pt_lock_engine(pt, engine);
-
- /* If it is not already in the rbtree, we can update the
- * priority inplace and skip over it (and its dependencies)
- * if it is referenced *again* as we descend the dfs.
- */
- if (prio > pt->priority && RB_EMPTY_NODE(&pt->node)) {
- pt->priority = prio;
- list_del_init(&dep->dfs_link);
- }
}
+ engine = request->engine;
+ spin_lock_irq(&engine->timeline->lock);
+
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
struct i915_priotree *pt = dep->signaler;
@@ -728,16 +723,15 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
if (prio <= pt->priority)
continue;
- GEM_BUG_ON(RB_EMPTY_NODE(&pt->node));
-
pt->priority = prio;
- rb_erase(&pt->node, &engine->execlist_queue);
- if (insert_request(pt, &engine->execlist_queue))
- engine->execlist_first = &pt->node;
+ if (!RB_EMPTY_NODE(&pt->node)) {
+ rb_erase(&pt->node, &engine->execlist_queue);
+ if (insert_request(pt, &engine->execlist_queue))
+ engine->execlist_first = &pt->node;
+ }
}
- if (engine)
- spin_unlock_irq(&engine->timeline->lock);
+ spin_unlock_irq(&engine->timeline->lock);
/* XXX Do we need to preempt to make room for us and our deps? */
}
@@ -1255,7 +1249,6 @@ static void reset_common_ring(struct intel_engine_cs *engine,
ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
request->ring->head = request->postfix;
- request->ring->last_retired_head = -1;
intel_ring_update_space(request->ring);
/* Catch up with any missed context-switch interrupts */
@@ -1268,8 +1261,10 @@ static void reset_common_ring(struct intel_engine_cs *engine,
GEM_BUG_ON(request->ctx != port[0].request->ctx);
/* Reset WaIdleLiteRestore:bdw,skl as well */
- request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ request->tail =
+ intel_ring_wrap(request->ring,
+ request->wa_tail - WA_TAIL_DWORDS*sizeof(u32));
+ assert_ring_tail_valid(request->ring, request->tail);
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
@@ -1480,7 +1475,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
gen8_emit_wa_tail(request, cs);
}
@@ -1508,7 +1503,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
request->tail = intel_ring_offset(request, cs);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
gen8_emit_wa_tail(request, cs);
}
@@ -1575,6 +1570,7 @@ static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = execlists_submit_request;
engine->schedule = execlists_schedule;
+ engine->irq_tasklet.func = intel_lrc_irq_handler;
}
static void
@@ -2041,7 +2037,6 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
i915_gem_object_unpin_map(ce->state->obj);
ce->ring->head = ce->ring->tail = 0;
- ce->ring->last_retired_head = -1;
intel_ring_update_space(ce->ring);
}
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 441c01466384..d44465190dc1 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -920,6 +920,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
char buf[sizeof(OPREGION_SIGNATURE)];
int err = 0;
void *base;
+ const void *vbt;
+ u32 vbt_size;
BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
@@ -972,45 +974,46 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
if (mboxes & MBOX_ASLE_EXT)
DRM_DEBUG_DRIVER("ASLE extension supported\n");
- if (!dmi_check_system(intel_no_opregion_vbt)) {
- const void *vbt = NULL;
- u32 vbt_size = 0;
-
- if (opregion->header->opregion_ver >= 2 && opregion->asle &&
- opregion->asle->rvda && opregion->asle->rvds) {
- opregion->rvda = memremap(opregion->asle->rvda,
- opregion->asle->rvds,
- MEMREMAP_WB);
- vbt = opregion->rvda;
- vbt_size = opregion->asle->rvds;
- }
+ if (dmi_check_system(intel_no_opregion_vbt))
+ goto out;
+ if (opregion->header->opregion_ver >= 2 && opregion->asle &&
+ opregion->asle->rvda && opregion->asle->rvds) {
+ opregion->rvda = memremap(opregion->asle->rvda,
+ opregion->asle->rvds,
+ MEMREMAP_WB);
+ vbt = opregion->rvda;
+ vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
+ goto out;
} else {
- vbt = base + OPREGION_VBT_OFFSET;
- /*
- * The VBT specification says that if the ASLE ext
- * mailbox is not used its area is reserved, but
- * on some CHT boards the VBT extends into the
- * ASLE ext area. Allow this even though it is
- * against the spec, so we do not end up rejecting
- * the VBT on those boards (and end up not finding the
- * LCD panel because of this).
- */
- vbt_size = (mboxes & MBOX_ASLE_EXT) ?
- OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
- vbt_size -= OPREGION_VBT_OFFSET;
- if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
- opregion->vbt = vbt;
- opregion->vbt_size = vbt_size;
- }
+ DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
}
}
+ vbt = base + OPREGION_VBT_OFFSET;
+ /*
+ * The VBT specification says that if the ASLE ext mailbox is not used
+ * its area is reserved, but on some CHT boards the VBT extends into the
+ * ASLE ext area. Allow this even though it is against the spec, so we
+ * do not end up rejecting the VBT on those boards (and end up not
+ * finding the LCD panel because of this).
+ */
+ vbt_size = (mboxes & MBOX_ASLE_EXT) ?
+ OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
+ vbt_size -= OPREGION_VBT_OFFSET;
+ if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
+ DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ opregion->vbt = vbt;
+ opregion->vbt_size = vbt_size;
+ } else {
+ DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ }
+
+out:
return 0;
err_out:
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index aece0ff88a5d..570bd603f401 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -655,6 +655,29 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
return wm_size;
}
+static bool intel_wm_plane_visible(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+
+ /* FIXME check the 'enable' instead */
+ if (!crtc_state->base.active)
+ return false;
+
+ /*
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
+ */
+ if (plane->id == PLANE_CURSOR)
+ return plane_state->base.fb != NULL;
+ else
+ return plane_state->base.visible;
+}
+
static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
{
struct intel_crtc *crtc, *enabled = NULL;
@@ -1961,7 +1984,7 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -1990,7 +2013,7 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
uint32_t method1, method2;
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -2013,15 +2036,7 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
{
int cpp;
- /*
- * Treat cursor with fb as always visible since cursor updates
- * can happen faster than the vrefresh rate, and the current
- * watermark code doesn't handle that correctly. Cursor updates
- * which set/clear the fb or change the cursor size are going
- * to get throttled by intel_legacy_cursor_update() to work
- * around this problem with the watermark code.
- */
- if (!cstate->base.active || !pstate->base.fb)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -2038,7 +2053,7 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate,
{
int cpp;
- if (!cstate->base.active || !pstate->base.visible)
+ if (!intel_wm_plane_visible(cstate, pstate))
return 0;
cpp = pstate->base.fb->format->cpp[0];
@@ -3346,19 +3361,29 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
* Caller should take care of dividing & rounding off the value.
*/
static uint32_t
-skl_plane_downscale_amount(const struct intel_plane_state *pstate)
+skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
+ const struct intel_plane_state *pstate)
{
+ struct intel_plane *plane = to_intel_plane(pstate->base.plane);
uint32_t downscale_h, downscale_w;
uint32_t src_w, src_h, dst_w, dst_h;
- if (WARN_ON(!pstate->base.visible))
+ if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return DRM_PLANE_HELPER_NO_SCALING;
/* n.b., src is 16.16 fixed point, dst is whole integer */
- src_w = drm_rect_width(&pstate->base.src);
- src_h = drm_rect_height(&pstate->base.src);
- dst_w = drm_rect_width(&pstate->base.dst);
- dst_h = drm_rect_height(&pstate->base.dst);
+ if (plane->id == PLANE_CURSOR) {
+ src_w = pstate->base.src_w;
+ src_h = pstate->base.src_h;
+ dst_w = pstate->base.crtc_w;
+ dst_h = pstate->base.crtc_h;
+ } else {
+ src_w = drm_rect_width(&pstate->base.src);
+ src_h = drm_rect_height(&pstate->base.src);
+ dst_w = drm_rect_width(&pstate->base.dst);
+ dst_h = drm_rect_height(&pstate->base.dst);
+ }
+
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
@@ -3374,6 +3399,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
const struct drm_plane_state *pstate,
int y)
{
+ struct intel_plane *plane = to_intel_plane(pstate->plane);
struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
uint32_t down_scale_amount, data_rate;
uint32_t width = 0, height = 0;
@@ -3386,7 +3412,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
fb = pstate->fb;
format = fb->format->format;
- if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (plane->id == PLANE_CURSOR)
return 0;
if (y && format != DRM_FORMAT_NV12)
return 0;
@@ -3410,7 +3436,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
data_rate = width * height * fb->format->cpp[0];
}
- down_scale_amount = skl_plane_downscale_amount(intel_pstate);
+ down_scale_amount = skl_plane_downscale_amount(cstate, intel_pstate);
return (uint64_t)data_rate * down_scale_amount >> 16;
}
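A worked example of why the cast to uint64_t above matters: a 4096x4096 ARGB plane already yields a data rate of 64 MiB (2^26 bytes), and multiplying that by a 2.0x downscale factor in 16.16 format (0x20000 = 2^17) gives a 2^43 intermediate, well past 32 bits, before the final >> 16 brings the result back into range. A reduced sketch:
#include <stdint.h>

static uint32_t scale_data_rate(uint32_t data_rate, uint32_t downscale_fp16)
{
	/* widen first: the intermediate product can exceed 32 bits */
	return (uint32_t)(((uint64_t)data_rate * downscale_fp16) >> 16);
}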
@@ -3702,7 +3728,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
uint64_t pixel_rate;
/* Shouldn't reach here on disabled planes... */
- if (WARN_ON(!pstate->base.visible))
+ if (WARN_ON(!intel_wm_plane_visible(cstate, pstate)))
return 0;
/*
@@ -3710,7 +3736,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
* with additional adjustments for plane-specific scaling.
*/
adjusted_pixel_rate = cstate->pixel_rate;
- downscale_amount = skl_plane_downscale_amount(pstate);
+ downscale_amount = skl_plane_downscale_amount(cstate, pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
@@ -3727,6 +3753,7 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
uint8_t *out_lines, /* out */
bool *enabled /* out */)
{
+ struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
struct drm_plane_state *pstate = &intel_pstate->base;
struct drm_framebuffer *fb = pstate->fb;
uint32_t latency = dev_priv->wm.skl_latency[level];
@@ -3746,7 +3773,8 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
bool apply_memory_bw_wa = skl_needs_memory_bw_wa(state);
bool y_tiled, x_tiled;
- if (latency == 0 || !cstate->base.active || !intel_pstate->base.visible) {
+ if (latency == 0 ||
+ !intel_wm_plane_visible(cstate, intel_pstate)) {
*enabled = false;
return 0;
}
@@ -3762,8 +3790,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
if (apply_memory_bw_wa && x_tiled)
latency += 15;
- width = drm_rect_width(&intel_pstate->base.src) >> 16;
- height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ if (plane->id == PLANE_CURSOR) {
+ width = intel_pstate->base.crtc_w;
+ height = intel_pstate->base.crtc_h;
+ } else {
+ width = drm_rect_width(&intel_pstate->base.src) >> 16;
+ height = drm_rect_height(&intel_pstate->base.src) >> 16;
+ }
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
@@ -8055,7 +8088,7 @@ static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
case GEN6_PCODE_TIMEOUT:
return -ETIMEDOUT;
default:
- MISSING_CASE(flags)
+ MISSING_CASE(flags);
return 0;
}
}
@@ -8355,6 +8388,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
u32 lower, upper, tmp;
+ int loop = 2;
/* The registers accessed do not need forcewake. We borrow
* uncore lock to prevent concurrent access to range reg.
@@ -8383,7 +8417,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
I915_WRITE_FW(VLV_COUNTER_CONTROL,
_MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = I915_READ_FW(reg);
- } while (upper != tmp);
+ } while (upper != tmp && --loop);
/* Everywhere else we always use VLV_COUNTER_CONTROL with the
* VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
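The new `--loop` guard bounds the retry above: if the upper half keeps changing, or a wedged device never returns consistent values, the read gives up after two passes instead of spinning forever. A generic sketch of the same split-counter read pattern, with the register accessors passed in as assumed callbacks:
#include <stdint.h>

static uint64_t read_split_counter(uint32_t (*read_lo)(void),
				   uint32_t (*read_hi)(void))
{
	uint32_t lo, hi, tmp;
	int retries = 2;

	hi = read_hi();
	do {
		tmp = hi;
		lo = read_lo();		/* low half may carry into the high half here */
		hi = read_hi();
	} while (hi != tmp && --retries);

	return ((uint64_t)hi << 32) | lo;
}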
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index f7c6383ecd06..66a2b8b83972 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,13 +49,7 @@ static int __intel_ring_space(int head, int tail, int size)
void intel_ring_update_space(struct intel_ring *ring)
{
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
- }
-
- ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
- ring->tail, ring->size);
+ ring->space = __intel_ring_space(ring->head, ring->tail, ring->size);
}
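With last_retired_head gone, intel_ring_update_space() derives free space purely from head, tail and size. A sketch of that circular-buffer arithmetic; the reserved slack below is a placeholder, not the driver's constant:
#define RING_SLACK 64	/* hypothetical reserve so tail never catches head exactly */

static int ring_free_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;		/* tail is ahead of head: wrap around */

	return space - RING_SLACK;
}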
static int
@@ -618,12 +612,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
}
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO) {
- struct intel_ring *ring = request->ring;
-
- ring->head = request->postfix;
- ring->last_retired_head = -1;
- }
+ if (request->fence.error == -EIO)
+ request->ring->head = request->postfix;
} else {
engine->legacy_active_context = NULL;
}
@@ -784,7 +774,7 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
- GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
+ assert_ring_tail_valid(request->ring, request->tail);
I915_WRITE_TAIL(request->engine, request->tail);
}
@@ -796,7 +786,7 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
*cs++ = MI_USER_INTERRUPT;
req->tail = intel_ring_offset(req, cs);
- GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
@@ -835,7 +825,7 @@ static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
*cs++ = MI_NOOP;
req->tail = intel_ring_offset(req, cs);
- GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1392,7 +1382,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
vma = intel_ring_create_vma(engine->i915, size);
@@ -1573,10 +1562,8 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
+ for_each_engine(engine, dev_priv, id)
engine->buffer->head = engine->buffer->tail;
- engine->buffer->last_retired_head = -1;
- }
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
@@ -2130,7 +2117,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
num_rings =
hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
- engine->emit_breadcrumb_sz += num_rings * 6;
+ engine->emit_breadcrumb_sz += num_rings * 8;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 847aea554464..a82a0807f64d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -149,16 +149,6 @@ struct intel_ring {
int space;
int size;
int effective_size;
-
- /** We track the position of the requests in the ring buffer, and
- * when each is retired we increment last_retired_head as the GPU
- * must have finished processing the request and so we know we
- * can advance the ringbuffer up to that position.
- *
- * last_retired_head is set to -1 after the value is consumed so
- * we can detect new retirements.
- */
- u32 last_retired_head;
};
struct i915_gem_context;
@@ -442,18 +432,10 @@ struct intel_engine_cs {
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
-static inline unsigned
+static inline unsigned int
intel_engine_flag(const struct intel_engine_cs *engine)
{
- return 1 << engine->id;
-}
-
-static inline void
-intel_flush_status_page(struct intel_engine_cs *engine, int reg)
-{
- mb();
- clflush(&engine->status_page.page_addr[reg]);
- mb();
+ return BIT(engine->id);
}
static inline u32
@@ -464,14 +446,22 @@ intel_read_status_page(struct intel_engine_cs *engine, int reg)
}
static inline void
-intel_write_status_page(struct intel_engine_cs *engine,
- int reg, u32 value)
+intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
{
- mb();
- clflush(&engine->status_page.page_addr[reg]);
- engine->status_page.page_addr[reg] = value;
- clflush(&engine->status_page.page_addr[reg]);
- mb();
+ /* Writing into the status page should be done sparingly. Since
+ * we only do so when we are uncertain of the device state, we take a bit
+ * of extra paranoia to try and ensure that the HWS takes the value
+ * we give and that it doesn't end up trapped inside the CPU!
+ */
+ if (static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ mb();
+ clflush(&engine->status_page.page_addr[reg]);
+ engine->status_page.page_addr[reg] = value;
+ clflush(&engine->status_page.page_addr[reg]);
+ mb();
+ } else {
+ WRITE_ONCE(engine->status_page.page_addr[reg], value);
+ }
}
/*
@@ -525,12 +515,29 @@ intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
}
static inline u32
-intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
+intel_ring_wrap(const struct intel_ring *ring, u32 pos)
+{
+ return pos & (ring->size - 1);
+}
+
+static inline u32
+intel_ring_offset(const struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
u32 offset = addr - req->ring->vaddr;
GEM_BUG_ON(offset > req->ring->size);
- return offset & (req->ring->size - 1);
+ return intel_ring_wrap(req->ring, offset);
+}
+
+static inline void
+assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail)
+{
+ /* We could combine these into a single tail operation, but keeping
+ * them as separate tests will help identify the cause should one
+ * ever fire.
+ */
+ GEM_BUG_ON(!IS_ALIGNED(tail, 8));
+ GEM_BUG_ON(tail >= ring->size);
}
void intel_ring_update_space(struct intel_ring *ring);
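A sketch of the arithmetic behind intel_ring_wrap() and assert_ring_tail_valid() above: ring sizes are powers of two, so wrapping is a single mask, and a position equal to the ring size wraps back to 0 rather than being written out (the comment in intel_ring_offset() notes that writing the size itself hangs some GPUs).
#include <assert.h>
#include <stdint.h>

static uint32_t ring_wrap(uint32_t pos, uint32_t size)
{
	assert((size & (size - 1)) == 0);	/* size must be a power of two */
	return pos & (size - 1);
}

/* e.g. with a 4 KiB ring: ring_wrap(4096, 4096) == 0, ring_wrap(4104, 4096) == 8 */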
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 012bc358a33a..f8a375f8dde6 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2840,8 +2840,10 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct device *kdev = &pdev->dev;
+ int ret;
- pm_runtime_get_sync(kdev);
+ ret = pm_runtime_get_sync(kdev);
+ WARN_ONCE(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
atomic_inc(&dev_priv->pm.wakeref_count);
assert_rpm_wakelock_held(dev_priv);
@@ -2871,7 +2873,8 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
* function, since the power state is undefined. This applies
* atm to the late/early system suspend/resume handlers.
*/
- WARN_ON_ONCE(ret < 0);
+ WARN_ONCE(ret < 0,
+ "pm_runtime_get_if_in_use() failed: %d\n", ret);
if (ret <= 0)
return false;
}
@@ -2955,8 +2958,11 @@ void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
* platforms without RPM support.
*/
if (!HAS_RUNTIME_PM(dev_priv)) {
+ int ret;
+
pm_runtime_dont_use_autosuspend(kdev);
- pm_runtime_get_sync(kdev);
+ ret = pm_runtime_get_sync(kdev);
+ WARN(ret < 0, "pm_runtime_get_sync() failed: %d\n", ret);
} else {
pm_runtime_use_autosuspend(kdev);
}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b931d0bd7a64..f7d431427115 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -217,7 +217,7 @@ skl_update_plane(struct drm_plane *drm_plane,
struct drm_framebuffer *fb = plane_state->base.fb;
enum plane_id plane_id = intel_plane->id;
enum pipe pipe = intel_plane->pipe;
- u32 plane_ctl;
+ u32 plane_ctl = plane_state->ctl;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
u32 surf_addr = plane_state->main.offset;
unsigned int rotation = plane_state->base.rotation;
@@ -232,24 +232,6 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE;
-
- if (!IS_GEMINILAKE(dev_priv)) {
- plane_ctl |=
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE |
- PLANE_CTL_PLANE_GAMMA_DISABLE;
- }
-
- plane_ctl |= skl_plane_ctl_format(fb->format->format);
- plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= skl_plane_ctl_rotation(rotation);
-
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
-
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -361,32 +343,15 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
-static void
-vlv_update_plane(struct drm_plane *dplane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = dplane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(dplane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- enum plane_id plane_id = intel_plane->id;
- u32 sprctl;
- u32 sprsurf_offset, linear_offset;
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 sprctl;
- sprctl = SP_ENABLE;
+ sprctl = SP_ENABLE | SP_GAMMA_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_YUYV:
@@ -423,20 +388,10 @@ vlv_update_plane(struct drm_plane *dplane,
sprctl |= SP_FORMAT_RGBA8888;
break;
default:
- /*
- * If we get here one of the upper layers failed to filter
- * out the unsupported plane formats
- */
- BUG();
- break;
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- sprctl |= SP_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SP_TILED;
@@ -449,22 +404,36 @@ vlv_update_plane(struct drm_plane *dplane,
if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SP_SOURCE_KEY;
+ return sprctl;
+}
+
+static void
+vlv_update_plane(struct drm_plane *dplane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = dplane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane_id = intel_plane->id;
+ u32 sprctl = plane_state->ctl;
+ u32 sprsurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
- src_w--;
- src_h--;
crtc_w--;
crtc_h--;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- if (rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- } else if (rotation & DRM_REFLECT_X) {
- x += src_w;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -516,31 +485,23 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
-ivb_update_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- enum pipe pipe = intel_plane->pipe;
- u32 sprctl, sprscale = 0;
- u32 sprsurf_offset, linear_offset;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 sprctl;
+
+ sprctl = SPRITE_ENABLE | SPRITE_GAMMA_ENABLE;
+
+ if (IS_IVYBRIDGE(dev_priv))
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
- sprctl = SPRITE_ENABLE;
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ sprctl |= SPRITE_PIPE_CSC_ENABLE;
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
@@ -562,34 +523,48 @@ ivb_update_plane(struct drm_plane *plane,
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- sprctl |= SPRITE_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
sprctl |= SPRITE_TILED;
if (rotation & DRM_ROTATE_180)
sprctl |= SPRITE_ROTATE_180;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE;
- else
- sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
-
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- sprctl |= SPRITE_PIPE_CSC_ENABLE;
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
sprctl |= SPRITE_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
sprctl |= SPRITE_SOURCE_KEY;
+ return sprctl;
+}
+
+static void
+ivb_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ enum pipe pipe = intel_plane->pipe;
+ u32 sprctl = plane_state->ctl, sprscale = 0;
+ u32 sprsurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -599,16 +574,6 @@ ivb_update_plane(struct drm_plane *plane,
if (crtc_w != src_w || crtc_h != src_h)
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- sprsurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- /* HSW+ does this automagically in hardware */
- if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv) &&
- rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -664,31 +629,20 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-static void
-ilk_update_plane(struct drm_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+static u32 ilk_sprite_ctl(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_framebuffer *fb = plane_state->base.fb;
- int pipe = intel_plane->pipe;
- u32 dvscntr, dvsscale;
- u32 dvssurf_offset, linear_offset;
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
- int crtc_x = plane_state->base.dst.x1;
- int crtc_y = plane_state->base.dst.y1;
- uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
- uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
- uint32_t x = plane_state->base.src.x1 >> 16;
- uint32_t y = plane_state->base.src.y1 >> 16;
- uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
- uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
- unsigned long irqflags;
+ u32 dvscntr;
- dvscntr = DVS_ENABLE;
+ dvscntr = DVS_ENABLE | DVS_GAMMA_ENABLE;
+
+ if (IS_GEN6(dev_priv))
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
switch (fb->format->format) {
case DRM_FORMAT_XBGR8888:
@@ -710,47 +664,57 @@ ilk_update_plane(struct drm_plane *plane,
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
break;
default:
- BUG();
+ MISSING_CASE(fb->format->format);
+ return 0;
}
- /*
- * Enable gamma to match primary/cursor plane behaviour.
- * FIXME should be user controllable via propertiesa.
- */
- dvscntr |= DVS_GAMMA_ENABLE;
-
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
dvscntr |= DVS_TILED;
if (rotation & DRM_ROTATE_180)
dvscntr |= DVS_ROTATE_180;
- if (IS_GEN6(dev_priv))
- dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
dvscntr |= DVS_DEST_KEY;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
dvscntr |= DVS_SOURCE_KEY;
+ return dvscntr;
+}
+
+static void
+ilk_update_plane(struct drm_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct drm_framebuffer *fb = plane_state->base.fb;
+ int pipe = intel_plane->pipe;
+ u32 dvscntr = plane_state->ctl, dvsscale = 0;
+ u32 dvssurf_offset = plane_state->main.offset;
+ u32 linear_offset;
+ const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
+ int crtc_x = plane_state->base.dst.x1;
+ int crtc_y = plane_state->base.dst.y1;
+ uint32_t crtc_w = drm_rect_width(&plane_state->base.dst);
+ uint32_t crtc_h = drm_rect_height(&plane_state->base.dst);
+ uint32_t x = plane_state->main.x;
+ uint32_t y = plane_state->main.y;
+ uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
+ uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- dvsscale = 0;
if (crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
- intel_add_fb_offsets(&x, &y, plane_state, 0);
- dvssurf_offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- if (rotation & DRM_ROTATE_180) {
- x += src_w;
- y += src_h;
- }
-
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
@@ -981,6 +945,26 @@ intel_check_sprite_plane(struct drm_plane *plane,
ret = skl_check_plane_surface(state);
if (ret)
return ret;
+
+ state->ctl = skl_plane_ctl(crtc_state, state);
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = vlv_sprite_ctl(crtc_state, state);
+ } else if (INTEL_GEN(dev_priv) >= 7) {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = ivb_sprite_ctl(crtc_state, state);
+ } else {
+ ret = i9xx_check_plane_surface(state);
+ if (ret)
+ return ret;
+
+ state->ctl = ilk_sprite_ctl(crtc_state, state);
}
return 0;
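A reduced sketch of the pattern these sprite hunks introduce: the control-register word is computed once during the atomic check phase and stashed in the plane state (state->ctl), so the commit-time *_update_plane() functions only program precomputed values under the uncore lock. All names and bit positions below are hypothetical.
#include <stdint.h>

struct example_plane_state {
	unsigned int rotation;		/* degrees, e.g. 0 or 180 */
	uint32_t ctl;			/* precomputed control word */
};

static int example_check_plane(struct example_plane_state *state)
{
	uint32_t ctl = 1u << 31;	/* hypothetical ENABLE bit */

	if (state->rotation == 180)
		ctl |= 1u << 15;	/* hypothetical ROTATE_180 bit */

	state->ctl = ctl;		/* commit just writes state->ctl to the register */
	return 0;
}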
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index d15a7d9d4eb0..c117424f1f50 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -26,6 +26,19 @@
#include "intel_uc.h"
#include <linux/firmware.h>
+/* Cleans up uC firmware by releasing the firmware GEM obj.
+ */
+static void __intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = fetch_and_zero(&uc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
+}
+
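__intel_uc_fw_fini() leans on the driver's fetch_and_zero() helper: it takes the current pointer value and clears the field in one expression, so the object cannot be released twice if the teardown path runs again. A simplified, non-atomic illustration of the idiom; the macro body here is an approximation, not the driver's definition:
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __val = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__val;					\
})

struct blob { int refcount; };

static void put_blob(struct blob *b) { if (b) b->refcount--; }

static void fini(struct blob **slot)
{
	struct blob *b = fetch_and_zero(slot);	/* *slot is NULL from here on */

	put_blob(b);				/* a second fini() call is now a no-op */
}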
/* Reset GuC providing us with fresh state for both GuC and HuC.
*/
static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
@@ -83,23 +96,166 @@ void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
- mutex_init(&dev_priv->guc.send_mutex);
+ struct intel_guc *guc = &dev_priv->guc;
+
+ mutex_init(&guc->send_mutex);
+ guc->send = intel_guc_send_mmio;
+}
+
+static void fetch_uc_fw(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ if (!uc_fw->path)
+ return;
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+ if (err)
+ goto fail;
+ if (!fw)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+ uc_fw->path, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_NOTE("Firmware header is missing\n");
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Firmware bits always start from header */
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
+
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+ DRM_NOTE("CSS header definition mismatch\n");
+ goto fail;
+ }
+
+ /* then, uCode */
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ DRM_NOTE("RSA key size is bad\n");
+ goto fail;
+ }
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At least, it should have header, uCode and RSA. Size of all three. */
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_NOTE("Missing firmware components\n");
+ goto fail;
+ }
+
+ /*
+ * The GuC firmware image has the version number embedded at a
+ * well-known offset within the firmware blob; note that major / minor
+ * version are TWO bytes each (i.e. u16), although all pointers and
+ * offsets are defined in terms of bytes (u8).
+ */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ /* Header and uCode will be loaded to WOPCM. Size of the two. */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+
+ /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ goto fail;
+ }
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("Skipping %s firmware version check\n",
+ intel_uc_fw_type_repr(uc_fw->type));
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("%s firmware version %d.%d, required %d.%d\n",
+ intel_uc_fw_type_repr(uc_fw->type),
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+
+ DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+ uc_fw->obj);
+
+ release_firmware(fw);
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+ return;
+
+fail:
+ DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+ uc_fw->path, err);
+ DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+ err, fw, uc_fw->obj);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
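A sketch of the blob layout fetch_uc_fw() derives from the CSS header: the header, the uCode image and the RSA signature sit back to back, with every size carried as a dword count. The struct below is a reduced stand-in for the real uc_css_header, keeping only the fields the arithmetic uses.
#include <stddef.h>
#include <stdint.h>

struct css_sizes {
	uint32_t header_size_dw, size_dw;
	uint32_t key_size_dw, modulus_size_dw, exponent_size_dw;
};

static void uc_fw_layout(const struct css_sizes *css,
			 size_t *header_size, size_t *ucode_size, size_t *rsa_size)
{
	*header_size = (css->header_size_dw - css->modulus_size_dw -
			css->key_size_dw - css->exponent_size_dw) * sizeof(uint32_t);
	*ucode_size  = (css->size_dw - css->header_size_dw) * sizeof(uint32_t);
	*rsa_size    = css->key_size_dw * sizeof(uint32_t);
	/* offsets: header at 0, uCode right after the header, RSA after the uCode */
}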
void intel_uc_init_fw(struct drm_i915_private *dev_priv)
{
- if (dev_priv->huc.fw.path)
- intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw);
+ fetch_uc_fw(dev_priv, &dev_priv->huc.fw);
+ fetch_uc_fw(dev_priv, &dev_priv->guc.fw);
+}
- if (dev_priv->guc.fw.path)
- intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv)
+{
+ __intel_uc_fw_fini(&dev_priv->guc.fw);
+ __intel_uc_fw_fini(&dev_priv->huc.fw);
}
int intel_uc_init_hw(struct drm_i915_private *dev_priv)
{
int ret, attempts;
- /* GuC not enabled, nothing to do */
if (!i915.enable_guc_loading)
return 0;
@@ -109,9 +265,13 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
i915_ggtt_enable_guc(dev_priv);
if (i915.enable_guc_submission) {
+ /*
+ * This is stuff we need to have available at fw load time
+ * if we are planning to enable submission later
+ */
ret = i915_guc_submission_init(dev_priv);
if (ret)
- goto err;
+ goto err_guc;
}
/* WaEnableuKernelHeaderValidFix:skl */
@@ -150,7 +310,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
ret = i915_guc_submission_enable(dev_priv);
if (ret)
- goto err_submission;
+ goto err_interrupts;
}
return 0;
@@ -164,11 +324,12 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv)
* nonfatal error (i.e. it doesn't prevent driver load, but
* marks the GPU as wedged until reset).
*/
+err_interrupts:
+ gen9_disable_guc_interrupts(dev_priv);
err_submission:
if (i915.enable_guc_submission)
i915_guc_submission_fini(dev_priv);
-
-err:
+err_guc:
i915_ggtt_disable_guc(dev_priv);
DRM_ERROR("GuC init failed\n");
@@ -185,11 +346,24 @@ err:
return ret;
}
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv)
+{
+ if (!i915.enable_guc_loading)
+ return;
+
+ if (i915.enable_guc_submission) {
+ i915_guc_submission_disable(dev_priv);
+ gen9_disable_guc_interrupts(dev_priv);
+ i915_guc_submission_fini(dev_priv);
+ }
+ i915_ggtt_disable_guc(dev_priv);
+}
+
/*
* Read GuC command/status register (SOFT_SCRATCH_0)
* Return true if it contains a response rather than a command
*/
-static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
+static bool guc_recv(struct intel_guc *guc, u32 *status)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -198,7 +372,10 @@ static bool intel_guc_recv(struct intel_guc *guc, u32 *status)
return INTEL_GUC_RECV_IS_RESPONSE(val);
}
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+/*
+ * This function implements the MMIO-based host-to-GuC interface.
+ */
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
u32 status;
@@ -209,7 +386,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
return -EINVAL;
mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
dev_priv->guc.action_count += 1;
dev_priv->guc.action_cmd = action[0];
@@ -226,9 +403,9 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
* up to that length of time, then switch to a slower sleep-wait loop.
 * No intel_guc_send command should ever take longer than 10ms.
*/
- ret = wait_for_us(intel_guc_recv(guc, &status), 10);
+ ret = wait_for_us(guc_recv(guc, &status), 10);
if (ret)
- ret = wait_for(intel_guc_recv(guc, &status), 10);
+ ret = wait_for(guc_recv(guc, &status), 10);
if (status != INTEL_GUC_STATUS_SUCCESS) {
/*
* Either the GuC explicitly returned an error (which
@@ -247,7 +424,7 @@ int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
}
dev_priv->guc.action_status = status;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
mutex_unlock(&guc->send_mutex);
return ret;
@@ -268,136 +445,3 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
-
-void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err)
- goto fail;
- if (!fw)
- goto fail;
-
- DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
- uc_fw->path, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_NOTE("Firmware header is missing\n");
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_NOTE("CSS header definition mismatch\n");
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_NOTE("RSA key size is bad\n");
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At least, it should have header, uCode and RSA. Size of all three. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_NOTE("Missing firmware components\n");
- goto fail;
- }
-
- /*
- * The GuC firmware image has the version number embedded at a
- * well-known offset within the firmware blob; note that major / minor
- * version are TWO bytes each (i.e. u16), although all pointers and
- * offsets are defined in terms of bytes (u8).
- */
- switch (uc_fw->type) {
- case INTEL_UC_FW_TYPE_GUC:
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = uc_fw->header_size + uc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
- break;
-
- default:
- DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
- err = -ENOEXEC;
- goto fail;
- }
-
- if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
- DRM_NOTE("Skipping uC firmware version check\n");
- } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
-
- DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
- uc_fw->obj);
-
- release_firmware(fw);
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- return;
-
-fail:
- DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
- uc_fw->path, err);
- DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, uc_fw->obj);
-
- release_firmware(fw); /* OK even if fw is NULL */
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
-}
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index a35ededfaa40..4b7f73aeddac 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -34,7 +34,9 @@ struct drm_i915_gem_request;
/*
* This structure primarily describes the GEM object shared with the GuC.
- * The GEM object is held for the entire lifetime of our interaction with
+ * The specs sometimes refer to this object as a "GuC context", but we use
+ * the term "client" to avoid confusion with hardware contexts. This
+ * GEM object is held for the entire lifetime of our interaction with
* the GuC, being allocated before the GuC is loaded with its firmware.
* Because there's no way to update the address used by the GuC after
* initialisation, the shared object must stay pinned into the GGTT as
@@ -44,7 +46,7 @@ struct drm_i915_gem_request;
*
* The single GEM object described here is actually made up of several
* separate areas, as far as the GuC is concerned. The first page (kept
- * kmap'd) includes the "process decriptor" which holds sequence data for
+ * kmap'd) includes the "process descriptor" which holds sequence data for
* the doorbell, and one cacheline which actually *is* the doorbell; a
* write to this will "ring the doorbell" (i.e. send an interrupt to the
* GuC). The subsequent pages of the client object constitute the work
@@ -72,13 +74,12 @@ struct i915_guc_client {
uint32_t engines; /* bitmap of (host) engine ids */
uint32_t priority;
- uint32_t ctx_index;
+ u32 stage_id;
uint32_t proc_desc_offset;
- uint32_t doorbell_offset;
- uint32_t doorbell_cookie;
- uint16_t doorbell_id;
- uint16_t padding[3]; /* Maintain alignment */
+ u16 doorbell_id;
+ unsigned long doorbell_offset;
+ u32 doorbell_cookie;
spinlock_t wq_lock;
uint32_t wq_offset;
@@ -100,11 +101,40 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_SUCCESS
};
+/* User-friendly representation of an enum */
+static inline
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
+{
+ switch (status) {
+ case INTEL_UC_FIRMWARE_FAIL:
+ return "FAIL";
+ case INTEL_UC_FIRMWARE_NONE:
+ return "NONE";
+ case INTEL_UC_FIRMWARE_PENDING:
+ return "PENDING";
+ case INTEL_UC_FIRMWARE_SUCCESS:
+ return "SUCCESS";
+ }
+ return "<invalid>";
+}
+
enum intel_uc_fw_type {
INTEL_UC_FW_TYPE_GUC,
INTEL_UC_FW_TYPE_HUC
};
+/* User-friendly representation of an enum */
+static inline const char *intel_uc_fw_type_repr(enum intel_uc_fw_type type)
+{
+ switch (type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ return "GuC";
+ case INTEL_UC_FW_TYPE_HUC:
+ return "HuC";
+ }
+ return "uC";
+}
+
/*
* This structure encapsulates all the data needed during the process
* of fetching, caching, and loading the firmware image into the GuC.
@@ -133,11 +163,13 @@ struct intel_uc_fw {
struct intel_guc_log {
uint32_t flags;
struct i915_vma *vma;
- void *buf_addr;
- struct workqueue_struct *flush_wq;
- struct work_struct flush_work;
- struct rchan *relay_chan;
-
+ /* The runtime stuff gets created only when GuC logging gets enabled */
+ struct {
+ void *buf_addr;
+ struct workqueue_struct *flush_wq;
+ struct work_struct flush_work;
+ struct rchan *relay_chan;
+ } runtime;
/* logging related stats */
u32 capture_miss_count;
u32 flush_interrupt_count;
@@ -154,12 +186,13 @@ struct intel_guc {
bool interrupts_enabled;
struct i915_vma *ads_vma;
- struct i915_vma *ctx_pool_vma;
- struct ida ctx_ids;
+ struct i915_vma *stage_desc_pool;
+ void *stage_desc_pool_vaddr;
+ struct ida stage_ids;
struct i915_guc_client *execbuf_client;
- DECLARE_BITMAP(doorbell_bitmap, GUC_MAX_DOORBELLS);
+ DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
uint32_t db_cacheline; /* Cyclic counter mod pagesize */
/* Action status & statistics */
@@ -174,6 +207,9 @@ struct intel_guc {
/* To serialize the intel_guc_send actions */
struct mutex send_mutex;
+
+ /* GuC's FW specific send function */
+ int (*send)(struct intel_guc *guc, const u32 *data, u32 len);
};
struct intel_huc {
@@ -187,17 +223,19 @@ struct intel_huc {
void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
void intel_uc_init_fw(struct drm_i915_private *dev_priv);
+void intel_uc_fini_fw(struct drm_i915_private *dev_priv);
int intel_uc_init_hw(struct drm_i915_private *dev_priv);
-void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
-int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
+void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_guc_sample_forcewake(struct intel_guc *guc);
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len);
+static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
+{
+ return guc->send(guc, action, len);
+}
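A minimal sketch of the indirection intel_guc_send() now provides: the transport is a per-device function pointer (filled in with intel_guc_send_mmio() during early init), so a different back end can later be swapped in without touching any caller. The struct and names below are illustrative.
#include <stdint.h>

struct guc_like {
	int (*send)(struct guc_like *guc, const uint32_t *action, uint32_t len);
};

static inline int guc_like_send(struct guc_like *guc,
				const uint32_t *action, uint32_t len)
{
	return guc->send(guc, action, len);	/* dispatch through the chosen back end */
}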
/* intel_guc_loader.c */
int intel_guc_select_fw(struct intel_guc *guc);
int intel_guc_init_hw(struct intel_guc *guc);
-void intel_guc_fini(struct drm_i915_private *dev_priv);
-const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
int intel_guc_suspend(struct drm_i915_private *dev_priv);
int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
@@ -212,10 +250,11 @@ void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
/* intel_guc_log.c */
-void intel_guc_log_create(struct intel_guc *guc);
+int intel_guc_log_create(struct intel_guc *guc);
+void intel_guc_log_destroy(struct intel_guc *guc);
+int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
void i915_guc_log_register(struct drm_i915_private *dev_priv);
void i915_guc_log_unregister(struct drm_i915_private *dev_priv);
-int i915_guc_log_control(struct drm_i915_private *dev_priv, u64 control_val);
static inline u32 guc_ggtt_offset(struct i915_vma *vma)
{
@@ -227,7 +266,6 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
/* intel_huc.c */
void intel_huc_select_fw(struct intel_huc *huc);
-void intel_huc_fini(struct drm_i915_private *dev_priv);
int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 09f5f02d7901..6d1ea26b2493 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -52,10 +52,10 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
}
static inline void
-fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- WARN_ON(!i915_mmio_reg_valid(d->reg_set));
- __raw_i915_write32(d->i915, d->reg_set, d->val_reset);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
}
static inline void
@@ -69,9 +69,10 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
}
static inline void
-fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
FORCEWAKE_KERNEL) == 0,
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
@@ -79,15 +80,17 @@ fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
}
static inline void
-fw_domain_get(const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(d->i915, d->reg_set, d->val_set);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
}
static inline void
-fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- if (wait_for_atomic((__raw_i915_read32(d->i915, d->reg_ack) &
+ if (wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) &
FORCEWAKE_KERNEL),
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
@@ -95,72 +98,59 @@ fw_domain_wait_ack(const struct intel_uncore_forcewake_domain *d)
}
static inline void
-fw_domain_put(const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct drm_i915_private *i915,
+ const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(d->i915, d->reg_set, d->val_clear);
-}
-
-static inline void
-fw_domain_posting_read(const struct intel_uncore_forcewake_domain *d)
-{
- /* something from same cacheline, but not from the set register */
- if (i915_mmio_reg_valid(d->reg_post))
- __raw_posting_read(d->i915, d->reg_post);
+ __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
}
static void
-fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- for_each_fw_domain_masked(d, fw_domains, dev_priv) {
- fw_domain_wait_ack_clear(d);
- fw_domain_get(d);
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
+ fw_domain_wait_ack_clear(i915, d);
+ fw_domain_get(i915, d);
}
- for_each_fw_domain_masked(d, fw_domains, dev_priv)
- fw_domain_wait_ack(d);
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_wait_ack(i915, d);
- dev_priv->uncore.fw_domains_active |= fw_domains;
+ i915->uncore.fw_domains_active |= fw_domains;
}
static void
-fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- for_each_fw_domain_masked(d, fw_domains, dev_priv) {
- fw_domain_put(d);
- fw_domain_posting_read(d);
- }
-
- dev_priv->uncore.fw_domains_active &= ~fw_domains;
-}
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
-static void
-fw_domains_posting_read(struct drm_i915_private *dev_priv)
-{
- struct intel_uncore_forcewake_domain *d;
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_put(i915, d);
- /* No need to do for all, just do for first found */
- for_each_fw_domain(d, dev_priv) {
- fw_domain_posting_read(d);
- break;
- }
+ i915->uncore.fw_domains_active &= ~fw_domains;
}
static void
-fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains)
+fw_domains_reset(struct drm_i915_private *i915,
+ enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
+ unsigned int tmp;
- if (dev_priv->uncore.fw_domains == 0)
+ if (!fw_domains)
return;
- for_each_fw_domain_masked(d, fw_domains, dev_priv)
- fw_domain_reset(d);
+ GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
- fw_domains_posting_read(dev_priv);
+ for_each_fw_domain_masked(d, fw_domains, i915, tmp)
+ fw_domain_reset(i915, d);
}
static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
@@ -236,7 +226,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct drm_i915_private *dev_priv = domain->i915;
+ struct drm_i915_private *dev_priv =
+ container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
unsigned long irqflags;
assert_rpm_device_not_suspended(dev_priv);
@@ -266,9 +257,11 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
* timers are run before holding.
*/
while (1) {
+ unsigned int tmp;
+
active_domains = 0;
- for_each_fw_domain(domain, dev_priv) {
+ for_each_fw_domain(domain, dev_priv, tmp) {
if (hrtimer_cancel(&domain->timer) == 0)
continue;
@@ -277,7 +270,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- for_each_fw_domain(domain, dev_priv) {
+ for_each_fw_domain(domain, dev_priv, tmp) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
@@ -300,7 +293,7 @@ static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
if (fw)
dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
- fw_domains_reset(dev_priv, FORCEWAKE_ALL);
+ fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
if (restore) { /* If reset with a user forcewake, try to restore */
if (fw)
@@ -457,13 +450,13 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
if (domain->wake_count++)
fw_domains &= ~domain->mask;
- }
if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -520,10 +513,11 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
fw_domains &= dev_priv->uncore.fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv) {
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
if (WARN_ON(domain->wake_count == 0))
continue;
@@ -928,8 +922,11 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
+ unsigned int tmp;
+
+ GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
- for_each_fw_domain_masked(domain, fw_domains, dev_priv)
+ for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
fw_domain_arm_timer(domain);
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
@@ -1141,41 +1138,27 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
WARN_ON(d->wake_count);
+ WARN_ON(!i915_mmio_reg_valid(reg_set));
+ WARN_ON(!i915_mmio_reg_valid(reg_ack));
+
d->wake_count = 0;
d->reg_set = reg_set;
d->reg_ack = reg_ack;
- if (IS_GEN6(dev_priv)) {
- d->val_reset = 0;
- d->val_set = FORCEWAKE_KERNEL;
- d->val_clear = 0;
- } else {
- /* WaRsClearFWBitsAtReset:bdw,skl */
- d->val_reset = _MASKED_BIT_DISABLE(0xffff);
- d->val_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
- d->val_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
- }
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- d->reg_post = FORCEWAKE_ACK_VLV;
- else if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv) || IS_GEN8(dev_priv))
- d->reg_post = ECOBUS;
-
- d->i915 = dev_priv;
d->id = domain_id;
BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER));
BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER));
BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA));
- d->mask = 1 << domain_id;
+ d->mask = BIT(domain_id);
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
- dev_priv->uncore.fw_domains |= (1 << domain_id);
+ dev_priv->uncore.fw_domains |= BIT(domain_id);
- fw_domain_reset(d);
+ fw_domain_reset(dev_priv, d);
}
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
@@ -1183,6 +1166,17 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
return;
+ if (IS_GEN6(dev_priv)) {
+ dev_priv->uncore.fw_reset = 0;
+ dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
+ dev_priv->uncore.fw_clear = 0;
+ } else {
+ /* WaRsClearFWBitsAtReset:bdw,skl */
+ dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
+ dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
+ dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
+ }
+
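A sketch of the masked-register convention behind the _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE values cached above, stated here as an assumption: the upper 16 bits of the write act as a per-bit write-enable mask for the lower 16 bits, so individual bits can be set or cleared without a read-modify-write cycle.
#include <stdint.h>

static uint32_t masked_bit_enable(uint16_t bit)
{
	return ((uint32_t)bit << 16) | bit;	/* mask selects the bit, value sets it */
}

static uint32_t masked_bit_disable(uint16_t bit)
{
	return (uint32_t)bit << 16;		/* mask selects the bit, value clears it */
}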
if (IS_GEN9(dev_priv)) {
dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
@@ -1246,9 +1240,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
spin_lock_irq(&dev_priv->uncore.lock);
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
+ fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
+ fw_domains_put_with_fifo(dev_priv, FORCEWAKE_RENDER);
spin_unlock_irq(&dev_priv->uncore.lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index 926b24c117d6..98b7aac41eec 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -291,8 +291,6 @@ static int begin_live_test(struct live_test *t,
return err;
}
- i915_gem_retire_requests(i915);
-
i915->gpu_error.missed_irq_rings = 0;
t->reset_count = i915_reset_count(&i915->gpu_error);
@@ -303,7 +301,9 @@ static int end_live_test(struct live_test *t)
{
struct drm_i915_private *i915 = t->i915;
- if (wait_for(intel_engines_are_idle(i915), 1)) {
+ i915_gem_retire_requests(i915);
+
+ if (wait_for(intel_engines_are_idle(i915), 10)) {
pr_err("%s(%s): GPU not idle\n", t->func, t->name);
return -EIO;
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 6ec7c731a267..aa31d6c0cdfb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -235,7 +235,6 @@ static void hang_fini(struct hang *h)
i915_gem_object_put(h->hws);
i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
- i915_gem_retire_requests(h->i915);
}
static int igt_hang_sanitycheck(void *arg)
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
index 99da8f4ef497..302f7d103635 100644
--- a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -129,10 +129,10 @@ static const struct dma_buf_ops mock_dmabuf_ops = {
.map_dma_buf = mock_map_dma_buf,
.unmap_dma_buf = mock_unmap_dma_buf,
.release = mock_dmabuf_release,
- .kmap = mock_dmabuf_kmap,
- .kmap_atomic = mock_dmabuf_kmap_atomic,
- .kunmap = mock_dmabuf_kunmap,
- .kunmap_atomic = mock_dmabuf_kunmap_atomic,
+ .map = mock_dmabuf_kmap,
+ .map_atomic = mock_dmabuf_kmap_atomic,
+ .unmap = mock_dmabuf_kunmap,
+ .unmap_atomic = mock_dmabuf_kunmap_atomic,
.mmap = mock_dmabuf_mmap,
.vmap = mock_dmabuf_vmap,
.vunmap = mock_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
index 113dec05c7dc..09c704153456 100644
--- a/drivers/gpu/drm/i915/selftests/mock_drm.c
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.c
@@ -24,31 +24,50 @@
#include "mock_drm.h"
-static inline struct inode fake_inode(struct drm_i915_private *i915)
-{
- return (struct inode){ .i_rdev = i915->drm.primary->index };
-}
-
struct drm_file *mock_file(struct drm_i915_private *i915)
{
- struct inode inode = fake_inode(i915);
- struct file filp = {};
+ struct file *filp;
+ struct inode *inode;
struct drm_file *file;
int err;
- err = drm_open(&inode, &filp);
- if (unlikely(err))
- return ERR_PTR(err);
+ inode = kzalloc(sizeof(*inode), GFP_KERNEL);
+ if (!inode) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ inode->i_rdev = i915->drm.primary->index;
- file = filp.private_data;
+ filp = kzalloc(sizeof(*filp), GFP_KERNEL);
+ if (!filp) {
+ err = -ENOMEM;
+ goto err_inode;
+ }
+
+ err = drm_open(inode, filp);
+ if (err)
+ goto err_filp;
+
+ file = filp->private_data;
+ memset(&file->filp, POISON_INUSE, sizeof(file->filp));
file->authenticated = true;
+
+ kfree(filp);
+ kfree(inode);
return file;
+
+err_filp:
+ kfree(filp);
+err_inode:
+ kfree(inode);
+err:
+ return ERR_PTR(err);
}
void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
{
- struct inode inode = fake_inode(i915);
struct file filp = { .private_data = file };
- drm_release(&inode, &filp);
+ drm_release(NULL, &filp);
}
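
Editor's note: the rewritten mock_file() trades the on-stack struct inode and struct file for kzalloc'd temporaries so drm_open() only ever sees heap pointers, and unwinds allocations with the usual goto ladder. A minimal sketch of that unwind idiom, assuming only kzalloc()/kfree() are involved; the names (struct demo_pair, demo_pair_create) are hypothetical and not part of the patch:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_pair { void *a; void *b; };

static struct demo_pair *demo_pair_create(void)
{
        struct demo_pair *p;
        int err;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                err = -ENOMEM;
                goto err;
        }

        p->a = kzalloc(16, GFP_KERNEL);
        if (!p->a) {
                err = -ENOMEM;
                goto err_pair;
        }

        p->b = kzalloc(16, GFP_KERNEL);
        if (!p->b) {
                err = -ENOMEM;
                goto err_a;
        }

        return p;

err_a:
        kfree(p->a);
err_pair:
        kfree(p);
err:
        return ERR_PTR(err);    /* caller checks with IS_ERR() */
}

Each label undoes exactly the allocations that succeeded before the jump, which is why the labels are ordered in reverse of the allocations.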
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 8d5ba037064c..0ad624a1db90 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -118,7 +118,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
INIT_LIST_HEAD(&ring->request_list);
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
return ring;
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0e8d2e7f8c70..8097e3693ec4 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -35,7 +35,7 @@ mock_request(struct intel_engine_cs *engine,
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = i915_gem_request_alloc(engine, context);
- if (!request)
+ if (IS_ERR(request))
return NULL;
mock = container_of(request, typeof(*mock), base);
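
Editor's note: the one-line fix above matters because i915_gem_request_alloc() reports failure as an errno encoded in the pointer rather than as NULL, so the selftest must test with IS_ERR(). A hedged sketch of the ERR_PTR/IS_ERR/PTR_ERR convention on a hypothetical allocator (demo_alloc_page/demo_use are illustrative names, not i915 code):

#include <linux/err.h>
#include <linux/mm.h>

/* Hypothetical allocator that follows the ERR_PTR convention. */
static struct page *demo_alloc_page(gfp_t gfp)
{
        struct page *page = alloc_page(gfp);

        if (!page)
                return ERR_PTR(-ENOMEM);        /* encode errno in the pointer */
        return page;
}

static int demo_use(void)
{
        struct page *page = demo_alloc_page(GFP_KERNEL);

        if (IS_ERR(page))               /* not a NULL check */
                return PTR_ERR(page);   /* recover the errno */

        __free_page(page);
        return 0;
}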
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
index eb2cda8e2b9f..1cc5d2931753 100644
--- a/drivers/gpu/drm/i915/selftests/scatterlist.c
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -189,6 +189,13 @@ static unsigned int random(unsigned long n,
return 1 + (prandom_u32_state(rnd) % 1024);
}
+static inline bool page_contiguous(struct page *first,
+ struct page *last,
+ unsigned long npages)
+{
+ return first + npages == last;
+}
+
static int alloc_table(struct pfn_table *pt,
unsigned long count, unsigned long max,
npages_fn_t npages_fn,
@@ -216,7 +223,9 @@ static int alloc_table(struct pfn_table *pt,
unsigned long npages = npages_fn(n, count, rnd);
/* Nobody expects the Sparse Memmap! */
- if (pfn_to_page(pfn + npages) != pfn_to_page(pfn) + npages) {
+ if (!page_contiguous(pfn_to_page(pfn),
+ pfn_to_page(pfn + npages),
+ npages)) {
sg_free_table(&pt->st);
return -ENOSPC;
}
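
Editor's note: the page_contiguous() helper added above names the sparse-memmap pitfall: struct page entries are only guaranteed contiguous within a memory section, so pfn_to_page(pfn) + npages may differ from pfn_to_page(pfn + npages). A small sketch, with illustrative helper names, of using that guard before coalescing a pfn run into a single scatterlist entry:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Illustrative helper mirroring the check added in the selftest. */
static bool demo_pages_contiguous(unsigned long pfn, unsigned long npages)
{
        return pfn_to_page(pfn) + npages == pfn_to_page(pfn + npages);
}

/* Fill one sg entry from a pfn run, refusing runs that cross a memmap hole. */
static int demo_fill_sg(struct scatterlist *sg,
                        unsigned long pfn, unsigned long npages)
{
        if (!demo_pages_contiguous(pfn, npages))
                return -ENOSPC;

        sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
        return 0;
}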
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index f2c9ae822149..c9e439c82241 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -31,13 +31,6 @@ config DRM_IMX_LDB
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
-config DRM_IMX_IPUV3
- tristate
- depends on DRM_IMX
- depends on IMX_IPUV3_CORE
- default y if DRM_IMX=y
- default m if DRM_IMX=m
-
config DRM_IMX_HDMI
tristate "Freescale i.MX DRM HDMI"
select DRM_DW_HDMI
diff --git a/drivers/gpu/drm/imx/Makefile b/drivers/gpu/drm/imx/Makefile
index f3ecd8903d97..16ecef33e008 100644
--- a/drivers/gpu/drm/imx/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
@@ -1,5 +1,5 @@
-imxdrm-objs := imx-drm-core.o
+imxdrm-objs := imx-drm-core.o ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX) += imxdrm.o
@@ -7,6 +7,5 @@ obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
-imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
obj-$(CONFIG_DRM_IMX_HDMI) += dw_hdmi-imx.o
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1888bf3920fc..50add2f9e250 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -422,7 +422,23 @@ static struct platform_driver imx_drm_pdrv = {
.of_match_table = imx_drm_dt_ids,
},
};
-module_platform_driver(imx_drm_pdrv);
+
+static struct platform_driver * const drivers[] = {
+ &imx_drm_pdrv,
+ &ipu_drm_driver,
+};
+
+static int __init imx_drm_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(imx_drm_init);
+
+static void __exit imx_drm_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(imx_drm_exit);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX drm driver core");
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 295434b199db..f6dd64be9cd5 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -29,6 +29,8 @@ int imx_drm_init_drm(struct platform_device *pdev,
int preferred_bpp);
int imx_drm_exit_drm(void);
+extern struct platform_driver ipu_drm_driver;
+
void imx_drm_mode_config_init(struct drm_device *drm);
struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dab9d50ffd8c..5456c15d962c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -465,16 +465,10 @@ static int ipu_drm_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver ipu_drm_driver = {
+struct platform_driver ipu_drm_driver = {
.driver = {
.name = "imx-ipuv3-crtc",
},
.probe = ipu_drm_probe,
.remove = ipu_drm_remove,
};
-module_platform_driver(ipu_drm_driver);
-
-MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-crtc");
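
Editor's note: because ipuv3-crtc.o is now linked into imxdrm.o, the crtc driver stops registering itself and drops its MODULE_* boilerplate; imx-drm-core registers both platform drivers from a single init via platform_register_drivers(), which also unwinds partial registration on failure. A minimal sketch of that pattern with hypothetical driver names:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical drivers standing in for the core and crtc drivers. */
static struct platform_driver demo_core_driver = {
        .driver = { .name = "demo-core" },
};

static struct platform_driver demo_crtc_driver = {
        .driver = { .name = "demo-crtc" },
};

static struct platform_driver * const demo_drivers[] = {
        &demo_core_driver,
        &demo_crtc_driver,
};

static int __init demo_init(void)
{
        /* Registers all drivers, unregistering earlier ones on failure. */
        return platform_register_drivers(demo_drivers, ARRAY_SIZE(demo_drivers));
}
module_init(demo_init);

static void __exit demo_exit(void)
{
        platform_unregister_drivers(demo_drivers, ARRAY_SIZE(demo_drivers));
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");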
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index c70310206ac5..a14d7d64d7b1 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -35,18 +35,28 @@
#define DISP_REG_OVL_PITCH(n) (0x0044 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_CTRL(n) (0x00c0 + 0x20 * (n))
#define DISP_REG_OVL_RDMA_GMC(n) (0x00c8 + 0x20 * (n))
-#define DISP_REG_OVL_ADDR(n) (0x0f40 + 0x20 * (n))
+#define DISP_REG_OVL_ADDR_MT2701 0x0040
+#define DISP_REG_OVL_ADDR_MT8173 0x0f40
+#define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n))
#define OVL_RDMA_MEM_GMC 0x40402020
#define OVL_CON_BYTE_SWAP BIT(24)
-#define OVL_CON_CLRFMT_RGB565 (0 << 12)
-#define OVL_CON_CLRFMT_RGB888 (1 << 12)
+#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (2 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (3 << 12)
+#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
+ 0 : OVL_CON_CLRFMT_RGB)
+#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
+ OVL_CON_CLRFMT_RGB : 0)
#define OVL_CON_AEN BIT(8)
#define OVL_CON_ALPHA 0xff
+struct mtk_disp_ovl_data {
+ unsigned int addr;
+ bool fmt_rgb565_is_0;
+};
+
/**
* struct mtk_disp_ovl - DISP_OVL driver structure
* @ddp_comp - structure containing type enum and hardware resources
@@ -55,8 +65,14 @@
struct mtk_disp_ovl {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ const struct mtk_disp_ovl_data *data;
};
+static inline struct mtk_disp_ovl *comp_to_ovl(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_ovl, ddp_comp);
+}
+
static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_ovl *priv = dev_id;
@@ -76,20 +92,18 @@ static irqreturn_t mtk_disp_ovl_irq_handler(int irq, void *dev_id)
static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp,
struct drm_crtc *crtc)
{
- struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
- ddp_comp);
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- priv->crtc = crtc;
+ ovl->crtc = crtc;
writel(0x0, comp->regs + DISP_REG_OVL_INTSTA);
writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN);
}
static void mtk_ovl_disable_vblank(struct mtk_ddp_comp *comp)
{
- struct mtk_disp_ovl *priv = container_of(comp, struct mtk_disp_ovl,
- ddp_comp);
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
- priv->crtc = NULL;
+ ovl->crtc = NULL;
writel_relaxed(0x0, comp->regs + DISP_REG_OVL_INTEN);
}
@@ -138,18 +152,18 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
writel(0x0, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(unsigned int fmt)
+static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
- return OVL_CON_CLRFMT_RGB565;
+ return OVL_CON_CLRFMT_RGB565(ovl);
case DRM_FORMAT_BGR565:
- return OVL_CON_CLRFMT_RGB565 | OVL_CON_BYTE_SWAP;
+ return OVL_CON_CLRFMT_RGB565(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGB888:
- return OVL_CON_CLRFMT_RGB888;
+ return OVL_CON_CLRFMT_RGB888(ovl);
case DRM_FORMAT_BGR888:
- return OVL_CON_CLRFMT_RGB888 | OVL_CON_BYTE_SWAP;
+ return OVL_CON_CLRFMT_RGB888(ovl) | OVL_CON_BYTE_SWAP;
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_RGBA8888:
return OVL_CON_CLRFMT_ARGB8888;
@@ -168,6 +182,7 @@ static unsigned int ovl_fmt_convert(unsigned int fmt)
static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
struct mtk_plane_state *state)
{
+ struct mtk_disp_ovl *ovl = comp_to_ovl(comp);
struct mtk_plane_pending_state *pending = &state->pending;
unsigned int addr = pending->addr;
unsigned int pitch = pending->pitch & 0xffff;
@@ -179,7 +194,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
if (!pending->enable)
mtk_ovl_layer_off(comp, idx);
- con = ovl_fmt_convert(fmt);
+ con = ovl_fmt_convert(ovl, fmt);
if (idx != 0)
con |= OVL_CON_AEN | OVL_CON_ALPHA;
@@ -187,7 +202,7 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx));
writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx));
writel_relaxed(offset, comp->regs + DISP_REG_OVL_OFFSET(idx));
- writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(idx));
+ writel_relaxed(addr, comp->regs + DISP_REG_OVL_ADDR(ovl, idx));
if (pending->enable)
mtk_ovl_layer_on(comp, idx);
@@ -264,6 +279,8 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev)
return ret;
}
+ priv->data = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, priv);
ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
@@ -287,8 +304,21 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT2701,
+ .fmt_rgb565_is_0 = false,
+};
+
+static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = {
+ .addr = DISP_REG_OVL_ADDR_MT8173,
+ .fmt_rgb565_is_0 = true,
+};
+
static const struct of_device_id mtk_disp_ovl_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-ovl", },
+ { .compatible = "mediatek,mt2701-disp-ovl",
+ .data = &mt2701_ovl_driver_data},
+ { .compatible = "mediatek,mt8173-disp-ovl",
+ .data = &mt8173_ovl_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_ovl_driver_dt_match);
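
Editor's note: the MT2701 support above follows the standard per-SoC driver-data pattern: a small const struct hangs off each of_device_id entry and probe fetches it with of_device_get_match_data(). A minimal hedged sketch of the same pattern with hypothetical names and register offsets:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_soc_data {
        unsigned int reg_offset;
};

static const struct demo_soc_data demo_soca_data = { .reg_offset = 0x0040 };
static const struct demo_soc_data demo_socb_data = { .reg_offset = 0x0f40 };

static const struct of_device_id demo_dt_match[] = {
        { .compatible = "vendor,soca-demo", .data = &demo_soca_data },
        { .compatible = "vendor,socb-demo", .data = &demo_socb_data },
        { }
};

static int demo_probe(struct platform_device *pdev)
{
        const struct demo_soc_data *data;

        /* Returns the .data of the of_device_id entry that matched. */
        data = of_device_get_match_data(&pdev->dev);
        if (!data)
                return -EINVAL;

        dev_info(&pdev->dev, "register offset 0x%x\n", data->reg_offset);
        return 0;
}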
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 0df05f95b916..b68a51376f83 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -38,6 +38,11 @@
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
+#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+
+struct mtk_disp_rdma_data {
+ unsigned int fifo_size;
+};
/**
* struct mtk_disp_rdma - DISP_RDMA driver structure
@@ -47,8 +52,14 @@
struct mtk_disp_rdma {
struct mtk_ddp_comp ddp_comp;
struct drm_crtc *crtc;
+ const struct mtk_disp_rdma_data *data;
};
+static inline struct mtk_disp_rdma *comp_to_rdma(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_rdma, ddp_comp);
+}
+
static irqreturn_t mtk_disp_rdma_irq_handler(int irq, void *dev_id)
{
struct mtk_disp_rdma *priv = dev_id;
@@ -77,20 +88,18 @@ static void rdma_update_bits(struct mtk_ddp_comp *comp, unsigned int reg,
static void mtk_rdma_enable_vblank(struct mtk_ddp_comp *comp,
struct drm_crtc *crtc)
{
- struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
- ddp_comp);
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- priv->crtc = crtc;
+ rdma->crtc = crtc;
rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT,
RDMA_FRAME_END_INT);
}
static void mtk_rdma_disable_vblank(struct mtk_ddp_comp *comp)
{
- struct mtk_disp_rdma *priv = container_of(comp, struct mtk_disp_rdma,
- ddp_comp);
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
- priv->crtc = NULL;
+ rdma->crtc = NULL;
rdma_update_bits(comp, DISP_REG_RDMA_INT_ENABLE, RDMA_FRAME_END_INT, 0);
}
@@ -111,6 +120,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
{
unsigned int threshold;
unsigned int reg;
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0, 0xfff, width);
rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_1, 0xfffff, height);
@@ -123,7 +133,7 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
*/
threshold = width * height * vrefresh * 4 * 7 / 1000000;
reg = RDMA_FIFO_UNDERFLOW_EN |
- RDMA_FIFO_PSEUDO_SIZE(SZ_8K) |
+ RDMA_FIFO_PSEUDO_SIZE(RDMA_FIFO_SIZE(rdma)) |
RDMA_OUTPUT_VALID_FIFO_THRESHOLD(threshold);
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
@@ -208,6 +218,8 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
return ret;
}
+ priv->data = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, priv);
ret = component_add(dev, &mtk_disp_rdma_component_ops);
@@ -224,8 +236,19 @@ static int mtk_disp_rdma_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_disp_rdma_data mt2701_rdma_driver_data = {
+ .fifo_size = SZ_4K,
+};
+
+static const struct mtk_disp_rdma_data mt8173_rdma_driver_data = {
+ .fifo_size = SZ_8K,
+};
+
static const struct of_device_id mtk_disp_rdma_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-rdma", },
+ { .compatible = "mediatek,mt2701-disp-rdma",
+ .data = &mt2701_rdma_driver_data},
+ { .compatible = "mediatek,mt8173-disp-rdma",
+ .data = &mt8173_rdma_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mtk_disp_rdma_driver_dt_match);
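
Editor's note: the threshold computed above, width * height * vrefresh * 4 * 7 / 1000000, reads as roughly 7 µs worth of 4-byte pixels that the FIFO must be able to bridge; that reading and the mode below are assumptions for illustration, not quoted from the driver. Plugging in a hypothetical 1080p60 mode shows the magnitude:

/* Illustrative arithmetic only; values are assumed, not from the driver. */
#include <stdio.h>

int main(void)
{
        unsigned int width = 1920, height = 1080, vrefresh = 60;

        /* bytes/s at 4 bytes per pixel, scaled to ~7 us of output */
        unsigned long long threshold =
                (unsigned long long)width * height * vrefresh * 4 * 7 / 1000000;

        printf("threshold = %llu bytes\n", threshold);        /* 3483 */
        return 0;
}

At 3483 bytes the result still fits under the MT2701 SZ_4K pseudo FIFO size, which is presumably why the smaller per-SoC fifo_size is viable there.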
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 69982f5a6198..6b08774e5501 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -327,6 +327,42 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
pm_runtime_put(drm->dev);
}
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+{
+ struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
+ struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ unsigned int i;
+
+ /*
+ * TODO: instead of updating the registers here, we should prepare
+ * working registers in atomic_commit and let the hardware command
+ * queue update module registers on vblank.
+ */
+ if (state->pending_config) {
+ mtk_ddp_comp_config(ovl, state->pending_width,
+ state->pending_height,
+ state->pending_vrefresh, 0);
+
+ state->pending_config = false;
+ }
+
+ if (mtk_crtc->pending_planes) {
+ for (i = 0; i < OVL_LAYER_NR; i++) {
+ struct drm_plane *plane = &mtk_crtc->planes[i];
+ struct mtk_plane_state *plane_state;
+
+ plane_state = to_mtk_plane_state(plane->state);
+
+ if (plane_state->pending.config) {
+ mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ plane_state->pending.config = false;
+ }
+ }
+ mtk_crtc->pending_planes = false;
+ }
+}
+
static void mtk_drm_crtc_enable(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
@@ -403,6 +439,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0;
int i;
@@ -424,6 +461,12 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (crtc->state->color_mgmt_changed)
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
+
+ if (priv->data->shadow_register) {
+ mtk_disp_mutex_acquire(mtk_crtc->mutex);
+ mtk_crtc_ddp_config(crtc);
+ mtk_disp_mutex_release(mtk_crtc->mutex);
+ }
}
static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -471,36 +514,10 @@ err_cleanup_crtc:
void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- unsigned int i;
+ struct mtk_drm_private *priv = crtc->dev->dev_private;
- /*
- * TODO: instead of updating the registers here, we should prepare
- * working registers in atomic_commit and let the hardware command
- * queue update module registers on vblank.
- */
- if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
- state->pending_height,
- state->pending_vrefresh, 0);
-
- state->pending_config = false;
- }
-
- if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
- struct drm_plane *plane = &mtk_crtc->planes[i];
- struct mtk_plane_state *plane_state;
-
- plane_state = to_mtk_plane_state(plane->state);
-
- if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
- plane_state->pending.config = false;
- }
- }
- mtk_crtc->pending_planes = false;
- }
+ if (!priv->data->shadow_register)
+ mtk_crtc_ddp_config(crtc);
mtk_drm_finish_page_flip(mtk_crtc);
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 17ba9355a49c..8130f3dab661 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -12,6 +12,7 @@
*/
#include <linux/clk.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -31,26 +32,40 @@
#define DISP_REG_CONFIG_DISP_RDMA1_MOUT_EN 0x0c8
#define DISP_REG_CONFIG_MMSYS_CG_CON0 0x100
+#define DISP_REG_CONFIG_DISP_OVL_MOUT_EN 0x030
+#define DISP_REG_CONFIG_OUT_SEL 0x04c
+#define DISP_REG_CONFIG_DSI_SEL 0x050
+
#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n))
+#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n))
#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n))
#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n))
#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n))
-#define MUTEX_MOD_DISP_OVL0 BIT(11)
-#define MUTEX_MOD_DISP_OVL1 BIT(12)
-#define MUTEX_MOD_DISP_RDMA0 BIT(13)
-#define MUTEX_MOD_DISP_RDMA1 BIT(14)
-#define MUTEX_MOD_DISP_RDMA2 BIT(15)
-#define MUTEX_MOD_DISP_WDMA0 BIT(16)
-#define MUTEX_MOD_DISP_WDMA1 BIT(17)
-#define MUTEX_MOD_DISP_COLOR0 BIT(18)
-#define MUTEX_MOD_DISP_COLOR1 BIT(19)
-#define MUTEX_MOD_DISP_AAL BIT(20)
-#define MUTEX_MOD_DISP_GAMMA BIT(21)
-#define MUTEX_MOD_DISP_UFOE BIT(22)
-#define MUTEX_MOD_DISP_PWM0 BIT(23)
-#define MUTEX_MOD_DISP_PWM1 BIT(24)
-#define MUTEX_MOD_DISP_OD BIT(25)
+#define INT_MUTEX BIT(1)
+
+#define MT8173_MUTEX_MOD_DISP_OVL0 BIT(11)
+#define MT8173_MUTEX_MOD_DISP_OVL1 BIT(12)
+#define MT8173_MUTEX_MOD_DISP_RDMA0 BIT(13)
+#define MT8173_MUTEX_MOD_DISP_RDMA1 BIT(14)
+#define MT8173_MUTEX_MOD_DISP_RDMA2 BIT(15)
+#define MT8173_MUTEX_MOD_DISP_WDMA0 BIT(16)
+#define MT8173_MUTEX_MOD_DISP_WDMA1 BIT(17)
+#define MT8173_MUTEX_MOD_DISP_COLOR0 BIT(18)
+#define MT8173_MUTEX_MOD_DISP_COLOR1 BIT(19)
+#define MT8173_MUTEX_MOD_DISP_AAL BIT(20)
+#define MT8173_MUTEX_MOD_DISP_GAMMA BIT(21)
+#define MT8173_MUTEX_MOD_DISP_UFOE BIT(22)
+#define MT8173_MUTEX_MOD_DISP_PWM0 BIT(23)
+#define MT8173_MUTEX_MOD_DISP_PWM1 BIT(24)
+#define MT8173_MUTEX_MOD_DISP_OD BIT(25)
+
+#define MT2701_MUTEX_MOD_DISP_OVL BIT(3)
+#define MT2701_MUTEX_MOD_DISP_WDMA BIT(6)
+#define MT2701_MUTEX_MOD_DISP_COLOR BIT(7)
+#define MT2701_MUTEX_MOD_DISP_BLS BIT(9)
+#define MT2701_MUTEX_MOD_DISP_RDMA0 BIT(10)
+#define MT2701_MUTEX_MOD_DISP_RDMA1 BIT(12)
#define MUTEX_SOF_SINGLE_MODE 0
#define MUTEX_SOF_DSI0 1
@@ -67,6 +82,10 @@
#define DPI0_SEL_IN_RDMA1 0x1
#define COLOR1_SEL_IN_OVL1 0x1
+#define OVL_MOUT_EN_RDMA 0x1
+#define BLS_TO_DSI_RDMA1_TO_DPI1 0x8
+#define DSI_SEL_IN_BLS 0x0
+
struct mtk_disp_mutex {
int id;
bool claimed;
@@ -77,24 +96,34 @@ struct mtk_ddp {
struct clk *clk;
void __iomem *regs;
struct mtk_disp_mutex mutex[10];
+ const unsigned int *mutex_mod;
+};
+
+static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_BLS] = MT2701_MUTEX_MOD_DISP_BLS,
+ [DDP_COMPONENT_COLOR0] = MT2701_MUTEX_MOD_DISP_COLOR,
+ [DDP_COMPONENT_OVL0] = MT2701_MUTEX_MOD_DISP_OVL,
+ [DDP_COMPONENT_RDMA0] = MT2701_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT2701_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_WDMA0] = MT2701_MUTEX_MOD_DISP_WDMA,
};
-static const unsigned int mutex_mod[DDP_COMPONENT_ID_MAX] = {
- [DDP_COMPONENT_AAL] = MUTEX_MOD_DISP_AAL,
- [DDP_COMPONENT_COLOR0] = MUTEX_MOD_DISP_COLOR0,
- [DDP_COMPONENT_COLOR1] = MUTEX_MOD_DISP_COLOR1,
- [DDP_COMPONENT_GAMMA] = MUTEX_MOD_DISP_GAMMA,
- [DDP_COMPONENT_OD] = MUTEX_MOD_DISP_OD,
- [DDP_COMPONENT_OVL0] = MUTEX_MOD_DISP_OVL0,
- [DDP_COMPONENT_OVL1] = MUTEX_MOD_DISP_OVL1,
- [DDP_COMPONENT_PWM0] = MUTEX_MOD_DISP_PWM0,
- [DDP_COMPONENT_PWM1] = MUTEX_MOD_DISP_PWM1,
- [DDP_COMPONENT_RDMA0] = MUTEX_MOD_DISP_RDMA0,
- [DDP_COMPONENT_RDMA1] = MUTEX_MOD_DISP_RDMA1,
- [DDP_COMPONENT_RDMA2] = MUTEX_MOD_DISP_RDMA2,
- [DDP_COMPONENT_UFOE] = MUTEX_MOD_DISP_UFOE,
- [DDP_COMPONENT_WDMA0] = MUTEX_MOD_DISP_WDMA0,
- [DDP_COMPONENT_WDMA1] = MUTEX_MOD_DISP_WDMA1,
+static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL] = MT8173_MUTEX_MOD_DISP_AAL,
+ [DDP_COMPONENT_COLOR0] = MT8173_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_COLOR1] = MT8173_MUTEX_MOD_DISP_COLOR1,
+ [DDP_COMPONENT_GAMMA] = MT8173_MUTEX_MOD_DISP_GAMMA,
+ [DDP_COMPONENT_OD] = MT8173_MUTEX_MOD_DISP_OD,
+ [DDP_COMPONENT_OVL0] = MT8173_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL1] = MT8173_MUTEX_MOD_DISP_OVL1,
+ [DDP_COMPONENT_PWM0] = MT8173_MUTEX_MOD_DISP_PWM0,
+ [DDP_COMPONENT_PWM1] = MT8173_MUTEX_MOD_DISP_PWM1,
+ [DDP_COMPONENT_RDMA0] = MT8173_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA1] = MT8173_MUTEX_MOD_DISP_RDMA1,
+ [DDP_COMPONENT_RDMA2] = MT8173_MUTEX_MOD_DISP_RDMA2,
+ [DDP_COMPONENT_UFOE] = MT8173_MUTEX_MOD_DISP_UFOE,
+ [DDP_COMPONENT_WDMA0] = MT8173_MUTEX_MOD_DISP_WDMA0,
+ [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1,
};
static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
@@ -106,6 +135,9 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_COLOR0) {
*addr = DISP_REG_CONFIG_DISP_OVL0_MOUT_EN;
value = OVL0_MOUT_EN_COLOR0;
+ } else if (cur == DDP_COMPONENT_OVL0 && next == DDP_COMPONENT_RDMA0) {
+ *addr = DISP_REG_CONFIG_DISP_OVL_MOUT_EN;
+ value = OVL_MOUT_EN_RDMA;
} else if (cur == DDP_COMPONENT_OD && next == DDP_COMPONENT_RDMA0) {
*addr = DISP_REG_CONFIG_DISP_OD_MOUT_EN;
value = OD_MOUT_EN_RDMA0;
@@ -143,6 +175,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_OVL1 && next == DDP_COMPONENT_COLOR1) {
*addr = DISP_REG_CONFIG_DISP_COLOR1_SEL_IN;
value = COLOR1_SEL_IN_OVL1;
+ } else if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSI_SEL;
+ value = DSI_SEL_IN_BLS;
} else {
value = 0;
}
@@ -150,6 +185,15 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
return value;
}
+static void mtk_ddp_sout_sel(void __iomem *config_regs,
+ enum mtk_ddp_comp_id cur,
+ enum mtk_ddp_comp_id next)
+{
+ if (cur == DDP_COMPONENT_BLS && next == DDP_COMPONENT_DSI0)
+ writel_relaxed(BLS_TO_DSI_RDMA1_TO_DPI1,
+ config_regs + DISP_REG_CONFIG_OUT_SEL);
+}
+
void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
enum mtk_ddp_comp_id cur,
enum mtk_ddp_comp_id next)
@@ -162,6 +206,8 @@ void mtk_ddp_add_comp_to_path(void __iomem *config_regs,
writel_relaxed(reg, config_regs + addr);
}
+ mtk_ddp_sout_sel(config_regs, cur, next);
+
value = mtk_ddp_sel_in(cur, next, &addr);
if (value) {
reg = readl_relaxed(config_regs + addr) | value;
@@ -247,7 +293,7 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex,
break;
default:
reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg |= mutex_mod[id];
+ reg |= ddp->mutex_mod[id];
writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
return;
}
@@ -273,7 +319,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
break;
default:
reg = readl_relaxed(ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
- reg &= ~mutex_mod[id];
+ reg &= ~(ddp->mutex_mod[id]);
writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_MOD(mutex->id));
break;
}
@@ -299,6 +345,27 @@ void mtk_disp_mutex_disable(struct mtk_disp_mutex *mutex)
writel(0, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
}
+void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+ u32 tmp;
+
+ writel(1, ddp->regs + DISP_REG_MUTEX_EN(mutex->id));
+ writel(1, ddp->regs + DISP_REG_MUTEX(mutex->id));
+ if (readl_poll_timeout_atomic(ddp->regs + DISP_REG_MUTEX(mutex->id),
+ tmp, tmp & INT_MUTEX, 1, 10000))
+ pr_err("could not acquire mutex %d\n", mutex->id);
+}
+
+void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex)
+{
+ struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp,
+ mutex[mutex->id]);
+
+ writel(0, ddp->regs + DISP_REG_MUTEX(mutex->id));
+}
+
static int mtk_ddp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -326,6 +393,8 @@ static int mtk_ddp_probe(struct platform_device *pdev)
return PTR_ERR(ddp->regs);
}
+ ddp->mutex_mod = of_device_get_match_data(dev);
+
platform_set_drvdata(pdev, ddp);
return 0;
@@ -337,7 +406,8 @@ static int mtk_ddp_remove(struct platform_device *pdev)
}
static const struct of_device_id ddp_driver_dt_match[] = {
- { .compatible = "mediatek,mt8173-disp-mutex" },
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod},
+ { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod},
{},
};
MODULE_DEVICE_TABLE(of, ddp_driver_dt_match);
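
Editor's note: mtk_disp_mutex_acquire() above leans on readl_poll_timeout_atomic() from <linux/iopoll.h>, polling the INT_MUTEX bit every 1 µs and giving up after 10 ms. A minimal hedged sketch of that helper against a hypothetical status register:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/types.h>

#define DEMO_STATUS     0x04            /* hypothetical register offset */
#define DEMO_READY      BIT(0)          /* hypothetical ready bit */

/* Poll DEMO_READY every 1 us; returns 0 or -ETIMEDOUT after 10 ms. */
static int demo_wait_ready(void __iomem *regs)
{
        u32 val;

        return readl_poll_timeout_atomic(regs + DEMO_STATUS, val,
                                         val & DEMO_READY, 1, 10000);
}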
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
index 92c11752ff65..f9a799168077 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.h
@@ -37,5 +37,7 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex,
enum mtk_ddp_comp_id id);
void mtk_disp_mutex_unprepare(struct mtk_disp_mutex *mutex);
void mtk_disp_mutex_put(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_acquire(struct mtk_disp_mutex *mutex);
+void mtk_disp_mutex_release(struct mtk_disp_mutex *mutex);
#endif /* MTK_DRM_DDP_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 48cc01fd20c7..8b52416b6e41 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -39,9 +39,11 @@
#define DISP_REG_UFO_START 0x0000
#define DISP_COLOR_CFG_MAIN 0x0400
-#define DISP_COLOR_START 0x0c00
-#define DISP_COLOR_WIDTH 0x0c50
-#define DISP_COLOR_HEIGHT 0x0c54
+#define DISP_COLOR_START_MT2701 0x0f00
+#define DISP_COLOR_START_MT8173 0x0c00
+#define DISP_COLOR_START(comp) ((comp)->data->color_offset)
+#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50)
+#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54)
#define DISP_AAL_EN 0x0000
#define DISP_AAL_SIZE 0x0030
@@ -80,6 +82,20 @@
#define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4)
#define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0)
+struct mtk_disp_color_data {
+ unsigned int color_offset;
+};
+
+struct mtk_disp_color {
+ struct mtk_ddp_comp ddp_comp;
+ const struct mtk_disp_color_data *data;
+};
+
+static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp)
+{
+ return container_of(comp, struct mtk_disp_color, ddp_comp);
+}
+
void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc,
unsigned int CFG)
{
@@ -107,15 +123,19 @@ static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w,
unsigned int h, unsigned int vrefresh,
unsigned int bpc)
{
- writel(w, comp->regs + DISP_COLOR_WIDTH);
- writel(h, comp->regs + DISP_COLOR_HEIGHT);
+ struct mtk_disp_color *color = comp_to_color(comp);
+
+ writel(w, comp->regs + DISP_COLOR_WIDTH(color));
+ writel(h, comp->regs + DISP_COLOR_HEIGHT(color));
}
static void mtk_color_start(struct mtk_ddp_comp *comp)
{
+ struct mtk_disp_color *color = comp_to_color(comp);
+
writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL,
comp->regs + DISP_COLOR_CFG_MAIN);
- writel(0x1, comp->regs + DISP_COLOR_START);
+ writel(0x1, comp->regs + DISP_COLOR_START(color));
}
static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w,
@@ -236,6 +256,7 @@ static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = {
[MTK_DISP_PWM] = "pwm",
[MTK_DISP_MUTEX] = "mutex",
[MTK_DISP_OD] = "od",
+ [MTK_DISP_BLS] = "bls",
};
struct mtk_ddp_comp_match {
@@ -246,6 +267,7 @@ struct mtk_ddp_comp_match {
static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal },
+ [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL },
[DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color },
[DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color },
[DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL },
@@ -264,6 +286,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL },
};
+static const struct mtk_disp_color_data mt2701_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT2701,
+};
+
+static const struct mtk_disp_color_data mt8173_color_driver_data = {
+ .color_offset = DISP_COLOR_START_MT8173,
+};
+
+static const struct of_device_id mtk_disp_color_driver_dt_match[] = {
+ { .compatible = "mediatek,mt2701-disp-color",
+ .data = &mt2701_color_driver_data},
+ { .compatible = "mediatek,mt8173-disp-color",
+ .data = &mt8173_color_driver_data},
+ {},
+};
+
int mtk_ddp_comp_get_id(struct device_node *node,
enum mtk_ddp_comp_type comp_type)
{
@@ -286,14 +324,29 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
enum mtk_ddp_comp_type type;
struct device_node *larb_node;
struct platform_device *larb_pdev;
+ const struct of_device_id *match;
+ struct mtk_disp_color *color;
if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX)
return -EINVAL;
+ type = mtk_ddp_matches[comp_id].type;
+ if (type == MTK_DISP_COLOR) {
+ devm_kfree(dev, comp);
+ color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL);
+ if (!color)
+ return -ENOMEM;
+
+ match = of_match_node(mtk_disp_color_driver_dt_match, node);
+ color->data = match->data;
+ comp = &color->ddp_comp;
+ }
+
comp->id = comp_id;
comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs;
- if (comp_id == DDP_COMPONENT_DPI0 ||
+ if (comp_id == DDP_COMPONENT_BLS ||
+ comp_id == DDP_COMPONENT_DPI0 ||
comp_id == DDP_COMPONENT_DSI0 ||
comp_id == DDP_COMPONENT_PWM0) {
comp->regs = NULL;
@@ -308,8 +361,6 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
if (IS_ERR(comp->clk))
comp->clk = NULL;
- type = mtk_ddp_matches[comp_id].type;
-
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
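
Editor's note: the COLOR special case above re-allocates the component as a larger mtk_disp_color that embeds the base mtk_ddp_comp, and comp_to_color() walks back to the outer struct with container_of(). A minimal sketch of that embed-and-container_of idiom with hypothetical types:

#include <linux/kernel.h>

struct demo_base {
        int id;
};

struct demo_color {
        struct demo_base base;          /* must be embedded, not pointed to */
        unsigned int color_offset;
};

static inline struct demo_color *to_demo_color(struct demo_base *base)
{
        /* Walk back from the embedded member to the enclosing struct. */
        return container_of(base, struct demo_color, base);
}

static unsigned int demo_get_offset(struct demo_base *base)
{
        return to_demo_color(base)->color_offset;
}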
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 22a33ee451c4..0828cf8bf85c 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -36,11 +36,13 @@ enum mtk_ddp_comp_type {
MTK_DISP_PWM,
MTK_DISP_MUTEX,
MTK_DISP_OD,
+ MTK_DISP_BLS,
MTK_DDP_COMP_TYPE_MAX,
};
enum mtk_ddp_comp_id {
DDP_COMPONENT_AAL,
+ DDP_COMPONENT_BLS,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_DPI0,
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index f5a1fd9b3ecc..f6c8ec4c7dbc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -128,7 +128,20 @@ static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = {
.atomic_commit = mtk_atomic_commit,
};
-static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
+static const enum mtk_ddp_comp_id mt2701_mtk_ddp_main[] = {
+ DDP_COMPONENT_OVL0,
+ DDP_COMPONENT_RDMA0,
+ DDP_COMPONENT_COLOR0,
+ DDP_COMPONENT_BLS,
+ DDP_COMPONENT_DSI0,
+};
+
+static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
+ DDP_COMPONENT_RDMA1,
+ DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt8173_mtk_ddp_main[] = {
DDP_COMPONENT_OVL0,
DDP_COMPONENT_COLOR0,
DDP_COMPONENT_AAL,
@@ -139,7 +152,7 @@ static const enum mtk_ddp_comp_id mtk_ddp_main[] = {
DDP_COMPONENT_PWM0,
};
-static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
+static const enum mtk_ddp_comp_id mt8173_mtk_ddp_ext[] = {
DDP_COMPONENT_OVL1,
DDP_COMPONENT_COLOR1,
DDP_COMPONENT_GAMMA,
@@ -147,6 +160,21 @@ static const enum mtk_ddp_comp_id mtk_ddp_ext[] = {
DDP_COMPONENT_DPI0,
};
+static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
+ .main_path = mt2701_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt2701_mtk_ddp_main),
+ .ext_path = mt2701_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt2701_mtk_ddp_ext),
+ .shadow_register = true,
+};
+
+static const struct mtk_mmsys_driver_data mt8173_mmsys_driver_data = {
+ .main_path = mt8173_mtk_ddp_main,
+ .main_len = ARRAY_SIZE(mt8173_mtk_ddp_main),
+ .ext_path = mt8173_mtk_ddp_ext,
+ .ext_len = ARRAY_SIZE(mt8173_mtk_ddp_ext),
+};
+
static int mtk_drm_kms_init(struct drm_device *drm)
{
struct mtk_drm_private *private = drm->dev_private;
@@ -189,17 +217,19 @@ static int mtk_drm_kms_init(struct drm_device *drm)
* and each statically assigned to a crtc:
* OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0 ...
*/
- ret = mtk_drm_crtc_create(drm, mtk_ddp_main, ARRAY_SIZE(mtk_ddp_main));
+ ret = mtk_drm_crtc_create(drm, private->data->main_path,
+ private->data->main_len);
if (ret < 0)
goto err_component_unbind;
/* ... and OVL1 -> COLOR1 -> GAMMA -> RDMA1 -> DPI0. */
- ret = mtk_drm_crtc_create(drm, mtk_ddp_ext, ARRAY_SIZE(mtk_ddp_ext));
+ ret = mtk_drm_crtc_create(drm, private->data->ext_path,
+ private->data->ext_len);
if (ret < 0)
goto err_component_unbind;
/* Use OVL device for all DMA memory allocations */
- np = private->comp_node[mtk_ddp_main[0]] ?:
- private->comp_node[mtk_ddp_ext[0]];
+ np = private->comp_node[private->data->main_path[0]] ?:
+ private->comp_node[private->data->ext_path[0]];
pdev = of_find_device_by_node(np);
if (!pdev) {
ret = -ENODEV;
@@ -328,16 +358,22 @@ static const struct component_master_ops mtk_drm_ops = {
};
static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
+ { .compatible = "mediatek,mt2701-disp-ovl", .data = (void *)MTK_DISP_OVL },
{ .compatible = "mediatek,mt8173-disp-ovl", .data = (void *)MTK_DISP_OVL },
+ { .compatible = "mediatek,mt2701-disp-rdma", .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-rdma", .data = (void *)MTK_DISP_RDMA },
{ .compatible = "mediatek,mt8173-disp-wdma", .data = (void *)MTK_DISP_WDMA },
+ { .compatible = "mediatek,mt2701-disp-color", .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-color", .data = (void *)MTK_DISP_COLOR },
{ .compatible = "mediatek,mt8173-disp-aal", .data = (void *)MTK_DISP_AAL},
{ .compatible = "mediatek,mt8173-disp-gamma", .data = (void *)MTK_DISP_GAMMA, },
{ .compatible = "mediatek,mt8173-disp-ufoe", .data = (void *)MTK_DISP_UFOE },
+ { .compatible = "mediatek,mt2701-dsi", .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dsi", .data = (void *)MTK_DSI },
{ .compatible = "mediatek,mt8173-dpi", .data = (void *)MTK_DPI },
+ { .compatible = "mediatek,mt2701-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
{ .compatible = "mediatek,mt8173-disp-mutex", .data = (void *)MTK_DISP_MUTEX },
+ { .compatible = "mediatek,mt2701-disp-pwm", .data = (void *)MTK_DISP_BLS },
{ .compatible = "mediatek,mt8173-disp-pwm", .data = (void *)MTK_DISP_PWM },
{ .compatible = "mediatek,mt8173-disp-od", .data = (void *)MTK_DISP_OD },
{ }
@@ -359,6 +395,7 @@ static int mtk_drm_probe(struct platform_device *pdev)
mutex_init(&private->commit.lock);
INIT_WORK(&private->commit.work, mtk_atomic_work);
+ private->data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
private->config_regs = devm_ioremap_resource(dev, mem);
@@ -510,7 +547,10 @@ static SIMPLE_DEV_PM_OPS(mtk_drm_pm_ops, mtk_drm_sys_suspend,
mtk_drm_sys_resume);
static const struct of_device_id mtk_drm_of_ids[] = {
- { .compatible = "mediatek,mt8173-mmsys", },
+ { .compatible = "mediatek,mt2701-mmsys",
+ .data = &mt2701_mmsys_driver_data},
+ { .compatible = "mediatek,mt8173-mmsys",
+ .data = &mt8173_mmsys_driver_data},
{ }
};
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
index df322a7a5fcb..aef8747d810b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h
@@ -28,6 +28,14 @@ struct drm_fb_helper;
struct drm_property;
struct regmap;
+struct mtk_mmsys_driver_data {
+ const enum mtk_ddp_comp_id *main_path;
+ unsigned int main_len;
+ const enum mtk_ddp_comp_id *ext_path;
+ unsigned int ext_len;
+ bool shadow_register;
+};
+
struct mtk_drm_private {
struct drm_device *drm;
struct device *dma_dev;
@@ -39,6 +47,7 @@ struct mtk_drm_private {
void __iomem *config_regs;
struct device_node *comp_node[DDP_COMPONENT_ID_MAX];
struct mtk_ddp_comp *ddp_comp[DDP_COMPONENT_ID_MAX];
+ const struct mtk_mmsys_driver_data *data;
struct {
struct drm_atomic_state *state;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 4d837b29ee9e..808b995a990f 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -19,19 +19,28 @@
#include <drm/drm_of.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <video/mipi_display.h>
#include <video/videomode.h>
#include "mtk_drm_ddp_comp.h"
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
#define DSI_START 0x00
+#define DSI_INTEN 0x08
+
+#define DSI_INTSTA 0x0c
+#define LPRX_RD_RDY_INT_FLAG BIT(0)
+#define CMD_DONE_INT_FLAG BIT(1)
+#define TE_RDY_INT_FLAG BIT(2)
+#define VM_DONE_INT_FLAG BIT(3)
+#define EXT_TE_RDY_INT_FLAG BIT(4)
+#define DSI_BUSY BIT(31)
+
#define DSI_CON_CTRL 0x10
#define DSI_RESET BIT(0)
#define DSI_EN BIT(1)
@@ -46,7 +55,7 @@
#define MIX_MODE BIT(17)
#define DSI_TXRX_CTRL 0x18
-#define VC_NUM (2 << 0)
+#define VC_NUM BIT(1)
#define LANE_NUM (0xf << 2)
#define DIS_EOT BIT(6)
#define NULL_EN BIT(7)
@@ -72,8 +81,19 @@
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
+#define DSI_CMDQ_SIZE 0x60
+#define CMDQ_SIZE 0x3f
+
#define DSI_HSTX_CKL_WC 0x64
+#define DSI_RX_DATA0 0x74
+#define DSI_RX_DATA1 0x78
+#define DSI_RX_DATA2 0x7c
+#define DSI_RX_DATA3 0x80
+
+#define DSI_RACK 0x84
+#define RACK BIT(0)
+
#define DSI_PHY_LCCON 0x104
#define LC_HS_TX_EN BIT(0)
#define LC_ULPM_EN BIT(1)
@@ -106,6 +126,19 @@
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
+#define DSI_VM_CMD_CON 0x130
+#define VM_CMD_EN BIT(0)
+#define TS_VFP_EN BIT(5)
+
+#define DSI_CMDQ0 0x180
+#define CONFIG (0xff << 0)
+#define SHORT_PACKET 0
+#define LONG_PACKET 2
+#define BTA BIT(2)
+#define DATA_ID (0xff << 8)
+#define DATA_0 (0xff << 16)
+#define DATA_1 (0xff << 24)
+
#define T_LPX 5
#define T_HS_PREP 6
#define T_HS_TRAIL 8
@@ -114,6 +147,12 @@
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
+#define MTK_DSI_HOST_IS_READ(type) \
+ ((type == MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM) || \
+ (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \
+ (type == MIPI_DSI_DCS_READ))
+
struct phy;
struct mtk_dsi {
@@ -140,6 +179,8 @@ struct mtk_dsi {
struct videomode vm;
int refcount;
bool enabled;
+ u32 irq_data;
+ wait_queue_head_t irq_wait_queue;
};
static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e)
@@ -164,7 +205,7 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data)
writel((temp & ~mask) | (data & mask), dsi->regs + offset);
}
-static void dsi_phy_timconfig(struct mtk_dsi *dsi)
+static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
u32 ui, cycle_time;
@@ -196,118 +237,39 @@ static void mtk_dsi_disable(struct mtk_dsi *dsi)
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_EN, 0);
}
-static void mtk_dsi_reset(struct mtk_dsi *dsi)
+static void mtk_dsi_reset_engine(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, DSI_RESET);
mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0);
}
-static int mtk_dsi_poweron(struct mtk_dsi *dsi)
-{
- struct device *dev = dsi->dev;
- int ret;
- u64 pixel_clock, total_bits;
- u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
-
- if (++dsi->refcount != 1)
- return 0;
-
- switch (dsi->format) {
- case MIPI_DSI_FMT_RGB565:
- bit_per_pixel = 16;
- break;
- case MIPI_DSI_FMT_RGB666_PACKED:
- bit_per_pixel = 18;
- break;
- case MIPI_DSI_FMT_RGB666:
- case MIPI_DSI_FMT_RGB888:
- default:
- bit_per_pixel = 24;
- break;
- }
-
- /**
- * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
- * htotal_time = htotal * byte_per_pixel / num_lanes
- * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
- * mipi_ratio = (htotal_time + overhead_time) / htotal_time
- * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
- */
- pixel_clock = dsi->vm.pixelclock * 1000;
- htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
- dsi->vm.hsync_len;
- htotal_bits = htotal * bit_per_pixel;
-
- overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
- T_HS_EXIT;
- overhead_bits = overhead_cycles * dsi->lanes * 8;
- total_bits = htotal_bits + overhead_bits;
-
- dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
- htotal * dsi->lanes);
-
- ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
- if (ret < 0) {
- dev_err(dev, "Failed to set data rate: %d\n", ret);
- goto err_refcount;
- }
-
- phy_power_on(dsi->phy);
-
- ret = clk_prepare_enable(dsi->engine_clk);
- if (ret < 0) {
- dev_err(dev, "Failed to enable engine clock: %d\n", ret);
- goto err_phy_power_off;
- }
-
- ret = clk_prepare_enable(dsi->digital_clk);
- if (ret < 0) {
- dev_err(dev, "Failed to enable digital clock: %d\n", ret);
- goto err_disable_engine_clk;
- }
-
- mtk_dsi_enable(dsi);
- mtk_dsi_reset(dsi);
- dsi_phy_timconfig(dsi);
-
- return 0;
-
-err_disable_engine_clk:
- clk_disable_unprepare(dsi->engine_clk);
-err_phy_power_off:
- phy_power_off(dsi->phy);
-err_refcount:
- dsi->refcount--;
- return ret;
-}
-
-static void dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
+static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
}
-static void dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
+static void mtk_dsi_clk_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, LC_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_WAKEUP_EN, 0);
}
-static void dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
+static void mtk_dsi_lane0_ulp_mode_enter(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_HS_TX_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
}
-static void dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
+static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
{
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_ULPM_EN, 0);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, LD0_WAKEUP_EN);
mtk_dsi_mask(dsi, DSI_PHY_LD0CON, LD0_WAKEUP_EN, 0);
}
-static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
+static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
u32 tmp_reg1;
@@ -315,30 +277,37 @@ static bool dsi_clk_hs_state(struct mtk_dsi *dsi)
return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
}
-static void dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
+static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
{
- if (enter && !dsi_clk_hs_state(dsi))
+ if (enter && !mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, LC_HS_TX_EN);
- else if (!enter && dsi_clk_hs_state(dsi))
+ else if (!enter && mtk_dsi_clk_hs_state(dsi))
mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0);
}
-static void dsi_set_mode(struct mtk_dsi *dsi)
+static void mtk_dsi_set_mode(struct mtk_dsi *dsi)
{
u32 vid_mode = CMD_MODE;
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
- vid_mode = SYNC_PULSE_MODE;
-
- if ((dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) &&
- !(dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE))
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
vid_mode = BURST_MODE;
+ else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
+ vid_mode = SYNC_PULSE_MODE;
+ else
+ vid_mode = SYNC_EVENT_MODE;
}
writel(vid_mode, dsi->regs + DSI_MODE_CTRL);
}
-static void dsi_ps_control_vact(struct mtk_dsi *dsi)
+static void mtk_dsi_set_vm_cmd(struct mtk_dsi *dsi)
+{
+ mtk_dsi_mask(dsi, DSI_VM_CMD_CON, VM_CMD_EN, VM_CMD_EN);
+ mtk_dsi_mask(dsi, DSI_VM_CMD_CON, TS_VFP_EN, TS_VFP_EN);
+}
+
+static void mtk_dsi_ps_control_vact(struct mtk_dsi *dsi)
{
struct videomode *vm = &dsi->vm;
u32 dsi_buf_bpp, ps_wc;
@@ -372,7 +341,7 @@ static void dsi_ps_control_vact(struct mtk_dsi *dsi)
writel(ps_wc, dsi->regs + DSI_HSTX_CKL_WC);
}
-static void dsi_rxtx_control(struct mtk_dsi *dsi)
+static void mtk_dsi_rxtx_control(struct mtk_dsi *dsi)
{
u32 tmp_reg;
@@ -394,12 +363,15 @@ static void dsi_rxtx_control(struct mtk_dsi *dsi)
break;
}
+ tmp_reg |= (dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) << 6;
+ tmp_reg |= (dsi->mode_flags & MIPI_DSI_MODE_EOT_PACKET) >> 3;
+
writel(tmp_reg, dsi->regs + DSI_TXRX_CTRL);
}
-static void dsi_ps_control(struct mtk_dsi *dsi)
+static void mtk_dsi_ps_control(struct mtk_dsi *dsi)
{
- unsigned int dsi_tmp_buf_bpp;
+ u32 dsi_tmp_buf_bpp;
u32 tmp_reg;
switch (dsi->format) {
@@ -429,12 +401,12 @@ static void dsi_ps_control(struct mtk_dsi *dsi)
writel(tmp_reg, dsi->regs + DSI_PSCTRL);
}
-static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
+static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
{
- unsigned int horizontal_sync_active_byte;
- unsigned int horizontal_backporch_byte;
- unsigned int horizontal_frontporch_byte;
- unsigned int dsi_tmp_buf_bpp;
+ u32 horizontal_sync_active_byte;
+ u32 horizontal_backporch_byte;
+ u32 horizontal_frontporch_byte;
+ u32 dsi_tmp_buf_bpp;
struct videomode *vm = &dsi->vm;
@@ -463,7 +435,7 @@ static void dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
- dsi_ps_control(dsi);
+ mtk_dsi_ps_control(dsi);
}
static void mtk_dsi_start(struct mtk_dsi *dsi)
@@ -472,6 +444,184 @@ static void mtk_dsi_start(struct mtk_dsi *dsi)
writel(1, dsi->regs + DSI_START);
}
+static void mtk_dsi_stop(struct mtk_dsi *dsi)
+{
+ writel(0, dsi->regs + DSI_START);
+}
+
+static void mtk_dsi_set_cmd_mode(struct mtk_dsi *dsi)
+{
+ writel(CMD_MODE, dsi->regs + DSI_MODE_CTRL);
+}
+
+static void mtk_dsi_set_interrupt_enable(struct mtk_dsi *dsi)
+{
+ u32 inten = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
+
+ writel(inten, dsi->regs + DSI_INTEN);
+}
+
+static void mtk_dsi_irq_data_set(struct mtk_dsi *dsi, u32 irq_bit)
+{
+ dsi->irq_data |= irq_bit;
+}
+
+static void mtk_dsi_irq_data_clear(struct mtk_dsi *dsi, u32 irq_bit)
+{
+ dsi->irq_data &= ~irq_bit;
+}
+
+static s32 mtk_dsi_wait_for_irq_done(struct mtk_dsi *dsi, u32 irq_flag,
+ unsigned int timeout)
+{
+ s32 ret = 0;
+ unsigned long jiffies = msecs_to_jiffies(timeout);
+
+ ret = wait_event_interruptible_timeout(dsi->irq_wait_queue,
+ dsi->irq_data & irq_flag,
+ jiffies);
+ if (ret == 0) {
+ DRM_WARN("Wait DSI IRQ(0x%08x) Timeout\n", irq_flag);
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ }
+
+ return ret;
+}
+
+static irqreturn_t mtk_dsi_irq(int irq, void *dev_id)
+{
+ struct mtk_dsi *dsi = dev_id;
+ u32 status, tmp;
+ u32 flag = LPRX_RD_RDY_INT_FLAG | CMD_DONE_INT_FLAG | VM_DONE_INT_FLAG;
+
+ status = readl(dsi->regs + DSI_INTSTA) & flag;
+
+ if (status) {
+ do {
+ mtk_dsi_mask(dsi, DSI_RACK, RACK, RACK);
+ tmp = readl(dsi->regs + DSI_INTSTA);
+ } while (tmp & DSI_BUSY);
+
+ mtk_dsi_mask(dsi, DSI_INTSTA, status, 0);
+ mtk_dsi_irq_data_set(dsi, status);
+ wake_up_interruptible(&dsi->irq_wait_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t)
+{
+ mtk_dsi_irq_data_clear(dsi, irq_flag);
+ mtk_dsi_set_cmd_mode(dsi);
+
+ if (!mtk_dsi_wait_for_irq_done(dsi, irq_flag, t)) {
+ DRM_ERROR("failed to switch cmd mode\n");
+ return -ETIME;
+ } else {
+ return 0;
+ }
+}
+
+static int mtk_dsi_poweron(struct mtk_dsi *dsi)
+{
+ struct device *dev = dsi->dev;
+ int ret;
+ u64 pixel_clock, total_bits;
+ u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
+
+ if (++dsi->refcount != 1)
+ return 0;
+
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ bit_per_pixel = 16;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ bit_per_pixel = 18;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ bit_per_pixel = 24;
+ break;
+ }
+
+ /**
+ * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+ * htotal_time = htotal * byte_per_pixel / num_lanes
+ * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+ * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+ * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
+ */
+ pixel_clock = dsi->vm.pixelclock * 1000;
+ htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+ dsi->vm.hsync_len;
+ htotal_bits = htotal * bit_per_pixel;
+
+ overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+ T_HS_EXIT;
+ overhead_bits = overhead_cycles * dsi->lanes * 8;
+ total_bits = htotal_bits + overhead_bits;
+
+ dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+ htotal * dsi->lanes);
+
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set data rate: %d\n", ret);
+ goto err_refcount;
+ }
+
+ phy_power_on(dsi->phy);
+
+ ret = clk_prepare_enable(dsi->engine_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable engine clock: %d\n", ret);
+ goto err_phy_power_off;
+ }
+
+ ret = clk_prepare_enable(dsi->digital_clk);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable digital clock: %d\n", ret);
+ goto err_disable_engine_clk;
+ }
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_phy_timconfig(dsi);
+
+ mtk_dsi_rxtx_control(dsi);
+ mtk_dsi_ps_control_vact(dsi);
+ mtk_dsi_set_vm_cmd(dsi);
+ mtk_dsi_config_vdo_timing(dsi);
+ mtk_dsi_set_interrupt_enable(dsi);
+
+ mtk_dsi_clk_ulp_mode_leave(dsi);
+ mtk_dsi_lane0_ulp_mode_leave(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 0);
+
+ if (dsi->panel) {
+ if (drm_panel_prepare(dsi->panel)) {
+ DRM_ERROR("failed to prepare the panel\n");
+ goto err_disable_digital_clk;
+ }
+ }
+
+ return 0;
+err_disable_digital_clk:
+ clk_disable_unprepare(dsi->digital_clk);
+err_disable_engine_clk:
+ clk_disable_unprepare(dsi->engine_clk);
+err_phy_power_off:
+ phy_power_off(dsi->phy);
+err_refcount:
+ dsi->refcount--;
+ return ret;
+}
+
static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
{
if (WARN_ON(dsi->refcount == 0))
@@ -480,8 +630,18 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
- dsi_lane0_ulp_mode_enter(dsi);
- dsi_clk_ulp_mode_enter(dsi);
+ if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
+ if (dsi->panel) {
+ if (drm_panel_unprepare(dsi->panel)) {
+ DRM_ERROR("failed to unprepare the panel\n");
+ return;
+ }
+ }
+ }
+
+ mtk_dsi_reset_engine(dsi);
+ mtk_dsi_lane0_ulp_mode_enter(dsi);
+ mtk_dsi_clk_ulp_mode_enter(dsi);
mtk_dsi_disable(dsi);
@@ -498,35 +658,30 @@ static void mtk_output_dsi_enable(struct mtk_dsi *dsi)
if (dsi->enabled)
return;
- if (dsi->panel) {
- if (drm_panel_prepare(dsi->panel)) {
- DRM_ERROR("failed to setup the panel\n");
- return;
- }
- }
-
ret = mtk_dsi_poweron(dsi);
if (ret < 0) {
DRM_ERROR("failed to power on dsi\n");
return;
}
- dsi_rxtx_control(dsi);
-
- dsi_clk_ulp_mode_leave(dsi);
- dsi_lane0_ulp_mode_leave(dsi);
- dsi_clk_hs_mode(dsi, 0);
- dsi_set_mode(dsi);
-
- dsi_ps_control_vact(dsi);
- dsi_config_vdo_timing(dsi);
-
- dsi_set_mode(dsi);
- dsi_clk_hs_mode(dsi, 1);
+ mtk_dsi_set_mode(dsi);
+ mtk_dsi_clk_hs_mode(dsi, 1);
mtk_dsi_start(dsi);
+ if (dsi->panel) {
+ if (drm_panel_enable(dsi->panel)) {
+ DRM_ERROR("failed to enable the panel\n");
+ goto err_dsi_power_off;
+ }
+ }
+
dsi->enabled = true;
+
+ return;
+err_dsi_power_off:
+ mtk_dsi_stop(dsi);
+ mtk_dsi_poweroff(dsi);
}
static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
@@ -541,6 +696,7 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
}
+ mtk_dsi_stop(dsi);
mtk_dsi_poweroff(dsi);
dsi->enabled = false;
@@ -742,9 +898,149 @@ static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
return 0;
}
+static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
+{
+ u32 timeout_ms = 500000; /* total 1s ~ 2s timeout */
+
+ while (timeout_ms--) {
+ if (!(readl(dsi->regs + DSI_INTSTA) & DSI_BUSY))
+ break;
+
+ usleep_range(2, 4);
+ }
+
+ if (timeout_ms == 0) {
+ DRM_WARN("polling dsi wait not busy timeout!\n");
+
+ mtk_dsi_enable(dsi);
+ mtk_dsi_reset_engine(dsi);
+ }
+}
+
+static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data)
+{
+ switch (type) {
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+ return 1;
+ case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE:
+ case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+ return 2;
+ case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+ case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+ return read_data[1] + read_data[2] * 16;
+ case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+ DRM_INFO("type is 0x02, try again\n");
+ break;
+ default:
+ DRM_INFO("type(0x%x) cannot be non-recognite\n", type);
+ break;
+ }
+
+ return 0;
+}
+
+static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg)
+{
+ const char *tx_buf = msg->tx_buf;
+ u8 config, cmdq_size, cmdq_off, type = msg->type;
+ u32 reg_val, cmdq_mask, i;
+
+ if (MTK_DSI_HOST_IS_READ(type))
+ config = BTA;
+ else
+ config = (msg->tx_len > 2) ? LONG_PACKET : SHORT_PACKET;
+
+ if (msg->tx_len > 2) {
+ cmdq_size = 1 + (msg->tx_len + 3) / 4;
+ cmdq_off = 4;
+ cmdq_mask = CONFIG | DATA_ID | DATA_0 | DATA_1;
+ reg_val = (msg->tx_len << 16) | (type << 8) | config;
+ } else {
+ cmdq_size = 1;
+ cmdq_off = 2;
+ cmdq_mask = CONFIG | DATA_ID;
+ reg_val = (type << 8) | config;
+ }
+
+ for (i = 0; i < msg->tx_len; i++)
+ writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i);
+
+ mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val);
+ mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size);
+}
+
+static ssize_t mtk_dsi_host_send_cmd(struct mtk_dsi *dsi,
+ const struct mipi_dsi_msg *msg, u8 flag)
+{
+ mtk_dsi_wait_for_idle(dsi);
+ mtk_dsi_irq_data_clear(dsi, flag);
+ mtk_dsi_cmdq(dsi, msg);
+ mtk_dsi_start(dsi);
+
+ if (!mtk_dsi_wait_for_irq_done(dsi, flag, 2000))
+ return -ETIME;
+ else
+ return 0;
+}
+
+static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+ u32 recv_cnt, i;
+ u8 read_data[16];
+ void *src_addr;
+ u8 irq_flag = CMD_DONE_INT_FLAG;
+
+ if (readl(dsi->regs + DSI_MODE_CTRL) & MODE) {
+ DRM_ERROR("dsi engine is not command mode\n");
+ return -EINVAL;
+ }
+
+ if (MTK_DSI_HOST_IS_READ(msg->type))
+ irq_flag |= LPRX_RD_RDY_INT_FLAG;
+
+ if (mtk_dsi_host_send_cmd(dsi, msg, irq_flag) < 0)
+ return -ETIME;
+
+ if (!MTK_DSI_HOST_IS_READ(msg->type))
+ return 0;
+
+ if (!msg->rx_buf) {
+ DRM_ERROR("dsi receive buffer size may be NULL\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 16; i++)
+ *(read_data + i) = readb(dsi->regs + DSI_RX_DATA0 + i);
+
+ recv_cnt = mtk_dsi_recv_cnt(read_data[0], read_data);
+
+ if (recv_cnt > 2)
+ src_addr = &read_data[4];
+ else
+ src_addr = &read_data[1];
+
+ if (recv_cnt > 10)
+ recv_cnt = 10;
+
+ if (recv_cnt > msg->rx_len)
+ recv_cnt = msg->rx_len;
+
+ if (recv_cnt)
+ memcpy(msg->rx_buf, src_addr, recv_cnt);
+
+ DRM_INFO("dsi get %d byte data from the panel address(0x%x)\n",
+ recv_cnt, *((u8 *)(msg->tx_buf)));
+
+ return recv_cnt;
+}
+
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
.detach = mtk_dsi_host_detach,
+ .transfer = mtk_dsi_host_transfer,
};
static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
@@ -802,6 +1098,7 @@ static int mtk_dsi_probe(struct platform_device *pdev)
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
struct resource *regs;
+ int irq_num;
int comp_id;
int ret;
@@ -866,6 +1163,22 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}
+ irq_num = platform_get_irq(pdev, 0);
+ if (irq_num < 0) {
+ dev_err(&pdev->dev, "failed to get dsi irq resource\n");
+ return -EPROBE_DEFER;
+ }
+
+ irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW);
+ ret = devm_request_irq(&pdev->dev, irq_num, mtk_dsi_irq,
+ IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request mediatek dsi irq\n");
+ return -EPROBE_DEFER;
+ }
+
+ init_waitqueue_head(&dsi->irq_wait_queue);
+
platform_set_drvdata(pdev, dsi);
return component_add(&pdev->dev, &mtk_dsi_component_ops);
@@ -882,6 +1195,7 @@ static int mtk_dsi_remove(struct platform_device *pdev)
}
static const struct of_device_id mtk_dsi_of_match[] = {
+ { .compatible = "mediatek,mt2701-dsi" },
{ .compatible = "mediatek,mt8173-dsi" },
{ },
};
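For reference, a hedged sketch of how a panel driver would end up exercising the new .transfer hook through the standard drm_mipi_dsi helpers. The panel function below is hypothetical; only the mipi_dsi_dcs_* helpers and MIPI_DCS_* constants are the stock ones from include/drm/drm_mipi_dsi.h and include/video/mipi_display.h.

#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

/* Hypothetical panel-side caller of mtk_dsi_host_transfer() above. */
static int example_panel_init(struct mipi_dsi_device *dsi)
{
	u8 power_mode;
	ssize_t ret;

	/* Short DCS write: packed as a SHORT_PACKET by mtk_dsi_cmdq(). */
	ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
	if (ret < 0)
		return ret;

	/*
	 * DCS read: MTK_DSI_HOST_IS_READ() is true, so the host sends a BTA,
	 * waits for LPRX_RD_RDY_INT_FLAG and copies at most 10 bytes back
	 * from DSI_RX_DATA0 into the rx buffer.
	 */
	ret = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE,
				&power_mode, sizeof(power_mode));
	return ret < 0 ? ret : 0;
}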
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index 1c366f8cb2d0..90e913108950 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
@@ -87,6 +88,9 @@
#define MIPITX_DSI_PLL_CON2 0x58
+#define MIPITX_DSI_PLL_TOP 0x64
+#define RG_DSI_MPPLL_PRESERVE (0xff << 8)
+
#define MIPITX_DSI_PLL_PWR 0x68
#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0)
#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1)
@@ -123,10 +127,15 @@
#define SW_LNT2_HSTX_PRE_OE BIT(24)
#define SW_LNT2_HSTX_OE BIT(25)
+struct mtk_mipitx_data {
+ const u32 mppll_preserve;
+};
+
struct mtk_mipi_tx {
struct device *dev;
void __iomem *regs;
- unsigned int data_rate;
+ u32 data_rate;
+ const struct mtk_mipitx_data *driver_data;
struct clk_hw pll_hw;
struct clk *pll;
};
@@ -163,7 +172,7 @@ static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset,
static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
{
struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw);
- unsigned int txdiv, txdiv0, txdiv1;
+ u8 txdiv, txdiv0, txdiv1;
u64 pcw;
dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate);
@@ -243,6 +252,10 @@ static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1,
RG_DSI_MPPLL_SDM_SSC_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE,
+ mipi_tx->driver_data->mppll_preserve);
+
return 0;
}
@@ -255,6 +268,9 @@ static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw)
mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0,
RG_DSI_MPPLL_PLL_EN);
+ mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP,
+ RG_DSI_MPPLL_PRESERVE, 0);
+
mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR,
RG_DSI_MPPLL_SDM_ISO_EN |
RG_DSI_MPPLL_SDM_PWR_ON,
@@ -310,7 +326,7 @@ static const struct clk_ops mtk_mipi_tx_pll_ops = {
static int mtk_mipi_tx_power_on_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- unsigned int reg;
+ u32 reg;
for (reg = MIPITX_DSI_CLOCK_LANE;
reg <= MIPITX_DSI_DATA_LANE3; reg += 4)
@@ -341,7 +357,7 @@ static int mtk_mipi_tx_power_on(struct phy *phy)
static void mtk_mipi_tx_power_off_signal(struct phy *phy)
{
struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy);
- unsigned int reg;
+ u32 reg;
mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON,
RG_DSI_PAD_TIE_LOW_EN);
@@ -391,6 +407,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
if (!mipi_tx)
return -ENOMEM;
+ mipi_tx->driver_data = of_device_get_match_data(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mipi_tx->regs = devm_ioremap_resource(dev, mem);
if (IS_ERR(mipi_tx->regs)) {
@@ -448,8 +465,19 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev)
return 0;
}
+static const struct mtk_mipitx_data mt2701_mipitx_data = {
+ .mppll_preserve = (3 << 8)
+};
+
+static const struct mtk_mipitx_data mt8173_mipitx_data = {
+ .mppll_preserve = (0 << 8)
+};
+
static const struct of_device_id mtk_mipi_tx_match[] = {
- { .compatible = "mediatek,mt8173-mipi-tx", },
+ { .compatible = "mediatek,mt2701-mipi-tx",
+ .data = &mt2701_mipitx_data },
+ { .compatible = "mediatek,mt8173-mipi-tx",
+ .data = &mt8173_mipitx_data },
{},
};
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 657598bb1e6b..565a217b46f2 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -236,6 +236,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
.verify_access = mgag200_bo_verify_access,
.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
.io_mem_free = &mgag200_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int mgag200_mm_init(struct mga_device *mdev)
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 39055362da95..5241ac8803ba 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -40,6 +40,7 @@ msm-y := \
mdp/mdp5/mdp5_mdss.o \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_pipe.o \
+ mdp/mdp5/mdp5_mixer.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
msm_atomic.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index b999349b7d2d..7fd77958a436 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -412,10 +412,8 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
#endif
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 511bc855cc7f..dfe0eceaae3b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -456,12 +456,8 @@ static const unsigned int a4xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
-
adreno_show(gpu, m);
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 4414cf73735d..31a9bceed32c 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -534,7 +534,7 @@ static void a5xx_destroy(struct msm_gpu *gpu)
}
if (a5xx_gpu->gpmu_bo) {
- if (a5xx_gpu->gpmu_bo)
+ if (a5xx_gpu->gpmu_iova)
msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->id);
drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
}
@@ -638,10 +638,8 @@ static void a5xx_cp_err_irq(struct msm_gpu *gpu)
}
}
-static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
{
- u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
-
if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
@@ -653,6 +651,10 @@ static void a5xx_rbbm_err_irq(struct msm_gpu *gpu)
/* Clear the error */
gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
}
if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
@@ -704,10 +706,16 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
- gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD, status);
+ /*
+ * Clear all the interrupts except RBBM_AHB_ERROR - if we clear it
+ * before the source is cleared the interrupt will storm.
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ /* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
- a5xx_rbbm_err_irq(gpu);
+ a5xx_rbbm_err_irq(gpu, status);
if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
a5xx_cp_err_irq(gpu);
@@ -837,12 +845,8 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
#ifdef CONFIG_DEBUG_FS
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A5XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
-
adreno_show(gpu, m);
}
#endif
@@ -860,7 +864,9 @@ static const struct adreno_gpu_funcs funcs = {
.idle = a5xx_idle,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
+#endif
},
.get_timestamp = a5xx_get_timestamp,
};
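A hedged, generic sketch of the ack ordering the hunk above is after: for an error interrupt whose source stays asserted, clear the source before acking the status bit, otherwise the ack re-latches immediately and the interrupt storms. All register offsets and bit names below are invented for illustration.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irqreturn.h>

#define EX_INT_STATUS	0x00	/* hypothetical registers */
#define EX_INT_CLEAR	0x04
#define EX_ERR_SOURCE	0x08
#define EX_INT_ERROR	BIT(3)

static irqreturn_t example_irq(void __iomem *base)
{
	u32 status = readl(base + EX_INT_STATUS);

	/* ack everything except the error bit up front */
	writel(status & ~EX_INT_ERROR, base + EX_INT_CLEAR);

	if (status & EX_INT_ERROR) {
		/* quiesce the source first ... */
		writel(0, base + EX_ERR_SOURCE);
		/* ... and only then ack the latched status bit */
		writel(EX_INT_ERROR, base + EX_INT_CLEAR);
	}

	return IRQ_HANDLED;
}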
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index ece39b16a864..c0fa5d1c75ff 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,7 +2,7 @@
* Copyright (C) 2013-2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -17,6 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/pm_opp.h>
#include "adreno_gpu.h"
#define ANY_ID 0xff
@@ -155,21 +156,14 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
- mutex_lock(&dev->struct_mutex);
- gpu->funcs->pm_resume(gpu);
- mutex_unlock(&dev->struct_mutex);
- disable_irq(gpu->irq);
-
- ret = gpu->funcs->hw_init(gpu);
+ pm_runtime_get_sync(&pdev->dev);
+ ret = msm_gpu_hw_init(gpu);
+ pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
- } else {
- enable_irq(gpu->irq);
- /* give inactive pm a chance to kick in: */
- msm_gpu_retire(gpu);
}
}
@@ -220,10 +214,71 @@ static int find_chipid(struct device *dev, u32 *chipid)
return 0;
}
+/* Get legacy powerlevels from qcom,gpu-pwrlevels and populate the opp table */
+static int adreno_get_legacy_pwrlevels(struct device *dev)
+{
+ struct device_node *child, *node;
+ int ret;
+
+ node = of_find_compatible_node(dev->of_node, NULL,
+ "qcom,gpu-pwrlevels");
+ if (!node) {
+ dev_err(dev, "Could not find the GPU powerlevels\n");
+ return -ENXIO;
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int val;
+
+ ret = of_property_read_u32(child, "qcom,gpu-freq", &val);
+ if (ret)
+ continue;
+
+ /*
+ * Skip the intentionally bogus clock value found at the bottom
+ * of most legacy frequency tables
+ */
+ if (val != 27000000)
+ dev_pm_opp_add(dev, val, 0);
+ }
+
+ return 0;
+}
+
+static int adreno_get_pwrlevels(struct device *dev,
+ struct adreno_platform_config *config)
+{
+ unsigned long freq = ULONG_MAX;
+ struct dev_pm_opp *opp;
+ int ret;
+
+ /* You down with OPP? */
+ if (!of_find_property(dev->of_node, "operating-points-v2", NULL))
+ ret = adreno_get_legacy_pwrlevels(dev);
+ else
+ ret = dev_pm_opp_of_add_table(dev);
+
+ if (ret)
+ return ret;
+
+ /* Find the fastest defined rate */
+ opp = dev_pm_opp_find_freq_floor(dev, &freq);
+ if (!IS_ERR(opp))
+ config->fast_rate = dev_pm_opp_get_freq(opp);
+
+ if (!config->fast_rate) {
+ DRM_DEV_INFO(dev,
+ "Could not find clock rate. Using default\n");
+ /* Pick a suitably safe clock speed for any target */
+ config->fast_rate = 200000000;
+ }
+
+ return 0;
+}
+
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
- struct device_node *child, *node = dev->of_node;
u32 val;
int ret;
@@ -238,28 +293,10 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
/* find clock rates: */
config.fast_rate = 0;
- config.slow_rate = ~0;
- for_each_child_of_node(node, child) {
- if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
- struct device_node *pwrlvl;
- for_each_child_of_node(child, pwrlvl) {
- ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
- if (ret) {
- dev_err(dev, "could not find gpu-freq: %d\n", ret);
- return ret;
- }
- config.fast_rate = max(config.fast_rate, val);
- config.slow_rate = min(config.slow_rate, val);
- }
- }
- }
- if (!config.fast_rate) {
- dev_warn(dev, "could not find clk rates\n");
- /* This is a safe low speed for all devices: */
- config.fast_rate = 200000000;
- config.slow_rate = 27000000;
- }
+ ret = adreno_get_pwrlevels(dev, &config);
+ if (ret)
+ return ret;
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
@@ -296,12 +333,35 @@ static const struct of_device_id dt_match[] = {
{}
};
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
+
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
},
};
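A hedged sketch of the OPP calls the patch now leans on; the function and the two frequencies below are invented, while the dev_pm_opp_* helpers are the stock linux/pm_opp.h ones used by adreno_get_legacy_pwrlevels() and adreno_get_pwrlevels() above.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

static unsigned long example_fastest_rate(struct device *dev)
{
	unsigned long freq = ULONG_MAX;
	struct dev_pm_opp *opp;

	/* roughly what adreno_get_legacy_pwrlevels() does per DT child */
	dev_pm_opp_add(dev, 200000000, 0);
	dev_pm_opp_add(dev, 450000000, 0);

	/* and what adreno_get_pwrlevels() does to pick config->fast_rate */
	opp = dev_pm_opp_find_freq_floor(dev, &freq);
	if (IS_ERR(opp))
		return 0;

	return dev_pm_opp_get_freq(opp);
}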
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index c9bd1e6225f4..5b63fc649dcc 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = 0x100000;
+ return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
@@ -68,6 +71,14 @@ int adreno_hw_init(struct msm_gpu *gpu)
return ret;
}
+ /* reset ringbuffer: */
+ gpu->rb->cur = gpu->rb->start;
+
+ /* reset completed fence seqno: */
+ adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
+ adreno_gpu->memptrs->rptr = 0;
+ adreno_gpu->memptrs->wptr = 0;
+
/* Setup REG_CP_RB_CNTL: */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
/* size is log2(quad-words): */
@@ -111,29 +122,20 @@ uint32_t adreno_last_fence(struct msm_gpu *gpu)
void adreno_recover(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
int ret;
- gpu->funcs->pm_suspend(gpu);
-
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
-
- /* reset completed fence seqno: */
- adreno_gpu->memptrs->fence = gpu->fctx->completed_fence;
- adreno_gpu->memptrs->rptr = 0;
- adreno_gpu->memptrs->wptr = 0;
+ // XXX pm-runtime?? we *need* the device to be off after this
+ // so maybe continuing to call ->pm_suspend/resume() is better?
+ gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
- disable_irq(gpu->irq);
- ret = gpu->funcs->hw_init(gpu);
+ ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
- enable_irq(gpu->irq);
}
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
@@ -259,8 +261,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
- gpu->funcs->pm_resume(gpu);
-
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
@@ -273,8 +273,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
-
- gpu->funcs->pm_suspend(gpu);
}
#endif
@@ -354,14 +352,13 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->rev = config->rev;
gpu->fast_rate = config->fast_rate;
- gpu->slow_rate = config->slow_rate;
gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
gpu->bus_scale_table = config->bus_scale_table;
#endif
- DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
- gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
+ gpu->fast_rate, gpu->bus_freq);
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
@@ -369,6 +366,10 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (ret)
return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -418,18 +419,27 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return 0;
}
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
- if (gpu->memptrs_bo) {
- if (gpu->memptrs)
- msm_gem_put_vaddr(gpu->memptrs_bo);
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (adreno_gpu->memptrs_bo) {
+ if (adreno_gpu->memptrs)
+ msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
+
+ if (adreno_gpu->memptrs_iova)
+ msm_gem_put_iova(adreno_gpu->memptrs_bo, gpu->id);
+
+ drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
+ }
+ release_firmware(adreno_gpu->pm4);
+ release_firmware(adreno_gpu->pfp);
- if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ msm_gpu_cleanup(gpu);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ if (gpu->aspace) {
+ gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(gpu->aspace);
}
- release_firmware(gpu->pm4);
- release_firmware(gpu->pfp);
- msm_gpu_cleanup(&gpu->base);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 42e444a67630..fb4831f9f80b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -123,7 +123,7 @@ struct adreno_gpu {
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, slow_rate, bus_freq;
+ uint32_t fast_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
#endif
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 921270ea6059..a879ffa534b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -171,7 +171,7 @@ dsi_mgr_phy_enable(int id,
}
}
} else {
- msm_dsi_host_reset_phy(mdsi->host);
+ msm_dsi_host_reset_phy(msm_dsi->host);
ret = enable_phy(msm_dsi, src_pll_id, &shared_timings[id]);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
index a54d3bb5baad..8177e8511afd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c
@@ -18,13 +18,6 @@
#include <linux/hdmi.h>
#include "hdmi.h"
-
-/* Supported HDMI Audio channels */
-#define MSM_HDMI_AUDIO_CHANNEL_2 0
-#define MSM_HDMI_AUDIO_CHANNEL_4 1
-#define MSM_HDMI_AUDIO_CHANNEL_6 2
-#define MSM_HDMI_AUDIO_CHANNEL_8 3
-
/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
static int nchannels[] = { 2, 4, 6, 8 };
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1c29618f4ddb..f29194a74a19 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -114,15 +114,9 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp4_crtc->event;
if (event) {
- /* if regular vblank case (!file) or if cancel-flip from
- * preclose on file that requested flip, then send the
- * event:
- */
- if (!file || (event->base.file_priv == file)) {
- mdp4_crtc->event = NULL;
- DBG("%s: send event: %p", mdp4_crtc->name, event);
- drm_crtc_send_vblank_event(crtc, event);
- }
+ mdp4_crtc->event = NULL;
+ DBG("%s: send event: %p", mdp4_crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 5b6516bb9d06..3d26d7774c08 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -169,7 +169,7 @@ static void mdp4_destroy(struct msm_kms *kms)
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ msm_gem_address_space_put(aspace);
}
if (mdp4_kms->rpm_enabled)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index ba2d017f6591..c2bdad88447e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -70,6 +70,18 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
.nb_stages = 5,
},
.dspp = {
@@ -134,6 +146,18 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
.lm = {
.count = 5,
.base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
@@ -167,6 +191,7 @@ const struct mdp5_cfg_hw apq8084_config = {
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
@@ -211,6 +236,22 @@ const struct mdp5_cfg_hw apq8084_config = {
.lm = {
.count = 6,
.base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
.nb_stages = 5,
.max_width = 2048,
.max_height = 0xFFFF,
@@ -282,6 +323,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
.lm = {
.count = 2, /* LM0 and LM3 */
.base = { 0x44000, 0x47000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB },
+ },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
@@ -306,6 +353,7 @@ const struct mdp5_cfg_hw msm8x94_config = {
.mdp = {
.count = 1,
.caps = MDP_CAP_SMP |
+ MDP_CAP_SRC_SPLIT |
0,
},
.smp = {
@@ -350,6 +398,22 @@ const struct mdp5_cfg_hw msm8x94_config = {
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = 2,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = 3,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
.nb_stages = 8,
.max_width = 2048,
.max_height = 0xFFFF,
@@ -385,6 +449,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
.count = 1,
.caps = MDP_CAP_DSC |
MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT |
0,
},
.ctl = {
@@ -434,6 +499,22 @@ const struct mdp5_cfg_hw msm8x96_config = {
.lm = {
.count = 6,
.base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 1, .pp = 1, .dspp = 1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ { .id = 2, .pp = 2, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR, },
+ { .id = 3, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 4, .pp = -1, .dspp = -1,
+ .caps = MDP_LM_CAP_WB, },
+ { .id = 5, .pp = 3, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY, },
+ },
.nb_stages = 8,
.max_width = 2560,
.max_height = 0xFFFF,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
index b1c7daaede86..75910d0f2f4c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -39,8 +39,16 @@ struct mdp5_sub_block {
MDP5_SUB_BLOCK_DEFINITION;
};
+struct mdp5_lm_instance {
+ int id;
+ int pp;
+ int dspp;
+ uint32_t caps;
+};
+
struct mdp5_lm_block {
MDP5_SUB_BLOCK_DEFINITION;
+ struct mdp5_lm_instance instances[MAX_BASES];
uint32_t nb_stages; /* number of stages per blender */
uint32_t max_width; /* Maximum output resolution */
uint32_t max_height;
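A hedged, simplified sketch — not the actual mdp5_mixer.c code added elsewhere in this series — of how a caps-based lookup over the new instances[] table can work; the function name is hypothetical, the fields are the ones defined above and filled in per SoC in mdp5_cfg.c.

static const struct mdp5_lm_instance *
example_find_lm(const struct mdp5_lm_block *lm, uint32_t caps)
{
	int i;

	/* first mixer instance whose capabilities cover the request */
	for (i = 0; i < lm->count; i++)
		if ((lm->instances[i].caps & caps) == caps)
			return &lm->instances[i];

	return NULL;
}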
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index df1c8adec3f3..8dafc7bdba48 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -51,7 +51,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
struct device *dev = encoder->dev->dev;
u32 total_lines_x100, vclks_line, cfg;
long vsync_clk_speed;
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
if (IS_ERR_OR_NULL(mdp5_kms->vsync_clk)) {
dev_err(dev, "vsync_clk is not initialized\n");
@@ -94,7 +95,8 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
int ret;
ret = clk_set_rate(mdp5_kms->vsync_clk,
@@ -119,7 +121,8 @@ static int pingpong_tearcheck_enable(struct drm_encoder *encoder)
static void pingpong_tearcheck_disable(struct drm_encoder *encoder)
{
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int pp_id = GET_PING_PONG_ID(mdp5_crtc_get_lm(encoder->crtc));
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ int pp_id = mixer->pp;
mdp5_write(mdp5_kms, REG_MDP5_PP_TEAR_CHECK_EN(pp_id), 0);
clk_disable_unprepare(mdp5_kms->vsync_clk);
@@ -129,8 +132,6 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
-
mode = adjusted_mode;
DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
@@ -142,23 +143,23 @@ void mdp5_cmd_encoder_mode_set(struct drm_encoder *encoder,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
pingpong_tearcheck_setup(encoder, mode);
- mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_cmd_enc->intf,
- mdp5_cmd_enc->ctl);
+ mdp5_crtc_set_pipeline(encoder->crtc);
}
void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
- struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(!mdp5_cmd_enc->enabled))
return;
pingpong_tearcheck_disable(encoder);
- mdp5_ctl_set_encoder_state(ctl, false);
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
bs_set(mdp5_cmd_enc, 0);
@@ -169,7 +170,8 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_cmd_enc = to_mdp5_encoder(encoder);
struct mdp5_ctl *ctl = mdp5_cmd_enc->ctl;
- struct mdp5_interface *intf = &mdp5_cmd_enc->intf;
+ struct mdp5_interface *intf = mdp5_cmd_enc->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
if (WARN_ON(mdp5_cmd_enc->enabled))
return;
@@ -178,9 +180,9 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
if (pingpong_tearcheck_enable(encoder))
return;
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
- mdp5_ctl_set_encoder_state(ctl, true);
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_cmd_enc->enabled = true;
}
@@ -197,7 +199,7 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
return -EINVAL;
mdp5_kms = get_kms(encoder);
- intf_num = mdp5_cmd_enc->intf.num;
+ intf_num = mdp5_cmd_enc->intf->num;
/* Switch slave encoder's trigger MUX, to use the master's
* start signal for the slave encoder
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index d0c8b38b96ce..9217e0d6e93e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -32,13 +32,7 @@ struct mdp5_crtc {
int id;
bool enabled;
- /* layer mixer used for this CRTC (+ its lock): */
-#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
- int lm;
- spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
-
- /* CTL used for this CRTC: */
- struct mdp5_ctl *ctl;
+ spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
/* if there is a pending flip, these will be non-null: */
struct drm_pending_vblank_event *event;
@@ -61,8 +55,6 @@ struct mdp5_crtc {
struct completion pp_completion;
- bool cmd_mode;
-
struct {
/* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
spinlock_t lock;
@@ -97,10 +89,12 @@ static void request_pp_done_pending(struct drm_crtc *crtc)
static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
DBG("%s: flush=%08x", crtc->name, flush_mask);
- return mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
+ return mdp5_ctl_commit(ctl, pipeline, flush_mask);
}
/*
@@ -110,19 +104,25 @@ static u32 crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
*/
static u32 crtc_flush_all(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_hw_mixer *mixer, *r_mixer;
struct drm_plane *plane;
uint32_t flush_mask = 0;
/* this should not happen: */
- if (WARN_ON(!mdp5_crtc->ctl))
+ if (WARN_ON(!mdp5_cstate->ctl))
return 0;
drm_atomic_crtc_for_each_plane(plane, crtc) {
flush_mask |= mdp5_plane_get_flush(plane);
}
- flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc->lm);
+ mixer = mdp5_cstate->pipeline.mixer;
+ flush_mask |= mdp_ctl_flush_mask_lm(mixer->lm);
+
+ r_mixer = mdp5_cstate->pipeline.r_mixer;
+ if (r_mixer)
+ flush_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
return crtc_flush(crtc, flush_mask);
}
@@ -130,7 +130,10 @@ static u32 crtc_flush_all(struct drm_crtc *crtc)
/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
struct drm_device *dev = crtc->dev;
struct drm_pending_vblank_event *event;
unsigned long flags;
@@ -138,22 +141,17 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_lock_irqsave(&dev->event_lock, flags);
event = mdp5_crtc->event;
if (event) {
- /* if regular vblank case (!file) or if cancel-flip from
- * preclose on file that requested flip, then send the
- * event:
- */
- if (!file || (event->base.file_priv == file)) {
- mdp5_crtc->event = NULL;
- DBG("%s: send event: %p", crtc->name, event);
- drm_crtc_send_vblank_event(crtc, event);
- }
+ mdp5_crtc->event = NULL;
+ DBG("%s: send event: %p", crtc->name, event);
+ drm_crtc_send_vblank_event(crtc, event);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
- if (mdp5_crtc->ctl && !crtc->state->enable) {
+ if (ctl && !crtc->state->enable) {
/* set STAGE_UNUSED for all layers */
- mdp5_ctl_blend(mdp5_crtc->ctl, NULL, 0, 0);
- mdp5_crtc->ctl = NULL;
+ mdp5_ctl_blend(ctl, pipeline, NULL, NULL, 0, 0);
+ /* XXX: What to do here? */
+ /* mdp5_crtc->ctl = NULL; */
}
}
@@ -193,6 +191,12 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
}
/*
+ * left/right pipe offsets for the stage array used in blend_setup()
+ */
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+
+/*
* blend_setup() - blend all the planes of a CRTC
*
* If no base layer is available, border will be enabled as the base layer.
@@ -202,18 +206,26 @@ static inline u32 mdp5_lm_use_fg_alpha_mask(enum mdp_mixer_stage_id stage)
static void blend_setup(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_plane *plane;
const struct mdp5_cfg_hw *hw_cfg;
struct mdp5_plane_state *pstate, *pstates[STAGE_MAX + 1] = {NULL};
const struct mdp_format *format;
- uint32_t lm = mdp5_crtc->lm;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ uint32_t lm = mixer->lm;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
+ uint32_t r_lm = r_mixer ? r_mixer->lm : 0;
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
unsigned long flags;
- enum mdp5_pipe stage[STAGE_MAX + 1] = { SSPP_NONE };
+ enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+ enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
int i, plane_cnt = 0;
bool bg_alpha_enabled = false;
u32 mixer_op_mode = 0;
+ u32 val;
#define blender(stage) ((stage) - STAGE0)
hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
@@ -221,14 +233,35 @@ static void blend_setup(struct drm_crtc *crtc)
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
/* ctl could be released already when we are shutting down: */
- if (!mdp5_crtc->ctl)
+ /* XXX: Can this happen now? */
+ if (!ctl)
goto out;
/* Collect all plane information */
drm_atomic_crtc_for_each_plane(plane, crtc) {
+ enum mdp5_pipe right_pipe;
+
pstate = to_mdp5_plane_state(plane->state);
pstates[pstate->stage] = pstate;
- stage[pstate->stage] = mdp5_plane_pipe(plane);
+ stage[pstate->stage][PIPE_LEFT] = mdp5_plane_pipe(plane);
+ /*
+ * if we have a right mixer, stage the same pipe as we
+ * have on the left mixer
+ */
+ if (r_mixer)
+ r_stage[pstate->stage][PIPE_LEFT] =
+ mdp5_plane_pipe(plane);
+ /*
+ * if we have a right pipe (i.e., the plane comprises 2 hwpipes),
+ * then stage the right pipe on the right side of both
+ * the layer mixers
+ */
+ right_pipe = mdp5_plane_right_pipe(plane);
+ if (right_pipe) {
+ stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ r_stage[pstate->stage][PIPE_RIGHT] = right_pipe;
+ }
+
plane_cnt++;
}
@@ -294,12 +327,27 @@ static void blend_setup(struct drm_crtc *crtc)
blender(i)), fg_alpha);
mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
blender(i)), bg_alpha);
+ if (r_mixer) {
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(r_lm,
+ blender(i)), blend_op);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(r_lm,
+ blender(i)), fg_alpha);
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(r_lm,
+ blender(i)), bg_alpha);
+ }
}
- mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), mixer_op_mode);
-
- mdp5_ctl_blend(mdp5_crtc->ctl, stage, plane_cnt, ctl_blend_flags);
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm),
+ val | mixer_op_mode);
+ if (r_mixer) {
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm),
+ val | mixer_op_mode);
+ }
+ mdp5_ctl_blend(ctl, pipeline, stage, r_stage, plane_cnt,
+ ctl_blend_flags);
out:
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
@@ -307,7 +355,12 @@ out:
static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_cstate->pipeline.mixer;
+ struct mdp5_hw_mixer *r_mixer = mdp5_cstate->pipeline.r_mixer;
+ uint32_t lm = mixer->lm;
+ u32 mixer_width, val;
unsigned long flags;
struct drm_display_mode *mode;
@@ -325,16 +378,40 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ mixer_width = mode->hdisplay;
+ if (r_mixer)
+ mixer_width /= 2;
+
spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
- MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to LEFT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm));
+ val &= ~MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(lm), val);
+
+ if (r_mixer) {
+ u32 r_lm = r_mixer->lm;
+
+ mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(r_lm),
+ MDP5_LM_OUT_SIZE_WIDTH(mixer_width) |
+ MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
+
+ /* Assign mixer to RIGHT side in source split mode */
+ val = mdp5_read(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm));
+ val |= MDP5_LM_BLEND_COLOR_OUT_SPLIT_LEFT_RIGHT;
+ mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(r_lm), val);
+ }
+
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
static void mdp5_crtc_disable(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
DBG("%s", crtc->name);
@@ -342,7 +419,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
if (WARN_ON(!mdp5_crtc->enabled))
return;
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
@@ -354,6 +431,7 @@ static void mdp5_crtc_disable(struct drm_crtc *crtc)
static void mdp5_crtc_enable(struct drm_crtc *crtc)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
DBG("%s", crtc->name);
@@ -364,12 +442,73 @@ static void mdp5_crtc_enable(struct drm_crtc *crtc)
mdp5_enable(mdp5_kms);
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->pp_done);
mdp5_crtc->enabled = true;
}
+int mdp5_crtc_setup_pipeline(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ bool need_right_mixer)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_interface *intf;
+ bool new_mixer = false;
+
+ new_mixer = !pipeline->mixer;
+
+ if ((need_right_mixer && !pipeline->r_mixer) ||
+ (!need_right_mixer && pipeline->r_mixer))
+ new_mixer = true;
+
+ if (new_mixer) {
+ struct mdp5_hw_mixer *old_mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *old_r_mixer = pipeline->r_mixer;
+ u32 caps;
+ int ret;
+
+ caps = MDP_LM_CAP_DISPLAY;
+ if (need_right_mixer)
+ caps |= MDP_LM_CAP_PAIR;
+
+ ret = mdp5_mixer_assign(new_crtc_state->state, crtc, caps,
+ &pipeline->mixer, need_right_mixer ?
+ &pipeline->r_mixer : NULL);
+ if (ret)
+ return ret;
+
+ mdp5_mixer_release(new_crtc_state->state, old_mixer);
+ if (old_r_mixer) {
+ mdp5_mixer_release(new_crtc_state->state, old_r_mixer);
+ if (!need_right_mixer)
+ pipeline->r_mixer = NULL;
+ }
+ }
+
+ /*
+ * these should have been already set up in the encoder's atomic
+ * check (called by drm_atomic_helper_check_modeset)
+ */
+ intf = pipeline->intf;
+
+ mdp5_cstate->err_irqmask = intf2err(intf->num);
+ mdp5_cstate->vblank_irqmask = intf2vblank(pipeline->mixer, intf);
+
+ if ((intf->type == INTF_DSI) &&
+ (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
+ mdp5_cstate->pp_done_irqmask = lm2ppdone(pipeline->mixer);
+ mdp5_cstate->cmd_mode = true;
+ } else {
+ mdp5_cstate->pp_done_irqmask = 0;
+ mdp5_cstate->cmd_mode = false;
+ }
+
+ return 0;
+}
+
struct plane_state {
struct drm_plane *plane;
struct mdp5_plane_state *state;
@@ -391,6 +530,29 @@ static bool is_fullscreen(struct drm_crtc_state *cstate,
((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
}
+enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state,
+ struct drm_plane_state *bpstate)
+{
+ struct mdp5_crtc_state *mdp5_cstate =
+ to_mdp5_crtc_state(new_crtc_state);
+
+ /*
+ * if we're in source split mode, it's mandatory to have
+ * border out on the base stage
+ */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return STAGE0;
+
+ /* if the bottom-most layer is not fullscreen, we need to use
+ * it for solid-color:
+ */
+ if (!is_fullscreen(new_crtc_state, bpstate))
+ return STAGE0;
+
+ return STAGE_BASE;
+}
+
static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -400,8 +562,12 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
struct plane_state pstates[STAGE_MAX + 1];
const struct mdp5_cfg_hw *hw_cfg;
const struct drm_plane_state *pstate;
+ const struct drm_display_mode *mode = &state->adjusted_mode;
bool cursor_plane = false;
- int cnt = 0, base = 0, i;
+ bool need_right_mixer = false;
+ int cnt = 0, i;
+ int ret;
+ enum mdp_mixer_stage_id start;
DBG("%s: check", crtc->name);
@@ -409,32 +575,52 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
pstates[cnt].plane = plane;
pstates[cnt].state = to_mdp5_plane_state(pstate);
+ /*
+ * if any plane on this crtc uses 2 hwpipes, then we need
+ * the crtc to have a right hwmixer.
+ */
+ if (pstates[cnt].state->r_hwpipe)
+ need_right_mixer = true;
cnt++;
if (plane->type == DRM_PLANE_TYPE_CURSOR)
cursor_plane = true;
}
- /* assign a stage based on sorted zpos property */
- sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+ /* bail out early if there aren't any planes */
+ if (!cnt)
+ return 0;
- /* if the bottom-most layer is not fullscreen, we need to use
- * it for solid-color:
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ /*
+ * we need a right hwmixer if the mode's width is greater than a single
+ * LM's max width
*/
- if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
- base++;
+ if (mode->hdisplay > hw_cfg->lm.max_width)
+ need_right_mixer = true;
+
+ ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
+ if (ret) {
+ dev_err(dev->dev, "couldn't assign mixers %d\n", ret);
+ return ret;
+ }
+
+ /* assign a stage based on sorted zpos property */
+ sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
/* trigger a warning if cursor isn't the highest zorder */
WARN_ON(cursor_plane &&
(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
+ start = get_start_stage(crtc, state, &pstates[0].state->base);
+
/* verify that there are not too many planes attached to crtc
* and that we don't have conflicting mixer stages:
*/
- hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-
- if ((cnt + base) >= hw_cfg->lm.nb_stages) {
- dev_err(dev->dev, "too many planes! cnt=%d, base=%d\n", cnt, base);
+ if ((cnt + start - 1) >= hw_cfg->lm.nb_stages) {
+ dev_err(dev->dev, "too many planes! cnt=%d, start stage=%d\n",
+ cnt, start);
return -EINVAL;
}
@@ -442,7 +628,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
if (cursor_plane && (i == (cnt - 1)))
pstates[i].state->stage = hw_cfg->lm.nb_stages;
else
- pstates[i].state->stage = STAGE_BASE + i + base;
+ pstates[i].state->stage = start + i;
DBG("%s: assign pipe %s on stage=%d", crtc->name,
pstates[i].plane->name,
pstates[i].state->stage);
@@ -461,6 +647,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct drm_device *dev = crtc->dev;
unsigned long flags;
@@ -477,7 +664,8 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
* it means we are trying to flush a CRTC whose state is disabled:
* nothing else needs to be done.
*/
- if (unlikely(!mdp5_crtc->ctl))
+ /* XXX: Can this happen now? */
+ if (unlikely(!mdp5_cstate->ctl))
return;
blend_setup(crtc);
@@ -488,11 +676,16 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
* This is safe because no pp_done will happen before SW trigger
* in command mode.
*/
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
request_pp_done_pending(crtc);
mdp5_crtc->flushed_mask = crtc_flush_all(crtc);
+ /* XXX are we leaking out state here? */
+ mdp5_crtc->vblank.irqmask = mdp5_cstate->vblank_irqmask;
+ mdp5_crtc->err.irqmask = mdp5_cstate->err_irqmask;
+ mdp5_crtc->pp_done.irqmask = mdp5_cstate->pp_done_irqmask;
+
request_pending(crtc, PENDING_FLIP);
}
@@ -527,11 +720,14 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
uint32_t blendcfg, stride;
uint64_t cursor_addr;
+ struct mdp5_ctl *ctl;
int ret, lm;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -544,7 +740,12 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- if (NULL == mdp5_crtc->ctl)
+ ctl = mdp5_cstate->ctl;
+ if (!ctl)
+ return -EINVAL;
+
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
return -EINVAL;
if (!handle) {
@@ -561,7 +762,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (ret)
return -EINVAL;
- lm = mdp5_crtc->lm;
+ lm = mdp5_cstate->pipeline.mixer->lm;
stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
@@ -591,7 +792,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
set_cursor:
- ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, 0, cursor_enable);
+ ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable);
if (ret) {
dev_err(dev->dev, "failed to %sable cursor: %d\n",
cursor_enable ? "en" : "dis", ret);
@@ -613,11 +814,17 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ uint32_t lm = mdp5_cstate->pipeline.mixer->lm;
uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
uint32_t roi_w;
uint32_t roi_h;
unsigned long flags;
+ /* don't support LM cursors when we have source split enabled */
+ if (mdp5_cstate->pipeline.r_mixer)
+ return -EINVAL;
+
/* In case the CRTC is disabled, just drop the cursor update */
if (unlikely(!crtc->state->enable))
return 0;
@@ -628,10 +835,10 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
get_roi(crtc, &roi_w, &roi_h);
spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
- mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
@@ -641,16 +848,80 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
+static void
+mdp5_crtc_atomic_print_state(struct drm_printer *p,
+ const struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+ struct mdp5_pipeline *pipeline = &mdp5_cstate->pipeline;
+ struct mdp5_kms *mdp5_kms = get_kms(state->crtc);
+
+ if (WARN_ON(!pipeline))
+ return;
+
+ drm_printf(p, "\thwmixer=%s\n", pipeline->mixer ?
+ pipeline->mixer->name : "(null)");
+
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright hwmixer=%s\n", pipeline->r_mixer ?
+ pipeline->r_mixer->name : "(null)");
+}
+
+static void mdp5_crtc_reset(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (crtc->state) {
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ kfree(to_mdp5_crtc_state(crtc->state));
+ }
+
+ mdp5_cstate = kzalloc(sizeof(*mdp5_cstate), GFP_KERNEL);
+
+ if (mdp5_cstate) {
+ mdp5_cstate->base.crtc = crtc;
+ crtc->state = &mdp5_cstate->base;
+ }
+}
+
+static struct drm_crtc_state *
+mdp5_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ mdp5_cstate = kmemdup(to_mdp5_crtc_state(crtc->state),
+ sizeof(*mdp5_cstate), GFP_KERNEL);
+ if (!mdp5_cstate)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &mdp5_cstate->base);
+
+ return &mdp5_cstate->base;
+}
+
+static void mdp5_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(mdp5_cstate);
+}
+
static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.set_property = drm_atomic_helper_crtc_set_property,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
@@ -658,9 +929,10 @@ static const struct drm_crtc_funcs mdp5_crtc_no_lm_cursor_funcs = {
.destroy = mdp5_crtc_destroy,
.page_flip = drm_atomic_helper_page_flip,
.set_property = drm_atomic_helper_crtc_set_property,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = mdp5_crtc_reset,
+ .atomic_duplicate_state = mdp5_crtc_duplicate_state,
+ .atomic_destroy_state = mdp5_crtc_destroy_state,
+ .atomic_print_state = mdp5_crtc_atomic_print_state,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -710,22 +982,26 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
int ret;
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
- dev_warn(dev->dev, "pp done time out, lm=%d\n", mdp5_crtc->lm);
+ dev_warn(dev->dev, "pp done time out, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+ struct mdp5_ctl *ctl = mdp5_cstate->ctl;
int ret;
/* Should not call this function if crtc is disabled. */
- if (!mdp5_crtc->ctl)
+ if (!ctl)
return;
ret = drm_crtc_vblank_get(crtc);
@@ -733,7 +1009,7 @@ static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
return;
ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
- ((mdp5_ctl_get_commit_status(mdp5_crtc->ctl) &
+ ((mdp5_ctl_get_commit_status(ctl) &
mdp5_crtc->flushed_mask) == 0),
msecs_to_jiffies(50));
if (ret <= 0)
@@ -750,52 +1026,54 @@ uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
return mdp5_crtc->vblank.irqmask;
}
-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
struct mdp5_kms *mdp5_kms = get_kms(crtc);
- int lm = mdp5_crtc_get_lm(crtc);
-
- /* now that we know what irq's we want: */
- mdp5_crtc->err.irqmask = intf2err(intf->num);
- mdp5_crtc->vblank.irqmask = intf2vblank(lm, intf);
-
- if ((intf->type == INTF_DSI) &&
- (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)) {
- mdp5_crtc->pp_done.irqmask = lm2ppdone(lm);
- mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
- mdp5_crtc->cmd_mode = true;
- } else {
- mdp5_crtc->pp_done.irqmask = 0;
- mdp5_crtc->pp_done.irq = NULL;
- mdp5_crtc->cmd_mode = false;
- }
+ /* should this be done elsewhere? */
mdp_irq_update(&mdp5_kms->base);
- mdp5_crtc->ctl = ctl;
- mdp5_ctl_set_pipeline(ctl, intf, lm);
+ mdp5_ctl_set_pipeline(mdp5_cstate->ctl, &mdp5_cstate->pipeline);
}
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
- return mdp5_crtc->ctl;
+ return mdp5_cstate->ctl;
}
-int mdp5_crtc_get_lm(struct drm_crtc *crtc)
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
- return WARN_ON(!crtc) ? -EINVAL : mdp5_crtc->lm;
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return WARN_ON(!mdp5_cstate->pipeline.mixer) ?
+ ERR_PTR(-EINVAL) : mdp5_cstate->pipeline.mixer;
+}
+
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc)
+{
+ struct mdp5_crtc_state *mdp5_cstate;
+
+ if (WARN_ON(!crtc))
+ return ERR_PTR(-EINVAL);
+
+ mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+ return &mdp5_cstate->pipeline;
}
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
- struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
- if (mdp5_crtc->cmd_mode)
+ if (mdp5_cstate->cmd_mode)
mdp5_crtc_wait_for_pp_done(crtc);
else
mdp5_crtc_wait_for_flush_done(crtc);
@@ -816,7 +1094,6 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
crtc = &mdp5_crtc->base;
mdp5_crtc->id = id;
- mdp5_crtc->lm = GET_LM_ID(id);
spin_lock_init(&mdp5_crtc->lm_lock);
spin_lock_init(&mdp5_crtc->cursor.lock);
@@ -824,6 +1101,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
mdp5_crtc->err.irq = mdp5_crtc_err_irq;
+ mdp5_crtc->pp_done.irq = mdp5_crtc_pp_done_irq;
if (cursor_plane)
drm_crtc_init_with_planes(dev, crtc, plane, cursor_plane,
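A hedged sketch of how other blocks reach the state now that it lives in mdp5_crtc_state: they go through the accessors added above instead of poking at struct mdp5_crtc. The function below is hypothetical; mdp5_cmd_encoder.c does essentially this to find its ping-pong block.

#include <linux/err.h>

static int example_get_pp_id(struct drm_crtc *crtc)
{
	struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(crtc);

	if (IS_ERR(mixer))
		return PTR_ERR(mixer);

	/* ping-pong block tied to the mixer picked for this CRTC */
	return mixer->pp;
}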
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 8b93f7e13200..439e0a300e25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -32,24 +32,16 @@
#define CTL_STAT_BUSY 0x1
#define CTL_STAT_BOOKED 0x2
-struct op_mode {
- struct mdp5_interface intf;
-
- bool encoder_enabled;
- uint32_t start_mask;
-};
-
struct mdp5_ctl {
struct mdp5_ctl_manager *ctlm;
u32 id;
- int lm;
/* CTL status bitmask */
u32 status;
- /* Operation Mode Configuration for the Pipeline */
- struct op_mode pipeline;
+ bool encoder_enabled;
+ uint32_t start_mask;
/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
spinlock_t hw_lock;
@@ -146,9 +138,10 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}
-static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
+static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
unsigned long flags;
+ struct mdp5_interface *intf = pipeline->intf;
u32 ctl_op = 0;
if (!mdp5_cfg_intf_is_virtual(intf->type))
@@ -169,52 +162,50 @@ static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_interface *intf)
break;
}
+ if (pipeline->r_mixer)
+ ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
+ MDP5_CTL_OP_PACK_3D(1);
+
spin_lock_irqsave(&ctl->hw_lock, flags);
ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl,
- struct mdp5_interface *intf, int lm)
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);
+ struct mdp5_interface *intf = pipeline->intf;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
- if (unlikely(WARN_ON(intf->num != ctl->pipeline.intf.num))) {
- dev_err(mdp5_kms->dev->dev,
- "CTL %d is allocated by INTF %d, but used by INTF %d\n",
- ctl->id, ctl->pipeline.intf.num, intf->num);
- return -EINVAL;
- }
-
- ctl->lm = lm;
-
- memcpy(&ctl->pipeline.intf, intf, sizeof(*intf));
-
- ctl->pipeline.start_mask = mdp_ctl_flush_mask_lm(ctl->lm) |
- mdp_ctl_flush_mask_encoder(intf);
+ ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm) |
+ mdp_ctl_flush_mask_encoder(intf);
+ if (r_mixer)
+ ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
if (!mdp5_cfg_intf_is_virtual(intf->type))
set_display_intf(mdp5_kms, intf);
- set_ctl_op(ctl, intf);
+ set_ctl_op(ctl, pipeline);
return 0;
}
-static bool start_signal_needed(struct mdp5_ctl *ctl)
+static bool start_signal_needed(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline)
{
- struct op_mode *pipeline = &ctl->pipeline;
+ struct mdp5_interface *intf = pipeline->intf;
- if (!pipeline->encoder_enabled || pipeline->start_mask != 0)
+ if (!ctl->encoder_enabled || ctl->start_mask != 0)
return false;
- switch (pipeline->intf.type) {
+ switch (intf->type) {
case INTF_WB:
return true;
case INTF_DSI:
- return pipeline->intf.mode == MDP5_INTF_DSI_MODE_COMMAND;
+ return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
default:
return false;
}
@@ -236,19 +227,23 @@ static void send_start_signal(struct mdp5_ctl *ctl)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
-static void refill_start_mask(struct mdp5_ctl *ctl)
+static void refill_start_mask(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline)
{
- struct op_mode *pipeline = &ctl->pipeline;
- struct mdp5_interface *intf = &ctl->pipeline.intf;
+ struct mdp5_interface *intf = pipeline->intf;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
- pipeline->start_mask = mdp_ctl_flush_mask_lm(ctl->lm);
+ ctl->start_mask = mdp_ctl_flush_mask_lm(mixer->lm);
+ if (r_mixer)
+ ctl->start_mask |= mdp_ctl_flush_mask_lm(r_mixer->lm);
/*
* Writeback encoder needs to program & flush
* address registers for each page flip..
*/
if (intf->type == INTF_WB)
- pipeline->start_mask |= mdp_ctl_flush_mask_encoder(intf);
+ ctl->start_mask |= mdp_ctl_flush_mask_encoder(intf);
}
/**
@@ -259,17 +254,21 @@ static void refill_start_mask(struct mdp5_ctl *ctl)
* Note:
* This encoder state is needed to trigger START signal (data path kickoff).
*/
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ bool enabled)
{
+ struct mdp5_interface *intf = pipeline->intf;
+
if (WARN_ON(!ctl))
return -EINVAL;
- ctl->pipeline.encoder_enabled = enabled;
- DBG("intf_%d: %s", ctl->pipeline.intf.num, enabled ? "on" : "off");
+ ctl->encoder_enabled = enabled;
+ DBG("intf_%d: %s", intf->num, enabled ? "on" : "off");
- if (start_signal_needed(ctl)) {
+ if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl);
+ refill_start_mask(ctl, pipeline);
}
return 0;
@@ -280,29 +279,35 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled)
* CTL registers need to be flushed after calling this function
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable)
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
unsigned long flags;
u32 blend_cfg;
- int lm = ctl->lm;
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+
+ if (unlikely(WARN_ON(!mixer))) {
+ dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM",
+ ctl->id);
+ return -EINVAL;
+ }
- if (unlikely(WARN_ON(lm < 0))) {
- dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d",
- ctl->id, lm);
+ if (pipeline->r_mixer) {
+ dev_err(ctl_mgr->dev->dev, "unsupported configuration");
return -EINVAL;
}
spin_lock_irqsave(&ctl->hw_lock, flags);
- blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
+ blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
if (enable)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
else
blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
ctl->cursor_on = enable;
spin_unlock_irqrestore(&ctl->hw_lock, flags);
@@ -355,37 +360,88 @@ static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
}
}
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
- u32 ctl_blend_op_flags)
+static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
+{
+ unsigned long flags;
+ struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
+ int i;
+
+ spin_lock_irqsave(&ctl->hw_lock, flags);
+
+ for (i = 0; i < ctl_mgr->nlm; i++) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
+ }
+
+ spin_unlock_irqrestore(&ctl->hw_lock, flags);
+}
+
+#define PIPE_LEFT 0
+#define PIPE_RIGHT 1
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags)
{
+ struct mdp5_hw_mixer *mixer = pipeline->mixer;
+ struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
unsigned long flags;
u32 blend_cfg = 0, blend_ext_cfg = 0;
+ u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
int i, start_stage;
+ mdp5_ctl_reset_blend_regs(ctl);
+
if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
start_stage = STAGE0;
blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
+ if (r_mixer)
+ r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
} else {
start_stage = STAGE_BASE;
}
for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
- blend_cfg |= mdp_ctl_blend_mask(stage[i], i);
- blend_ext_cfg |= mdp_ctl_blend_ext_mask(stage[i], i);
+ blend_cfg |=
+ mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
+ blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
+ if (r_mixer) {
+ r_blend_cfg |=
+ mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
+ r_blend_ext_cfg |=
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
+ mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
+ }
}
spin_lock_irqsave(&ctl->hw_lock, flags);
if (ctl->cursor_on)
blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
- ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, ctl->lm), blend_cfg);
- ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, ctl->lm), blend_ext_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
+ blend_ext_cfg);
+ if (r_mixer) {
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
+ r_blend_cfg);
+ ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
+ r_blend_ext_cfg);
+ }
spin_unlock_irqrestore(&ctl->hw_lock, flags);
- ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(ctl->lm);
+ ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
+ if (r_mixer)
+ ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
- DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", ctl->lm,
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
blend_cfg, blend_ext_cfg);
+ if (r_mixer)
+ DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
+ r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);
return 0;
}
@@ -443,7 +499,8 @@ u32 mdp_ctl_flush_mask_lm(int lm)
}
}
-static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
+static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
u32 sw_mask = 0;
@@ -452,7 +509,7 @@ static u32 fix_sw_flush(struct mdp5_ctl *ctl, u32 flush_mask)
/* for some targets, cursor bit is the same as LM bit */
if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
- sw_mask |= mdp_ctl_flush_mask_lm(ctl->lm);
+ sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);
return sw_mask;
}
@@ -498,25 +555,26 @@ static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
*
* Return H/W flushed bit mask.
*/
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
+ struct mdp5_pipeline *pipeline,
+ u32 flush_mask)
{
struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
- struct op_mode *pipeline = &ctl->pipeline;
unsigned long flags;
u32 flush_id = ctl->id;
u32 curr_ctl_flush_mask;
- pipeline->start_mask &= ~flush_mask;
+ ctl->start_mask &= ~flush_mask;
VERB("flush_mask=%x, start_mask=%x, trigger=%x", flush_mask,
- pipeline->start_mask, ctl->pending_ctl_trigger);
+ ctl->start_mask, ctl->pending_ctl_trigger);
if (ctl->pending_ctl_trigger & flush_mask) {
flush_mask |= MDP5_CTL_FLUSH_CTL;
ctl->pending_ctl_trigger = 0;
}
- flush_mask |= fix_sw_flush(ctl, flush_mask);
+ flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);
flush_mask &= ctl_mgr->flush_hw_mask;
@@ -530,9 +588,9 @@ u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
spin_unlock_irqrestore(&ctl->hw_lock, flags);
}
- if (start_signal_needed(ctl)) {
+ if (start_signal_needed(ctl, pipeline)) {
send_start_signal(ctl);
- refill_start_mask(ctl);
+ refill_start_mask(ctl, pipeline);
}
return curr_ctl_flush_mask;
@@ -619,8 +677,6 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
found:
ctl = &ctl_mgr->ctls[c];
- ctl->pipeline.intf.num = intf_num;
- ctl->lm = -1;
ctl->status |= CTL_STAT_BUSY;
ctl->pending_ctl_trigger = 0;
DBG("CTL %d allocated", ctl->id);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
index fda00d33e4db..b63120388dc6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -37,13 +37,17 @@ struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, int intf_num);
int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl);
struct mdp5_interface;
-int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_interface *intf,
- int lm);
-int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, bool enabled);
+struct mdp5_pipeline;
+int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *p);
+int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, struct mdp5_pipeline *p,
+ bool enabled);
-int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, int cursor_id, bool enable);
+int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ int cursor_id, bool enable);
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
+#define MAX_PIPE_STAGE 2
+
/*
* mdp5_ctl_blend() - Blend multiple layers on a Layer Mixer (LM)
*
@@ -56,8 +60,10 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable);
* (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
*/
#define MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT BIT(0)
-int mdp5_ctl_blend(struct mdp5_ctl *ctl, enum mdp5_pipe *stage, u32 stage_cnt,
- u32 ctl_blend_op_flags);
+int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ enum mdp5_pipe stage[][MAX_PIPE_STAGE],
+ enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
+ u32 stage_cnt, u32 ctl_blend_op_flags);
/**
* mdp_ctl_flush_mask...() - Register FLUSH masks
@@ -71,7 +77,8 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
/* @flush_mask: see CTL flush masks definitions below */
-u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
+u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
+ u32 flush_mask);
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);
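For reference, a minimal sketch of how a caller might drive the reworked mdp5_ctl_blend() with its new per-stage pipe pairs; the single-plane staging, the 0/1 left/right slot indices and the flag choice are illustrative assumptions, not taken from this patch:

	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };

	/* one plane staged at STAGE0; slots 0/1 hold its left/right hwpipes */
	stage[STAGE0][0] = mdp5_plane_pipe(plane);
	stage[STAGE0][1] = mdp5_plane_right_pipe(plane); /* SSPP_NONE if unsplit */

	/* r_stage is only consulted when pipeline->r_mixer is set */
	mdp5_ctl_blend(ctl, pipeline, stage, r_stage, 1,
		       MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT);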
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 80fa482ae8ed..c2ab0f033031 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -109,7 +109,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
- int intf = mdp5_encoder->intf.num;
+ int intf = mdp5_encoder->intf->num;
uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
uint32_t display_v_start, display_v_end;
uint32_t hsync_start_x, hsync_end_x;
@@ -130,7 +130,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
ctrl_pol = 0;
/* DSI controller cannot handle active-low sync signals. */
- if (mdp5_encoder->intf.type != INTF_DSI) {
+ if (mdp5_encoder->intf->type != INTF_DSI) {
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
ctrl_pol |= MDP5_INTF_POLARITY_CTL_HSYNC_LOW;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
@@ -175,7 +175,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
* DISPLAY_V_START = (VBP * HCYCLE) + HBP
* DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
*/
- if (mdp5_encoder->intf.type == INTF_eDP) {
+ if (mdp5_encoder->intf->type == INTF_eDP) {
display_v_start += mode->htotal - mode->hsync_start;
display_v_end -= mode->hsync_start - mode->hdisplay;
}
@@ -206,8 +206,7 @@ static void mdp5_vid_encoder_mode_set(struct drm_encoder *encoder,
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_crtc_set_pipeline(encoder->crtc, &mdp5_encoder->intf,
- mdp5_encoder->ctl);
+ mdp5_crtc_set_pipeline(encoder->crtc);
}
static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
@@ -215,20 +214,21 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
- int lm = mdp5_crtc_get_lm(encoder->crtc);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
- int intfn = mdp5_encoder->intf.num;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ struct mdp5_hw_mixer *mixer = mdp5_crtc_get_mixer(encoder->crtc);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ int intfn = mdp5_encoder->intf->num;
unsigned long flags;
if (WARN_ON(!mdp5_encoder->enabled))
return;
- mdp5_ctl_set_encoder_state(ctl, false);
+ mdp5_ctl_set_encoder_state(ctl, pipeline, false);
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 0);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
/*
* Wait for a vsync so we know the ENABLE=0 latched before
@@ -238,7 +238,7 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
* the settings changes for the new modeset (like new
* scanout buffer) don't latch properly..
*/
- mdp_irq_wait(&mdp5_kms->base, intf2vblank(lm, intf));
+ mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
bs_set(mdp5_encoder, 0);
@@ -250,8 +250,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
struct mdp5_ctl *ctl = mdp5_encoder->ctl;
- struct mdp5_interface *intf = &mdp5_encoder->intf;
- int intfn = mdp5_encoder->intf.num;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(encoder->crtc);
+ int intfn = intf->num;
unsigned long flags;
if (WARN_ON(mdp5_encoder->enabled))
@@ -261,9 +262,9 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
- mdp5_ctl_commit(ctl, mdp_ctl_flush_mask_encoder(intf));
+ mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf));
- mdp5_ctl_set_encoder_state(ctl, true);
+ mdp5_ctl_set_encoder_state(ctl, pipeline, true);
mdp5_encoder->enabled = true;
}
@@ -273,7 +274,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_mode_set(encoder, mode, adjusted_mode);
@@ -284,7 +285,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
static void mdp5_encoder_disable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
@@ -295,7 +296,7 @@ static void mdp5_encoder_disable(struct drm_encoder *encoder)
static void mdp5_encoder_enable(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
mdp5_cmd_encoder_disable(encoder);
@@ -303,17 +304,33 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
mdp5_vid_encoder_enable(encoder);
}
+static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
+ struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc_state);
+ struct mdp5_interface *intf = mdp5_encoder->intf;
+ struct mdp5_ctl *ctl = mdp5_encoder->ctl;
+
+ mdp5_cstate->ctl = ctl;
+ mdp5_cstate->pipeline.intf = intf;
+
+ return 0;
+}
+
static const struct drm_encoder_helper_funcs mdp5_encoder_helper_funcs = {
.mode_set = mdp5_encoder_mode_set,
.disable = mdp5_encoder_disable,
.enable = mdp5_encoder_enable,
+ .atomic_check = mdp5_encoder_atomic_check,
};
int mdp5_encoder_get_linecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf.num;
+ int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_LINE_COUNT(intf));
}
@@ -322,7 +339,7 @@ u32 mdp5_encoder_get_framecount(struct drm_encoder *encoder)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
struct mdp5_kms *mdp5_kms = get_kms(encoder);
- int intf = mdp5_encoder->intf.num;
+ int intf = mdp5_encoder->intf->num;
return mdp5_read(mdp5_kms, REG_MDP5_INTF_FRAME_COUNT(intf));
}
@@ -340,7 +357,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
return -EINVAL;
mdp5_kms = get_kms(encoder);
- intf_num = mdp5_encoder->intf.num;
+ intf_num = mdp5_encoder->intf->num;
/* Switch slave encoder's TimingGen Sync mode,
* to use the master's enable signal for the slave encoder.
@@ -369,7 +386,7 @@ int mdp5_vid_encoder_set_split_display(struct drm_encoder *encoder,
void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
{
struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
- struct mdp5_interface *intf = &mdp5_encoder->intf;
+ struct mdp5_interface *intf = mdp5_encoder->intf;
/* TODO: Expand this to set writeback modes too */
if (cmd_mode) {
@@ -385,7 +402,8 @@ void mdp5_encoder_set_intf_mode(struct drm_encoder *encoder, bool cmd_mode)
/* initialize encoder */
struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl)
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
{
struct drm_encoder *encoder = NULL;
struct mdp5_encoder *mdp5_encoder;
@@ -399,9 +417,9 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
goto fail;
}
- memcpy(&mdp5_encoder->intf, intf, sizeof(mdp5_encoder->intf));
encoder = &mdp5_encoder->base;
mdp5_encoder->ctl = ctl;
+ mdp5_encoder->intf = intf;
spin_lock_init(&mdp5_encoder->intf_lock);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 41ccd2a15d3c..d3d6b4cae1e6 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -93,6 +93,7 @@ struct mdp5_state *mdp5_get_state(struct drm_atomic_state *s)
/* Copy state: */
new_state->hwpipe = mdp5_kms->state->hwpipe;
+ new_state->hwmixer = mdp5_kms->state->hwmixer;
if (mdp5_kms->smp)
new_state->smp = mdp5_kms->state->smp;
@@ -165,13 +166,16 @@ static void mdp5_kms_destroy(struct msm_kms *kms)
struct msm_gem_address_space *aspace = mdp5_kms->aspace;
int i;
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++)
+ mdp5_mixer_destroy(mdp5_kms->hwmixers[i]);
+
for (i = 0; i < mdp5_kms->num_hwpipes; i++)
mdp5_pipe_destroy(mdp5_kms->hwpipes[i]);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ msm_gem_address_space_put(aspace);
}
}
@@ -268,19 +272,14 @@ int mdp5_enable(struct mdp5_kms *mdp5_kms)
}
static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
- enum mdp5_intf_type intf_type, int intf_num,
- struct mdp5_ctl *ctl)
+ struct mdp5_interface *intf,
+ struct mdp5_ctl *ctl)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_encoder *encoder;
- struct mdp5_interface intf = {
- .num = intf_num,
- .type = intf_type,
- .mode = MDP5_INTF_MODE_NONE,
- };
- encoder = mdp5_encoder_init(dev, &intf, ctl);
+ encoder = mdp5_encoder_init(dev, intf, ctl);
if (IS_ERR(encoder)) {
dev_err(dev->dev, "failed to construct encoder\n");
return encoder;
@@ -309,32 +308,28 @@ static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
return -EINVAL;
}
-static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
+static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
+ struct mdp5_interface *intf)
{
struct drm_device *dev = mdp5_kms->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct mdp5_cfg_hw *hw_cfg =
- mdp5_cfg_get_hw_config(mdp5_kms->cfg);
- enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
struct mdp5_ctl_manager *ctlm = mdp5_kms->ctlm;
struct mdp5_ctl *ctl;
struct drm_encoder *encoder;
int ret = 0;
- switch (intf_type) {
- case INTF_DISABLED:
- break;
+ switch (intf->type) {
case INTF_eDP:
if (!priv->edp)
break;
- ctl = mdp5_ctlm_request(ctlm, intf_num);
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_eDP, intf_num, ctl);
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -346,13 +341,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->hdmi)
break;
- ctl = mdp5_ctlm_request(ctlm, intf_num);
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_HDMI, intf_num, ctl);
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -362,11 +357,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
case INTF_DSI:
{
- int dsi_id = get_dsi_id_from_intf(hw_cfg, intf_num);
+ const struct mdp5_cfg_hw *hw_cfg =
+ mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ int dsi_id = get_dsi_id_from_intf(hw_cfg, intf->num);
if ((dsi_id >= ARRAY_SIZE(priv->dsi)) || (dsi_id < 0)) {
dev_err(dev->dev, "failed to find dsi from intf %d\n",
- intf_num);
+ intf->num);
ret = -EINVAL;
break;
}
@@ -374,13 +371,13 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
if (!priv->dsi[dsi_id])
break;
- ctl = mdp5_ctlm_request(ctlm, intf_num);
+ ctl = mdp5_ctlm_request(ctlm, intf->num);
if (!ctl) {
ret = -EINVAL;
break;
}
- encoder = construct_encoder(mdp5_kms, INTF_DSI, intf_num, ctl);
+ encoder = construct_encoder(mdp5_kms, intf, ctl);
if (IS_ERR(encoder)) {
ret = PTR_ERR(encoder);
break;
@@ -390,7 +387,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
break;
}
default:
- dev_err(dev->dev, "unknown intf: %d\n", intf_type);
+ dev_err(dev->dev, "unknown intf: %d\n", intf->type);
ret = -EINVAL;
break;
}
@@ -414,8 +411,8 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
- for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
- ret = modeset_init_intf(mdp5_kms, i);
+ for (i = 0; i < mdp5_kms->num_intfs; i++) {
+ ret = modeset_init_intf(mdp5_kms, mdp5_kms->intfs[i]);
if (ret)
goto fail;
}
@@ -425,7 +422,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
* the MDP5 interfaces) than the number of layer mixers present in HW,
* but let's be safe here anyway
*/
- num_crtcs = min(priv->num_encoders, mdp5_cfg->lm.count);
+ num_crtcs = min(priv->num_encoders, mdp5_kms->num_hwmixers);
/*
* Construct planes equaling the number of hw pipes, and CRTCs for the
@@ -744,6 +741,7 @@ fail:
static void mdp5_destroy(struct platform_device *pdev)
{
struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
+ int i;
if (mdp5_kms->ctlm)
mdp5_ctlm_destroy(mdp5_kms->ctlm);
@@ -752,6 +750,9 @@ static void mdp5_destroy(struct platform_device *pdev)
if (mdp5_kms->cfg)
mdp5_cfg_destroy(mdp5_kms->cfg);
+ for (i = 0; i < mdp5_kms->num_intfs; i++)
+ kfree(mdp5_kms->intfs[i]);
+
if (mdp5_kms->rpm_enabled)
pm_runtime_disable(&pdev->dev);
@@ -829,6 +830,64 @@ static int hwpipe_init(struct mdp5_kms *mdp5_kms)
return 0;
}
+static int hwmixer_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ int i, ret;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+ for (i = 0; i < hw_cfg->lm.count; i++) {
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = mdp5_mixer_init(&hw_cfg->lm.instances[i]);
+ if (IS_ERR(mixer)) {
+ ret = PTR_ERR(mixer);
+ dev_err(dev->dev, "failed to construct LM%d (%d)\n",
+ i, ret);
+ return ret;
+ }
+
+ mixer->idx = mdp5_kms->num_hwmixers;
+ mdp5_kms->hwmixers[mdp5_kms->num_hwmixers++] = mixer;
+ }
+
+ return 0;
+}
+
+static int interface_init(struct mdp5_kms *mdp5_kms)
+{
+ struct drm_device *dev = mdp5_kms->dev;
+ const struct mdp5_cfg_hw *hw_cfg;
+ const enum mdp5_intf_type *intf_types;
+ int i;
+
+ hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+ intf_types = hw_cfg->intf.connect;
+
+ for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
+ struct mdp5_interface *intf;
+
+ if (intf_types[i] == INTF_DISABLED)
+ continue;
+
+ intf = kzalloc(sizeof(*intf), GFP_KERNEL);
+ if (!intf) {
+ dev_err(dev->dev, "failed to construct INTF%d\n", i);
+ return -ENOMEM;
+ }
+
+ intf->num = i;
+ intf->type = intf_types[i];
+ intf->mode = MDP5_INTF_MODE_NONE;
+ intf->idx = mdp5_kms->num_intfs;
+ mdp5_kms->intfs[mdp5_kms->num_intfs++] = intf;
+ }
+
+ return 0;
+}
+
static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -929,6 +988,14 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
if (ret)
goto fail;
+ ret = hwmixer_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
+ ret = interface_init(mdp5_kms);
+ if (ret)
+ goto fail;
+
/* set uninit-ed kms */
priv->kms = &mdp5_kms->base.base;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9de471191eba..8bdb7ee4983b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -23,8 +23,9 @@
#include "mdp/mdp_kms.h"
#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
#include "mdp5.xml.h"
-#include "mdp5_ctl.h"
#include "mdp5_pipe.h"
+#include "mdp5_mixer.h"
+#include "mdp5_ctl.h"
#include "mdp5_smp.h"
struct mdp5_state;
@@ -39,6 +40,12 @@ struct mdp5_kms {
unsigned num_hwpipes;
struct mdp5_hw_pipe *hwpipes[SSPP_MAX];
+ unsigned num_hwmixers;
+ struct mdp5_hw_mixer *hwmixers[8];
+
+ unsigned num_intfs;
+ struct mdp5_interface *intfs[5];
+
struct mdp5_cfg_handler *cfg;
uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
@@ -83,6 +90,7 @@ struct mdp5_kms {
*/
struct mdp5_state {
struct mdp5_hw_pipe_state hwpipe;
+ struct mdp5_hw_mixer_state hwmixer;
struct mdp5_smp_state smp;
};
@@ -96,6 +104,7 @@ struct mdp5_plane_state {
struct drm_plane_state base;
struct mdp5_hw_pipe *hwpipe;
+ struct mdp5_hw_pipe *r_hwpipe; /* right hwpipe */
/* aligned with property */
uint8_t premultiplied;
@@ -108,6 +117,28 @@ struct mdp5_plane_state {
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
+struct mdp5_pipeline {
+ struct mdp5_interface *intf;
+ struct mdp5_hw_mixer *mixer;
+ struct mdp5_hw_mixer *r_mixer; /* right mixer */
+};
+
+struct mdp5_crtc_state {
+ struct drm_crtc_state base;
+
+ struct mdp5_ctl *ctl;
+ struct mdp5_pipeline pipeline;
+
+ /* these are derivatives of intf/mixer state in mdp5_pipeline */
+ u32 vblank_irqmask;
+ u32 err_irqmask;
+ u32 pp_done_irqmask;
+
+ bool cmd_mode;
+};
+#define to_mdp5_crtc_state(x) \
+ container_of(x, struct mdp5_crtc_state, base)
+
enum mdp5_intf_mode {
MDP5_INTF_MODE_NONE = 0,
@@ -121,6 +152,7 @@ enum mdp5_intf_mode {
};
struct mdp5_interface {
+ int idx;
int num; /* display interface number */
enum mdp5_intf_type type;
enum mdp5_intf_mode mode;
@@ -128,11 +160,11 @@ struct mdp5_interface {
struct mdp5_encoder {
struct drm_encoder base;
- struct mdp5_interface intf;
spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
bool enabled;
uint32_t bsc;
+ struct mdp5_interface *intf;
struct mdp5_ctl *ctl;
};
#define to_mdp5_encoder(x) container_of(x, struct mdp5_encoder, base)
@@ -197,8 +229,8 @@ static inline uint32_t intf2err(int intf_num)
}
}
-#define GET_PING_PONG_ID(layer_mixer) ((layer_mixer == 5) ? 3 : layer_mixer)
-static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
+static inline uint32_t intf2vblank(struct mdp5_hw_mixer *mixer,
+ struct mdp5_interface *intf)
{
/*
* In case of DSI Command Mode, the Ping Pong's read pointer IRQ
@@ -208,7 +240,7 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
if ((intf->type == INTF_DSI) &&
(intf->mode == MDP5_INTF_DSI_MODE_COMMAND))
- return MDP5_IRQ_PING_PONG_0_RD_PTR << GET_PING_PONG_ID(lm);
+ return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp;
if (intf->type == INTF_WB)
return MDP5_IRQ_WB_2_DONE;
@@ -222,9 +254,9 @@ static inline uint32_t intf2vblank(int lm, struct mdp5_interface *intf)
}
}
-static inline uint32_t lm2ppdone(int lm)
+static inline uint32_t lm2ppdone(struct mdp5_hw_mixer *mixer)
{
- return MDP5_IRQ_PING_PONG_0_DONE << GET_PING_PONG_ID(lm);
+ return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp;
}
int mdp5_disable(struct mdp5_kms *mdp5_kms);
@@ -243,15 +275,16 @@ void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev,
enum drm_plane_type type);
struct mdp5_ctl *mdp5_crtc_get_ctl(struct drm_crtc *crtc);
uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
-int mdp5_crtc_get_lm(struct drm_crtc *crtc);
-void mdp5_crtc_set_pipeline(struct drm_crtc *crtc,
- struct mdp5_interface *intf, struct mdp5_ctl *ctl);
+struct mdp5_hw_mixer *mdp5_crtc_get_mixer(struct drm_crtc *crtc);
+struct mdp5_pipeline *mdp5_crtc_get_pipeline(struct drm_crtc *crtc);
+void mdp5_crtc_set_pipeline(struct drm_crtc *crtc);
void mdp5_crtc_wait_for_commit_done(struct drm_crtc *crtc);
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
struct drm_plane *plane,
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
new file mode 100644
index 000000000000..8a00991f03c7
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp5_kms.h"
+
+/*
+ * As of now, there are only 2 combinations possible for source split:
+ *
+ * Left | Right
+ * -----|------
+ * LM0 | LM1
+ * LM2 | LM5
+ *
+ */
+static int lm_right_pair[] = { 1, -1, 5, -1, -1, -1 };
+
+static int get_right_pair_idx(struct mdp5_kms *mdp5_kms, int lm)
+{
+ int i;
+ int pair_lm;
+
+ pair_lm = lm_right_pair[lm];
+ if (pair_lm < 0)
+ return -EINVAL;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *mixer = mdp5_kms->hwmixers[i];
+
+ if (mixer->lm == pair_lm)
+ return mixer->idx;
+ }
+
+ return -1;
+}
+
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer)
+{
+ struct msm_drm_private *priv = s->dev->dev_private;
+ struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(priv->kms));
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_mixer_state *new_state;
+ int i;
+
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ new_state = &state->hwmixer;
+
+ for (i = 0; i < mdp5_kms->num_hwmixers; i++) {
+ struct mdp5_hw_mixer *cur = mdp5_kms->hwmixers[i];
+
+ /*
+ * skip if already in-use by a different CRTC. If there is a
+ * mixer already assigned to this CRTC, it means this call is
+ * a request to get an additional right mixer. Assume that the
+ * existing mixer is the 'left' one, and try to see if we can
+ * get its corresponding 'right' pair.
+ */
+ if (new_state->hwmixer_to_crtc[cur->idx] &&
+ new_state->hwmixer_to_crtc[cur->idx] != crtc)
+ continue;
+
+ /* skip if doesn't support some required caps: */
+ if (caps & ~cur->caps)
+ continue;
+
+ if (r_mixer) {
+ int pair_idx;
+
+ pair_idx = get_right_pair_idx(mdp5_kms, cur->lm);
+ if (pair_idx < 0)
+ return -EINVAL;
+
+ if (new_state->hwmixer_to_crtc[pair_idx])
+ continue;
+
+ *r_mixer = mdp5_kms->hwmixers[pair_idx];
+ }
+
+ /*
+ * prefer a pair-able LM over an unpairable one. We can
+ * switch the CRTC from Normal mode to Source Split mode
+ * without requiring a full modeset if we had already
+ * assigned this CRTC a pair-able LM.
+ *
+ * TODO: There will be assignment sequences which would
+ * result in the CRTC requiring a full modeset, even
+ * if we have the LM resources to prevent it. For a platform
+ * with a few displays, we don't run out of pair-able LMs
+ * so easily. For now, ignore the possibility of requiring
+ * a full modeset.
+ */
+ if (!(*mixer) || cur->caps & MDP_LM_CAP_PAIR)
+ *mixer = cur;
+ }
+
+ if (!(*mixer))
+ return -ENOMEM;
+
+ if (r_mixer && !(*r_mixer))
+ return -ENOMEM;
+
+ DBG("assigning Layer Mixer %d to crtc %s", (*mixer)->lm, crtc->name);
+
+ new_state->hwmixer_to_crtc[(*mixer)->idx] = crtc;
+ if (r_mixer) {
+ DBG("assigning Right Layer Mixer %d to crtc %s", (*r_mixer)->lm,
+ crtc->name);
+ new_state->hwmixer_to_crtc[(*r_mixer)->idx] = crtc;
+ }
+
+ return 0;
+}
+
+void mdp5_mixer_release(struct drm_atomic_state *s, struct mdp5_hw_mixer *mixer)
+{
+ struct mdp5_state *state = mdp5_get_state(s);
+ struct mdp5_hw_mixer_state *new_state = &state->hwmixer;
+
+ if (!mixer)
+ return;
+
+ if (WARN_ON(!new_state->hwmixer_to_crtc[mixer->idx]))
+ return;
+
+ DBG("%s: release from crtc %s", mixer->name,
+ new_state->hwmixer_to_crtc[mixer->idx]->name);
+
+ new_state->hwmixer_to_crtc[mixer->idx] = NULL;
+}
+
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *mixer)
+{
+ kfree(mixer);
+}
+
+static const char * const mixer_names[] = {
+ "LM0", "LM1", "LM2", "LM3", "LM4", "LM5",
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm)
+{
+ struct mdp5_hw_mixer *mixer;
+
+ mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
+ if (!mixer)
+ return ERR_PTR(-ENOMEM);
+
+ mixer->name = mixer_names[lm->id];
+ mixer->lm = lm->id;
+ mixer->caps = lm->caps;
+ mixer->pp = lm->pp;
+ mixer->dspp = lm->dspp;
+ mixer->flush_mask = mdp_ctl_flush_mask_lm(lm->id);
+
+ return mixer;
+}
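A rough sketch of how a CRTC's atomic_check might use mdp5_mixer_assign(), requesting the paired right mixer only when source split is needed; the need_r_mixer flag, the caps value and the zero-initialized pipeline are assumptions made for this example:

	uint32_t caps = MDP_LM_CAP_DISPLAY;
	int ret;

	/* ask for a pair-able LM up front if a right mixer will be needed */
	if (need_r_mixer)
		caps |= MDP_LM_CAP_PAIR;

	/* pipeline->mixer/r_mixer start out NULL in the duplicated state */
	ret = mdp5_mixer_assign(state, crtc, caps, &pipeline->mixer,
				need_r_mixer ? &pipeline->r_mixer : NULL);
	if (ret)
		return ret;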
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
new file mode 100644
index 000000000000..9be94f567fbd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mixer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP5_LM_H__
+#define __MDP5_LM_H__
+
+/* represents a hw Layer Mixer, one (or more) of which is dynamically assigned to a crtc */
+struct mdp5_hw_mixer {
+ int idx;
+
+ const char *name;
+
+ int lm; /* the LM instance # */
+ uint32_t caps;
+ int pp;
+ int dspp;
+
+ uint32_t flush_mask; /* used to commit LM registers */
+};
+
+/* global atomic state of assignment between CRTCs and Layer Mixers: */
+struct mdp5_hw_mixer_state {
+ struct drm_crtc *hwmixer_to_crtc[8];
+};
+
+struct mdp5_hw_mixer *mdp5_mixer_init(const struct mdp5_lm_instance *lm);
+void mdp5_mixer_destroy(struct mdp5_hw_mixer *lm);
+int mdp5_mixer_assign(struct drm_atomic_state *s, struct drm_crtc *crtc,
+ uint32_t caps, struct mdp5_hw_mixer **mixer,
+ struct mdp5_hw_mixer **r_mixer);
+void mdp5_mixer_release(struct drm_atomic_state *s,
+ struct mdp5_hw_mixer *mixer);
+
+#endif /* __MDP5_LM_H__ */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
index 35c4dabb0c0c..2bfac3712685 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.c
@@ -135,7 +135,5 @@ struct mdp5_hw_pipe *mdp5_pipe_init(enum mdp5_pipe pipe,
hwpipe->caps = caps;
hwpipe->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
- spin_lock_init(&hwpipe->pipe_lock);
-
return hwpipe;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
index 611da7a660c9..924c3e6f9517 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_pipe.h
@@ -18,7 +18,8 @@
#ifndef __MDP5_PIPE_H__
#define __MDP5_PIPE_H__
-#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
+/* TODO: Add SSPP_MAX in mdp5.xml.h */
+#define SSPP_MAX (SSPP_CURSOR1 + 1)
/* represents a hw pipe, which is dynamically assigned to a plane */
struct mdp5_hw_pipe {
@@ -27,7 +28,6 @@ struct mdp5_hw_pipe {
const char *name;
enum mdp5_pipe pipe;
- spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
uint32_t reg_offset;
uint32_t caps;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 60a5451ae0b9..a38c5fe6cc19 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -22,6 +22,8 @@
struct mdp5_plane {
struct drm_plane base;
+ spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
+
uint32_t nformats;
uint32_t formats[32];
};
@@ -40,9 +42,6 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx);
-static void set_scanout_locked(struct drm_plane *plane,
- struct drm_framebuffer *fb);
-
static struct mdp5_kms *get_kms(struct drm_plane *plane)
{
struct msm_drm_private *priv = plane->dev->dev_private;
@@ -178,9 +177,14 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
const struct drm_plane_state *state)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);
+ struct mdp5_kms *mdp5_kms = get_kms(state->plane);
drm_printf(p, "\thwpipe=%s\n", pstate->hwpipe ?
pstate->hwpipe->name : "(null)");
+ if (mdp5_kms->caps & MDP_CAP_SRC_SPLIT)
+ drm_printf(p, "\tright-hwpipe=%s\n",
+ pstate->r_hwpipe ? pstate->r_hwpipe->name :
+ "(null)");
drm_printf(p, "\tpremultiplied=%u\n", pstate->premultiplied);
drm_printf(p, "\tzpos=%u\n", pstate->zpos);
drm_printf(p, "\talpha=%u\n", pstate->alpha);
@@ -300,7 +304,9 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
struct drm_plane_state *old_state = plane->state;
struct mdp5_cfg *config = mdp5_cfg_get_config(get_kms(plane)->cfg);
bool new_hwpipe = false;
+ bool need_right_hwpipe = false;
uint32_t max_width, max_height;
+ bool out_of_bounds = false;
uint32_t caps = 0;
struct drm_rect clip;
int min_scale, max_scale;
@@ -313,7 +319,23 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
max_height = config->hw->lm.max_height << 16;
/* Make sure source dimensions are within bounds. */
- if ((state->src_w > max_width) || (state->src_h > max_height)) {
+ if (state->src_h > max_height)
+ out_of_bounds = true;
+
+ if (state->src_w > max_width) {
+ /* If source split is supported, we can go up to 2x
+ * the max LM width, but we'd need to stage another
+ * hwpipe to the right LM. So, the drm_plane would
+ * consist of 2 hwpipes.
+ */
+ if (config->hw->mdp.caps & MDP_CAP_SRC_SPLIT &&
+ (state->src_w <= 2 * max_width))
+ need_right_hwpipe = true;
+ else
+ out_of_bounds = true;
+ }
+
+ if (out_of_bounds) {
struct drm_rect src = drm_plane_state_src(state);
DBG("Invalid source size "DRM_RECT_FP_FMT,
DRM_RECT_FP_ARG(&src));
@@ -364,6 +386,15 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
if (!mdp5_state->hwpipe || (caps & ~mdp5_state->hwpipe->caps))
new_hwpipe = true;
+ /*
+ * (re)allocate hw pipes if we're either requesting 2 hw pipes
+ * or we're switching from 2 hw pipes to 1 hw pipe because the
+ * new src_w can be supported by 1 hw pipe itself.
+ */
+ if ((need_right_hwpipe && !mdp5_state->r_hwpipe) ||
+ (!need_right_hwpipe && mdp5_state->r_hwpipe))
+ new_hwpipe = true;
+
if (mdp5_kms->smp) {
const struct mdp_format *format =
to_mdp_format(msm_framebuffer_format(state->fb));
@@ -382,13 +413,36 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state,
* it available for other planes?
*/
struct mdp5_hw_pipe *old_hwpipe = mdp5_state->hwpipe;
+ struct mdp5_hw_pipe *old_right_hwpipe =
+ mdp5_state->r_hwpipe;
+
mdp5_state->hwpipe = mdp5_pipe_assign(state->state,
plane, caps, blkcfg);
if (IS_ERR(mdp5_state->hwpipe)) {
DBG("%s: failed to assign hwpipe!", plane->name);
return PTR_ERR(mdp5_state->hwpipe);
}
+
+ if (need_right_hwpipe) {
+ mdp5_state->r_hwpipe =
+ mdp5_pipe_assign(state->state, plane,
+ caps, blkcfg);
+ if (IS_ERR(mdp5_state->r_hwpipe)) {
+ DBG("%s: failed to assign right hwpipe",
+ plane->name);
+ return PTR_ERR(mdp5_state->r_hwpipe);
+ }
+ } else {
+ /*
+ * set it to NULL so that the driver knows we
+ * don't have a right hwpipe when committing a
+ * new state
+ */
+ mdp5_state->r_hwpipe = NULL;
+ }
+
mdp5_pipe_release(state->state, old_hwpipe);
+ mdp5_pipe_release(state->state, old_right_hwpipe);
}
}
@@ -437,13 +491,10 @@ static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
.atomic_update = mdp5_plane_atomic_update,
};
-static void set_scanout_locked(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+static void set_scanout_locked(struct mdp5_kms *mdp5_kms,
+ enum mdp5_pipe pipe,
+ struct drm_framebuffer *fb)
{
- struct mdp5_kms *mdp5_kms = get_kms(plane);
- struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(plane->state)->hwpipe;
- enum mdp5_pipe pipe = hwpipe->pipe;
-
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
@@ -460,8 +511,6 @@ static void set_scanout_locked(struct drm_plane *plane,
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 3));
-
- plane->fb = fb;
}
/* Note: mdp5_plane->pipe_lock must be locked */
@@ -714,21 +763,129 @@ static void mdp5_write_pixel_ext(struct mdp5_kms *mdp5_kms, enum mdp5_pipe pipe,
}
}
+struct pixel_ext {
+ int left[COMP_MAX];
+ int right[COMP_MAX];
+ int top[COMP_MAX];
+ int bottom[COMP_MAX];
+};
+
+struct phase_step {
+ u32 x[COMP_MAX];
+ u32 y[COMP_MAX];
+};
+
+static void mdp5_hwpipe_mode_set(struct mdp5_kms *mdp5_kms,
+ struct mdp5_hw_pipe *hwpipe,
+ struct drm_framebuffer *fb,
+ struct phase_step *step,
+ struct pixel_ext *pe,
+ u32 scale_config, u32 hdecm, u32 vdecm,
+ bool hflip, bool vflip,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ u32 src_img_w, u32 src_img_h,
+ u32 src_x, u32 src_y,
+ u32 src_w, u32 src_h)
+{
+ enum mdp5_pipe pipe = hwpipe->pipe;
+ bool has_pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
+ const struct mdp_format *format =
+ to_mdp_format(msm_framebuffer_format(fb));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
+ MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_img_w) |
+ MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_img_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
+ MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
+ MDP5_PIPE_SRC_XY_X(src_x) |
+ MDP5_PIPE_SRC_XY_Y(src_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
+ MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
+ MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
+ MDP5_PIPE_OUT_XY_X(crtc_x) |
+ MDP5_PIPE_OUT_XY_Y(crtc_y));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
+ MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
+ MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
+ MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
+ MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
+ (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
+ (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
+ COND(has_pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
+ MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
+
+ /* not using secure mode: */
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
+ mdp5_write_pixel_ext(mdp5_kms, pipe, format,
+ src_w, pe->left, pe->right,
+ src_h, pe->top, pe->bottom);
+
+ if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
+ step->x[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
+ step->y[COMP_0]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
+ step->x[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
+ step->y[COMP_1_2]);
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
+ MDP5_PIPE_DECIMATION_VERT(vdecm) |
+ MDP5_PIPE_DECIMATION_HORZ(hdecm));
+ mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe),
+ scale_config);
+ }
+
+ if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
+ if (MDP_FORMAT_IS_YUV(format))
+ csc_enable(mdp5_kms, pipe,
+ mdp_get_default_csc_cfg(CSC_YUV2RGB));
+ else
+ csc_disable(mdp5_kms, pipe);
+ }
+
+ set_scanout_locked(mdp5_kms, pipe, fb);
+}
static int mdp5_plane_mode_set(struct drm_plane *plane,
struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_rect *src, struct drm_rect *dest)
{
+ struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
struct drm_plane_state *pstate = plane->state;
struct mdp5_hw_pipe *hwpipe = to_mdp5_plane_state(pstate)->hwpipe;
struct mdp5_kms *mdp5_kms = get_kms(plane);
enum mdp5_pipe pipe = hwpipe->pipe;
+ struct mdp5_hw_pipe *right_hwpipe;
const struct mdp_format *format;
uint32_t nplanes, config = 0;
- uint32_t phasex_step[COMP_MAX] = {0,}, phasey_step[COMP_MAX] = {0,};
- bool pe = hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT;
- int pe_left[COMP_MAX], pe_right[COMP_MAX];
- int pe_top[COMP_MAX], pe_bottom[COMP_MAX];
+ struct phase_step step = { 0 };
+ struct pixel_ext pe = { 0 };
uint32_t hdecm = 0, vdecm = 0;
uint32_t pix_format;
unsigned int rotation;
@@ -737,6 +894,9 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
unsigned int crtc_w, crtc_h;
uint32_t src_x, src_y;
uint32_t src_w, src_h;
+ uint32_t src_img_w, src_img_h;
+ uint32_t src_x_r;
+ int crtc_x_r;
unsigned long flags;
int ret;
@@ -765,23 +925,41 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
src_w = src_w >> 16;
src_h = src_h >> 16;
+ src_img_w = min(fb->width, src_w);
+ src_img_h = min(fb->height, src_h);
+
DBG("%s: FB[%u] %u,%u,%u,%u -> CRTC[%u] %d,%d,%u,%u", plane->name,
fb->base.id, src_x, src_y, src_w, src_h,
crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
- ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, phasex_step);
+ right_hwpipe = to_mdp5_plane_state(pstate)->r_hwpipe;
+ if (right_hwpipe) {
+ /*
+ * if the plane comprises 2 hw pipes, assume that the width
+ * is split equally across them. The only parameters that vary
+ * between the 2 pipes are src_x and crtc_x
+ */
+ crtc_w /= 2;
+ src_w /= 2;
+ src_img_w /= 2;
+
+ crtc_x_r = crtc_x + crtc_w;
+ src_x_r = src_x + src_w;
+ }
+
+ ret = calc_scalex_steps(plane, pix_format, src_w, crtc_w, step.x);
if (ret)
return ret;
- ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, phasey_step);
+ ret = calc_scaley_steps(plane, pix_format, src_h, crtc_h, step.y);
if (ret)
return ret;
if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT) {
- calc_pixel_ext(format, src_w, crtc_w, phasex_step,
- pe_left, pe_right, true);
- calc_pixel_ext(format, src_h, crtc_h, phasey_step,
- pe_top, pe_bottom, false);
+ calc_pixel_ext(format, src_w, crtc_w, step.x,
+ pe.left, pe.right, true);
+ calc_pixel_ext(format, src_h, crtc_h, step.y,
+ pe.top, pe.bottom, false);
}
/* TODO calc hdecm, vdecm */
@@ -798,86 +976,23 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
hflip = !!(rotation & DRM_REFLECT_X);
vflip = !!(rotation & DRM_REFLECT_Y);
- spin_lock_irqsave(&hwpipe->pipe_lock, flags);
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
- MDP5_PIPE_SRC_IMG_SIZE_WIDTH(min(fb->width, src_w)) |
- MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(min(fb->height, src_h)));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_SIZE(pipe),
- MDP5_PIPE_SRC_SIZE_WIDTH(src_w) |
- MDP5_PIPE_SRC_SIZE_HEIGHT(src_h));
+ spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_XY(pipe),
- MDP5_PIPE_SRC_XY_X(src_x) |
- MDP5_PIPE_SRC_XY_Y(src_y));
+ mdp5_hwpipe_mode_set(mdp5_kms, hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x, src_y, src_w, src_h);
+ if (right_hwpipe)
+ mdp5_hwpipe_mode_set(mdp5_kms, right_hwpipe, fb, &step, &pe,
+ config, hdecm, vdecm, hflip, vflip,
+ crtc_x_r, crtc_y, crtc_w, crtc_h,
+ src_img_w, src_img_h,
+ src_x_r, src_y, src_w, src_h);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_SIZE(pipe),
- MDP5_PIPE_OUT_SIZE_WIDTH(crtc_w) |
- MDP5_PIPE_OUT_SIZE_HEIGHT(crtc_h));
+ spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_OUT_XY(pipe),
- MDP5_PIPE_OUT_XY_X(crtc_x) |
- MDP5_PIPE_OUT_XY_Y(crtc_y));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
- MDP5_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
- MDP5_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
- MDP5_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
- MDP5_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
- COND(format->alpha_enable, MDP5_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
- MDP5_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
- MDP5_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
- COND(format->unpack_tight, MDP5_PIPE_SRC_FORMAT_UNPACK_TIGHT) |
- MDP5_PIPE_SRC_FORMAT_FETCH_TYPE(format->fetch_type) |
- MDP5_PIPE_SRC_FORMAT_CHROMA_SAMP(format->chroma_sample));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_UNPACK(pipe),
- MDP5_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
- MDP5_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
- MDP5_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
- MDP5_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
-
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_OP_MODE(pipe),
- (hflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_LR : 0) |
- (vflip ? MDP5_PIPE_SRC_OP_MODE_FLIP_UD : 0) |
- COND(pe, MDP5_PIPE_SRC_OP_MODE_SW_PIX_EXT_OVERRIDE) |
- MDP5_PIPE_SRC_OP_MODE_BWC(BWC_LOSSLESS));
-
- /* not using secure mode: */
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_ADDR_SW_STATUS(pipe), 0);
-
- if (hwpipe->caps & MDP_PIPE_CAP_SW_PIX_EXT)
- mdp5_write_pixel_ext(mdp5_kms, pipe, format,
- src_w, pe_left, pe_right,
- src_h, pe_top, pe_bottom);
-
- if (hwpipe->caps & MDP_PIPE_CAP_SCALE) {
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_X(pipe),
- phasex_step[COMP_0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_PHASE_STEP_Y(pipe),
- phasey_step[COMP_0]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_X(pipe),
- phasex_step[COMP_1_2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CR_PHASE_STEP_Y(pipe),
- phasey_step[COMP_1_2]);
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_DECIMATION(pipe),
- MDP5_PIPE_DECIMATION_VERT(vdecm) |
- MDP5_PIPE_DECIMATION_HORZ(hdecm));
- mdp5_write(mdp5_kms, REG_MDP5_PIPE_SCALE_CONFIG(pipe), config);
- }
-
- if (hwpipe->caps & MDP_PIPE_CAP_CSC) {
- if (MDP_FORMAT_IS_YUV(format))
- csc_enable(mdp5_kms, pipe,
- mdp_get_default_csc_cfg(CSC_YUV2RGB));
- else
- csc_disable(mdp5_kms, pipe);
- }
-
- set_scanout_locked(plane, fb);
-
- spin_unlock_irqrestore(&hwpipe->pipe_lock, flags);
+ plane->fb = fb;
return ret;
}
@@ -934,6 +1049,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
if (new_plane_state->visible) {
struct mdp5_ctl *ctl;
+ struct mdp5_pipeline *pipeline = mdp5_crtc_get_pipeline(crtc);
ret = mdp5_plane_mode_set(plane, crtc, fb,
&new_plane_state->src,
@@ -942,7 +1058,7 @@ static int mdp5_update_cursor_plane_legacy(struct drm_plane *plane,
ctl = mdp5_crtc_get_ctl(crtc);
- mdp5_ctl_commit(ctl, mdp5_plane_get_flush(plane));
+ mdp5_ctl_commit(ctl, pipeline, mdp5_plane_get_flush(plane));
}
*to_mdp5_plane_state(plane_state) =
@@ -959,6 +1075,10 @@ slow:
src_x, src_y, src_w, src_h, ctx);
}
+/*
+ * Use this func and the one below only after the atomic state has been
+ * successfully swapped
+ */
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
@@ -969,14 +1089,30 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
return pstate->hwpipe->pipe;
}
+enum mdp5_pipe mdp5_plane_right_pipe(struct drm_plane *plane)
+{
+ struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+
+ if (!pstate->r_hwpipe)
+ return SSPP_NONE;
+
+ return pstate->r_hwpipe->pipe;
+}
+
uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
{
struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
+ u32 mask;
if (WARN_ON(!pstate->hwpipe))
return 0;
- return pstate->hwpipe->flush_mask;
+ mask = pstate->hwpipe->flush_mask;
+
+ if (pstate->r_hwpipe)
+ mask |= pstate->r_hwpipe->flush_mask;
+
+ return mask;
}
/* initialize plane */
@@ -998,6 +1134,8 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
mdp5_plane->nformats = mdp_get_formats(mdp5_plane->formats,
ARRAY_SIZE(mdp5_plane->formats), false);
+ spin_lock_init(&mdp5_plane->pipe_lock);
+
if (type == DRM_PLANE_TYPE_CURSOR)
ret = drm_universal_plane_init(dev, plane, 0xff,
&mdp5_cursor_plane_funcs,
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 7574cdfef418..1185487e7e5e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -104,6 +104,7 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
#define MDP_CAP_DSC BIT(1) /* VESA Display Stream Compression */
#define MDP_CAP_CDM BIT(2) /* Chroma Down Module (HDMI 2.0 YUV) */
+#define MDP_CAP_SRC_SPLIT BIT(3) /* Source Split of SSPPs */
/* MDP pipe capabilities */
#define MDP_PIPE_CAP_HFLIP BIT(0)
@@ -114,6 +115,11 @@ const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
#define MDP_PIPE_CAP_SW_PIX_EXT BIT(5)
#define MDP_PIPE_CAP_CURSOR BIT(6)
+/* MDP layer mixer caps */
+#define MDP_LM_CAP_DISPLAY BIT(0)
+#define MDP_LM_CAP_WB BIT(1)
+#define MDP_LM_CAP_PAIR BIT(2)
+
static inline bool pipe_supports_yuv(uint32_t pipe_caps)
{
return (pipe_caps & MDP_PIPE_CAP_SCALE) &&
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 4f35d4eb85d0..1855182c76ce 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -28,7 +28,9 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->show(gpu, m);
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
return 0;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9208e67be453..87b5695d4034 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -55,14 +55,13 @@ int msm_register_address_space(struct drm_device *dev,
struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_aspaces++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
+ if (WARN_ON(priv->num_aspaces >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
- priv->aspace[idx] = aspace;
+ priv->aspace[priv->num_aspaces] = aspace;
- return idx;
+ return priv->num_aspaces++;
}
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
@@ -265,6 +264,8 @@ static int msm_drm_uninit(struct device *dev)
if (gpu) {
mutex_lock(&ddev->struct_mutex);
+ // XXX what do we do here?
+ //pm_runtime_enable(&pdev->dev);
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&ddev->struct_mutex);
gpu->funcs->destroy(gpu);
@@ -539,7 +540,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
@@ -812,7 +813,7 @@ static struct drm_driver msm_driver = {
DRIVER_ATOMIC |
DRIVER_MODESET,
.open = msm_open,
- .preclose = msm_preclose,
+ .postclose = msm_postclose,
.lastclose = msm_lastclose,
.irq_handler = msm_irq,
.irq_preinstall = msm_irq_preinstall,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b885c3d5ae4d..28b6f9ba5066 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -191,7 +191,8 @@ void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
-void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 59811f29607d..68e509b3b9e4 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -812,6 +812,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
size = PAGE_ALIGN(size);
+ /* Disallow zero sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (size == 0)
+ return ERR_PTR(-EINVAL);
+
ret = msm_gem_new_impl(dev, size, flags, NULL, &obj);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 7d529516b332..1b4cf20043ea 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,6 +18,7 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
@@ -31,6 +32,7 @@ struct msm_gem_address_space {
*/
struct drm_mm mm;
struct msm_mmu *mmu;
+ struct kref kref;
};
struct msm_gem_vma {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1172fe7a9252..1c545ebe6a5a 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -404,6 +404,24 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (MSM_PIPE_FLAGS(args->flags) & ~MSM_SUBMIT_FLAGS)
return -EINVAL;
+ if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
+ in_fence = sync_file_get_fence(args->fence_fd);
+
+ if (!in_fence)
+ return -EINVAL;
+
+ /* TODO if we get an array-fence due to userspace merging multiple
+ * fences, we need a way to determine if all the backing fences
+ * are from our own context..
+ */
+
+ if (in_fence->context != gpu->fctx->context) {
+ ret = dma_fence_wait(in_fence, true);
+ if (ret)
+ return ret;
+ }
+ }
+
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
@@ -431,27 +449,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto out;
- if (args->flags & MSM_SUBMIT_FENCE_FD_IN) {
- in_fence = sync_file_get_fence(args->fence_fd);
-
- if (!in_fence) {
- ret = -EINVAL;
- goto out;
- }
-
- /* TODO if we get an array-fence due to userspace merging multiple
- * fences, we need a way to determine if all the backing fences
- * are from our own context..
- */
-
- if (in_fence->context != gpu->fctx->context) {
- ret = dma_fence_wait(in_fence, true);
- if (ret)
- goto out;
- }
-
- }
-
if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
ret = submit_fence_sync(submit);
if (ret)
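
For reference, a minimal sketch of the explicit in-fence handling this hunk moves ahead of the locking: the sync_file fd is resolved to a dma_fence and, when the fence comes from a foreign context, waited on before any driver locks are taken. The helper name and the my_context parameter are illustrative only.

#include <linux/sync_file.h>
#include <linux/dma-fence.h>

static int wait_in_fence(int fence_fd, u64 my_context)
{
	struct dma_fence *fence;
	int ret = 0;

	fence = sync_file_get_fence(fence_fd);
	if (!fence)
		return -EINVAL;

	/* Only wait when the fence was not produced by our own ring. */
	if (fence->context != my_context)
		ret = dma_fence_wait(fence, true);	/* interruptible wait */

	dma_fence_put(fence);
	return ret;
}
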
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index b654eca7636a..f285d7e210db 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -19,6 +19,25 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ kfree(aspace);
+}
+
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt)
@@ -34,6 +53,8 @@ msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
drm_mm_remove_node(&vma->node);
vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
}
int
@@ -57,16 +78,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
size, IOMMU_READ | IOMMU_WRITE);
}
- return ret;
-}
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
-void
-msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
-{
- drm_mm_takedown(&aspace->mm);
- if (aspace->mmu)
- aspace->mmu->funcs->destroy(aspace->mmu);
- kfree(aspace);
+ return ret;
}
struct msm_gem_address_space *
@@ -85,5 +100,7 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
(domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+ kref_init(&aspace->kref);
+
return aspace;
}
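
For reference, a minimal sketch of the kref lifecycle adopted above, with a hypothetical struct foo standing in for msm_gem_address_space: the count starts at one on create, each mapping takes an extra reference, and the release callback runs only when the last reference is dropped.

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref kref;
	/* ... resources owned by the object ... */
};

static void foo_release(struct kref *kref)
{
	struct foo *f = container_of(kref, struct foo, kref);

	/* tear down owned resources, then free the object */
	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	kref_init(&f->kref);	/* refcount starts at 1 */
	return f;
}

static void foo_get(struct foo *f)
{
	kref_get(&f->kref);	/* e.g. when a mapping starts using it */
}

static void foo_put(struct foo *f)
{
	if (f)
		kref_put(&f->kref, foo_release);
}
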
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 99e05aacbee1..97b9c38c6b3f 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -93,18 +93,18 @@ static int enable_clk(struct msm_gpu *gpu)
{
int i;
- if (gpu->grp_clks[0] && gpu->fast_rate)
- clk_set_rate(gpu->grp_clks[0], gpu->fast_rate);
+ if (gpu->core_clk && gpu->fast_rate)
+ clk_set_rate(gpu->core_clk, gpu->fast_rate);
/* Set the RBBM timer rate to 19.2Mhz */
- if (gpu->grp_clks[2])
- clk_set_rate(gpu->grp_clks[2], 19200000);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_prepare(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -115,19 +115,24 @@ static int disable_clk(struct msm_gpu *gpu)
{
int i;
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i >= 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
- if (gpu->grp_clks[0] && gpu->slow_rate)
- clk_set_rate(gpu->grp_clks[0], gpu->slow_rate);
+ /*
+ * Set the clock to a deliberately low rate. On older targets the clock
+ * speed had to be non zero to avoid problems. On newer targets this
+ * will be rounded down to zero anyway so it all works out.
+ */
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, 27000000);
- if (gpu->grp_clks[2])
- clk_set_rate(gpu->grp_clks[2], 0);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
return 0;
}
@@ -152,18 +157,9 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (gpu->active_cnt++ > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt <= 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = enable_pwrrail(gpu);
if (ret)
@@ -177,23 +173,16 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ gpu->needs_hw_init = true;
+
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (--gpu->active_cnt > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt < 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = disable_axi(gpu);
if (ret)
@@ -210,53 +199,20 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
- struct drm_device *dev = gpu->dev;
-
- if (gpu->inactive)
- return;
-
- DBG("%s: inactive!\n", gpu->name);
- mutex_lock(&dev->struct_mutex);
- if (!(msm_gpu_active(gpu) || gpu->inactive)) {
- disable_axi(gpu);
- disable_clk(gpu);
- gpu->inactive = true;
- }
- mutex_unlock(&dev->struct_mutex);
-}
+ int ret;
-static void inactive_handler(unsigned long data)
-{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
- struct msm_drm_private *priv = gpu->dev->dev_private;
+ if (!gpu->needs_hw_init)
+ return 0;
- queue_work(priv->wq, &gpu->inactive_work);
-}
+ disable_irq(gpu->irq);
+ ret = gpu->funcs->hw_init(gpu);
+ if (!ret)
+ gpu->needs_hw_init = false;
+ enable_irq(gpu->irq);
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- del_timer(&gpu->inactive_timer);
- if (gpu->inactive) {
- enable_clk(gpu);
- enable_axi(gpu);
- gpu->inactive = false;
- }
-}
-
-static void inactive_start(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- mod_timer(&gpu->inactive_timer,
- round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+ return ret;
}
/*
@@ -296,8 +252,9 @@ static void recover_worker(struct work_struct *work)
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
/* replay the remaining submits after the one that hung: */
list_for_each_entry(submit, &gpu->submit_list, node) {
@@ -400,6 +357,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
unsigned long flags;
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
@@ -413,6 +372,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
@@ -458,6 +418,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
drm_gem_object_unreference(&msm_obj->base);
}
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
msm_gem_submit_free(submit);
}
@@ -492,9 +454,6 @@ static void retire_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
-
- if (!msm_gpu_active(gpu))
- inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
@@ -515,7 +474,9 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ msm_gpu_hw_init(gpu);
list_add_tail(&submit->node, &gpu->submit_list);
@@ -559,16 +520,52 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static const char *clk_names[] = {
- "core", "iface", "rbbmtimer", "mem", "mem_iface", "alt_mem_iface",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+ struct clk *clk = devm_clk_get(dev, name);
+
+ return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+ if (gpu->nr_clocks < 1) {
+ gpu->nr_clocks = 0;
+ return 0;
+ }
+
+ gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
+ GFP_KERNEL);
+ if (!gpu->grp_clks)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ gpu->grp_clks[i] = get_clock(dev, name);
+
+ /* Remember the key clocks that we need to control later */
+ if (!strcmp(name, "core"))
+ gpu->core_clk = gpu->grp_clks[i];
+ else if (!strcmp(name, "rbbmtimer"))
+ gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+ ++i;
+ }
+
+ return 0;
+}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
const char *name, const char *ioname, const char *irqname, int ringsz)
{
struct iommu_domain *iommu;
- int i, ret;
+ int ret;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -576,7 +573,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->inactive = true;
gpu->fctx = msm_fence_context_alloc(drm, name);
if (IS_ERR(gpu->fctx)) {
ret = PTR_ERR(gpu->fctx);
@@ -586,19 +582,15 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
- INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
INIT_LIST_HEAD(&gpu->submit_list);
- setup_timer(&gpu->inactive_timer, inactive_handler,
- (unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
spin_lock_init(&gpu->perf_lock);
- BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
gpu->mmio = msm_ioremap(pdev, ioname, name);
@@ -622,13 +614,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- /* Acquire clocks: */
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = msm_clk_get(pdev, clk_names[i]);
- DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
- if (IS_ERR(gpu->grp_clks[i]))
- gpu->grp_clks[i] = NULL;
- }
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
gpu->ebi1_clk = msm_clk_get(pdev, "bus");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -684,6 +672,9 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
bs_init(gpu);
return 0;
@@ -706,9 +697,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu)
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->aspace)
- msm_gem_address_space_destroy(gpu->aspace);
-
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
}
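
A minimal sketch of the runtime-PM pairing that replaces the inactive timer/worker above, assuming a device whose probe has already called pm_runtime_enable() and pm_runtime_use_autosuspend(); the helper name is illustrative.

#include <linux/pm_runtime.h>

static int do_gpu_work(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);	/* power up, or bump the usage count */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);
		return ret;
	}

	/* ... touch the hardware here ... */

	pm_runtime_mark_last_busy(dev);		/* restart the autosuspend timer */
	pm_runtime_put_autosuspend(dev);	/* drop usage count, suspend later */
	return 0;
}
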
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index c4c39d3272c7..aa3241000455 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -64,6 +64,7 @@ struct msm_gpu_funcs {
struct msm_gpu {
const char *name;
struct drm_device *dev;
+ struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
/* performance counters (hw & sw): */
@@ -88,9 +89,8 @@ struct msm_gpu {
/* fencing: */
struct msm_fence_context *fctx;
- /* is gpu powered/active? */
- int active_cnt;
- bool inactive;
+ /* does gpu need hw_init? */
+ bool needs_hw_init;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
@@ -103,8 +103,10 @@ struct msm_gpu {
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk *ebi1_clk, *grp_clks[6];
- uint32_t fast_rate, slow_rate, bus_freq;
+ struct clk **grp_clks;
+ int nr_clocks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ uint32_t fast_rate, bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
@@ -114,9 +116,7 @@ struct msm_gpu {
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
- struct timer_list inactive_timer;
- struct work_struct inactive_work;
+
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
@@ -196,6 +196,8 @@ static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7f5779daf5c8..b23d33622f37 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,78 +38,47 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int ret;
+
+ pm_runtime_get_sync(mmu->dev);
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ pm_runtime_put_sync(mmu->dev);
+
+ return ret;
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
+ pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned long da = iova;
- unsigned int i, j;
- int ret;
-
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- dma_addr_t pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
-
- VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+ size_t ret;
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
+// pm_runtime_get_sync(mmu->dev);
+ ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
+// pm_runtime_put_sync(mmu->dev);
+ WARN_ON(ret < 0);
- da += bytes;
- }
-
- return 0;
-
-fail:
- da = iova;
-
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
+ return (ret == len) ? 0 : -EINVAL;
}
static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned long da = iova;
- int i;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
-
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
-
- VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);
-
- BUG_ON(!PAGE_ALIGNED(bytes));
- da += bytes;
- }
+ pm_runtime_get_sync(mmu->dev);
+ iommu_unmap(iommu->domain, iova, len);
+ pm_runtime_put_sync(mmu->dev);
return 0;
}
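
For reference, a minimal sketch of the iommu_map_sg() usage adopted above: a single call maps the whole sg_table and returns the number of bytes mapped, so success reduces to comparing that count against the requested length. The wrapper name is illustrative.

#include <linux/iommu.h>
#include <linux/scatterlist.h>

static int map_buffer(struct iommu_domain *domain, unsigned long iova,
		      struct sg_table *sgt, size_t len, int prot)
{
	size_t mapped;

	/* Map every scatterlist entry in one call. */
	mapped = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);

	return (mapped == len) ? 0 : -EINVAL;
}
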
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 3df7322fd74e..0e81faab2c50 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -322,7 +322,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
}
for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t iova = submit->cmd[i].iova;
+ uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
/* snapshot cmdstream bo's (if we haven't already): */
@@ -341,7 +341,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
- (uint32_t[2]){ iova, szd }, 8);
+ (uint32_t[3]){ iova, szd, iova >> 32 }, 12);
break;
}
}
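
A small sketch of the 64-bit address split used in the RD_CMDSTREAM_ADDR record above: the low 32 bits are written first, then the size in dwords, then the high 32 bits, which leaves the first two words unchanged from the old two-word format. The helper is hypothetical.

#include <linux/kernel.h>

static void pack_cmdstream_addr(uint32_t out[3], uint64_t iova, uint32_t szd)
{
	out[0] = lower_32_bits(iova);	/* low 32 bits of the GPU address */
	out[1] = szd;			/* command stream size, in dwords */
	out[2] = upper_32_bits(iova);	/* high 32 bits of the GPU address */
}
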
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
index fac0824197f1..bf3e532665fb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
@@ -22,17 +22,14 @@
#ifndef __NVKM_CORE_MSGQUEUE_H
#define __NVKM_CORE_MSGQUEUE_H
-
-#include <core/os.h>
-
-struct nvkm_falcon;
+#include <subdev/secboot.h>
struct nvkm_msgqueue;
-enum nvkm_secboot_falcon;
/* Hopefully we will never have firmware arguments larger than that... */
#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
-int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **);
+int nvkm_msgqueue_new(u32, struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
void nvkm_msgqueue_del(struct nvkm_msgqueue **);
void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
@@ -41,7 +38,6 @@ int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
-int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *,
- enum nvkm_secboot_falcon);
+int nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *, unsigned long);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index e5c9b6268dcc..7c7d91cad09a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -42,6 +42,10 @@ struct nvkm_device_tegra_func {
* Whether the chip requires a reference clock
*/
bool require_ref_clk;
+ /*
+ * Whether the chip requires the VDD regulator
+ */
+ bool require_vdd;
};
int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *,
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 24efa900d8ca..f00527b36acc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -68,4 +68,5 @@ int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
+int gp10b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 0a636833e0eb..c7944b19bed8 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -44,4 +44,6 @@ int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp10b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 891497a0fe3b..28d513fbf44c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -97,6 +97,7 @@ int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm20b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gp102_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gp10b_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
#include <subdev/bios.h>
#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
index c4ecf255ff39..6e2b70bd2f41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ibus.h
@@ -7,4 +7,5 @@ int gf117_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk104_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gk20a_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
int gm200_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
+int gp10b_ibus_new(struct nvkm_device *, int, struct nvkm_subdev **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index e68ba636741b..58f10890c3b6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -29,4 +29,5 @@ int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
+int gp10b_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index d6a4bdb6573b..59f3ba551681 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -55,10 +55,11 @@ struct nvkm_secboot {
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
-int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
+int nvkm_secboot_reset(struct nvkm_secboot *, unsigned long);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp10b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 548f36d33924..e427f80344c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1574,6 +1574,7 @@ struct ttm_bo_driver nouveau_bo_driver = {
.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
.io_mem_free = &nouveau_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
struct nvkm_vma *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6104f61b00fc..21b10f9840c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -111,7 +111,7 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
};
struct nouveau_display *disp = nouveau_display(crtc->dev);
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[drm_crtc_index(crtc)];
- int ret, retry = 1;
+ int ret, retry = 20;
do {
ret = nvif_mthd(&disp->disp, 0, &args, sizeof(args));
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 4c4cc2260257..1ada186fab77 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -53,13 +53,21 @@ static int nouveau_platform_remove(struct platform_device *pdev)
#if IS_ENABLED(CONFIG_OF)
static const struct nvkm_device_tegra_func gk20a_platform_data = {
.iommu_bit = 34,
+ .require_vdd = true,
};
static const struct nvkm_device_tegra_func gm20b_platform_data = {
.iommu_bit = 34,
+ .require_vdd = true,
.require_ref_clk = true,
};
+static const struct nvkm_device_tegra_func gp10b_platform_data = {
+ .iommu_bit = 36,
+ /* power provided by generic PM domains */
+ .require_vdd = false,
+};
+
static const struct of_device_id nouveau_platform_match[] = {
{
.compatible = "nvidia,gk20a",
@@ -69,6 +77,10 @@ static const struct of_device_id nouveau_platform_match[] = {
.compatible = "nvidia,gm20b",
.data = &gm20b_platform_data,
},
+ {
+ .compatible = "nvidia,gp10b",
+ .data = &gp10b_platform_data,
+ },
{ }
};
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index bf504788bce6..0e58537352fe 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1002,7 +1002,6 @@ nv50_wndw_atomic_destroy_state(struct drm_plane *plane,
{
struct nv50_wndw_atom *asyw = nv50_wndw_atom(state);
__drm_atomic_helper_plane_destroy_state(&asyw->state);
- dma_fence_put(asyw->state.fence);
kfree(asyw);
}
@@ -1014,7 +1013,6 @@ nv50_wndw_atomic_duplicate_state(struct drm_plane *plane)
if (!(asyw = kmalloc(sizeof(*asyw), GFP_KERNEL)))
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &asyw->state);
- asyw->state.fence = NULL;
asyw->interval = 1;
asyw->sema = armw->sema;
asyw->ntfy = armw->ntfy;
@@ -2043,6 +2041,7 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
u32 vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
u32 hfrontp = mode->hsync_start - mode->hdisplay;
u32 vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ u32 blankus;
struct nv50_head_mode *m = &asyh->mode;
m->h.active = mode->htotal;
@@ -2056,9 +2055,10 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
m->v.blanks = m->v.active - vfrontp - 1;
/*XXX: Safe underestimate, even "0" works */
- m->v.blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
- m->v.blankus *= 1000;
- m->v.blankus /= mode->clock;
+ blankus = (m->v.active - mode->vdisplay - 2) * m->h.active;
+ blankus *= 1000;
+ blankus /= mode->clock;
+ m->v.blankus = blankus;
if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
m->v.blank2e = m->v.active + m->v.synce + vbackp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 1076949b802a..b690bc12a5b7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@ nv4a_chipset = {
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
+ .mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
@@ -2201,8 +2201,6 @@ nv132_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
- .sec2 = gp102_sec2_new,
- .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2215,6 +2213,8 @@ nv132_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2235,8 +2235,6 @@ nv134_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
- .sec2 = gp102_sec2_new,
- .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2249,6 +2247,8 @@ nv134_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
.sw = gf100_sw_new,
};
@@ -2269,8 +2269,6 @@ nv136_chipset = {
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
.secboot = gp102_secboot_new,
- .sec2 = gp102_sec2_new,
- .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2283,6 +2281,65 @@ nv136_chipset = {
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
.gr = gp102_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv137_chipset = {
+ .name = "GP107",
+ .bar = gf100_bar_new,
+ .bios = nvkm_bios_new,
+ .bus = gf100_bus_new,
+ .devinit = gm200_devinit_new,
+ .fb = gp102_fb_new,
+ .fuse = gm107_fuse_new,
+ .gpio = gk104_gpio_new,
+ .i2c = gm200_i2c_new,
+ .ibus = gm200_ibus_new,
+ .imem = nv50_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp100_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gp102_secboot_new,
+ .pci = gp100_pci_new,
+ .pmu = gp102_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[0] = gp102_ce_new,
+ .ce[1] = gp102_ce_new,
+ .ce[2] = gp102_ce_new,
+ .ce[3] = gp102_ce_new,
+ .disp = gp102_disp_new,
+ .dma = gf119_dma_new,
+ .fifo = gp100_fifo_new,
+ .gr = gp107_gr_new,
+ .nvdec = gp102_nvdec_new,
+ .sec2 = gp102_sec2_new,
+ .sw = gf100_sw_new,
+};
+
+static const struct nvkm_device_chip
+nv13b_chipset = {
+ .name = "GP10B",
+ .bar = gk20a_bar_new,
+ .bus = gf100_bus_new,
+ .fb = gp10b_fb_new,
+ .fuse = gm107_fuse_new,
+ .ibus = gp10b_ibus_new,
+ .imem = gk20a_instmem_new,
+ .ltc = gp100_ltc_new,
+ .mc = gp10b_mc_new,
+ .mmu = gf100_mmu_new,
+ .secboot = gp10b_secboot_new,
+ .pmu = gm20b_pmu_new,
+ .timer = gk20a_timer_new,
+ .top = gk104_top_new,
+ .ce[2] = gp102_ce_new,
+ .dma = gf119_dma_new,
+ .fifo = gp10b_fifo_new,
+ .gr = gp10b_gr_new,
.sw = gf100_sw_new,
};
@@ -2724,6 +2781,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x132: device->chip = &nv132_chipset; break;
case 0x134: device->chip = &nv134_chipset; break;
case 0x136: device->chip = &nv136_chipset; break;
+ case 0x137: device->chip = &nv137_chipset; break;
+ case 0x13b: device->chip = &nv13b_chipset; break;
default:
nvdev_error(device, "unknown chipset (%08x)\n", boot0);
goto done;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index f2bc0b7d9b93..6474bd2a6d07 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -28,9 +28,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev)
{
int ret;
- ret = regulator_enable(tdev->vdd);
- if (ret)
- goto err_power;
+ if (tdev->vdd) {
+ ret = regulator_enable(tdev->vdd);
+ if (ret)
+ goto err_power;
+ }
ret = clk_prepare_enable(tdev->clk);
if (ret)
@@ -67,7 +69,8 @@ err_clk_pwr:
err_clk_ref:
clk_disable_unprepare(tdev->clk);
err_clk:
- regulator_disable(tdev->vdd);
+ if (tdev->vdd)
+ regulator_disable(tdev->vdd);
err_power:
return ret;
}
@@ -75,6 +78,8 @@ err_power:
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
+ int ret;
+
reset_control_assert(tdev->rst);
udelay(10);
@@ -84,7 +89,13 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
clk_disable_unprepare(tdev->clk);
udelay(10);
- return regulator_disable(tdev->vdd);
+ if (tdev->vdd) {
+ ret = regulator_disable(tdev->vdd);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void
@@ -264,10 +275,12 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
tdev->func = func;
tdev->pdev = pdev;
- tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd)) {
- ret = PTR_ERR(tdev->vdd);
- goto free;
+ if (func->require_vdd) {
+ tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
}
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
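
A minimal sketch of the optional-supply handling introduced above, assuming per-chip data that says whether a VDD regulator exists: the supply is only looked up when required, and every enable/disable is guarded by a NULL check. Struct and function names are illustrative.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

struct example_gpu {
	struct regulator *vdd;	/* NULL when the chip has no VDD supply */
};

static int example_get_vdd(struct device *dev, struct example_gpu *egpu,
			   bool require_vdd)
{
	if (!require_vdd)
		return 0;	/* e.g. power comes from a generic PM domain */

	egpu->vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(egpu->vdd)) {
		int ret = PTR_ERR(egpu->vdd);

		egpu->vdd = NULL;
		return ret;
	}
	return 0;
}

static int example_power_up(struct example_gpu *egpu)
{
	return egpu->vdd ? regulator_enable(egpu->vdd) : 0;
}
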
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 98651a43bc12..64e51838edf8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -14,6 +14,7 @@ nvkm-y += nvkm/engine/fifo/gm107.o
nvkm-y += nvkm/engine/fifo/gm200.o
nvkm-y += nvkm/engine/fifo/gm20b.o
nvkm-y += nvkm/engine/fifo/gp100.o
+nvkm-y += nvkm/engine/fifo/gp10b.o
nvkm-y += nvkm/engine/fifo/chan.o
nvkm-y += nvkm/engine/fifo/channv50.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
index 0ec179fc40a1..5f722c6e8a2f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv40.c
@@ -44,6 +44,8 @@ nv40_fifo_dma_engine(struct nvkm_engine *engine, u32 *reg, u32 *ctx)
*ctx = 0x38;
return true;
case NVKM_ENGINE_MPEG:
+ if (engine->subdev.device->chipset < 0x44)
+ return false;
*reg = 0x00330c;
*ctx = 0x54;
return true;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
index 679f3ec311e9..44bff98d6725 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.h
@@ -83,4 +83,5 @@ extern const struct nvkm_enum gk104_fifo_fault_hubclient[];
extern const struct nvkm_enum gk104_fifo_fault_gpcclient[];
extern const struct nvkm_enum gm107_fifo_fault_engine[];
+extern const struct nvkm_enum gp100_fifo_fault_engine[];
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
index b2635aea9f6e..41f16cf5a918 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -24,7 +24,7 @@
#include "gk104.h"
#include "changk104.h"
-static const struct nvkm_enum
+const struct nvkm_enum
gp100_fifo_fault_engine[] = {
{ 0x01, "DISPLAY" },
{ 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
new file mode 100644
index 000000000000..4af96c3e69ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp10b.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gk104.h"
+#include "changk104.h"
+
+static const struct gk104_fifo_func
+gp10b_fifo = {
+ .fault.engine = gp100_fifo_fault_engine,
+ .fault.reason = gk104_fifo_fault_reason,
+ .fault.hubclient = gk104_fifo_fault_hubclient,
+ .fault.gpcclient = gk104_fifo_fault_gpcclient,
+ .chan = {
+ &gp100_fifo_gpfifo_oclass,
+ NULL
+ },
+};
+
+int
+gp10b_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
+{
+ return gk104_fifo_new_(&gp10b_fifo, device, index, 512, pfifo);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 2938ad5aca40..8a22558b7b52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -33,6 +33,8 @@ nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
nvkm-y += nvkm/engine/gr/gp100.o
nvkm-y += nvkm/engine/gr/gp102.o
+nvkm-y += nvkm/engine/gr/gp107.o
+nvkm-y += nvkm/engine/gr/gp10b.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -52,3 +54,4 @@ nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
nvkm-y += nvkm/engine/gr/ctxgp100.o
nvkm-y += nvkm/engine/gr/ctxgp102.o
+nvkm-y += nvkm/engine/gr/ctxgp107.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 0ae032fa2909..017180d147cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -106,6 +106,9 @@ void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
void gp100_grctx_generate_pagepool(struct gf100_grctx *);
extern const struct gf100_grctx_func gp102_grctx;
+void gp102_grctx_generate_attrib(struct gf100_grctx *);
+
+extern const struct gf100_grctx_func gp107_grctx;
/* context init value lists */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
index ee26d64af73a..80b7ab0bee3a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -29,7 +29,7 @@
* PGRAPH context implementation
******************************************************************************/
-static void
+void
gp102_grctx_generate_attrib(struct gf100_grctx *info)
{
struct gf100_gr *gr = info->gr;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
new file mode 100644
index 000000000000..8da91a0b3bd2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp107.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
+const struct gf100_grctx_func
+gp107_grctx = {
+ .main = gp100_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x300,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gp102_grctx_generate_attrib,
+ .attrib_nr_max = 0x15de,
+ .attrib_nr = 0x540,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index a4410ef19db5..99689f4de502 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1463,25 +1463,27 @@ gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
struct nvkm_subdev *subdev = &gr->base.engine.subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_secboot *sb = device->secboot;
- int ret = 0;
+ u32 secboot_mask = 0;
/* load fuc microcode */
nvkm_mc_unk260(device, 0);
/* securely-managed falcons must be reset using secure boot */
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
- ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
+ secboot_mask |= BIT(NVKM_SECBOOT_FALCON_FECS);
else
gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
- if (ret)
- return ret;
if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
- ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
+ secboot_mask |= BIT(NVKM_SECBOOT_FALCON_GPCCS);
else
gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
- if (ret)
- return ret;
+
+ if (secboot_mask != 0) {
+ int ret = nvkm_secboot_reset(sb, secboot_mask);
+ if (ret)
+ return ret;
+ }
nvkm_mc_unk260(device, 1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 1d2101af2a87..a36e45a4a635 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -125,6 +125,7 @@ struct gf100_gr_func {
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_ppc_exceptions)(struct gf100_gr *);
void (*init_swdx_pes_mask)(struct gf100_gr *);
+ void (*init_num_active_ltcs)(struct gf100_gr *);
void (*set_hww_esr_report_mask)(struct gf100_gr *);
const struct gf100_gr_pack *mmio;
struct {
@@ -301,4 +302,8 @@ extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
void gm107_gr_init_bios(struct gf100_gr *);
void gm200_gr_init_gpc_mmu(struct gf100_gr *);
+
+void gp100_gr_init_num_active_ltcs(struct gf100_gr *gr);
+
+void gp102_gr_init_swdx_pes_mask(struct gf100_gr *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 94ed7debb714..867a5f7cc5bc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -40,6 +40,15 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}
+void
+gp100_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+ nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+}
+
int
gp100_gr_init(struct gf100_gr *gr)
{
@@ -81,8 +90,7 @@ gp100_gr_init(struct gf100_gr *gr)
}
nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
- nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
- nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
+ gr->func->init_num_active_ltcs(gr);
gr->func->init_rop_active_fbps(gr);
if (gr->func->init_swdx_pes_mask)
@@ -154,6 +162,7 @@ gp100_gr = {
.init_gpc_mmu = gm200_gr_init_gpc_mmu,
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 2,
.grctx = &gp100_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
index 1d5117a16299..61e3a0b08559 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -26,7 +26,7 @@
#include <nvif/class.h>
-static void
+void
gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -47,6 +47,7 @@ gp102_gr = {
.init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
.init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
.init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
.rops = gm200_gr_rops,
.ppc_nr = 3,
.grctx = &gp102_grctx,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
new file mode 100644
index 000000000000..f7272323f694
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp107.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static const struct gf100_gr_func
+gp107_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .init_num_active_ltcs = gp100_gr_init_num_active_ltcs,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 1,
+ .grctx = &gp107_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_B, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gp107_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp107_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
new file mode 100644
index 000000000000..5f3d161a0842
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp10b.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
+static void
+gp10b_gr_init_num_active_ltcs(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+
+ nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
+}
+
+static const struct gf100_gr_func
+gp10b_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_num_active_ltcs = gp10b_gr_init_num_active_ltcs,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 1,
+ .grctx = &gp102_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_A, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_A },
+ {}
+ }
+};
+
+int
+gp10b_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp10b_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index 003ac915eaad..8a8895246d26 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@ nv31_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv31_mpeg_mthd(mpeg, mthd, data))
+ if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index e536f37e24b0..c3cf02ed468e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@ nv44_mpeg_intr(struct nvkm_engine *engine)
}
if (type == 0x00000010) {
- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+ if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
index 982efedb4b13..d45d7947a964 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -463,26 +463,49 @@ nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
}
int
-nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon)
+nvkm_msgqueue_acr_boot_falcons(struct nvkm_msgqueue *queue,
+ unsigned long falcon_mask)
{
- if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon)
+ unsigned long falcon;
+
+ if (!queue || !queue->func->acr_func)
return -ENODEV;
- return queue->func->acr_func->boot_falcon(queue, falcon);
+ /* Does the firmware support booting multiple falcons? */
+ if (queue->func->acr_func->boot_multiple_falcons)
+ return queue->func->acr_func->boot_multiple_falcons(queue,
+ falcon_mask);
+
+ /* Else boot all requested falcons individually */
+ if (!queue->func->acr_func->boot_falcon)
+ return -ENODEV;
+
+ for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
+ int ret = queue->func->acr_func->boot_falcon(queue, falcon);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
int
-nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon,
+ const struct nvkm_secboot *sb, struct nvkm_msgqueue **queue)
{
const struct nvkm_subdev *subdev = falcon->owner;
int ret = -EINVAL;
switch (version) {
case 0x0137c63d:
- ret = msgqueue_0137c63d_new(falcon, queue);
+ ret = msgqueue_0137c63d_new(falcon, sb, queue);
+ break;
+ case 0x0137bca5:
+ ret = msgqueue_0137bca5_new(falcon, sb, queue);
break;
case 0x0148cdec:
- ret = msgqueue_0148cdec_new(falcon, queue);
+ ret = msgqueue_0148cdec_new(falcon, sb, queue);
break;
default:
nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
index f37afe963d3e..13b54f8d8e04 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
@@ -101,9 +101,11 @@ struct nvkm_msgqueue_init_func {
* struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
*
* @boot_falcon: build and send the command to reset a given falcon
+ * @boot_multiple_falcons: build and send the command to reset several falcons
*/
struct nvkm_msgqueue_acr_func {
int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
+ int (*boot_multiple_falcons)(struct nvkm_msgqueue *, unsigned long);
};
struct nvkm_msgqueue_func {
@@ -201,7 +203,11 @@ int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
struct nvkm_msgqueue_queue *);
-int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
-int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
+int msgqueue_0137c63d_new(struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
+int msgqueue_0137bca5_new(struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
+int msgqueue_0148cdec_new(struct nvkm_falcon *, const struct nvkm_secboot *,
+ struct nvkm_msgqueue **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
index bba91207fb18..fec0273158f6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
@@ -43,6 +43,15 @@ struct msgqueue_0137c63d {
#define msgqueue_0137c63d(q) \
container_of(q, struct msgqueue_0137c63d, base)
+struct msgqueue_0137bca5 {
+ struct msgqueue_0137c63d base;
+
+ u64 wpr_addr;
+};
+#define msgqueue_0137bca5(q) \
+ container_of(container_of(q, struct msgqueue_0137c63d, base), \
+		     struct msgqueue_0137bca5, base)
+
static struct nvkm_msgqueue_queue *
msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
enum msgqueue_msg_priority priority)
@@ -180,6 +189,7 @@ msgqueue_0137c63d_init_func = {
enum {
ACR_CMD_INIT_WPR_REGION = 0x00,
ACR_CMD_BOOTSTRAP_FALCON = 0x01,
+ ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS = 0x03,
};
static void
@@ -286,11 +296,81 @@ acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
return 0;
}
+static void
+acr_boot_multiple_falcons_callback(struct nvkm_msgqueue *priv,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ struct acr_bootstrap_falcon_msg {
+ struct nvkm_msgqueue_msg base;
+
+ u32 falcon_mask;
+ } *msg = (void *)hdr;
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ unsigned long falcon_mask = msg->falcon_mask;
+ u32 falcon_id, falcon_treated = 0;
+
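+	/* Log each falcon the firmware reports as booted. */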
+ for_each_set_bit(falcon_id, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
+ nvkm_debug(subdev, "%s booted\n",
+ nvkm_secboot_falcon_name[falcon_id]);
+ falcon_treated |= BIT(falcon_id);
+ }
+
+ if (falcon_treated != msg->falcon_mask) {
+ nvkm_error(subdev, "in bootstrap falcon callback:\n");
+ nvkm_error(subdev, "invalid falcon mask 0x%x\n",
+ msg->falcon_mask);
+ return;
+ }
+}
+
+static int
+acr_boot_multiple_falcons(struct nvkm_msgqueue *priv, unsigned long falcon_mask)
+{
+ DECLARE_COMPLETION_ONSTACK(completed);
+ /*
+ * flags - Flag specifying RESET or no RESET.
+	 * falcon_mask - Mask of falcons to bootstrap.
+	 * wpr_lo, wpr_hi - Low and high 32 bits of the WPR region address.
+ */
+ struct {
+ struct nvkm_msgqueue_hdr hdr;
+ u8 cmd_type;
+ u32 flags;
+ u32 falcon_mask;
+ u32 use_va_mask;
+ u32 wpr_lo;
+ u32 wpr_hi;
+ } cmd;
+ struct msgqueue_0137bca5 *queue = msgqueue_0137bca5(priv);
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
+ cmd.hdr.size = sizeof(cmd);
+ cmd.cmd_type = ACR_CMD_BOOTSTRAP_MULTIPLE_FALCONS;
+ cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+ cmd.falcon_mask = falcon_mask;
+ cmd.wpr_lo = lower_32_bits(queue->wpr_addr);
+ cmd.wpr_hi = upper_32_bits(queue->wpr_addr);
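+
+	/* Post the command and wait up to 1s for the firmware to acknowledge. */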
+ nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
+ acr_boot_multiple_falcons_callback, &completed, true);
+
+ if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
static const struct nvkm_msgqueue_acr_func
msgqueue_0137c63d_acr_func = {
.boot_falcon = acr_boot_falcon,
};
+static const struct nvkm_msgqueue_acr_func
+msgqueue_0137bca5_acr_func = {
+ .boot_falcon = acr_boot_falcon,
+ .boot_multiple_falcons = acr_boot_multiple_falcons,
+};
+
static void
msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
{
@@ -307,7 +387,8 @@ msgqueue_0137c63d_func = {
};
int
-msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+msgqueue_0137c63d_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
+ struct nvkm_msgqueue **queue)
{
struct msgqueue_0137c63d *ret;
@@ -321,3 +402,35 @@ msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
return 0;
}
+
+static const struct nvkm_msgqueue_func
+msgqueue_0137bca5_func = {
+ .init_func = &msgqueue_0137c63d_init_func,
+ .acr_func = &msgqueue_0137bca5_acr_func,
+ .cmd_queue = msgqueue_0137c63d_cmd_queue,
+ .recv = msgqueue_0137c63d_process_msgs,
+ .dtor = msgqueue_0137c63d_dtor,
+};
+
+int
+msgqueue_0137bca5_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
+ struct nvkm_msgqueue **queue)
+{
+ struct msgqueue_0137bca5 *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ *queue = &ret->base.base;
+
+ /*
+ * FIXME this must be set to the address of a *GPU* mapping within the
+ * ACR address space!
+ */
+ /* ret->wpr_addr = sb->wpr_addr; */
+
+ nvkm_msgqueue_ctor(&msgqueue_0137bca5_func, falcon, &ret->base.base);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
index ed5d0da4f4e9..9424803b9ef4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
@@ -247,7 +247,8 @@ msgqueue_0148cdec_func = {
};
int
-msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+msgqueue_0148cdec_new(struct nvkm_falcon *falcon, const struct nvkm_secboot *sb,
+ struct nvkm_msgqueue **queue)
{
struct msgqueue_0148cdec *ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
index eaf74eb72983..8ab896dd4e92 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/boost.c
@@ -33,7 +33,7 @@ nvbios_boostTe(struct nvkm_bios *bios,
u32 boost = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
- if (bit_P.version == 2)
+ if (bit_P.version == 2 && bit_P.length >= 0x34)
boost = nvbios_rd32(bios, bit_P.offset + 0x30);
if (boost) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
index 5063382d8a6c..7c8c36054f71 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/cstep.c
@@ -33,7 +33,7 @@ nvbios_cstepTe(struct nvkm_bios *bios,
u32 cstep = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
- if (bit_P.version == 2)
+ if (bit_P.version == 2 && bit_P.length >= 0x38)
cstep = nvbios_rd32(bios, bit_P.offset + 0x34);
if (cstep) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 456f9ea920dc..0dfb15a27e4e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -32,7 +32,7 @@ nvbios_fan_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
u32 fan = 0;
if (!bit_entry(bios, 'P', &bit_P)) {
- if (bit_P.version == 2 && bit_P.length >= 0x5a)
+ if (bit_P.version == 2 && bit_P.length >= 0x5c)
fan = nvbios_rd32(bios, bit_P.offset + 0x58);
if (fan) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
index 617bfffce4ad..03d2f970a29f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/power_budget.c
@@ -33,7 +33,7 @@ nvbios_power_budget_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt,
u32 power_budget;
if (bit_entry(bios, 'P', &bit_P) || bit_P.version != 2 ||
- bit_P.length < 0x2c)
+ bit_P.length < 0x30)
return 0;
power_budget = nvbios_rd32(bios, bit_P.offset + 0x2c);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
index f199270163d2..20b6fc8243e0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/vpstate.c
@@ -31,7 +31,7 @@ nvbios_vpstate_offset(struct nvkm_bios *b)
struct bit_entry bit_P;
if (!bit_entry(b, 'P', &bit_P)) {
- if (bit_P.version == 2)
+ if (bit_P.version == 2 && bit_P.length >= 0x3c)
return nvbios_rd32(b, bit_P.offset + 0x38);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 1c5e5ba487a8..2571530e82f1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -28,6 +28,7 @@ nvkm-y += nvkm/subdev/fb/gm200.o
nvkm-y += nvkm/subdev/fb/gm20b.o
nvkm-y += nvkm/subdev/fb/gp100.o
nvkm-y += nvkm/subdev/fb/gp102.o
+nvkm-y += nvkm/subdev/fb/gp10b.o
nvkm-y += nvkm/subdev/fb/ram.o
nvkm-y += nvkm/subdev/fb/ramnv04.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
new file mode 100644
index 000000000000..f2b1fbf428d5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp10b.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "gf100.h"
+
+static const struct nvkm_fb_func
+gp10b_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gm200_fb_init,
+ .init_page = gm200_fb_init_page,
+ .intr = gf100_fb_intr,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gp10b_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gp10b_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 53c32fc694e9..c63975907c90 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -589,7 +589,7 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n",
fbp, size, ltcs);
lcomm = min(lcomm, (u64)(size / ltcs) << 20);
- total += size << 20;
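+			/* size is in MiB; widen before shifting to avoid 32-bit overflow */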
+ total += (u64) size << 20;
ltcn += ltcs;
} else {
nvkm_debug(subdev, "FBP %d: disabled\n", fbp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
index 77c649723ad7..4a57defc99b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gpio/base.c
@@ -164,7 +164,7 @@ static int
nvkm_gpio_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_gpio *gpio = nvkm_gpio(subdev);
- u32 mask = (1 << gpio->func->lines) - 1;
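+	/* use a 64-bit constant so the shift is well-defined when lines == 32 */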
+ u32 mask = (1ULL << gpio->func->lines) - 1;
gpio->func->intr_mask(gpio, NVKM_GPIO_TOGGLED, mask, 0);
gpio->func->intr_stat(gpio, &mask, &mask);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
index ad572d3b5466..7f5883b78efa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/Kbuild
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ibus/gf117.o
nvkm-y += nvkm/subdev/ibus/gk104.o
nvkm-y += nvkm/subdev/ibus/gk20a.o
nvkm-y += nvkm/subdev/ibus/gm200.o
+nvkm-y += nvkm/subdev/ibus/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
new file mode 100644
index 000000000000..39db90aa2c80
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gp10b.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include <subdev/ibus.h>
+
+#include "priv.h"
+
+static int
+gp10b_ibus_init(struct nvkm_subdev *ibus)
+{
+ struct nvkm_device *device = ibus->device;
+
+ nvkm_wr32(device, 0x1200a8, 0x0);
+
+ /* init ring */
+ nvkm_wr32(device, 0x12004c, 0x4);
+ nvkm_wr32(device, 0x122204, 0x2);
+ nvkm_rd32(device, 0x122204);
+
+ /* timeout configuration */
+ nvkm_wr32(device, 0x009080, 0x800186a0);
+
+ return 0;
+}
+
+static const struct nvkm_subdev_func
+gp10b_ibus = {
+ .init = gp10b_ibus_init,
+ .intr = gk104_ibus_intr,
+};
+
+int
+gp10b_ibus_new(struct nvkm_device *device, int index,
+ struct nvkm_subdev **pibus)
+{
+ struct nvkm_subdev *ibus;
+ if (!(ibus = *pibus = kzalloc(sizeof(*ibus), GFP_KERNEL)))
+ return -ENOMEM;
+ nvkm_subdev_ctor(&gp10b_ibus, device, index, ibus);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
index 9dec58ec3d9f..cd5adbec5e57 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/gk20a.c
@@ -94,7 +94,7 @@ struct gk20a_instmem {
struct nvkm_instmem base;
/* protects vaddr_* and gk20a_instobj::vaddr* */
- spinlock_t lock;
+ struct mutex lock;
/* CPU mappings LRU */
unsigned int vaddr_use;
@@ -184,11 +184,10 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
const u64 size = nvkm_memory_size(memory);
- unsigned long flags;
nvkm_ltc_flush(ltc);
- spin_lock_irqsave(&imem->lock, flags);
+ mutex_lock(&imem->lock);
if (node->base.vaddr) {
if (!node->use_cpt) {
@@ -216,7 +215,7 @@ gk20a_instobj_acquire_iommu(struct nvkm_memory *memory)
out:
node->use_cpt++;
- spin_unlock_irqrestore(&imem->lock, flags);
+ mutex_unlock(&imem->lock);
return node->base.vaddr;
}
@@ -239,9 +238,8 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
struct gk20a_instobj_iommu *node = gk20a_instobj_iommu(memory);
struct gk20a_instmem *imem = node->base.imem;
struct nvkm_ltc *ltc = imem->base.subdev.device->ltc;
- unsigned long flags;
- spin_lock_irqsave(&imem->lock, flags);
+ mutex_lock(&imem->lock);
/* we should at least have one user to release... */
if (WARN_ON(node->use_cpt == 0))
@@ -252,7 +250,7 @@ gk20a_instobj_release_iommu(struct nvkm_memory *memory)
list_add_tail(&node->vaddr_node, &imem->vaddr_lru);
out:
- spin_unlock_irqrestore(&imem->lock, flags);
+ mutex_unlock(&imem->lock);
wmb();
nvkm_ltc_invalidate(ltc);
@@ -306,19 +304,18 @@ gk20a_instobj_dtor_iommu(struct nvkm_memory *memory)
struct gk20a_instmem *imem = node->base.imem;
struct device *dev = imem->base.subdev.device->dev;
struct nvkm_mm_node *r = node->base.mem.mem;
- unsigned long flags;
int i;
if (unlikely(!r))
goto out;
- spin_lock_irqsave(&imem->lock, flags);
+ mutex_lock(&imem->lock);
/* vaddr has already been recycled */
if (node->base.vaddr)
gk20a_instobj_iommu_recycle_vaddr(node);
- spin_unlock_irqrestore(&imem->lock, flags);
+ mutex_unlock(&imem->lock);
/* clear IOMMU bit to unmap pages */
r->offset &= ~BIT(imem->iommu_bit - imem->iommu_pgshift);
@@ -571,7 +568,7 @@ gk20a_instmem_new(struct nvkm_device *device, int index,
if (!(imem = kzalloc(sizeof(*imem), GFP_KERNEL)))
return -ENOMEM;
nvkm_instmem_ctor(&gk20a_instmem, device, index, &imem->base);
- spin_lock_init(&imem->lock);
+ mutex_init(&imem->lock);
*pimem = &imem->base;
/* do not allow more than 1MB of CPU-mapped instmem */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 12943f92c206..2befbe36dc28 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/mc/gf100.o
nvkm-y += nvkm/subdev/mc/gk104.o
nvkm-y += nvkm/subdev/mc/gk20a.o
nvkm-y += nvkm/subdev/mc/gp100.o
+nvkm-y += nvkm/subdev/mc/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
index 4d22f4abd6de..7321ad3758c3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -42,7 +42,7 @@ gp100_mc_intr_update(struct gp100_mc *mc)
}
}
-static void
+void
gp100_mc_intr_unarm(struct nvkm_mc *base)
{
struct gp100_mc *mc = gp100_mc(base);
@@ -53,7 +53,7 @@ gp100_mc_intr_unarm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-static void
+void
gp100_mc_intr_rearm(struct nvkm_mc *base)
{
struct gp100_mc *mc = gp100_mc(base);
@@ -64,7 +64,7 @@ gp100_mc_intr_rearm(struct nvkm_mc *base)
spin_unlock_irqrestore(&mc->lock, flags);
}
-static void
+void
gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
{
struct gp100_mc *mc = gp100_mc(base);
@@ -87,13 +87,14 @@ gp100_mc = {
};
int
-gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+gp100_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
+ int index, struct nvkm_mc **pmc)
{
struct gp100_mc *mc;
if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
return -ENOMEM;
- nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
+ nvkm_mc_ctor(func, device, index, &mc->base);
*pmc = &mc->base;
spin_lock_init(&mc->lock);
@@ -101,3 +102,9 @@ gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
mc->mask = 0x7fffffff;
return 0;
}
+
+int
+gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return gp100_mc_new_(&gp100_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
new file mode 100644
index 000000000000..2283e3b74277
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp10b.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "priv.h"
+
+void
+gp10b_mc_init(struct nvkm_mc *mc)
+{
+ struct nvkm_device *device = mc->subdev.device;
+ nvkm_wr32(device, 0x000200, 0xffffffff); /* everything on */
+ nvkm_wr32(device, 0x00020c, 0xffffffff); /* everything out of ELPG */
+}
+
+static const struct nvkm_mc_func
+gp10b_mc = {
+ .init = gp10b_mc_init,
+ .intr = gk104_mc_intr,
+ .intr_unarm = gp100_mc_intr_unarm,
+ .intr_rearm = gp100_mc_intr_rearm,
+ .intr_mask = gp100_mc_intr_mask,
+ .intr_stat = gf100_mc_intr_stat,
+ .reset = gk104_mc_reset,
+};
+
+int
+gp10b_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
+{
+ return gp100_mc_new_(&gp10b_mc, device, index, pmc);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index 4f0576a06d24..3be4126441e4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -41,12 +41,18 @@ extern const struct nvkm_mc_map nv17_mc_reset[];
void nv44_mc_init(struct nvkm_mc *);
void nv50_mc_init(struct nvkm_mc *);
+void gk104_mc_init(struct nvkm_mc *);
void gf100_mc_intr_unarm(struct nvkm_mc *);
void gf100_mc_intr_rearm(struct nvkm_mc *);
void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
u32 gf100_mc_intr_stat(struct nvkm_mc *);
void gf100_mc_unk260(struct nvkm_mc *, u32);
+void gp100_mc_intr_unarm(struct nvkm_mc *);
+void gp100_mc_intr_rearm(struct nvkm_mc *);
+void gp100_mc_intr_mask(struct nvkm_mc *, u32, u32);
+int gp100_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int,
+ struct nvkm_mc **);
extern const struct nvkm_mc_map gk104_mc_intr[];
extern const struct nvkm_mc_map gk104_mc_reset[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index ac7f50ae53c6..e698f4836521 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -11,3 +11,4 @@ nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
nvkm-y += nvkm/subdev/secboot/gp102.o
+nvkm-y += nvkm/subdev/secboot/gp10b.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index 93d804652d44..b615fc81aca4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -39,8 +39,7 @@ struct nvkm_acr_func {
int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
struct nvkm_gpuobj *, u64);
- int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
+ int (*reset)(struct nvkm_acr *, struct nvkm_secboot *, unsigned long);
};
/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 993a38eb3ed5..a721354249ce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -26,8 +26,6 @@
#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
-#include <subdev/mc.h>
-#include <subdev/timer.h>
#include <subdev/pmu.h>
#include <core/msgqueue.h>
#include <engine/sec2.h>
@@ -241,6 +239,7 @@ struct ls_ucode_img_r352 {
*/
struct ls_ucode_img *
acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
+ const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
@@ -253,7 +252,7 @@ acr_r352_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
+ ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
if (ret) {
kfree(img->base.ucode_data);
@@ -462,12 +461,14 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
* will be copied into the WPR region by the HS firmware.
*/
static int
-acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
+acr_r352_prepare_ls_blob(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
struct list_head imgs;
struct ls_ucode_img *img, *t;
unsigned long managed_falcons = acr->base.managed_falcons;
+ u64 wpr_addr = sb->wpr_addr;
+ u32 wpr_size = sb->wpr_size;
int managed_count = 0;
u32 image_wpr_size, ls_blob_size;
int falcon_id;
@@ -479,7 +480,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
struct ls_ucode_img *img;
- img = acr->func->ls_ucode_img_load(acr, falcon_id);
+ img = acr->func->ls_ucode_img_load(acr, sb, falcon_id);
if (IS_ERR(img)) {
if (acr->base.optional_falcons & BIT(falcon_id)) {
managed_falcons &= ~BIT(falcon_id);
@@ -704,7 +705,7 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
return 0;
/* Load and prepare the managed falcon's firmwares */
- ret = acr_r352_prepare_ls_blob(acr, sb->wpr_addr, sb->wpr_size);
+ ret = acr_r352_prepare_ls_blob(acr, sb);
if (ret)
return ret;
@@ -882,7 +883,6 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
const struct nvkm_subdev *subdev = &sb->subdev;
unsigned long managed_falcons = acr->base.managed_falcons;
- u32 reg;
int falcon_id;
int ret;
@@ -917,54 +917,13 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
const struct acr_r352_ls_func *func =
acr->func->ls_func[falcon_id];
- if (func->post_run)
- func->post_run(&acr->base, sb);
- }
-
- /* Re-start ourselves if we are managed */
- if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon))
- return 0;
-
- /* Enable interrupts */
- nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff);
- nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true);
-
- /* Start LS firmware on boot falcon */
- nvkm_falcon_start(sb->boot_falcon);
-
- /*
- * There is a bug where the LS firmware sometimes require to be started
- * twice (this happens only on SEC). Detect and workaround that
- * condition.
- *
- * Once started, the falcon will end up in STOPPED condition (bit 5)
- * if successful, or in HALT condition (bit 4) if not.
- */
- nvkm_msec(subdev->device, 1,
- if ((reg = nvkm_rd32(subdev->device,
- sb->boot_falcon->addr + 0x100)
- & 0x30) != 0)
- break;
- );
- if (reg & BIT(4)) {
- nvkm_debug(subdev, "applying workaround for start bug...");
- nvkm_falcon_start(sb->boot_falcon);
- nvkm_msec(subdev->device, 1,
- if ((reg = nvkm_rd32(subdev->device,
- sb->boot_falcon->addr + 0x100)
- & 0x30) != 0)
- break;
- );
- if (reg & BIT(4)) {
- nvkm_error(subdev, "%s failed to start\n",
- nvkm_secboot_falcon_name[acr->base.boot_falcon]);
- return -EINVAL;
+ if (func->post_run) {
+ ret = func->post_run(&acr->base, sb);
+ if (ret)
+ return ret;
}
}
- nvkm_debug(subdev, "%s started\n",
- nvkm_secboot_falcon_name[acr->base.boot_falcon]);
-
return 0;
}
@@ -976,15 +935,16 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
*/
static int
acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon)
+ unsigned long falcon_mask)
{
+ int falcon;
int ret;
/*
* Perform secure boot each time we are called on FECS. Since only FECS
* and GPCCS are managed and started together, this ought to be safe.
*/
- if (falcon != NVKM_SECBOOT_FALCON_FECS)
+ if (!(falcon_mask & BIT(NVKM_SECBOOT_FALCON_FECS)))
goto end;
ret = acr_r352_shutdown(acr, sb);
@@ -996,7 +956,9 @@ acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
return ret;
end:
- acr->falcon_state[falcon] = RESET;
+ for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END) {
+ acr->falcon_state[falcon] = RESET;
+ }
return 0;
}
@@ -1009,11 +971,11 @@ end:
*/
static int
acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon)
+ unsigned long falcon_mask)
{
struct acr_r352 *acr = acr_r352(_acr);
struct nvkm_msgqueue *queue;
- const char *fname = nvkm_secboot_falcon_name[falcon];
+ int falcon;
bool wpr_already_set = sb->wpr_set;
int ret;
@@ -1026,7 +988,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
/* Redo secure boot entirely if it was already done */
if (wpr_already_set)
- return acr_r352_reset_nopmu(acr, sb, falcon);
+ return acr_r352_reset_nopmu(acr, sb, falcon_mask);
/* Else return the result of the initial invocation */
else
return ret;
@@ -1044,13 +1006,15 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
}
/* Otherwise just ask the LS firmware to reset the falcon */
- nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname);
- ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon);
+ for_each_set_bit(falcon, &falcon_mask, NVKM_SECBOOT_FALCON_END)
+ nvkm_debug(&sb->subdev, "resetting %s falcon\n",
+ nvkm_secboot_falcon_name[falcon]);
+ ret = nvkm_msgqueue_acr_boot_falcons(queue, falcon_mask);
if (ret) {
- nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname);
+ nvkm_error(&sb->subdev, "error during falcon reset: %d\n", ret);
return ret;
}
- nvkm_debug(&sb->subdev, "falcon %s reset\n", fname);
+ nvkm_debug(&sb->subdev, "falcon reset done\n");
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index 6e88520566c9..3d58ab871563 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -57,11 +57,11 @@ hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
* @lhdr_flags: LS flags
*/
struct acr_r352_ls_func {
- int (*load)(const struct nvkm_subdev *, struct ls_ucode_img *);
+ int (*load)(const struct nvkm_secboot *, struct ls_ucode_img *);
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
- void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
+ int (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};
@@ -82,6 +82,7 @@ struct acr_r352_func {
bool shadow_blob;
struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
+ const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
@@ -145,6 +146,7 @@ struct nvkm_acr *acr_r352_new_(const struct acr_r352_func *,
enum nvkm_secboot_falcon, unsigned long);
struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
+ const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
index f860713642f1..866877b88797 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -107,6 +107,7 @@ struct ls_ucode_img_r367 {
struct ls_ucode_img *
acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
+ const struct nvkm_secboot *sb,
enum nvkm_secboot_falcon falcon_id)
{
const struct nvkm_subdev *subdev = acr->base.subdev;
@@ -119,7 +120,7 @@ acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
img->base.falcon_id = falcon_id;
- ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
+ ret = acr->func->ls_func[falcon_id]->load(sb, &img->base);
if (ret) {
kfree(img->base.ucode_data);
kfree(img->base.sig);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
index ec6a71ca36be..8bdfb3e5cd1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
@@ -28,6 +28,7 @@
void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
+ const struct nvkm_secboot *,
enum nvkm_secboot_falcon);
int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 5c11e8c50964..ee29c6c11afd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -102,15 +102,15 @@ nvkm_secboot_falcon_name[] = {
* nvkm_secboot_reset() - reset specified falcon
*/
int
-nvkm_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
+nvkm_secboot_reset(struct nvkm_secboot *sb, unsigned long falcon_mask)
{
/* Unmanaged falcon? */
- if (!(BIT(falcon) & sb->acr->managed_falcons)) {
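+	/* i.e. the requested mask must be a subset of the managed falcons */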
+ if ((falcon_mask | sb->acr->managed_falcons) != sb->acr->managed_falcons) {
nvkm_error(&sb->subdev, "cannot reset unmanaged falcon!\n");
return -EINVAL;
}
- return sb->acr->func->reset(sb->acr, sb, falcon);
+ return sb->acr->func->reset(sb->acr, sb, falcon_mask);
}
/**
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index 6dc9fc384f24..c8ab3d76bdef 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -41,4 +41,7 @@ void *gm200_secboot_dtor(struct nvkm_secboot *);
int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
struct nvkm_falcon *);
+/* Tegra-only */
+int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *, u32);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 29e6f73dfd7e..30491d132d59 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -23,31 +23,32 @@
#include "acr.h"
#include "gm200.h"
+#define TEGRA210_MC_BASE 0x70019000
+
#ifdef CONFIG_ARCH_TEGRA
-#define TEGRA_MC_BASE 0x70019000
#define MC_SECURITY_CARVEOUT2_CFG0 0xc58
#define MC_SECURITY_CARVEOUT2_BOM_0 0xc5c
#define MC_SECURITY_CARVEOUT2_BOM_HI_0 0xc60
#define MC_SECURITY_CARVEOUT2_SIZE_128K 0xc64
#define TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED (1 << 1)
/**
- * sb_tegra_read_wpr() - read the WPR registers on Tegra
+ * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
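+ * @gsb: secboot instance to read the WPR region into
+ * @mc_base: physical base address of the memory controller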
*
* On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
* is reserved from system memory by the bootloader and irreversibly locked.
* This function reads the address and size of the pre-configured WPR region.
*/
-static int
-gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+int
+gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
struct nvkm_secboot *sb = &gsb->base;
void __iomem *mc;
u32 cfg;
- mc = ioremap(TEGRA_MC_BASE, 0xd00);
+ mc = ioremap(mc_base, 0xd00);
if (!mc) {
nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
- return PTR_ERR(mc);
+ return -ENOMEM;
}
sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
@@ -70,8 +71,8 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
return 0;
}
#else
-static int
-gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
+int
+gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n");
return -EINVAL;
@@ -84,7 +85,7 @@ gm20b_secboot_oneinit(struct nvkm_secboot *sb)
struct gm200_secboot *gsb = gm200_secboot(sb);
int ret;
- ret = gm20b_tegra_read_wpr(gsb);
+ ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA210_MC_BASE);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
new file mode 100644
index 000000000000..632e9545e292
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp10b.c
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr.h"
+#include "gm200.h"
+
+#define TEGRA186_MC_BASE 0x02c10000
+
+static int
+gp10b_secboot_oneinit(struct nvkm_secboot *sb)
+{
+ struct gm200_secboot *gsb = gm200_secboot(sb);
+ int ret;
+
+ ret = gm20b_secboot_tegra_read_wpr(gsb, TEGRA186_MC_BASE);
+ if (ret)
+ return ret;
+
+ return gm200_secboot_oneinit(sb);
+}
+
+static const struct nvkm_secboot_func
+gp10b_secboot = {
+ .dtor = gm200_secboot_dtor,
+ .oneinit = gp10b_secboot_oneinit,
+ .fini = gm200_secboot_fini,
+ .run_blob = gm200_secboot_run_blob,
+};
+
+int
+gp10b_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ int ret;
+ struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS) |
+ BIT(NVKM_SECBOOT_FALCON_PMU));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
+
+ gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+ if (!gsb) {
+		*psb = NULL;
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ ret = nvkm_secboot_ctor(&gp10b_secboot, acr, device, index, &gsb->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp10b/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 4ff9138a2a83..9b7c402594e8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -147,11 +147,11 @@ struct fw_bl_desc {
u32 data_size;
};
-int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
-int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
-int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *);
-void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
-int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *);
-void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
+int acr_ls_ucode_load_fecs(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_gpccs(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_ucode_load_pmu(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
+int acr_ls_ucode_load_sec2(const struct nvkm_secboot *, struct ls_ucode_img *);
+int acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
index 40a6df77bb8a..d1cf02d22db1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_gr.c
@@ -144,15 +144,13 @@ error:
}
int
-acr_ls_ucode_load_fecs(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_fecs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(subdev, img, "fecs");
+ return ls_ucode_img_load_gr(&sb->subdev, img, "fecs");
}
int
-acr_ls_ucode_load_gpccs(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_gpccs(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- return ls_ucode_img_load_gr(subdev, img, "gpccs");
+ return ls_ucode_img_load_gr(&sb->subdev, img, "gpccs");
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
index ef0b298b70d7..ee989210725e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -28,6 +28,8 @@
#include <core/msgqueue.h>
#include <subdev/pmu.h>
#include <engine/sec2.h>
+#include <subdev/mc.h>
+#include <subdev/timer.h>
/**
* acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
@@ -73,10 +75,11 @@ acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
return 0;
}
-static void
+static int
acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
struct nvkm_falcon *falcon, u32 addr_args)
{
+ struct nvkm_device *device = falcon->owner->device;
u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE;
u8 buf[cmdline_size];
@@ -85,65 +88,118 @@ acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0);
/* rearm the queue so it will wait for the init message */
nvkm_msgqueue_reinit(queue);
+
+ /* Enable interrupts */
+ nvkm_falcon_wr32(falcon, 0x10, 0xff);
+ nvkm_mc_intr_mask(device, falcon->owner->index, true);
+
+ /* Start LS firmware on boot falcon */
+ nvkm_falcon_start(falcon);
+
+ return 0;
}
int
-acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_pmu(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- struct nvkm_pmu *pmu = subdev->device->pmu;
+ struct nvkm_pmu *pmu = sb->subdev.device->pmu;
int ret;
- ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img);
+ ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "pmu", img);
if (ret)
return ret;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
- &pmu->queue);
+ sb, &pmu->queue);
if (ret)
return ret;
return 0;
}
-void
+int
acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
struct nvkm_device *device = sb->subdev.device;
struct nvkm_pmu *pmu = device->pmu;
u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
+ int ret;
+
+ ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
+ if (ret)
+ return ret;
- acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
+ nvkm_debug(&sb->subdev, "%s started\n",
+ nvkm_secboot_falcon_name[acr->boot_falcon]);
+
+ return 0;
}
int
-acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev,
- struct ls_ucode_img *img)
+acr_ls_ucode_load_sec2(const struct nvkm_secboot *sb, struct ls_ucode_img *img)
{
- struct nvkm_sec2 *sec = subdev->device->sec2;
+ struct nvkm_sec2 *sec = sb->subdev.device->sec2;
int ret;
- ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img);
+ ret = acr_ls_ucode_load_msgqueue(&sb->subdev, "sec2", img);
if (ret)
return ret;
/* Allocate the PMU queue corresponding to the FW version */
ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
- &sec->queue);
+ sb, &sec->queue);
if (ret)
return ret;
return 0;
}
-void
+int
acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
{
- struct nvkm_device *device = sb->subdev.device;
+ const struct nvkm_subdev *subdev = &sb->subdev;
+ struct nvkm_device *device = subdev->device;
struct nvkm_sec2 *sec = device->sec2;
/* on SEC arguments are always at the beginning of EMEM */
- u32 addr_args = 0x01000000;
+ const u32 addr_args = 0x01000000;
+ u32 reg;
+ int ret;
- acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
+ ret = acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
+ if (ret)
+ return ret;
+
+ /*
+	 * There is a bug where the LS firmware sometimes needs to be started
+	 * twice (this happens only on SEC). Detect and work around that
+ * condition.
+ *
+ * Once started, the falcon will end up in STOPPED condition (bit 5)
+ * if successful, or in HALT condition (bit 4) if not.
+ */
+ nvkm_msec(device, 1,
+ if ((reg = nvkm_falcon_rd32(sb->boot_falcon, 0x100) & 0x30) != 0)
+ break;
+ );
+ if (reg & BIT(4)) {
+ nvkm_debug(subdev, "applying workaround for start bug...");
+ nvkm_falcon_start(sb->boot_falcon);
+ nvkm_msec(subdev->device, 1,
+ if ((reg = nvkm_rd32(subdev->device,
+ sb->boot_falcon->addr + 0x100)
+ & 0x30) != 0)
+ break;
+ );
+ if (reg & BIT(4)) {
+ nvkm_error(subdev, "%s failed to start\n",
+ nvkm_secboot_falcon_name[acr->boot_falcon]);
+ return -EINVAL;
+ }
+ }
+
+ nvkm_debug(&sb->subdev, "%s started\n",
+ nvkm_secboot_falcon_name[acr->boot_falcon]);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ee5883f59be5..0dbe0306953d 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -160,10 +160,10 @@ static struct dma_buf_ops omap_dmabuf_ops = {
.release = omap_gem_dmabuf_release,
.begin_cpu_access = omap_gem_dmabuf_begin_cpu_access,
.end_cpu_access = omap_gem_dmabuf_end_cpu_access,
- .kmap_atomic = omap_gem_dmabuf_kmap_atomic,
- .kunmap_atomic = omap_gem_dmabuf_kunmap_atomic,
- .kmap = omap_gem_dmabuf_kmap,
- .kunmap = omap_gem_dmabuf_kunmap,
+ .map_atomic = omap_gem_dmabuf_kmap_atomic,
+ .unmap_atomic = omap_gem_dmabuf_kunmap_atomic,
+ .map = omap_gem_dmabuf_kmap,
+ .unmap = omap_gem_dmabuf_kunmap,
.mmap = omap_gem_dmabuf_mmap,
};
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 62aba976e744..3e29a9903303 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -7,6 +7,16 @@ config DRM_PANEL
menu "Display Panels"
depends on DRM && DRM_PANEL
+config DRM_PANEL_LVDS
+ tristate "Generic LVDS panel driver"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ This driver supports LVDS panels that don't require device-specific
+ handling of power supplies or control signals. It implements automatic
+ backlight handling if the panel is attached to a backlight controller.
+
config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
@@ -52,6 +62,12 @@ config DRM_PANEL_PANASONIC_VVX10F034N00
WUXGA (1920x1200) Novatek NT1397-based DSI panel as found in some
Xperia Z2 tablets
+config DRM_PANEL_SAMSUNG_S6E3HA2
+ tristate "Samsung S6E3HA2 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ select VIDEOMODE_HELPERS
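+	help
+	  Say Y here if you want to enable support for the Samsung S6E3HA2
+	  AMOLED DSI video mode panel.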
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
@@ -81,4 +97,11 @@ config DRM_PANEL_SHARP_LS043T1LE01
Say Y here if you want to enable support for Sharp LS043T1LE01 qHD
(540x960) DSI panel as found on the Qualcomm APQ8074 Dragonboard
+config DRM_PANEL_SITRONIX_ST7789V
+ tristate "Sitronix ST7789V panel"
+ depends on OF && SPI
+ help
+ Say Y here if you want to enable support for the Sitronix
+ ST7789V controller for 240x320 LCD panels
+
endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index a5c7ec0236e0..292b3c77aede 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,8 +1,11 @@
+obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
obj-$(CONFIG_DRM_PANEL_SHARP_LS043T1LE01) += panel-sharp-ls043t1le01.o
+obj-$(CONFIG_DRM_PANEL_SITRONIX_ST7789V) += panel-sitronix-st7789v.o
diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
new file mode 100644
index 000000000000..3216aa9a88d6
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lvds.c
@@ -0,0 +1,286 @@
+/*
+ * panel-lvds.c -- Generic LVDS panel driver
+ *
+ * Copyright (C) 2016 Laurent Pinchart
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+struct panel_lvds {
+ struct drm_panel panel;
+ struct device *dev;
+
+ const char *label;
+ unsigned int width;
+ unsigned int height;
+ struct videomode video_mode;
+ unsigned int bus_format;
+ bool data_mirror;
+
+ struct backlight_device *backlight;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline struct panel_lvds *to_panel_lvds(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_lvds, panel);
+}
+
+static int panel_lvds_disable(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->backlight) {
+ lvds->backlight->props.power = FB_BLANK_POWERDOWN;
+ lvds->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(lvds->backlight);
+ }
+
+ return 0;
+}
+
+static int panel_lvds_unprepare(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->enable_gpio)
+ gpiod_set_value_cansleep(lvds->enable_gpio, 0);
+
+ return 0;
+}
+
+static int panel_lvds_prepare(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->enable_gpio)
+ gpiod_set_value_cansleep(lvds->enable_gpio, 1);
+
+ return 0;
+}
+
+static int panel_lvds_enable(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+
+ if (lvds->backlight) {
+ lvds->backlight->props.state &= ~BL_CORE_FBBLANK;
+ lvds->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(lvds->backlight);
+ }
+
+ return 0;
+}
+
+static int panel_lvds_get_modes(struct drm_panel *panel)
+{
+ struct panel_lvds *lvds = to_panel_lvds(panel);
+ struct drm_connector *connector = lvds->panel.connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(lvds->panel.drm);
+ if (!mode)
+ return 0;
+
+ drm_display_mode_from_videomode(&lvds->video_mode, mode);
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = lvds->width;
+ connector->display_info.height_mm = lvds->height;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &lvds->bus_format, 1);
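+	/* "data-mirror" reverses the bit order on the data lanes (LSB sent first) */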
+ connector->display_info.bus_flags = lvds->data_mirror
+ ? DRM_BUS_FLAG_DATA_LSB_TO_MSB
+ : DRM_BUS_FLAG_DATA_MSB_TO_LSB;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs panel_lvds_funcs = {
+ .disable = panel_lvds_disable,
+ .unprepare = panel_lvds_unprepare,
+ .prepare = panel_lvds_prepare,
+ .enable = panel_lvds_enable,
+ .get_modes = panel_lvds_get_modes,
+};
+
+static int panel_lvds_parse_dt(struct panel_lvds *lvds)
+{
+ struct device_node *np = lvds->dev->of_node;
+ struct display_timing timing;
+ const char *mapping;
+ int ret;
+
+ ret = of_get_display_timing(np, "panel-timing", &timing);
+ if (ret < 0)
+ return ret;
+
+ videomode_from_timing(&timing, &lvds->video_mode);
+
+ ret = of_property_read_u32(np, "width-mm", &lvds->width);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "width-mm");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(np, "height-mm", &lvds->height);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "height-mm");
+ return -ENODEV;
+ }
+
+ of_property_read_string(np, "label", &lvds->label);
+
+ ret = of_property_read_string(np, "data-mapping", &mapping);
+ if (ret < 0) {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "data-mapping");
+ return -ENODEV;
+ }
+
+ if (!strcmp(mapping, "jeida-18")) {
+ lvds->bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG;
+ } else if (!strcmp(mapping, "jeida-24")) {
+ lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA;
+ } else if (!strcmp(mapping, "vesa-24")) {
+ lvds->bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG;
+ } else {
+ dev_err(lvds->dev, "%s: invalid or missing %s DT property\n",
+ of_node_full_name(np), "data-mapping");
+ return -EINVAL;
+ }
+
+ lvds->data_mirror = of_property_read_bool(np, "data-mirror");
+
+ return 0;
+}
+
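+/*
+ * Illustrative example of a device tree node handled by this driver; all
+ * numeric values are placeholders, not taken from real hardware:
+ *
+ *	panel {
+ *		compatible = "panel-lvds";
+ *		width-mm = <210>;
+ *		height-mm = <158>;
+ *		data-mapping = "vesa-24";
+ *		backlight = <&backlight>;
+ *		enable-gpios = <&gpio 4 GPIO_ACTIVE_HIGH>;
+ *
+ *		panel-timing {
+ *			clock-frequency = <65000000>;
+ *			hactive = <1024>;
+ *			vactive = <768>;
+ *			hfront-porch = <20>;
+ *			hback-porch = <160>;
+ *			hsync-len = <136>;
+ *			vfront-porch = <3>;
+ *			vback-porch = <29>;
+ *			vsync-len = <6>;
+ *		};
+ *	};
+ */
+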
+static int panel_lvds_probe(struct platform_device *pdev)
+{
+ struct panel_lvds *lvds;
+ struct device_node *np;
+ int ret;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (!lvds)
+ return -ENOMEM;
+
+ lvds->dev = &pdev->dev;
+
+ ret = panel_lvds_parse_dt(lvds);
+ if (ret < 0)
+ return ret;
+
+ /* Get GPIOs and backlight controller. */
+ lvds->enable_gpio = devm_gpiod_get_optional(lvds->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lvds->enable_gpio)) {
+ ret = PTR_ERR(lvds->enable_gpio);
+ dev_err(lvds->dev, "failed to request %s GPIO: %d\n",
+ "enable", ret);
+ return ret;
+ }
+
+ lvds->reset_gpio = devm_gpiod_get_optional(lvds->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(lvds->reset_gpio)) {
+ ret = PTR_ERR(lvds->reset_gpio);
+ dev_err(lvds->dev, "failed to request %s GPIO: %d\n",
+ "reset", ret);
+ return ret;
+ }
+
+ np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
+ if (np) {
+ lvds->backlight = of_find_backlight_by_node(np);
+ of_node_put(np);
+
+ if (!lvds->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ /*
+ * TODO: Handle all power supplies specified in the DT node in a generic
+ * way for panels that don't care about power supply ordering. LVDS
+ * panels that require a specific power sequence will need a dedicated
+ * driver.
+ */
+
+ /* Register the panel. */
+ drm_panel_init(&lvds->panel);
+ lvds->panel.dev = lvds->dev;
+ lvds->panel.funcs = &panel_lvds_funcs;
+
+ ret = drm_panel_add(&lvds->panel);
+ if (ret < 0)
+ goto error;
+
+ dev_set_drvdata(lvds->dev, lvds);
+ return 0;
+
+error:
+ if (lvds->backlight)
+ put_device(&lvds->backlight->dev);
+ return ret;
+}
+
+static int panel_lvds_remove(struct platform_device *pdev)
+{
+ struct panel_lvds *lvds = dev_get_drvdata(&pdev->dev);
+
+ drm_panel_detach(&lvds->panel);
+ drm_panel_remove(&lvds->panel);
+
+ panel_lvds_disable(&lvds->panel);
+
+ if (lvds->backlight)
+ put_device(&lvds->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id panel_lvds_of_table[] = {
+ { .compatible = "panel-lvds", },
+ { /* Sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, panel_lvds_of_table);
+
+static struct platform_driver panel_lvds_driver = {
+ .probe = panel_lvds_probe,
+ .remove = panel_lvds_remove,
+ .driver = {
+ .name = "panel-lvds",
+ .of_match_table = panel_lvds_of_table,
+ },
+};
+
+module_platform_driver(panel_lvds_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("LVDS Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
new file mode 100644
index 000000000000..4cc08d7b3de4
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c
@@ -0,0 +1,739 @@
+/*
+ * MIPI-DSI based s6e3ha2 AMOLED 5.7 inch panel driver.
+ *
+ * Copyright (c) 2016 Samsung Electronics Co., Ltd.
+ * Donghwa Lee <dh09.lee@samsung.com>
+ * Hyungwon Hwang <human.hwang@samsung.com>
+ * Hoegeun Kwon <hoegeun.kwon@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#define S6E3HA2_MIN_BRIGHTNESS 0
+#define S6E3HA2_MAX_BRIGHTNESS 100
+#define S6E3HA2_DEFAULT_BRIGHTNESS 80
+
+#define S6E3HA2_NUM_GAMMA_STEPS 46
+#define S6E3HA2_GAMMA_CMD_CNT 35
+#define S6E3HA2_VINT_STATUS_MAX 10
+
+static const u8 gamma_tbl[S6E3HA2_NUM_GAMMA_STEPS][S6E3HA2_GAMMA_CMD_CNT] = {
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x82, 0x83,
+ 0x85, 0x88, 0x8b, 0x8b, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8c,
+ 0x94, 0x84, 0xb1, 0xaf, 0x8e, 0xcf, 0xad, 0xc9, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x84, 0x84,
+ 0x85, 0x87, 0x8b, 0x8a, 0x84, 0x88, 0x82, 0x82, 0x89, 0x86, 0x8a,
+ 0x93, 0x84, 0xb0, 0xae, 0x8e, 0xc9, 0xa8, 0xc5, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x8a, 0x8a, 0x84, 0x88, 0x81, 0x84, 0x8a, 0x88, 0x8a,
+ 0x91, 0x84, 0xb1, 0xae, 0x8b, 0xd5, 0xb2, 0xcc, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x8a, 0x8a, 0x84, 0x87, 0x81, 0x84, 0x8a, 0x87, 0x8a,
+ 0x91, 0x85, 0xae, 0xac, 0x8a, 0xc3, 0xa3, 0xc0, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x85, 0x85,
+ 0x86, 0x85, 0x88, 0x89, 0x84, 0x89, 0x82, 0x84, 0x87, 0x85, 0x8b,
+ 0x91, 0x88, 0xad, 0xab, 0x8a, 0xb7, 0x9b, 0xb6, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x89, 0x8a, 0x84, 0x89, 0x83, 0x83, 0x86, 0x84, 0x8b,
+ 0x90, 0x84, 0xb0, 0xae, 0x8b, 0xce, 0xad, 0xc8, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x89,
+ 0x8f, 0x84, 0xac, 0xaa, 0x89, 0xb1, 0x98, 0xaf, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x88, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8c,
+ 0x91, 0x86, 0xac, 0xaa, 0x89, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x87, 0x89, 0x8a, 0x83, 0x87, 0x82, 0x85, 0x88, 0x87, 0x88,
+ 0x8b, 0x82, 0xad, 0xaa, 0x8a, 0xc2, 0xa5, 0xbd, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x89, 0x87, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x82, 0x85, 0x84, 0x8a,
+ 0x8e, 0x84, 0xae, 0xac, 0x89, 0xda, 0xb7, 0xd0, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x84, 0x88, 0x83, 0x80, 0x83, 0x82, 0x8b,
+ 0x8e, 0x85, 0xac, 0xaa, 0x89, 0xc8, 0xaa, 0xc1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x81, 0x85, 0x81, 0x84, 0x86, 0x84, 0x8c,
+ 0x8c, 0x84, 0xa9, 0xa8, 0x87, 0xa3, 0x92, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x84, 0x86, 0x83, 0x80, 0x83, 0x81, 0x8c,
+ 0x8d, 0x84, 0xaa, 0xaa, 0x89, 0xce, 0xaf, 0xc5, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x85, 0x86, 0x87, 0x89, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
+ 0x8c, 0x84, 0xa8, 0xa8, 0x88, 0xb5, 0x9f, 0xb0, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x83, 0x85, 0x85, 0x8c,
+ 0x8b, 0x84, 0xab, 0xa8, 0x86, 0xd4, 0xb4, 0xc9, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x86, 0x86, 0x87, 0x88, 0x81, 0x83, 0x80, 0x84, 0x84, 0x85, 0x8b,
+ 0x8a, 0x83, 0xa6, 0xa5, 0x84, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x84, 0x84,
+ 0x86, 0x85, 0x86, 0x86, 0x82, 0x85, 0x81, 0x82, 0x83, 0x84, 0x8e,
+ 0x8b, 0x83, 0xa4, 0xa3, 0x8a, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8e,
+ 0x8b, 0x83, 0xa4, 0xa2, 0x86, 0xc1, 0xa9, 0xb7, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x82, 0x82, 0x84, 0x8d,
+ 0x89, 0x82, 0xa2, 0xa1, 0x84, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xb8, 0x00, 0xc3, 0x00, 0xb1, 0x88, 0x86, 0x87, 0x83, 0x83,
+ 0x85, 0x86, 0x87, 0x87, 0x82, 0x85, 0x81, 0x83, 0x83, 0x85, 0x8c,
+ 0x87, 0x7f, 0xa2, 0x9d, 0x88, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xbb, 0x00, 0xc5, 0x00, 0xb4, 0x87, 0x86, 0x86, 0x84, 0x83,
+ 0x86, 0x87, 0x87, 0x87, 0x80, 0x82, 0x7f, 0x86, 0x86, 0x88, 0x8a,
+ 0x84, 0x7e, 0x9d, 0x9c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xbd, 0x00, 0xc7, 0x00, 0xb7, 0x87, 0x85, 0x85, 0x84, 0x83,
+ 0x86, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x83, 0x84, 0x85, 0x8a,
+ 0x85, 0x7e, 0x9c, 0x9b, 0x85, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xc0, 0x00, 0xca, 0x00, 0xbb, 0x87, 0x86, 0x85, 0x83, 0x83,
+ 0x85, 0x86, 0x86, 0x88, 0x81, 0x83, 0x80, 0x84, 0x85, 0x86, 0x89,
+ 0x83, 0x7d, 0x9c, 0x99, 0x87, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xc4, 0x00, 0xcd, 0x00, 0xbe, 0x87, 0x86, 0x85, 0x83, 0x83,
+ 0x86, 0x85, 0x85, 0x87, 0x81, 0x82, 0x80, 0x82, 0x82, 0x83, 0x8a,
+ 0x85, 0x7f, 0x9f, 0x9b, 0x86, 0xb4, 0xa1, 0xac, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xc7, 0x00, 0xd0, 0x00, 0xc2, 0x87, 0x85, 0x85, 0x83, 0x82,
+ 0x85, 0x85, 0x85, 0x86, 0x82, 0x83, 0x80, 0x82, 0x82, 0x84, 0x87,
+ 0x86, 0x80, 0x9e, 0x9a, 0x87, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xca, 0x00, 0xd2, 0x00, 0xc5, 0x87, 0x85, 0x84, 0x82, 0x82,
+ 0x84, 0x85, 0x85, 0x86, 0x81, 0x82, 0x7f, 0x82, 0x82, 0x84, 0x88,
+ 0x86, 0x81, 0x9d, 0x98, 0x86, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xce, 0x00, 0xd6, 0x00, 0xca, 0x86, 0x85, 0x84, 0x83, 0x83,
+ 0x85, 0x84, 0x84, 0x85, 0x81, 0x82, 0x80, 0x81, 0x81, 0x82, 0x89,
+ 0x86, 0x81, 0x9c, 0x97, 0x86, 0xa7, 0x98, 0xa1, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xd1, 0x00, 0xd9, 0x00, 0xce, 0x86, 0x84, 0x83, 0x83, 0x82,
+ 0x85, 0x85, 0x85, 0x86, 0x81, 0x83, 0x81, 0x82, 0x82, 0x83, 0x86,
+ 0x83, 0x7f, 0x99, 0x95, 0x86, 0xbb, 0xa4, 0xb3, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xd4, 0x00, 0xdb, 0x00, 0xd1, 0x86, 0x85, 0x83, 0x83, 0x82,
+ 0x85, 0x84, 0x84, 0x85, 0x80, 0x83, 0x82, 0x80, 0x80, 0x81, 0x87,
+ 0x84, 0x81, 0x98, 0x93, 0x85, 0xae, 0x9c, 0xa8, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xd8, 0x00, 0xde, 0x00, 0xd6, 0x86, 0x84, 0x83, 0x81, 0x81,
+ 0x83, 0x85, 0x85, 0x85, 0x82, 0x83, 0x81, 0x81, 0x81, 0x83, 0x86,
+ 0x84, 0x80, 0x98, 0x91, 0x85, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xdc, 0x00, 0xe2, 0x00, 0xda, 0x85, 0x84, 0x83, 0x82, 0x82,
+ 0x84, 0x84, 0x84, 0x85, 0x81, 0x82, 0x82, 0x80, 0x80, 0x81, 0x83,
+ 0x82, 0x7f, 0x99, 0x93, 0x86, 0x94, 0x8b, 0x92, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xdf, 0x00, 0xe5, 0x00, 0xde, 0x85, 0x84, 0x82, 0x82, 0x82,
+ 0x84, 0x83, 0x83, 0x84, 0x81, 0x81, 0x80, 0x83, 0x82, 0x84, 0x82,
+ 0x81, 0x7f, 0x99, 0x92, 0x86, 0x7b, 0x7b, 0x7c, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x83, 0x83, 0x84, 0x80,
+ 0x81, 0x7c, 0x99, 0x92, 0x87, 0xa1, 0x93, 0x9d, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x85, 0x84, 0x83, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x83, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
+ 0x82, 0x80, 0x91, 0x8d, 0x83, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x84, 0x80, 0x81, 0x80, 0x81, 0x80, 0x82, 0x83,
+ 0x81, 0x7f, 0x91, 0x8c, 0x82, 0x8d, 0x88, 0x8b, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x82,
+ 0x82, 0x7f, 0x94, 0x89, 0x84, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x83, 0x82, 0x82, 0x81, 0x81, 0x80, 0x82, 0x83,
+ 0x82, 0x7f, 0x91, 0x85, 0x81, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x83, 0x82, 0x84, 0x83,
+ 0x82, 0x7f, 0x90, 0x84, 0x81, 0x9a, 0x90, 0x96, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe4, 0x00, 0xe9, 0x00, 0xe3, 0x84, 0x83, 0x82, 0x80, 0x80,
+ 0x82, 0x83, 0x83, 0x83, 0x80, 0x80, 0x7f, 0x80, 0x80, 0x81, 0x81,
+ 0x82, 0x83, 0x7e, 0x80, 0x7c, 0xa4, 0x97, 0x9f, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xe9, 0x00, 0xec, 0x00, 0xe8, 0x84, 0x83, 0x82, 0x81, 0x81,
+ 0x82, 0x82, 0x82, 0x83, 0x7f, 0x7f, 0x7f, 0x81, 0x80, 0x82, 0x83,
+ 0x83, 0x84, 0x79, 0x7c, 0x79, 0xb1, 0xa0, 0xaa, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xed, 0x00, 0xf0, 0x00, 0xec, 0x83, 0x83, 0x82, 0x80, 0x80,
+ 0x81, 0x82, 0x82, 0x82, 0x7f, 0x7f, 0x7e, 0x81, 0x81, 0x82, 0x80,
+ 0x81, 0x81, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xf1, 0x00, 0xf4, 0x00, 0xf1, 0x83, 0x82, 0x82, 0x80, 0x80,
+ 0x81, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x81, 0x7d,
+ 0x7e, 0x7f, 0x84, 0x84, 0x83, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xf6, 0x00, 0xf7, 0x00, 0xf5, 0x82, 0x82, 0x81, 0x80, 0x80,
+ 0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x7f, 0x7f, 0x7f, 0x82,
+ 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x00, 0xfa, 0x00, 0xfb, 0x00, 0xfa, 0x81, 0x81, 0x81, 0x80, 0x80,
+ 0x80, 0x82, 0x82, 0x82, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 },
+ { 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
+ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00,
+ 0x00, 0x00 }
+};
+
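+/*
+ * VINT register values sent as the third byte of the 0xf4 command.
+ * s6e3ha2_set_vint() scales the backlight brightness
+ * (0..S6E3HA2_MAX_BRIGHTNESS) down to an index into this table.
+ */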
+static const unsigned char vint_table[S6E3HA2_VINT_STATUS_MAX] = {
+ 0x18, 0x19, 0x1a, 0x1b, 0x1c,
+ 0x1d, 0x1e, 0x1f, 0x20, 0x21
+};
+
+struct s6e3ha2 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct backlight_device *bl_dev;
+
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *enable_gpio;
+};
+
+static int s6e3ha2_dcs_write(struct s6e3ha2 *ctx, const void *data, size_t len)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ return mipi_dsi_dcs_write_buffer(dsi, data, len);
+}
+
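+/*
+ * Both helper macros below return from the calling function when the
+ * underlying DCS write fails.
+ */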
+#define s6e3ha2_dcs_write_seq_static(ctx, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = s6e3ha2_dcs_write(ctx, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+} while (0)
+
+#define s6e3ha2_call_write_func(ret, func) do { \
+ ret = (func); \
+ if (ret < 0) \
+ return ret; \
+} while (0)
+
+static int s6e3ha2_test_key_on_f0(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a);
+ return 0;
+}
+
+static int s6e3ha2_test_key_off_f0(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf0, 0xa5, 0xa5);
+ return 0;
+}
+
+static int s6e3ha2_test_key_on_fc(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a);
+ return 0;
+}
+
+static int s6e3ha2_test_key_off_fc(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfc, 0xa5, 0xa5);
+ return 0;
+}
+
+static int s6e3ha2_single_dsi_set(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf2, 0x67);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf9, 0x09);
+ return 0;
+}
+
+static int s6e3ha2_freq_calibration(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfd, 0x1c);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20, 0x39);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0xa0);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x20);
+ s6e3ha2_dcs_write_seq_static(ctx, 0xce, 0x03, 0x3b, 0x12, 0x62, 0x40,
+ 0x80, 0xc0, 0x28, 0x28, 0x28, 0x28, 0x39, 0xc5);
+ return 0;
+}
+
+static int s6e3ha2_aor_control(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb2, 0x03, 0x10);
+ return 0;
+}
+
+static int s6e3ha2_caps_elvss_set(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb6, 0x9c, 0x0a);
+ return 0;
+}
+
+static int s6e3ha2_acl_off(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0x55, 0x00);
+ return 0;
+}
+
+static int s6e3ha2_acl_off_opr(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb5, 0x40);
+ return 0;
+}
+
+static int s6e3ha2_test_global(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x07);
+ return 0;
+}
+
+static int s6e3ha2_test(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb8, 0x19);
+ return 0;
+}
+
+static int s6e3ha2_touch_hsync_on1(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xbd, 0x33, 0x11, 0x02,
+ 0x16, 0x02, 0x16);
+ return 0;
+}
+
+static int s6e3ha2_pentile_control(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xc0, 0x00, 0x00, 0xd8, 0xd8);
+ return 0;
+}
+
+static int s6e3ha2_poc_global(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb0, 0x20);
+ return 0;
+}
+
+static int s6e3ha2_poc_setting(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xfe, 0x08);
+ return 0;
+}
+
+static int s6e3ha2_pcd_set_off(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xcc, 0x40, 0x51);
+ return 0;
+}
+
+static int s6e3ha2_err_fg_set(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xed, 0x44);
+ return 0;
+}
+
+static int s6e3ha2_hbm_off(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0x53, 0x00);
+ return 0;
+}
+
+static int s6e3ha2_te_start_setting(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xb9, 0x10, 0x09, 0xff, 0x00, 0x09);
+ return 0;
+}
+
+static int s6e3ha2_gamma_update(struct s6e3ha2 *ctx)
+{
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x03);
+ ndelay(100); /* 100ns delay required between the two gamma update writes */
+ s6e3ha2_dcs_write_seq_static(ctx, 0xf7, 0x00);
+ return 0;
+}
+
+static int s6e3ha2_get_brightness(struct backlight_device *bl_dev)
+{
+ return bl_dev->props.brightness;
+}
+
+static int s6e3ha2_set_vint(struct s6e3ha2 *ctx)
+{
+ struct backlight_device *bl_dev = ctx->bl_dev;
+ unsigned int brightness = bl_dev->props.brightness;
+ unsigned char data[] = { 0xf4, 0x8b,
+ vint_table[brightness * (S6E3HA2_VINT_STATUS_MAX - 1) /
+ S6E3HA2_MAX_BRIGHTNESS] };
+
+ return s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data));
+}
+
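+/*
+ * Map a brightness value (0..S6E3HA2_MAX_BRIGHTNESS) linearly onto an index
+ * into gamma_tbl.
+ */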
+static unsigned int s6e3ha2_get_brightness_index(unsigned int brightness)
+{
+ return (brightness * (S6E3HA2_NUM_GAMMA_STEPS - 1)) /
+ S6E3HA2_MAX_BRIGHTNESS;
+}
+
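+/*
+ * Send the 35-byte gamma curve for the requested brightness with the 0xca
+ * command and latch it with a gamma update (0xf7) cycle.
+ */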
+static int s6e3ha2_update_gamma(struct s6e3ha2 *ctx, unsigned int brightness)
+{
+ struct backlight_device *bl_dev = ctx->bl_dev;
+ unsigned int index = s6e3ha2_get_brightness_index(brightness);
+ u8 data[S6E3HA2_GAMMA_CMD_CNT + 1] = { 0xca, };
+ int ret;
+
+ memcpy(data + 1, gamma_tbl + index, S6E3HA2_GAMMA_CMD_CNT);
+ s6e3ha2_call_write_func(ret,
+ s6e3ha2_dcs_write(ctx, data, ARRAY_SIZE(data)));
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
+ bl_dev->props.brightness = brightness;
+
+ return 0;
+}
+
+static int s6e3ha2_set_brightness(struct backlight_device *bl_dev)
+{
+ struct s6e3ha2 *ctx = bl_get_data(bl_dev);
+ unsigned int brightness = bl_dev->props.brightness;
+ int ret;
+
+ if (brightness < S6E3HA2_MIN_BRIGHTNESS ||
+ brightness > bl_dev->props.max_brightness) {
+ dev_err(ctx->dev, "Invalid brightness: %u\n", brightness);
+ return -EINVAL;
+ }
+
+ if (bl_dev->props.power > FB_BLANK_NORMAL)
+ return -EPERM;
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_update_gamma(ctx, brightness));
+ s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_set_vint(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
+
+ return 0;
+}
+
+static const struct backlight_ops s6e3ha2_bl_ops = {
+ .get_brightness = s6e3ha2_get_brightness,
+ .update_status = s6e3ha2_set_brightness,
+};
+
+static int s6e3ha2_panel_init(struct s6e3ha2 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_exit_sleep_mode(dsi));
+ usleep_range(5000, 6000);
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_single_dsi_set(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_freq_calibration(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
+
+ return 0;
+}
+
+static int s6e3ha2_power_off(struct s6e3ha2 *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int s6e3ha2_disable(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_enter_sleep_mode(dsi));
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_off(dsi));
+
+ msleep(40);
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+}
+
+static int s6e3ha2_unprepare(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+
+ return s6e3ha2_power_off(ctx);
+}
+
+static int s6e3ha2_power_on(struct s6e3ha2 *ctx)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ return ret;
+
+ msleep(120);
+
+ gpiod_set_value(ctx->enable_gpio, 0);
+ usleep_range(5000, 6000);
+ gpiod_set_value(ctx->enable_gpio, 1);
+
+ gpiod_set_value(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value(ctx->reset_gpio, 0);
+ usleep_range(5000, 6000);
+
+ return 0;
+}
+
+static int s6e3ha2_prepare(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+ int ret;
+
+ ret = s6e3ha2_power_on(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = s6e3ha2_panel_init(ctx);
+ if (ret < 0)
+ goto err;
+
+ ctx->bl_dev->props.power = FB_BLANK_NORMAL;
+
+ return 0;
+
+err:
+ s6e3ha2_power_off(ctx);
+ return ret;
+}
+
+static int s6e3ha2_enable(struct drm_panel *panel)
+{
+ struct s6e3ha2 *ctx = container_of(panel, struct s6e3ha2, panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ /* common setting */
+ s6e3ha2_call_write_func(ret,
+ mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK));
+
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_f0(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_on_fc(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_touch_hsync_on1(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_pentile_control(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_poc_global(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_poc_setting(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_fc(ctx));
+
+ /* pcd setting off for TB */
+ s6e3ha2_call_write_func(ret, s6e3ha2_pcd_set_off(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_err_fg_set(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_te_start_setting(ctx));
+
+ /* brightness setting */
+ s6e3ha2_call_write_func(ret, s6e3ha2_set_brightness(ctx->bl_dev));
+ s6e3ha2_call_write_func(ret, s6e3ha2_aor_control(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_caps_elvss_set(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_gamma_update(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_acl_off(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_acl_off_opr(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_hbm_off(ctx));
+
+ /* ELVSS temperature compensation */
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_global(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test(ctx));
+ s6e3ha2_call_write_func(ret, s6e3ha2_test_key_off_f0(ctx));
+
+ s6e3ha2_call_write_func(ret, mipi_dsi_dcs_set_display_on(dsi));
+ ctx->bl_dev->props.power = FB_BLANK_UNBLANK;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 222372,
+ .hdisplay = 1440,
+ .hsync_start = 1440 + 1,
+ .hsync_end = 1440 + 1 + 1,
+ .htotal = 1440 + 1 + 1 + 1,
+ .vdisplay = 2560,
+ .vsync_start = 2560 + 1,
+ .vsync_end = 2560 + 1 + 1,
+ .vtotal = 2560 + 1 + 1 + 15,
+ .vrefresh = 60,
+ .flags = 0,
+};
+
+static int s6e3ha2_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_ERROR("failed to add mode %ux%ux@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 71;
+ connector->display_info.height_mm = 125;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e3ha2_drm_funcs = {
+ .disable = s6e3ha2_disable,
+ .unprepare = s6e3ha2_unprepare,
+ .prepare = s6e3ha2_prepare,
+ .enable = s6e3ha2_enable,
+ .get_modes = s6e3ha2_get_modes,
+};
+
+static int s6e3ha2_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e3ha2 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(ctx->reset_gpio));
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->enable_gpio)) {
+ dev_err(dev, "cannot get enable-gpios %ld\n",
+ PTR_ERR(ctx->enable_gpio));
+ return PTR_ERR(ctx->enable_gpio);
+ }
+
+ ctx->bl_dev = backlight_device_register("s6e3ha2", dev, ctx,
+ &s6e3ha2_bl_ops, NULL);
+ if (IS_ERR(ctx->bl_dev)) {
+ dev_err(dev, "failed to register backlight device\n");
+ return PTR_ERR(ctx->bl_dev);
+ }
+
+ ctx->bl_dev->props.max_brightness = S6E3HA2_MAX_BRIGHTNESS;
+ ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS;
+ ctx->bl_dev->props.power = FB_BLANK_POWERDOWN;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &s6e3ha2_drm_funcs;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ goto unregister_backlight;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0)
+ goto remove_panel;
+
+ return ret;
+
+remove_panel:
+ drm_panel_remove(&ctx->panel);
+
+unregister_backlight:
+ backlight_device_unregister(ctx->bl_dev);
+
+ return ret;
+}
+
+static int s6e3ha2_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e3ha2 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+ backlight_device_unregister(ctx->bl_dev);
+
+ return 0;
+}
+
+static const struct of_device_id s6e3ha2_of_match[] = {
+ { .compatible = "samsung,s6e3ha2" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, s6e3ha2_of_match);
+
+static struct mipi_dsi_driver s6e3ha2_driver = {
+ .probe = s6e3ha2_probe,
+ .remove = s6e3ha2_remove,
+ .driver = {
+ .name = "panel-samsung-s6e3ha2",
+ .of_match_table = s6e3ha2_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e3ha2_driver);
+
+MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
+MODULE_AUTHOR("Hyungwon Hwang <human.hwang@samsung.com>");
+MODULE_AUTHOR("Hoegeun Kwon <hoegeun.kwon@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based s6e3ha2 AMOLED Panel Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 89eb0422821c..c4566ce8fda7 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -386,6 +386,31 @@ static void panel_simple_shutdown(struct device *dev)
panel_simple_disable(&panel->base);
}
+static const struct drm_display_mode ampire_am_480272h3tmqw_t01h_mode = {
+ .clock = 9000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2,
+ .hsync_end = 480 + 2 + 41,
+ .htotal = 480 + 2 + 41 + 2,
+ .vdisplay = 272,
+ .vsync_start = 272 + 2,
+ .vsync_end = 272 + 2 + 10,
+ .vtotal = 272 + 2 + 10 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ampire_am_480272h3tmqw_t01h = {
+ .modes = &ampire_am_480272h3tmqw_t01h_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 105,
+ .height = 67,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct drm_display_mode ampire_am800480r3tmqwa1h_mode = {
.clock = 33333,
.hdisplay = 800,
@@ -1806,8 +1831,36 @@ static const struct panel_desc urt_umsh_8596md_parallel = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode winstar_wf35ltiacd_mode = {
+ .clock = 6410,
+ .hdisplay = 320,
+ .hsync_start = 320 + 20,
+ .hsync_end = 320 + 20 + 30,
+ .htotal = 320 + 20 + 30 + 38,
+ .vdisplay = 240,
+ .vsync_start = 240 + 4,
+ .vsync_end = 240 + 4 + 3,
+ .vtotal = 240 + 4 + 3 + 15,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc winstar_wf35ltiacd = {
+ .modes = &winstar_wf35ltiacd_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 70,
+ .height = 53,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+};
+
static const struct of_device_id platform_of_match[] = {
{
+ .compatible = "ampire,am-480272h3tmqw-t01h",
+ .data = &ampire_am_480272h3tmqw_t01h,
+ }, {
.compatible = "ampire,am800480r3tmqwa1h",
.data = &ampire_am800480r3tmqwa1h,
}, {
@@ -1994,6 +2047,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "urt,umsh-8596md-20t",
.data = &urt_umsh_8596md_parallel,
}, {
+ .compatible = "winstar,wf35ltiacd",
+ .data = &winstar_wf35ltiacd,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
new file mode 100644
index 000000000000..358c64ef1922
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c
@@ -0,0 +1,449 @@
+/*
+ * Copyright (C) 2017 Free Electrons
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#define ST7789V_COLMOD_RGB_FMT_18BITS (6 << 4)
+#define ST7789V_COLMOD_CTRL_FMT_18BITS (6 << 0)
+
+#define ST7789V_RAMCTRL_CMD 0xb0
+#define ST7789V_RAMCTRL_RM_RGB BIT(4)
+#define ST7789V_RAMCTRL_DM_RGB BIT(0)
+#define ST7789V_RAMCTRL_MAGIC (3 << 6)
+#define ST7789V_RAMCTRL_EPF(n) (((n) & 3) << 4)
+
+#define ST7789V_RGBCTRL_CMD 0xb1
+#define ST7789V_RGBCTRL_WO BIT(7)
+#define ST7789V_RGBCTRL_RCM(n) (((n) & 3) << 5)
+#define ST7789V_RGBCTRL_VSYNC_HIGH BIT(3)
+#define ST7789V_RGBCTRL_HSYNC_HIGH BIT(2)
+#define ST7789V_RGBCTRL_PCLK_HIGH BIT(1)
+#define ST7789V_RGBCTRL_VBP(n) ((n) & 0x7f)
+#define ST7789V_RGBCTRL_HBP(n) ((n) & 0x1f)
+
+#define ST7789V_PORCTRL_CMD 0xb2
+#define ST7789V_PORCTRL_IDLE_BP(n) (((n) & 0xf) << 4)
+#define ST7789V_PORCTRL_IDLE_FP(n) ((n) & 0xf)
+#define ST7789V_PORCTRL_PARTIAL_BP(n) (((n) & 0xf) << 4)
+#define ST7789V_PORCTRL_PARTIAL_FP(n) ((n) & 0xf)
+
+#define ST7789V_GCTRL_CMD 0xb7
+#define ST7789V_GCTRL_VGHS(n) (((n) & 7) << 4)
+#define ST7789V_GCTRL_VGLS(n) ((n) & 7)
+
+#define ST7789V_VCOMS_CMD 0xbb
+
+#define ST7789V_LCMCTRL_CMD 0xc0
+#define ST7789V_LCMCTRL_XBGR BIT(5)
+#define ST7789V_LCMCTRL_XMX BIT(3)
+#define ST7789V_LCMCTRL_XMH BIT(2)
+
+#define ST7789V_VDVVRHEN_CMD 0xc2
+#define ST7789V_VDVVRHEN_CMDEN BIT(0)
+
+#define ST7789V_VRHS_CMD 0xc3
+
+#define ST7789V_VDVS_CMD 0xc4
+
+#define ST7789V_FRCTRL2_CMD 0xc6
+
+#define ST7789V_PWCTRL1_CMD 0xd0
+#define ST7789V_PWCTRL1_MAGIC 0xa4
+#define ST7789V_PWCTRL1_AVDD(n) (((n) & 3) << 6)
+#define ST7789V_PWCTRL1_AVCL(n) (((n) & 3) << 4)
+#define ST7789V_PWCTRL1_VDS(n) ((n) & 3)
+
+#define ST7789V_PVGAMCTRL_CMD 0xe0
+#define ST7789V_PVGAMCTRL_JP0(n) (((n) & 3) << 4)
+#define ST7789V_PVGAMCTRL_JP1(n) (((n) & 3) << 4)
+#define ST7789V_PVGAMCTRL_VP0(n) ((n) & 0xf)
+#define ST7789V_PVGAMCTRL_VP1(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP2(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP4(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP6(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP13(n) ((n) & 0xf)
+#define ST7789V_PVGAMCTRL_VP20(n) ((n) & 0x7f)
+#define ST7789V_PVGAMCTRL_VP27(n) ((n) & 7)
+#define ST7789V_PVGAMCTRL_VP36(n) (((n) & 7) << 4)
+#define ST7789V_PVGAMCTRL_VP43(n) ((n) & 0x7f)
+#define ST7789V_PVGAMCTRL_VP50(n) ((n) & 0xf)
+#define ST7789V_PVGAMCTRL_VP57(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP59(n) ((n) & 0x1f)
+#define ST7789V_PVGAMCTRL_VP61(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP62(n) ((n) & 0x3f)
+#define ST7789V_PVGAMCTRL_VP63(n) (((n) & 0xf) << 4)
+
+#define ST7789V_NVGAMCTRL_CMD 0xe1
+#define ST7789V_NVGAMCTRL_JN0(n) (((n) & 3) << 4)
+#define ST7789V_NVGAMCTRL_JN1(n) (((n) & 3) << 4)
+#define ST7789V_NVGAMCTRL_VN0(n) ((n) & 0xf)
+#define ST7789V_NVGAMCTRL_VN1(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN2(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN4(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN6(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN13(n) ((n) & 0xf)
+#define ST7789V_NVGAMCTRL_VN20(n) ((n) & 0x7f)
+#define ST7789V_NVGAMCTRL_VN27(n) ((n) & 7)
+#define ST7789V_NVGAMCTRL_VN36(n) (((n) & 7) << 4)
+#define ST7789V_NVGAMCTRL_VN43(n) ((n) & 0x7f)
+#define ST7789V_NVGAMCTRL_VN50(n) ((n) & 0xf)
+#define ST7789V_NVGAMCTRL_VN57(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN59(n) ((n) & 0x1f)
+#define ST7789V_NVGAMCTRL_VN61(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN62(n) ((n) & 0x3f)
+#define ST7789V_NVGAMCTRL_VN63(n) (((n) & 0xf) << 4)
+
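+/*
+ * Evaluate func, store its result in val and return from the calling
+ * function if it failed.
+ */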
+#define ST7789V_TEST(val, func) \
+ do { \
+ if ((val = (func))) \
+ return val; \
+ } while (0)
+
+struct st7789v {
+ struct drm_panel panel;
+ struct spi_device *spi;
+ struct gpio_desc *reset;
+ struct backlight_device *backlight;
+ struct regulator *power;
+};
+
+enum st7789v_prefix {
+ ST7789V_COMMAND = 0,
+ ST7789V_DATA = 1,
+};
+
+static inline struct st7789v *panel_to_st7789v(struct drm_panel *panel)
+{
+ return container_of(panel, struct st7789v, panel);
+}
+
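+/*
+ * The controller is driven with 9-bit SPI transfers: bit 8 carries the
+ * data/command prefix and the low eight bits carry the payload byte.
+ */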
+static int st7789v_spi_write(struct st7789v *ctx, enum st7789v_prefix prefix,
+ u8 data)
+{
+ struct spi_transfer xfer = { };
+ struct spi_message msg;
+ u16 txbuf = ((prefix & 1) << 8) | data;
+
+ spi_message_init(&msg);
+
+ xfer.tx_buf = &txbuf;
+ xfer.bits_per_word = 9;
+ xfer.len = sizeof(txbuf);
+
+ spi_message_add_tail(&xfer, &msg);
+ return spi_sync(ctx->spi, &msg);
+}
+
+static int st7789v_write_command(struct st7789v *ctx, u8 cmd)
+{
+ return st7789v_spi_write(ctx, ST7789V_COMMAND, cmd);
+}
+
+static int st7789v_write_data(struct st7789v *ctx, u8 data)
+{
+ return st7789v_spi_write(ctx, ST7789V_DATA, data);
+}
+
+static const struct drm_display_mode default_mode = {
+ .clock = 7000,
+ .hdisplay = 240,
+ .hsync_start = 240 + 38,
+ .hsync_end = 240 + 38 + 10,
+ .htotal = 240 + 38 + 10 + 10,
+ .vdisplay = 320,
+ .vsync_start = 320 + 8,
+ .vsync_end = 320 + 8 + 4,
+ .vtotal = 320 + 8 + 4 + 4,
+ .vrefresh = 60,
+};
+
+static int st7789v_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ dev_err(panel->drm->dev, "failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = 61;
+ connector->display_info.height_mm = 103;
+
+ return 1;
+}
+
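+/*
+ * Power the panel up, pulse the reset line and program the full init
+ * sequence: pixel format, porch, gamma, power and RGB interface settings.
+ */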
+static int st7789v_prepare(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ int ret;
+
+ ret = regulator_enable(ctx->power);
+ if (ret)
+ return ret;
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(30);
+ gpiod_set_value(ctx->reset, 0);
+ msleep(120);
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_EXIT_SLEEP_MODE));
+
+ /* We need to wait 120ms after a sleep out command */
+ msleep(120);
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx,
+ MIPI_DCS_SET_ADDRESS_MODE));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx,
+ MIPI_DCS_SET_PIXEL_FORMAT));
+ ST7789V_TEST(ret, st7789v_write_data(ctx,
+ (MIPI_DCS_PIXEL_FMT_18BIT << 4) |
+ (MIPI_DCS_PIXEL_FMT_18BIT)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PORCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xc));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PORCTRL_IDLE_BP(3) |
+ ST7789V_PORCTRL_IDLE_FP(3)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx,
+ ST7789V_PORCTRL_PARTIAL_BP(3) |
+ ST7789V_PORCTRL_PARTIAL_FP(3)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_GCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_GCTRL_VGLS(5) |
+ ST7789V_GCTRL_VGHS(3)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VCOMS_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0x2b));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_LCMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_LCMCTRL_XMH |
+ ST7789V_LCMCTRL_XMX |
+ ST7789V_LCMCTRL_XBGR));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVVRHEN_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_VDVVRHEN_CMDEN));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VRHS_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_VDVS_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0x20));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_FRCTRL2_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, 0xf));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PWCTRL1_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_MAGIC));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PWCTRL1_AVDD(2) |
+ ST7789V_PWCTRL1_AVCL(2) |
+ ST7789V_PWCTRL1_VDS(1)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_PVGAMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP63(0xd)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP1(0xca)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP2(0xe)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP4(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP6(9)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP13(7)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP20(0x2d)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP27(0xb) |
+ ST7789V_PVGAMCTRL_VP36(3)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP43(0x3d)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_JP1(3) |
+ ST7789V_PVGAMCTRL_VP50(4)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP57(0xa)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP59(0xa)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP61(0x1b)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_PVGAMCTRL_VP62(0x28)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_NVGAMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN63(0xd)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN1(0xca)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN2(0xf)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN4(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN6(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN13(7)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN20(0x2e)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN27(0xc) |
+ ST7789V_NVGAMCTRL_VN36(5)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN43(0x40)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_JN1(3) |
+ ST7789V_NVGAMCTRL_VN50(4)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN57(9)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN59(0xb)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN61(0x1b)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_NVGAMCTRL_VN62(0x28)));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_INVERT_MODE));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RAMCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_DM_RGB |
+ ST7789V_RAMCTRL_RM_RGB));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RAMCTRL_EPF(3) |
+ ST7789V_RAMCTRL_MAGIC));
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, ST7789V_RGBCTRL_CMD));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_WO |
+ ST7789V_RGBCTRL_RCM(2) |
+ ST7789V_RGBCTRL_VSYNC_HIGH |
+ ST7789V_RGBCTRL_HSYNC_HIGH |
+ ST7789V_RGBCTRL_PCLK_HIGH));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_VBP(8)));
+ ST7789V_TEST(ret, st7789v_write_data(ctx, ST7789V_RGBCTRL_HBP(20)));
+
+ return 0;
+}
+
+static int st7789v_enable(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+
+ if (ctx->backlight) {
+ ctx->backlight->props.state &= ~BL_CORE_FBBLANK;
+ ctx->backlight->props.power = FB_BLANK_UNBLANK;
+ backlight_update_status(ctx->backlight);
+ }
+
+ return st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_ON);
+}
+
+static int st7789v_disable(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ int ret;
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_SET_DISPLAY_OFF));
+
+ if (ctx->backlight) {
+ ctx->backlight->props.power = FB_BLANK_POWERDOWN;
+ ctx->backlight->props.state |= BL_CORE_FBBLANK;
+ backlight_update_status(ctx->backlight);
+ }
+
+ return 0;
+}
+
+static int st7789v_unprepare(struct drm_panel *panel)
+{
+ struct st7789v *ctx = panel_to_st7789v(panel);
+ int ret;
+
+ ST7789V_TEST(ret, st7789v_write_command(ctx, MIPI_DCS_ENTER_SLEEP_MODE));
+
+ regulator_disable(ctx->power);
+
+ return 0;
+}
+
+static const struct drm_panel_funcs st7789v_drm_funcs = {
+ .disable = st7789v_disable,
+ .enable = st7789v_enable,
+ .get_modes = st7789v_get_modes,
+ .prepare = st7789v_prepare,
+ .unprepare = st7789v_unprepare,
+};
+
+static int st7789v_probe(struct spi_device *spi)
+{
+ struct device_node *backlight;
+ struct st7789v *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&spi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ctx);
+ ctx->spi = spi;
+
+ ctx->panel.dev = &spi->dev;
+ ctx->panel.funcs = &st7789v_drm_funcs;
+
+ ctx->power = devm_regulator_get(&spi->dev, "power");
+ if (IS_ERR(ctx->power))
+ return PTR_ERR(ctx->power);
+
+ ctx->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ dev_err(&spi->dev, "Couldn't get our reset line\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ backlight = of_parse_phandle(spi->dev.of_node, "backlight", 0);
+ if (backlight) {
+ ctx->backlight = of_find_backlight_by_node(backlight);
+ of_node_put(backlight);
+
+ if (!ctx->backlight)
+ return -EPROBE_DEFER;
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ goto err_free_backlight;
+
+ return 0;
+
+err_free_backlight:
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return ret;
+}
+
+static int st7789v_remove(struct spi_device *spi)
+{
+ struct st7789v *ctx = spi_get_drvdata(spi);
+
+ drm_panel_detach(&ctx->panel);
+ drm_panel_remove(&ctx->panel);
+
+ if (ctx->backlight)
+ put_device(&ctx->backlight->dev);
+
+ return 0;
+}
+
+static const struct of_device_id st7789v_of_match[] = {
+ { .compatible = "sitronix,st7789v" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, st7789v_of_match);
+
+static struct spi_driver st7789v_driver = {
+ .probe = st7789v_probe,
+ .remove = st7789v_remove,
+ .driver = {
+ .name = "st7789v",
+ .of_match_table = st7789v_of_match,
+ },
+};
+module_spi_driver(st7789v_driver);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Sitronix st7789v LCD Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 7d1cab57c89e..0fdedee4509d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -393,6 +393,7 @@ static struct ttm_bo_driver qxl_bo_driver = {
.verify_access = &qxl_verify_access,
.io_mem_reserve = &qxl_ttm_io_mem_reserve,
.io_mem_free = &qxl_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &qxl_bo_move_notify,
};
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index aefca0b03f38..c31e660e35db 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3295,7 +3295,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
mem_trp = ((temp >> 8) & 0x7) + 1;
mem_tras = ((temp >> 11) & 0xf) + 4;
} else if (rdev->family == CHIP_RV350 ||
- rdev->family <= CHIP_RV380) {
+ rdev->family == CHIP_RV380) {
/* rv3x0 */
mem_trcd = (temp & 0x7) + 3;
mem_trp = ((temp >> 8) & 0x7) + 3;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e400dc414e3..c1c8e2208a21 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -499,6 +499,7 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+ unsigned prime_shared_count;
/* list of all virtual address to which this bo
* is associated to
*/
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index a8442f7196d6..df6b58c08544 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -164,6 +164,16 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].allowed_domains = domain;
}
+ /* Objects shared as dma-bufs cannot be moved to VRAM */
+ if (p->relocs[i].robj->prime_shared_count) {
+ p->relocs[i].allowed_domains &= ~RADEON_GEM_DOMAIN_VRAM;
+ if (!p->relocs[i].allowed_domains) {
+ DRM_ERROR("BO associated with dma-buf cannot "
+ "be moved to VRAM\n");
+ return -EINVAL;
+ }
+ }
+
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 981385eb5389..17d3dafc8319 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -1355,6 +1355,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
+ /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */
+ if (obj->import_attach) {
+ DRM_DEBUG_KMS("Cannot create framebuffer from imported dma_buf\n");
+ return ERR_PTR(-EINVAL);
+ }
+
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
if (radeon_fb == NULL) {
drm_gem_object_unreference_unlocked(obj);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 9b0b123ce079..dddb372de2b9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -120,6 +120,10 @@ static int radeon_gem_set_domain(struct drm_gem_object *gobj,
return r;
}
}
+ if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) {
+ /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 74b276060c20..bec2ec056de4 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -352,6 +352,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
return 0;
}
+ if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
+ return -EINVAL;
+ }
+
radeon_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
/* force to pin into visible video ram */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index f3609c97496b..7110d403322c 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -77,6 +77,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
list_add_tail(&bo->list, &rdev->gem.objects);
mutex_unlock(&rdev->gem.mutex);
+ bo->prime_shared_count = 1;
return &bo->gem_base;
}
@@ -91,6 +92,9 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)
/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
+ if (likely(ret == 0))
+ bo->prime_shared_count++;
+
radeon_bo_unreserve(bo);
return ret;
}
@@ -105,6 +109,8 @@ void radeon_gem_prime_unpin(struct drm_gem_object *obj)
return;
radeon_bo_unpin(bo);
+ if (bo->prime_shared_count)
+ bo->prime_shared_count--;
radeon_bo_unreserve(bo);
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index aaa3e80fecb4..8b7623b5a624 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -873,6 +873,7 @@ static struct ttm_bo_driver radeon_bo_driver = {
.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
.io_mem_reserve = &radeon_ttm_io_mem_reserve,
.io_mem_free = &radeon_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
int radeon_ttm_init(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 4c2fd056dd6d..8a50dab19e5c 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -11,15 +11,17 @@ config DRM_RCAR_DU
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
-config DRM_RCAR_HDMI
- bool "R-Car DU HDMI Encoder Support"
- depends on DRM_RCAR_DU
+config DRM_RCAR_DW_HDMI
+ tristate "R-Car DU Gen3 HDMI Encoder Support"
+ depends on DRM && OF
+ select DRM_DW_HDMI
help
- Enable support for external HDMI encoders.
+ Enable support for the R-Car Gen3 internal HDMI encoder.
config DRM_RCAR_LVDS
bool "R-Car DU LVDS Encoder Support"
depends on DRM_RCAR_DU
+ select DRM_PANEL
help
Enable support for the R-Car Display Unit embedded LVDS encoders.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index d3b44651061a..2131e722de3b 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -4,13 +4,11 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_group.o \
rcar_du_kms.o \
rcar_du_lvdscon.o \
- rcar_du_plane.o \
- rcar_du_vgacon.o
-
-rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o
+ rcar_du_plane.o
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
+obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index edcbe2e3625d..4ed6f2340af0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -106,9 +106,62 @@ static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
* Hardware Setup
*/
+struct dpll_info {
+ unsigned int output;
+ unsigned int fdpll;
+ unsigned int n;
+ unsigned int m;
+};
+
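+/*
+ * Exhaustively search the (n, m, fdpll) divider space for the combination
+ * that brings
+ *
+ *   output = input * (n + 1) / (m + 1) / (fdpll + 1) / 2
+ *
+ * closest to the target dot clock, rejecting outputs of 400 MHz or more.
+ */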
+static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
+ struct dpll_info *dpll,
+ unsigned long input,
+ unsigned long target)
+{
+ unsigned long best_diff = (unsigned long)-1;
+ unsigned long diff;
+ unsigned int fdpll;
+ unsigned int m;
+ unsigned int n;
+
+ for (n = 39; n < 120; n++) {
+ for (m = 0; m < 4; m++) {
+ for (fdpll = 1; fdpll < 32; fdpll++) {
+ unsigned long output;
+
+ /* 1/2 (FRQSEL=1) for duty rate 50% */
+ output = input * (n + 1) / (m + 1)
+ / (fdpll + 1) / 2;
+
+ if (output >= 400000000)
+ continue;
+
+ diff = abs((long)output - (long)target);
+ if (best_diff > diff) {
+ best_diff = diff;
+ dpll->n = n;
+ dpll->m = m;
+ dpll->fdpll = fdpll;
+ dpll->output = output;
+ }
+
+ if (diff == 0)
+ goto done;
+ }
+ }
+ }
+
+done:
+ dev_dbg(rcrtc->group->dev->dev,
+ "output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
+ dpll->output, dpll->fdpll, dpll->n, dpll->m,
+ best_diff);
+}
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
unsigned long mode_clock = mode->clock * 1000;
unsigned long clk;
u32 value;
@@ -124,12 +177,18 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = div | ESCR_DCLKSEL_CLKS;
if (rcrtc->extclock) {
+ struct dpll_info dpll = { 0 };
unsigned long extclk;
unsigned long extrate;
unsigned long rate;
u32 extdiv;
extclk = clk_get_rate(rcrtc->extclock);
+ if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
+ rcar_du_dpll_divider(rcrtc, &dpll, extclk, mode_clock);
+ extclk = dpll.output;
+ }
+
extdiv = DIV_ROUND_CLOSEST(extclk, mode_clock);
extdiv = clamp(extdiv, 1U, 64U) - 1;
@@ -140,7 +199,27 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
abs((long)rate - (long)mode_clock)) {
dev_dbg(rcrtc->group->dev->dev,
"crtc%u: using external clock\n", rcrtc->index);
- escr = extdiv | ESCR_DCLKSEL_DCLKIN;
+
+ if (rcdu->info->dpll_ch & (1 << rcrtc->index)) {
+ u32 dpllcr = DPLLCR_CODE | DPLLCR_CLKE
+ | DPLLCR_FDPLL(dpll.fdpll)
+ | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ | DPLLCR_STBY;
+
+ if (rcrtc->index == 1)
+ dpllcr |= DPLLCR_PLCS1
+ | DPLLCR_INCS_DOTCLKIN1;
+ else
+ dpllcr |= DPLLCR_PLCS0
+ | DPLLCR_INCS_DOTCLKIN0;
+
+ rcar_du_group_write(rcrtc->group, DPLLCR,
+ dpllcr);
+
+ escr = ESCR_DCLKSEL_DCLKIN | 1;
+ } else {
+ escr = ESCR_DCLKSEL_DCLKIN | extdiv;
+ }
}
}
@@ -488,22 +567,29 @@ static void rcar_du_crtc_disable(struct drm_crtc *crtc)
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
+ spin_lock_irq(&crtc->dev->event_lock);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
rcrtc->outputs = 0;
}
static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_pending_vblank_event *event = crtc->state->event;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
unsigned long flags;
- if (event) {
+ if (crtc->state->event) {
WARN_ON(drm_crtc_vblank_get(crtc) != 0);
spin_lock_irqsave(&dev->event_lock, flags);
- rcrtc->event = event;
+ rcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index a7194812997e..15871fae7445 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -1,7 +1,7 @@
/*
* rcar_du_crtc.h -- R-Car Display Unit CRTCs
*
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
+ * Copyright (C) 2013-2015 Renesas Electronics Corporation
*
* Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
*
@@ -61,6 +61,8 @@ enum rcar_du_output {
RCAR_DU_OUTPUT_DPAD1,
RCAR_DU_OUTPUT_LVDS0,
RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_HDMI0,
+ RCAR_DU_OUTPUT_HDMI1,
RCAR_DU_OUTPUT_TCON,
RCAR_DU_OUTPUT_MAX,
};
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 62a3b3e32153..d6a0255181cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -44,12 +44,10 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1) | BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 1,
},
},
@@ -68,17 +66,14 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2) | BIT(1) | BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 1,
},
[RCAR_DU_OUTPUT_LVDS1] = {
.possible_crtcs = BIT(2) | BIT(1),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 2,
},
},
@@ -97,12 +92,10 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(1) | BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 1,
},
},
@@ -118,12 +111,10 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
/* R8A7792 has two RGB outputs. */
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 1,
},
},
@@ -141,12 +132,10 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_DPAD1] = {
.possible_crtcs = BIT(1),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 1,
},
},
@@ -160,21 +149,28 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
| RCAR_DU_FEATURE_VSP1_SOURCE,
.num_crtcs = 4,
.routes = {
- /* R8A7795 has one RGB output, one LVDS output and two
- * (currently unsupported) HDMI outputs.
+ /* R8A7795 has one RGB output, two HDMI outputs and one
+ * LVDS output.
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(3),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
+ [RCAR_DU_OUTPUT_HDMI0] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ [RCAR_DU_OUTPUT_HDMI1] = {
+ .possible_crtcs = BIT(2),
+ .port = 2,
+ },
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 3,
},
},
.num_lvds = 1,
+ .dpll_ch = BIT(1) | BIT(2),
};
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
@@ -189,12 +185,10 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
*/
[RCAR_DU_OUTPUT_DPAD0] = {
.possible_crtcs = BIT(2),
- .encoder_type = DRM_MODE_ENCODER_NONE,
.port = 0,
},
[RCAR_DU_OUTPUT_LVDS0] = {
.possible_crtcs = BIT(0),
- .encoder_type = DRM_MODE_ENCODER_LVDS,
.port = 2,
},
},
@@ -318,10 +312,8 @@ static int rcar_du_probe(struct platform_device *pdev)
if (rcdu == NULL)
return -ENOMEM;
- init_waitqueue_head(&rcdu->commit.wait);
-
rcdu->dev = &pdev->dev;
- rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
+ rcdu->info = of_device_get_match_data(rcdu->dev);
platform_set_drvdata(pdev, rcdu);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index c843c3134498..f8cd79488ece 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -38,7 +38,6 @@ struct rcar_du_lvdsenc;
/*
* struct rcar_du_output_routing - Output routing specification
* @possible_crtcs: bitmask of possible CRTCs for the output
- * @encoder_type: DRM type of the internal encoder associated with the output
* @port: device tree port number corresponding to this output route
*
* The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
@@ -47,7 +46,6 @@ struct rcar_du_lvdsenc;
*/
struct rcar_du_output_routing {
unsigned int possible_crtcs;
- unsigned int encoder_type;
unsigned int port;
};
@@ -67,6 +65,7 @@ struct rcar_du_device_info {
unsigned int num_crtcs;
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
+ unsigned int dpll_ch;
};
#define RCAR_DU_MAX_CRTCS 4
@@ -98,11 +97,6 @@ struct rcar_du_device {
unsigned int vspd1_sink;
struct rcar_du_lvdsenc *lvds[RCAR_DU_MAX_LVDS];
-
- struct {
- wait_queue_head_t wait;
- u32 pending;
- } commit;
};
static inline bool rcar_du_has(struct rcar_du_device *rcdu,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index ab8645c57e2d..3e048dd98b64 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -16,14 +16,13 @@
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
-#include "rcar_du_hdmienc.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
#include "rcar_du_lvdsenc.h"
-#include "rcar_du_vgacon.h"
/* -----------------------------------------------------------------------------
* Encoder
@@ -33,6 +32,11 @@ static void rcar_du_encoder_disable(struct drm_encoder *encoder)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ if (renc->connector && renc->connector->panel) {
+ drm_panel_disable(renc->connector->panel);
+ drm_panel_unprepare(renc->connector->panel);
+ }
+
if (renc->lvds)
rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, false);
}
@@ -43,6 +47,11 @@ static void rcar_du_encoder_enable(struct drm_encoder *encoder)
if (renc->lvds)
rcar_du_lvdsenc_enable(renc->lvds, encoder->crtc, true);
+
+ if (renc->connector && renc->connector->panel) {
+ drm_panel_prepare(renc->connector->panel);
+ drm_panel_enable(renc->connector->panel);
+ }
}
static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
@@ -52,30 +61,36 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
const struct drm_display_mode *mode = &crtc_state->mode;
- const struct drm_display_mode *panel_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_device *dev = encoder->dev;
- /* DAC encoders have currently no restriction on the mode. */
- if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
- return 0;
-
- if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "encoder: empty modes list\n");
- return -EINVAL;
+ /*
+ * Only panel-related encoder types require validation here; everything
+ * else is handled by the bridge drivers.
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ const struct drm_display_mode *panel_mode;
+
+ if (list_empty(&connector->modes)) {
+ dev_dbg(dev->dev, "encoder: empty modes list\n");
+ return -EINVAL;
+ }
+
+ panel_mode = list_first_entry(&connector->modes,
+ struct drm_display_mode, head);
+
+ /* We're not allowed to modify the resolution. */
+ if (mode->hdisplay != panel_mode->hdisplay ||
+ mode->vdisplay != panel_mode->vdisplay)
+ return -EINVAL;
+
+ /*
+ * The flat panel mode is fixed; just copy it to the adjusted
+ * mode.
+ */
+ drm_mode_copy(adjusted_mode, panel_mode);
}
- panel_mode = list_first_entry(&connector->modes,
- struct drm_display_mode, head);
-
- /* We're not allowed to modify the resolution. */
- if (mode->hdisplay != panel_mode->hdisplay ||
- mode->vdisplay != panel_mode->vdisplay)
- return -EINVAL;
-
- /* The flat panel mode is fixed, just copy it to the adjusted mode. */
- drm_mode_copy(adjusted_mode, panel_mode);
-
if (renc->lvds)
rcar_du_lvdsenc_atomic_check(renc->lvds, adjusted_mode);
@@ -83,16 +98,54 @@ static int rcar_du_encoder_atomic_check(struct drm_encoder *encoder,
}
static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct drm_display_info *info = &conn_state->connector->display_info;
+ enum rcar_lvds_mode mode;
+
+ rcar_du_crtc_route_output(crtc_state->crtc, renc->output);
+
+ if (!renc->lvds) {
+ /*
+ * The DU driver creates connectors only for the outputs of the
+ * internal LVDS encoders.
+ */
+ renc->connector = NULL;
+ return;
+ }
+
+ renc->connector = to_rcar_connector(conn_state->connector);
+
+ if (!info->num_bus_formats || !info->bus_formats) {
+ dev_err(encoder->dev->dev, "no LVDS bus format reported\n");
+ return;
+ }
+
+ switch (info->bus_formats[0]) {
+ case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
+ case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
+ mode = RCAR_LVDS_MODE_JEIDA;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
+ mode = RCAR_LVDS_MODE_VESA;
+ break;
+ default:
+ dev_err(encoder->dev->dev,
+ "unsupported LVDS bus format 0x%04x\n",
+ info->bus_formats[0]);
+ return;
+ }
+
+ if (info->bus_flags & DRM_BUS_FLAG_DATA_LSB_TO_MSB)
+ mode |= RCAR_LVDS_MODE_MIRROR;
- rcar_du_crtc_route_output(encoder->crtc, renc->output);
+ rcar_du_lvdsenc_set_mode(renc->lvds, mode);
}
static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .mode_set = rcar_du_encoder_mode_set,
+ .atomic_mode_set = rcar_du_encoder_mode_set,
.disable = rcar_du_encoder_disable,
.enable = rcar_du_encoder_enable,
.atomic_check = rcar_du_encoder_atomic_check,
@@ -103,14 +156,13 @@ static const struct drm_encoder_funcs encoder_funcs = {
};
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
- enum rcar_du_encoder_type type,
enum rcar_du_output output,
struct device_node *enc_node,
struct device_node *con_node)
{
struct rcar_du_encoder *renc;
struct drm_encoder *encoder;
- unsigned int encoder_type;
+ struct drm_bridge *bridge = NULL;
int ret;
renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
@@ -133,52 +185,51 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
break;
}
- switch (type) {
- case RCAR_DU_ENCODER_VGA:
- encoder_type = DRM_MODE_ENCODER_DAC;
- break;
- case RCAR_DU_ENCODER_LVDS:
- encoder_type = DRM_MODE_ENCODER_LVDS;
- break;
- case RCAR_DU_ENCODER_HDMI:
- encoder_type = DRM_MODE_ENCODER_TMDS;
- break;
- case RCAR_DU_ENCODER_NONE:
- default:
- /* No external encoder, use the internal encoder type. */
- encoder_type = rcdu->info->routes[output].encoder_type;
- break;
- }
+ if (enc_node) {
+ dev_dbg(rcdu->dev, "initializing encoder %s for output %u\n",
+ of_node_full_name(enc_node), output);
- if (type == RCAR_DU_ENCODER_HDMI) {
- ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
- if (ret < 0)
+ /* Locate the DRM bridge from the encoder DT node. */
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge) {
+ ret = -EPROBE_DEFER;
goto done;
+ }
} else {
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- encoder_type, NULL);
- if (ret < 0)
- goto done;
-
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+ dev_dbg(rcdu->dev,
+ "initializing internal encoder for output %u\n",
+ output);
}
- switch (encoder_type) {
- case DRM_MODE_ENCODER_LVDS:
- ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
- break;
-
- case DRM_MODE_ENCODER_DAC:
- ret = rcar_du_vga_connector_init(rcdu, renc);
- break;
-
- case DRM_MODE_ENCODER_TMDS:
- /* connector managed by the bridge driver */
- break;
-
- default:
- ret = -EINVAL;
- break;
+ ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret < 0)
+ goto done;
+
+ drm_encoder_helper_add(encoder, &encoder_helper_funcs);
+
+ if (bridge) {
+ /*
+ * Attach the bridge to the encoder. The bridge will create the
+ * connector.
+ */
+ ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (ret) {
+ drm_encoder_cleanup(encoder);
+ return ret;
+ }
+ } else {
+ /* There's no bridge, create the connector manually. */
+ switch (output) {
+ case RCAR_DU_OUTPUT_LVDS0:
+ case RCAR_DU_OUTPUT_LVDS1:
+ ret = rcar_du_lvds_connector_init(rcdu, renc, con_node);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
}
done:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index a050a3699857..5422fa4df272 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -17,22 +17,14 @@
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
+struct drm_panel;
struct rcar_du_device;
-struct rcar_du_hdmienc;
struct rcar_du_lvdsenc;
-enum rcar_du_encoder_type {
- RCAR_DU_ENCODER_UNUSED = 0,
- RCAR_DU_ENCODER_NONE,
- RCAR_DU_ENCODER_VGA,
- RCAR_DU_ENCODER_LVDS,
- RCAR_DU_ENCODER_HDMI,
-};
-
struct rcar_du_encoder {
struct drm_encoder base;
enum rcar_du_output output;
- struct rcar_du_hdmienc *hdmi;
+ struct rcar_du_connector *connector;
struct rcar_du_lvdsenc *lvds;
};
@@ -44,13 +36,13 @@ struct rcar_du_encoder {
struct rcar_du_connector {
struct drm_connector connector;
struct rcar_du_encoder *encoder;
+ struct drm_panel *panel;
};
#define to_rcar_connector(c) \
container_of(c, struct rcar_du_connector, connector)
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
- enum rcar_du_encoder_type type,
enum rcar_du_output output,
struct device_node *enc_node,
struct device_node *con_node);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
deleted file mode 100644
index c4c5d1abcff8..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * R-Car Display Unit HDMI Encoder
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/slab.h>
-
-#include <drm/drmP.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_hdmienc.h"
-#include "rcar_du_lvdsenc.h"
-
-struct rcar_du_hdmienc {
- struct rcar_du_encoder *renc;
- bool enabled;
-};
-
-#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
-
-static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- if (hdmienc->renc->lvds)
- rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
- false);
-
- hdmienc->enabled = false;
-}
-
-static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- if (hdmienc->renc->lvds)
- rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
- true);
-
- hdmienc->enabled = true;
-}
-
-static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
- struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
-
- if (hdmienc->renc->lvds)
- rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
- adjusted_mode);
-
- return 0;
-}
-
-
-static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .mode_set = rcar_du_hdmienc_mode_set,
- .disable = rcar_du_hdmienc_disable,
- .enable = rcar_du_hdmienc_enable,
- .atomic_check = rcar_du_hdmienc_atomic_check,
-};
-
-static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
-{
- struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
-
- if (hdmienc->enabled)
- rcar_du_hdmienc_disable(encoder);
-
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = rcar_du_hdmienc_cleanup,
-};
-
-int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc, struct device_node *np)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct drm_bridge *bridge;
- struct rcar_du_hdmienc *hdmienc;
- int ret;
-
- hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL);
- if (hdmienc == NULL)
- return -ENOMEM;
-
- /* Locate the DRM bridge from the HDMI encoder DT node. */
- bridge = of_drm_find_bridge(np);
- if (!bridge)
- return -EPROBE_DEFER;
-
- ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
- DRM_MODE_ENCODER_TMDS, NULL);
- if (ret < 0)
- return ret;
-
- drm_encoder_helper_add(encoder, &encoder_helper_funcs);
-
- renc->hdmi = hdmienc;
- hdmienc->renc = renc;
-
- /* Link the bridge to the encoder. */
- ret = drm_bridge_attach(encoder, bridge, NULL);
- if (ret) {
- drm_encoder_cleanup(encoder);
- return ret;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
deleted file mode 100644
index 2ff0128ac8e1..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * R-Car Display Unit HDMI Encoder
- *
- * Copyright (C) 2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_HDMIENC_H__
-#define __RCAR_DU_HDMIENC_H__
-
-#include <linux/module.h>
-
-struct device_node;
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
-int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc, struct device_node *np);
-#else
-static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc,
- struct device_node *np)
-{
- return -ENOSYS;
-}
-#endif
-
-#endif /* __RCAR_DU_HDMIENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index ff61f6032f2c..f4125c8ca902 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -249,18 +249,9 @@ static int rcar_du_atomic_check(struct drm_device *dev,
return rcar_du_atomic_check_planes(dev, state);
}
-struct rcar_du_commit {
- struct work_struct work;
- struct drm_device *dev;
- struct drm_atomic_state *state;
- u32 crtcs;
-};
-
-static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
+static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = commit->dev;
- struct rcar_du_device *rcdu = dev->dev_private;
- struct drm_atomic_state *old_state = commit->state;
+ struct drm_device *dev = old_state->dev;
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -268,114 +259,31 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_commit_planes(dev, old_state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
+ drm_atomic_helper_commit_hw_done(old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
-
- drm_atomic_state_put(old_state);
-
- /* Complete the commit, wake up any waiter. */
- spin_lock(&rcdu->commit.wait.lock);
- rcdu->commit.pending &= ~commit->crtcs;
- wake_up_all_locked(&rcdu->commit.wait);
- spin_unlock(&rcdu->commit.wait.lock);
-
- kfree(commit);
-}
-
-static void rcar_du_atomic_work(struct work_struct *work)
-{
- struct rcar_du_commit *commit =
- container_of(work, struct rcar_du_commit, work);
-
- rcar_du_atomic_complete(commit);
-}
-
-static int rcar_du_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
- struct rcar_du_commit *commit;
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- unsigned int i;
- int ret;
-
- ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
- return ret;
-
- /* Allocate the commit object. */
- commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- if (commit == NULL) {
- ret = -ENOMEM;
- goto error;
- }
-
- INIT_WORK(&commit->work, rcar_du_atomic_work);
- commit->dev = dev;
- commit->state = state;
-
- /* Wait until all affected CRTCs have completed previous commits and
- * mark them as pending.
- */
- for_each_crtc_in_state(state, crtc, crtc_state, i)
- commit->crtcs |= drm_crtc_mask(crtc);
-
- spin_lock(&rcdu->commit.wait.lock);
- ret = wait_event_interruptible_locked(rcdu->commit.wait,
- !(rcdu->commit.pending & commit->crtcs));
- if (ret == 0)
- rcdu->commit.pending |= commit->crtcs;
- spin_unlock(&rcdu->commit.wait.lock);
-
- if (ret) {
- kfree(commit);
- goto error;
- }
-
- /* Swap the state, this is the point of no return. */
- drm_atomic_helper_swap_state(state, true);
-
- drm_atomic_state_get(state);
- if (nonblock)
- schedule_work(&commit->work);
- else
- rcar_du_atomic_complete(commit);
-
- return 0;
-
-error:
- drm_atomic_helper_cleanup_planes(dev, state);
- return ret;
}
/* -----------------------------------------------------------------------------
* Initialization
*/
+static const struct drm_mode_config_helper_funcs rcar_du_mode_config_helper = {
+ .atomic_commit_tail = rcar_du_atomic_commit_tail,
+};
+
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
.output_poll_changed = rcar_du_output_poll_changed,
.atomic_check = rcar_du_atomic_check,
- .atomic_commit = rcar_du_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct of_endpoint *ep)
{
- static const struct {
- const char *compatible;
- enum rcar_du_encoder_type type;
- } encoders[] = {
- { "adi,adv7123", RCAR_DU_ENCODER_VGA },
- { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
- { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
- };
-
- enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
struct device_node *connector = NULL;
struct device_node *encoder = NULL;
struct device_node *ep_node = NULL;
@@ -394,6 +302,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
return -ENODEV;
}
+ if (!of_device_is_available(entity)) {
+ dev_dbg(rcdu->dev,
+ "connected entity %s is disabled, skipping\n",
+ entity->full_name);
+ return -ENODEV;
+ }
+
entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);
for_each_endpoint_of_node(entity, ep_node) {
@@ -422,30 +337,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
of_node_put(entity_ep_node);
- if (encoder) {
- /*
- * If an encoder has been found, get its type based on its
- * compatible string.
- */
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
- if (of_device_is_compatible(encoder,
- encoders[i].compatible)) {
- enc_type = encoders[i].type;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(encoders)) {
- dev_warn(rcdu->dev,
- "unknown encoder type for %s, skipping\n",
- encoder->full_name);
- of_node_put(encoder);
- of_node_put(connector);
- return -EINVAL;
- }
- } else {
+ if (!encoder) {
/*
* If no encoder has been found the entity must be the
* connector.
@@ -453,7 +345,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
connector = entity;
}
- ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
+ ret = rcar_du_encoder_init(rcdu, output, encoder, connector);
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
"failed to initialize encoder %s on output %u (%d), skipping\n",
@@ -561,6 +453,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
dev->mode_config.max_width = 4095;
dev->mode_config.max_height = 2047;
dev->mode_config.funcs = &rcar_du_mode_config_funcs;
+ dev->mode_config.helper_private = &rcar_du_mode_config_helper;
rcdu->num_crtcs = rcdu->info->num_crtcs;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 3bcfd161c53f..ee91481131ad 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -15,6 +15,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
#include <video/display_timing.h>
#include <video/of_display_timing.h>
@@ -25,47 +26,30 @@
#include "rcar_du_kms.h"
#include "rcar_du_lvdscon.h"
-struct rcar_du_lvds_connector {
- struct rcar_du_connector connector;
-
- struct {
- unsigned int width_mm; /* Panel width in mm */
- unsigned int height_mm; /* Panel height in mm */
- struct videomode mode;
- } panel;
-};
-
-#define to_rcar_lvds_connector(c) \
- container_of(c, struct rcar_du_lvds_connector, connector.connector)
-
static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
{
- struct rcar_du_lvds_connector *lvdscon =
- to_rcar_lvds_connector(connector);
- struct drm_display_mode *mode;
-
- mode = drm_mode_create(connector->dev);
- if (mode == NULL)
- return 0;
+ struct rcar_du_connector *rcon = to_rcar_connector(connector);
- mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
-
- drm_display_mode_from_videomode(&lvdscon->panel.mode, mode);
-
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_panel_get_modes(rcon->panel);
}
static const struct drm_connector_helper_funcs connector_helper_funcs = {
.get_modes = rcar_du_lvds_connector_get_modes,
};
+static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
+{
+ struct rcar_du_connector *rcon = to_rcar_connector(connector);
+
+ drm_panel_detach(rcon->panel);
+ drm_connector_cleanup(connector);
+}
+
static const struct drm_connector_funcs connector_funcs = {
.dpms = drm_atomic_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
+ .destroy = rcar_du_lvds_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -75,27 +59,19 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
const struct device_node *np)
{
struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_lvds_connector *lvdscon;
+ struct rcar_du_connector *rcon;
struct drm_connector *connector;
- struct display_timing timing;
int ret;
- lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
- if (lvdscon == NULL)
+ rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
+ if (rcon == NULL)
return -ENOMEM;
- ret = of_get_display_timing(np, "panel-timing", &timing);
- if (ret < 0)
- return ret;
-
- videomode_from_timing(&timing, &lvdscon->panel.mode);
+ connector = &rcon->connector;
- of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
- of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
-
- connector = &lvdscon->connector.connector;
- connector->display_info.width_mm = lvdscon->panel.width_mm;
- connector->display_info.height_mm = lvdscon->panel.height_mm;
+ rcon->panel = of_drm_find_panel(np);
+ if (!rcon->panel)
+ return -EPROBE_DEFER;
ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -112,7 +88,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
if (ret < 0)
return ret;
- lvdscon->connector.encoder = renc;
+ ret = drm_panel_attach(rcon->panel, connector);
+ if (ret < 0)
+ return ret;
+
+ rcon->encoder = renc;
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index e3a4985f6f3f..1661f6201210 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -31,6 +31,7 @@ struct rcar_du_lvdsenc {
bool enabled;
enum rcar_lvds_input input;
+ enum rcar_lvds_mode mode;
};
static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
@@ -61,7 +62,7 @@ static void rcar_du_lvdsenc_start_gen2(struct rcar_du_lvdsenc *lvds,
/* Select the input, hardcode mode 0, enable LVDS operation and turn
* bias circuitry on.
*/
- lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+ lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_BEN | LVDCR0_LVEN;
if (rcrtc->index == 2)
lvdcr0 |= LVDCR0_DUSEL;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
@@ -114,7 +115,7 @@ static void rcar_du_lvdsenc_start_gen3(struct rcar_du_lvdsenc *lvds,
* Turn the PLL on, set it to LVDS normal mode, wait for the startup
* delay and turn the output on.
*/
- lvdcr0 = LVDCR0_PLLON;
+ lvdcr0 = (lvds->mode << LVDCR0_LVMD_SHIFT) | LVDCR0_PLLON;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
lvdcr0 |= LVDCR0_PWD;
@@ -211,6 +212,12 @@ void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
mode->clock = clamp(mode->clock, 25175, 148500);
}
+void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
+ enum rcar_lvds_mode mode)
+{
+ lvds->mode = mode;
+}
+
static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
struct platform_device *pdev)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index dfdba746edf4..7218ac89333e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -26,8 +26,17 @@ enum rcar_lvds_input {
RCAR_LVDS_INPUT_DU2,
};
+/* Keep in sync with the LVDCR0.LVMD hardware register values. */
+enum rcar_lvds_mode {
+ RCAR_LVDS_MODE_JEIDA = 0,
+ RCAR_LVDS_MODE_MIRROR = 1,
+ RCAR_LVDS_MODE_VESA = 4,
+};
+
#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
+void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
+ enum rcar_lvds_mode mode);
int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
struct drm_crtc *crtc, bool enable);
void rcar_du_lvdsenc_atomic_check(struct rcar_du_lvdsenc *lvds,
@@ -37,6 +46,10 @@ static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
{
return 0;
}
+static inline void rcar_du_lvdsenc_set_mode(struct rcar_du_lvdsenc *lvds,
+ enum rcar_lvds_mode mode)
+{
+}
static inline int rcar_du_lvdsenc_enable(struct rcar_du_lvdsenc *lvds,
struct drm_crtc *crtc, bool enable)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index fedb0161e234..d5bae99d3cfe 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -277,6 +277,29 @@
#define DEFR10_TSEL_H3_TCON1 (0 << 1) /* DEFR102 register only (DU2/DU3) */
#define DEFR10_DEFE10 (1 << 0)
+#define DPLLCR 0x20044
+#define DPLLCR_CODE (0x95 << 24)
+#define DPLLCR_PLCS1 (1 << 23)
+/*
+ * PLCS0 is bit 21, but H3 ES1.x requires bit 20 to be set as well. As bit 20
+ * isn't implemented by other SoCs in the Gen3 family, it can safely be set
+ * unconditionally.
+ */
+#define DPLLCR_PLCS0 (3 << 20)
+#define DPLLCR_CLKE (1 << 18)
+#define DPLLCR_FDPLL(n) ((n) << 12)
+#define DPLLCR_N(n) ((n) << 5)
+#define DPLLCR_M(n) ((n) << 3)
+#define DPLLCR_STBY (1 << 2)
+#define DPLLCR_INCS_DOTCLKIN0 (0 << 0)
+#define DPLLCR_INCS_DOTCLKIN1 (1 << 1)
+
+#define DPLLC2R 0x20048
+#define DPLLC2R_CODE (0x95 << 24)
+#define DPLLC2R_SELC (1 << 12)
+#define DPLLC2R_M(n) ((n) << 8)
+#define DPLLC2R_FDPLL(n) ((n) << 0)
+
/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
*/
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
deleted file mode 100644
index 8d6125c1c0f9..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drmP.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "rcar_du_drv.h"
-#include "rcar_du_encoder.h"
-#include "rcar_du_kms.h"
-#include "rcar_du_vgacon.h"
-
-static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
-{
- return 0;
-}
-
-static const struct drm_connector_helper_funcs connector_helper_funcs = {
- .get_modes = rcar_du_vga_connector_get_modes,
-};
-
-static enum drm_connector_status
-rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- return connector_status_connected;
-}
-
-static const struct drm_connector_funcs connector_funcs = {
- .dpms = drm_atomic_helper_connector_dpms,
- .reset = drm_atomic_helper_connector_reset,
- .detect = rcar_du_vga_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
-{
- struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
- struct rcar_du_connector *rcon;
- struct drm_connector *connector;
- int ret;
-
- rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
- if (rcon == NULL)
- return -ENOMEM;
-
- connector = &rcon->connector;
- connector->display_info.width_mm = 0;
- connector->display_info.height_mm = 0;
- connector->interlace_allowed = true;
-
- ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
- if (ret < 0)
- return ret;
-
- drm_connector_helper_add(connector, &connector_helper_funcs);
-
- connector->dpms = DRM_MODE_DPMS_OFF;
- drm_object_property_set_value(&connector->base,
- rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
-
- ret = drm_mode_connector_attach_encoder(connector, encoder);
- if (ret < 0)
- return ret;
-
- return 0;
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
deleted file mode 100644
index 112f50316e01..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
- *
- * Copyright (C) 2013-2014 Renesas Electronics Corporation
- *
- * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __RCAR_DU_VGACON_H__
-#define __RCAR_DU_VGACON_H__
-
-struct rcar_du_device;
-struct rcar_du_encoder;
-
-int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc);
-
-#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index 510dcc9c6816..f1d0f1824528 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -68,7 +68,7 @@ void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
#else
-static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return 0; };
+static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp) { return -ENXIO; };
static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
diff --git a/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
new file mode 100644
index 000000000000..7539626b8ebd
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_dw_hdmi.c
@@ -0,0 +1,100 @@
+/*
+ * R-Car Gen3 HDMI PHY
+ *
+ * Copyright (C) 2016 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <drm/bridge/dw_hdmi.h>
+
+#define RCAR_HDMI_PHY_OPMODE_PLLCFG 0x06 /* Mode of operation and PLL dividers */
+#define RCAR_HDMI_PHY_PLLCURRGMPCTRL 0x10 /* PLL current and Gmp (conductance) */
+#define RCAR_HDMI_PHY_PLLDIVCTRL 0x11 /* PLL dividers */
+
+struct rcar_hdmi_phy_params {
+ unsigned long mpixelclock;
+ u16 opmode_div; /* Mode of operation and PLL dividers */
+ u16 curr_gmp; /* PLL current and Gmp (conductance) */
+ u16 div; /* PLL dividers */
+};
+
+static const struct rcar_hdmi_phy_params rcar_hdmi_phy_params[] = {
+ { 35500000, 0x0003, 0x0344, 0x0328 },
+ { 44900000, 0x0003, 0x0285, 0x0128 },
+ { 71000000, 0x0002, 0x1184, 0x0314 },
+ { 90000000, 0x0002, 0x1144, 0x0114 },
+ { 140250000, 0x0001, 0x20c4, 0x030a },
+ { 182750000, 0x0001, 0x2084, 0x010a },
+ { 281250000, 0x0000, 0x0084, 0x0305 },
+ { 297000000, 0x0000, 0x0084, 0x0105 },
+ { ~0UL, 0x0000, 0x0000, 0x0000 },
+};
+
+static int rcar_hdmi_phy_configure(struct dw_hdmi *hdmi,
+ const struct dw_hdmi_plat_data *pdata,
+ unsigned long mpixelclock)
+{
+ const struct rcar_hdmi_phy_params *params = rcar_hdmi_phy_params;
+
+ for (; params && params->mpixelclock != ~0UL; ++params) {
+ if (mpixelclock <= params->mpixelclock)
+ break;
+ }
+
+ if (params->mpixelclock == ~0UL)
+ return -EINVAL;
+
+ dw_hdmi_phy_i2c_write(hdmi, params->opmode_div,
+ RCAR_HDMI_PHY_OPMODE_PLLCFG);
+ dw_hdmi_phy_i2c_write(hdmi, params->curr_gmp,
+ RCAR_HDMI_PHY_PLLCURRGMPCTRL);
+ dw_hdmi_phy_i2c_write(hdmi, params->div, RCAR_HDMI_PHY_PLLDIVCTRL);
+
+ return 0;
+}
+
+static const struct dw_hdmi_plat_data rcar_dw_hdmi_plat_data = {
+ .configure_phy = rcar_hdmi_phy_configure,
+};
+
+static int rcar_dw_hdmi_probe(struct platform_device *pdev)
+{
+ return dw_hdmi_probe(pdev, &rcar_dw_hdmi_plat_data);
+}
+
+static int rcar_dw_hdmi_remove(struct platform_device *pdev)
+{
+ dw_hdmi_remove(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_dw_hdmi_of_table[] = {
+ { .compatible = "renesas,rcar-gen3-hdmi" },
+ { /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rcar_dw_hdmi_of_table);
+
+static struct platform_driver rcar_dw_hdmi_platform_driver = {
+ .probe = rcar_dw_hdmi_probe,
+ .remove = rcar_dw_hdmi_remove,
+ .driver = {
+ .name = "rcar-dw-hdmi",
+ .of_match_table = rcar_dw_hdmi_of_table,
+ },
+};
+
+module_platform_driver(rcar_dw_hdmi_platform_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 HDMI Encoder Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 86279f5022c2..88f16cdf6a4b 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -66,7 +66,9 @@ static struct gdp_format_to_str {
#define GAM_GDP_ALPHARANGE_255 BIT(5)
#define GAM_GDP_AGC_FULL_RANGE 0x00808080
#define GAM_GDP_PPT_IGNORE (BIT(1) | BIT(0))
-#define GAM_GDP_SIZE_MAX 0x7FF
+
+#define GAM_GDP_SIZE_MAX_WIDTH 3840
+#define GAM_GDP_SIZE_MAX_HEIGHT 2160
#define GDP_NODE_NB_BANK 2
#define GDP_NODE_PER_FIELD 2
@@ -632,8 +634,8 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
- src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
- src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
format = sti_gdp_fourcc2format(fb->format->format);
if (format == -1) {
@@ -741,8 +743,8 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
/* src_x are in 16.16 format */
src_x = state->src_x >> 16;
src_y = state->src_y >> 16;
- src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
- src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+ src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX_WIDTH);
+ src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX_HEIGHT);
list = sti_gdp_get_free_nodes(gdp);
top_field = list->top_field;
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index d625a82a6e5f..59b757350a1f 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -1,11 +1,11 @@
-sun4i-drm-y += sun4i_crtc.o
sun4i-drm-y += sun4i_drv.o
sun4i-drm-y += sun4i_framebuffer.o
-sun4i-drm-y += sun4i_layer.o
sun4i-tcon-y += sun4i_tcon.o
sun4i-tcon-y += sun4i_rgb.o
sun4i-tcon-y += sun4i_dotclock.o
+sun4i-tcon-y += sun4i_crtc.o
+sun4i-tcon-y += sun4i_layer.o
obj-$(CONFIG_DRM_SUN4I) += sun4i-drm.o sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_backend.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 08ce15070f80..d660741ba475 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -24,7 +24,7 @@
#include "sun4i_backend.h"
#include "sun4i_drv.h"
-static u32 sunxi_rgb2yuv_coef[12] = {
+static const u32 sunxi_rgb2yuv_coef[12] = {
0x00000107, 0x00000204, 0x00000064, 0x00000108,
0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index a5d546a68e16..3c876c3a356a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -19,6 +19,7 @@
#include <linux/clk-provider.h>
#include <linux/ioport.h>
#include <linux/of_address.h>
+#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/regmap.h>
@@ -27,6 +28,7 @@
#include "sun4i_backend.h"
#include "sun4i_crtc.h"
#include "sun4i_drv.h"
+#include "sun4i_layer.h"
#include "sun4i_tcon.h"
static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -50,12 +52,11 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
struct drm_pending_vblank_event *event = crtc->state->event;
DRM_DEBUG_DRIVER("Committing plane changes\n");
- sun4i_backend_commit(drv->backend);
+ sun4i_backend_commit(scrtc->backend);
if (event) {
crtc->state->event = NULL;
@@ -72,11 +73,10 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
static void sun4i_crtc_disable(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Disabling the CRTC\n");
- sun4i_tcon_disable(drv->tcon);
+ sun4i_tcon_disable(scrtc->tcon);
if (crtc->state->event && !crtc->state->active) {
spin_lock_irq(&crtc->dev->event_lock);
@@ -90,11 +90,10 @@ static void sun4i_crtc_disable(struct drm_crtc *crtc)
static void sun4i_crtc_enable(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Enabling the CRTC\n");
- sun4i_tcon_enable(drv->tcon);
+ sun4i_tcon_enable(scrtc->tcon);
}
static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
@@ -107,11 +106,10 @@ static const struct drm_crtc_helper_funcs sun4i_crtc_helper_funcs = {
static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Enabling VBLANK on crtc %p\n", crtc);
- sun4i_tcon_enable_vblank(drv->tcon, true);
+ sun4i_tcon_enable_vblank(scrtc->tcon, true);
return 0;
}
@@ -119,11 +117,10 @@ static int sun4i_crtc_enable_vblank(struct drm_crtc *crtc)
static void sun4i_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
- struct sun4i_drv *drv = scrtc->drv;
DRM_DEBUG_DRIVER("Disabling VBLANK on crtc %p\n", crtc);
- sun4i_tcon_enable_vblank(drv->tcon, false);
+ sun4i_tcon_enable_vblank(scrtc->tcon, false);
}
static const struct drm_crtc_funcs sun4i_crtc_funcs = {
@@ -137,28 +134,67 @@ static const struct drm_crtc_funcs sun4i_crtc_funcs = {
.disable_vblank = sun4i_crtc_disable_vblank,
};
-struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm)
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
+ struct sun4i_backend *backend,
+ struct sun4i_tcon *tcon)
{
- struct sun4i_drv *drv = drm->dev_private;
struct sun4i_crtc *scrtc;
- int ret;
+ struct drm_plane *primary = NULL, *cursor = NULL;
+ int ret, i;
scrtc = devm_kzalloc(drm->dev, sizeof(*scrtc), GFP_KERNEL);
if (!scrtc)
+ return ERR_PTR(-ENOMEM);
+ scrtc->backend = backend;
+ scrtc->tcon = tcon;
+
+ /* Create our layers */
+ scrtc->layers = sun4i_layers_init(drm, scrtc->backend);
+ if (IS_ERR(scrtc->layers)) {
+ dev_err(drm->dev, "Couldn't create the planes\n");
return NULL;
- scrtc->drv = drv;
+ }
+
+ /* find primary and cursor planes for drm_crtc_init_with_planes */
+ for (i = 0; scrtc->layers[i]; i++) {
+ struct sun4i_layer *layer = scrtc->layers[i];
+
+ switch (layer->plane.type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ primary = &layer->plane;
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ cursor = &layer->plane;
+ break;
+ default:
+ break;
+ }
+ }
ret = drm_crtc_init_with_planes(drm, &scrtc->crtc,
- drv->primary,
- NULL,
+ primary,
+ cursor,
&sun4i_crtc_funcs,
NULL);
if (ret) {
dev_err(drm->dev, "Couldn't init DRM CRTC\n");
- return NULL;
+ return ERR_PTR(ret);
}
drm_crtc_helper_add(&scrtc->crtc, &sun4i_crtc_helper_funcs);
+ /* Set crtc.port to the output port node of the tcon */
+ scrtc->crtc.port = of_graph_get_port_by_id(scrtc->tcon->dev->of_node,
+ 1);
+
+ /* Set possible_crtcs to this crtc for overlay planes */
+ for (i = 0; scrtc->layers[i]; i++) {
+ uint32_t possible_crtcs = BIT(drm_crtc_index(&scrtc->crtc));
+ struct sun4i_layer *layer = scrtc->layers[i];
+
+ if (layer->plane.type == DRM_PLANE_TYPE_OVERLAY)
+ layer->plane.possible_crtcs = possible_crtcs;
+ }
+
return scrtc;
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.h b/drivers/gpu/drm/sun4i/sun4i_crtc.h
index dec8ce4d9b25..230cb8f0d601 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.h
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.h
@@ -17,7 +17,9 @@ struct sun4i_crtc {
struct drm_crtc crtc;
struct drm_pending_vblank_event *event;
- struct sun4i_drv *drv;
+ struct sun4i_backend *backend;
+ struct sun4i_tcon *tcon;
+ struct sun4i_layer **layers;
};
static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
@@ -25,6 +27,8 @@ static inline struct sun4i_crtc *drm_crtc_to_sun4i_crtc(struct drm_crtc *crtc)
return container_of(crtc, struct sun4i_crtc, crtc);
}
-struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm);
+struct sun4i_crtc *sun4i_crtc_init(struct drm_device *drm,
+ struct sun4i_backend *backend,
+ struct sun4i_tcon *tcon);
#endif /* _SUN4I_CRTC_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 329ea56106a5..8ddd72cd5873 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -12,6 +12,7 @@
#include <linux/component.h>
#include <linux/of_graph.h>
+#include <linux/of_reserved_mem.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -20,10 +21,9 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
-#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_framebuffer.h"
-#include "sun4i_layer.h"
+#include "sun4i_tcon.h"
DEFINE_DRM_GEM_CMA_FOPS(sun4i_drv_fops);
@@ -92,30 +92,25 @@ static int sun4i_drv_bind(struct device *dev)
}
drm->dev_private = drv;
- drm_vblank_init(drm, 1);
+ ret = of_reserved_mem_device_init(dev);
+ if (ret && ret != -ENODEV) {
+ dev_err(drm->dev, "Couldn't claim our memory region\n");
+ goto free_drm;
+ }
+
+ /* drm_vblank_init calls kcalloc, which can fail */
+ ret = drm_vblank_init(drm, 1);
+ if (ret)
+ goto free_mem_region;
+
drm_mode_config_init(drm);
ret = component_bind_all(drm->dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all pipelines components\n");
- goto free_drm;
- }
-
- /* Create our layers */
- drv->layers = sun4i_layers_init(drm);
- if (IS_ERR(drv->layers)) {
- dev_err(drm->dev, "Couldn't create the planes\n");
- ret = PTR_ERR(drv->layers);
- goto free_drm;
+ goto cleanup_mode_config;
}
- /* Create our CRTC */
- drv->crtc = sun4i_crtc_init(drm);
- if (!drv->crtc) {
- dev_err(drm->dev, "Couldn't create the CRTC\n");
- ret = -EINVAL;
- goto free_drm;
- }
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
@@ -126,7 +121,7 @@ static int sun4i_drv_bind(struct device *dev)
if (IS_ERR(drv->fbdev)) {
dev_err(drm->dev, "Couldn't create our framebuffer\n");
ret = PTR_ERR(drv->fbdev);
- goto free_drm;
+ goto cleanup_mode_config;
}
/* Enable connectors polling */
@@ -134,10 +129,18 @@ static int sun4i_drv_bind(struct device *dev)
ret = drm_dev_register(drm, 0);
if (ret)
- goto free_drm;
+ goto finish_poll;
return 0;
+finish_poll:
+ drm_kms_helper_poll_fini(drm);
+ sun4i_framebuffer_free(drm);
+cleanup_mode_config:
+ drm_mode_config_cleanup(drm);
+ drm_vblank_cleanup(drm);
+free_mem_region:
+ of_reserved_mem_device_release(dev);
free_drm:
drm_dev_unref(drm);
return ret;
@@ -150,7 +153,9 @@ static void sun4i_drv_unbind(struct device *dev)
drm_dev_unregister(drm);
drm_kms_helper_poll_fini(drm);
sun4i_framebuffer_free(drm);
+ drm_mode_config_cleanup(drm);
drm_vblank_cleanup(drm);
+ of_reserved_mem_device_release(dev);
drm_dev_unref(drm);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.h b/drivers/gpu/drm/sun4i/sun4i_drv.h
index 597353eab728..5df50126ff52 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.h
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.h
@@ -18,13 +18,9 @@
struct sun4i_drv {
struct sun4i_backend *backend;
- struct sun4i_crtc *crtc;
struct sun4i_tcon *tcon;
- struct drm_plane *primary;
struct drm_fbdev_cma *fbdev;
-
- struct sun4i_layer **layers;
};
#endif /* _SUN4I_DRV_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index 2c3beff8b53e..9872e0fc03b0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -48,5 +48,4 @@ void sun4i_framebuffer_free(struct drm_device *drm)
struct sun4i_drv *drv = drm->dev_private;
drm_fbdev_cma_fini(drv->fbdev);
- drm_mode_config_cleanup(drm);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 5d53c977bca5..f26bde5b9117 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -16,7 +16,6 @@
#include <drm/drmP.h>
#include "sun4i_backend.h"
-#include "sun4i_drv.h"
#include "sun4i_layer.h"
struct sun4i_plane_desc {
@@ -36,8 +35,7 @@ static void sun4i_backend_layer_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
- struct sun4i_drv *drv = layer->drv;
- struct sun4i_backend *backend = drv->backend;
+ struct sun4i_backend *backend = layer->backend;
sun4i_backend_layer_enable(backend, layer->id, false);
}
@@ -46,8 +44,7 @@ static void sun4i_backend_layer_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct sun4i_layer *layer = plane_to_sun4i_layer(plane);
- struct sun4i_drv *drv = layer->drv;
- struct sun4i_backend *backend = drv->backend;
+ struct sun4i_backend *backend = layer->backend;
sun4i_backend_update_layer_coord(backend, layer->id, plane);
sun4i_backend_update_layer_formats(backend, layer->id, plane);
@@ -104,9 +101,9 @@ static const struct sun4i_plane_desc sun4i_backend_planes[] = {
};
static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
+ struct sun4i_backend *backend,
const struct sun4i_plane_desc *plane)
{
- struct sun4i_drv *drv = drm->dev_private;
struct sun4i_layer *layer;
int ret;
@@ -114,7 +111,8 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
if (!layer)
return ERR_PTR(-ENOMEM);
- ret = drm_universal_plane_init(drm, &layer->plane, BIT(0),
+ /* possible crtcs are set later */
+ ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun4i_backend_layer_funcs,
plane->formats, plane->nformats,
plane->type, NULL);
@@ -125,22 +123,19 @@ static struct sun4i_layer *sun4i_layer_init_one(struct drm_device *drm,
drm_plane_helper_add(&layer->plane,
&sun4i_backend_layer_helper_funcs);
- layer->drv = drv;
-
- if (plane->type == DRM_PLANE_TYPE_PRIMARY)
- drv->primary = &layer->plane;
+ layer->backend = backend;
return layer;
}
-struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm,
+ struct sun4i_backend *backend)
{
- struct sun4i_drv *drv = drm->dev_private;
struct sun4i_layer **layers;
int i;
- layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes),
- sizeof(**layers), GFP_KERNEL);
+ layers = devm_kcalloc(drm->dev, ARRAY_SIZE(sun4i_backend_planes) + 1,
+ sizeof(*layers), GFP_KERNEL);
if (!layers)
return ERR_PTR(-ENOMEM);
@@ -167,9 +162,9 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
*/
for (i = 0; i < ARRAY_SIZE(sun4i_backend_planes); i++) {
const struct sun4i_plane_desc *plane = &sun4i_backend_planes[i];
- struct sun4i_layer *layer = layers[i];
+ struct sun4i_layer *layer;
- layer = sun4i_layer_init_one(drm, plane);
+ layer = sun4i_layer_init_one(drm, backend, plane);
if (IS_ERR(layer)) {
dev_err(drm->dev, "Couldn't initialize %s plane\n",
i ? "overlay" : "primary");
@@ -178,11 +173,12 @@ struct sun4i_layer **sun4i_layers_init(struct drm_device *drm)
DRM_DEBUG_DRIVER("Assigning %s plane to pipe %d\n",
i ? "overlay" : "primary", plane->pipe);
- regmap_update_bits(drv->backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
+ regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG0(i),
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK,
SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(plane->pipe));
layer->id = i;
+ layers[i] = layer;
};
return layers;
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.h b/drivers/gpu/drm/sun4i/sun4i_layer.h
index a2f65d7a3f4e..4be1f0919df2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.h
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.h
@@ -16,6 +16,7 @@
struct sun4i_layer {
struct drm_plane plane;
struct sun4i_drv *drv;
+ struct sun4i_backend *backend;
int id;
};
@@ -25,6 +26,7 @@ plane_to_sun4i_layer(struct drm_plane *plane)
return container_of(plane, struct sun4i_layer, plane);
}
-struct sun4i_layer **sun4i_layers_init(struct drm_device *drm);
+struct sun4i_layer **sun4i_layers_init(struct drm_device *drm,
+ struct sun4i_backend *backend);
#endif /* _SUN4I_LAYER_H_ */
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index 46280dd70c9e..67f0b91a99de 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -18,7 +18,7 @@
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
-#include "sun4i_drv.h"
+#include "sun4i_crtc.h"
#include "sun4i_tcon.h"
#include "sun4i_rgb.h"
@@ -26,7 +26,7 @@ struct sun4i_rgb {
struct drm_connector connector;
struct drm_encoder encoder;
- struct sun4i_drv *drv;
+ struct sun4i_tcon *tcon;
};
static inline struct sun4i_rgb *
@@ -47,8 +47,7 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
{
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
return drm_panel_get_modes(tcon->panel);
}
@@ -57,8 +56,7 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
unsigned long rate = mode->clock * 1000;
@@ -115,8 +113,7 @@ static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
drm_panel_detach(tcon->panel);
drm_connector_cleanup(connector);
@@ -141,8 +138,7 @@ static int sun4i_rgb_atomic_check(struct drm_encoder *encoder,
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Enabling RGB output\n");
@@ -158,8 +154,7 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Disabling RGB output\n");
@@ -177,8 +172,7 @@ static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_drv *drv = rgb->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_tcon *tcon = rgb->tcon;
sun4i_tcon0_mode_set(tcon, mode);
@@ -204,10 +198,8 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
.destroy = sun4i_rgb_enc_destroy,
};
-int sun4i_rgb_init(struct drm_device *drm)
+int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
- struct sun4i_drv *drv = drm->dev_private;
- struct sun4i_tcon *tcon = drv->tcon;
struct drm_encoder *encoder;
struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
@@ -216,7 +208,7 @@ int sun4i_rgb_init(struct drm_device *drm)
rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
return -ENOMEM;
- rgb->drv = drv;
+ rgb->tcon = tcon;
encoder = &rgb->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
@@ -239,7 +231,7 @@ int sun4i_rgb_init(struct drm_device *drm)
}
/* The RGB encoder can only work with the TCON channel 0 */
- rgb->encoder.possible_crtcs = BIT(0);
+ rgb->encoder.possible_crtcs = BIT(drm_crtc_index(&tcon->crtc->crtc));
if (tcon->panel) {
drm_connector_helper_add(&rgb->connector,
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.h b/drivers/gpu/drm/sun4i/sun4i_rgb.h
index 7c4da4c8acdd..40c18f4a6c7e 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.h
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.h
@@ -13,6 +13,6 @@
#ifndef _SUN4I_RGB_H_
#define _SUN4I_RGB_H_
-int sun4i_rgb_init(struct drm_device *drm);
+int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon);
#endif /* _SUN4I_RGB_H_ */
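
The prototype change above completes the move away from a single driver-wide TCON: sun4i_rgb_init() is now handed the TCON it belongs to instead of fetching drv->tcon from drm->dev_private, which is what lets several TCON/CRTC pairs coexist. A minimal sketch of that ownership pattern, using made-up stand-in structs rather than the kernel's sun4i types:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for sun4i_tcon / sun4i_rgb; not the kernel structures. */
struct tcon {
	int id;
};

struct rgb_output {
	struct tcon *tcon;	/* owned by the TCON that created us */
};

/* Mirrors the new calling convention: the caller hands in its TCON
 * instead of the output digging a single global one out of the
 * device-private data. */
static struct rgb_output *rgb_init(struct tcon *tcon)
{
	struct rgb_output *rgb = calloc(1, sizeof(*rgb));

	if (!rgb)
		return NULL;
	rgb->tcon = tcon;
	return rgb;
}

int main(void)
{
	struct tcon tcon0 = { .id = 0 }, tcon1 = { .id = 1 };
	struct rgb_output *a = rgb_init(&tcon0);
	struct rgb_output *b = rgb_init(&tcon1);

	if (!a || !b)
		return 1;
	/* Two outputs can now coexist, each bound to its own TCON. */
	printf("%d %d\n", a->tcon->id, b->tcon->id);
	free(a);
	free(b);
	return 0;
}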
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 2e4e365cecf9..9a83a85529ac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -142,7 +142,7 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
/*
* This is called a backporch in the register documentation,
- * but it really is the front porch + hsync
+ * but it really is the back porch + hsync
*/
bp = mode->crtc_htotal - mode->crtc_hsync_start;
DRM_DEBUG_DRIVER("Setting horizontal total %d, backporch %d\n",
@@ -155,7 +155,7 @@ void sun4i_tcon0_mode_set(struct sun4i_tcon *tcon,
/*
* This is called a backporch in the register documentation,
- * but it really is the front porch + hsync
+ * but it really is the back porch + hsync
*/
bp = mode->crtc_vtotal - mode->crtc_vsync_start;
DRM_DEBUG_DRIVER("Setting vertical total %d, backporch %d\n",
@@ -289,8 +289,7 @@ static irqreturn_t sun4i_tcon_handler(int irq, void *private)
{
struct sun4i_tcon *tcon = private;
struct drm_device *drm = tcon->drm;
- struct sun4i_drv *drv = drm->dev_private;
- struct sun4i_crtc *scrtc = drv->crtc;
+ struct sun4i_crtc *scrtc = tcon->crtc;
unsigned int status;
regmap_read(tcon->regs, SUN4I_TCON_GINT0_REG, &status);
@@ -335,12 +334,11 @@ static int sun4i_tcon_init_clocks(struct device *dev,
}
}
- return sun4i_dclk_create(dev, tcon);
+ return 0;
}
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
{
- sun4i_dclk_free(tcon);
clk_disable_unprepare(tcon->clk);
}
@@ -437,30 +435,45 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = sun4i_tcon_init_clocks(dev, tcon);
+ if (ret) {
+ dev_err(dev, "Couldn't init our TCON clocks\n");
+ goto err_assert_reset;
+ }
+
ret = sun4i_tcon_init_regmap(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON regmap\n");
- goto err_assert_reset;
+ goto err_free_clocks;
}
- ret = sun4i_tcon_init_clocks(dev, tcon);
+ ret = sun4i_dclk_create(dev, tcon);
if (ret) {
- dev_err(dev, "Couldn't init our TCON clocks\n");
- goto err_assert_reset;
+ dev_err(dev, "Couldn't create our TCON dot clock\n");
+ goto err_free_clocks;
}
ret = sun4i_tcon_init_irq(dev, tcon);
if (ret) {
dev_err(dev, "Couldn't init our TCON interrupts\n");
+ goto err_free_dotclock;
+ }
+
+ tcon->crtc = sun4i_crtc_init(drm, drv->backend, tcon);
+ if (IS_ERR(tcon->crtc)) {
+ dev_err(dev, "Couldn't create our CRTC\n");
+ ret = PTR_ERR(tcon->crtc);
goto err_free_clocks;
}
- ret = sun4i_rgb_init(drm);
+ ret = sun4i_rgb_init(drm, tcon);
if (ret < 0)
goto err_free_clocks;
return 0;
+err_free_dotclock:
+ sun4i_dclk_free(tcon);
err_free_clocks:
sun4i_tcon_free_clocks(tcon);
err_assert_reset:
@@ -473,6 +486,7 @@ static void sun4i_tcon_unbind(struct device *dev, struct device *master,
{
struct sun4i_tcon *tcon = dev_get_drvdata(dev);
+ sun4i_dclk_free(tcon);
sun4i_tcon_free_clocks(tcon);
}
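
The bind path above now creates the dot clock and the CRTC explicitly and unwinds them with dedicated goto labels in reverse order of setup. A compressed, self-contained sketch of that unwinding shape; the step names are illustrative and not the driver's real functions:

#include <stdio.h>

/* Illustrative setup/teardown steps standing in for the driver's clock,
 * regmap, dot-clock and IRQ initialisation; the names are made up. */
static int step(const char *name, int fail)
{
	printf("init %s\n", name);
	return fail ? -1 : 0;
}

static void undo(const char *name)
{
	printf("undo %s\n", name);
}

static int bind(int fail_at)
{
	int ret;

	ret = step("clocks", fail_at == 1);
	if (ret)
		goto err;
	ret = step("regmap", fail_at == 2);
	if (ret)
		goto err_free_clocks;
	ret = step("dotclock", fail_at == 3);
	if (ret)
		goto err_free_clocks;
	ret = step("irq", fail_at == 4);
	if (ret)
		goto err_free_dotclock;
	return 0;

	/* Unwind in reverse order of acquisition: exactly one label per
	 * resource that is already live when a later step fails. */
err_free_dotclock:
	undo("dotclock");
err_free_clocks:
	undo("clocks");
err:
	return ret;
}

int main(void)
{
	return bind(4) ? 1 : 0;	/* fail at IRQ setup: dotclock and clocks are undone */
}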
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index 166064bafe2e..f636343a935d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -169,6 +169,9 @@ struct sun4i_tcon {
/* Platform adjustments */
const struct sun4i_tcon_quirks *quirks;
+
+ /* Associated crtc */
+ struct sun4i_crtc *crtc;
};
struct drm_bridge *sun4i_tcon_find_bridge(struct device_node *node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index c6f47222e8fc..49c49431a053 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -19,9 +19,11 @@
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include "sun4i_backend.h"
+#include "sun4i_crtc.h"
#include "sun4i_drv.h"
#include "sun4i_tcon.h"
@@ -349,8 +351,9 @@ static int sun4i_tv_atomic_check(struct drm_encoder *encoder,
static void sun4i_tv_disable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_drv *drv = tv->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct sun4i_tcon *tcon = crtc->tcon;
+ struct sun4i_backend *backend = crtc->backend;
DRM_DEBUG_DRIVER("Disabling the TV Output\n");
@@ -359,18 +362,19 @@ static void sun4i_tv_disable(struct drm_encoder *encoder)
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
0);
- sun4i_backend_disable_color_correction(drv->backend);
+ sun4i_backend_disable_color_correction(backend);
}
static void sun4i_tv_enable(struct drm_encoder *encoder)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_drv *drv = tv->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct sun4i_tcon *tcon = crtc->tcon;
+ struct sun4i_backend *backend = crtc->backend;
DRM_DEBUG_DRIVER("Enabling the TV Output\n");
- sun4i_backend_apply_color_correction(drv->backend);
+ sun4i_backend_apply_color_correction(backend);
regmap_update_bits(tv->regs, SUN4I_TVE_EN_REG,
SUN4I_TVE_EN_ENABLE,
@@ -384,8 +388,8 @@ static void sun4i_tv_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct sun4i_tv *tv = drm_encoder_to_sun4i_tv(encoder);
- struct sun4i_drv *drv = tv->drv;
- struct sun4i_tcon *tcon = drv->tcon;
+ struct sun4i_crtc *crtc = drm_crtc_to_sun4i_crtc(encoder->crtc);
+ struct sun4i_tcon *tcon = crtc->tcon;
const struct tv_mode *tv_mode = sun4i_tv_find_tv_by_mode(mode);
sun4i_tcon1_mode_set(tcon, mode);
@@ -623,7 +627,12 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
goto err_disable_clk;
}
- tv->encoder.possible_crtcs = BIT(0);
+ tv->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
+ dev->of_node);
+ if (!tv->encoder.possible_crtcs) {
+ ret = -EPROBE_DEFER;
+ goto err_disable_clk;
+ }
drm_connector_helper_add(&tv->connector,
&sun4i_tv_comp_connector_helper_funcs);
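
Instead of hard-coding BIT(0), the TV encoder above derives possible_crtcs from the device-tree graph and defers probing when no matching CRTC has been registered yet. A toy sketch of the bitmask-or-defer decision, assuming 517 as EPROBE_DEFER's numeric value purely for the printout:

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define EPROBE_DEFER	517	/* numeric value only needed for this demo */

/* possible_crtcs is a bitmask indexed by CRTC registration order.  An
 * empty mask means the CRTC this encoder needs has not been registered
 * yet, which the bind code above turns into probe deferral. */
static int pick_possible_crtcs(unsigned int crtc_index, int crtc_registered)
{
	unsigned int mask = crtc_registered ? BIT(crtc_index) : 0;

	if (!mask)
		return -EPROBE_DEFER;
	return (int)mask;
}

int main(void)
{
	printf("%d\n", pick_possible_crtcs(0, 1));	/* 1: only CRTC 0 */
	printf("%d\n", pick_possible_crtcs(1, 0));	/* -517: defer and retry later */
	return 0;
}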
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 17e62ecb5d4d..8672f5d2f237 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -619,10 +619,10 @@ static const struct dma_buf_ops tegra_gem_prime_dmabuf_ops = {
.map_dma_buf = tegra_gem_prime_map_dma_buf,
.unmap_dma_buf = tegra_gem_prime_unmap_dma_buf,
.release = tegra_gem_prime_release,
- .kmap_atomic = tegra_gem_prime_kmap_atomic,
- .kunmap_atomic = tegra_gem_prime_kunmap_atomic,
- .kmap = tegra_gem_prime_kmap,
- .kunmap = tegra_gem_prime_kunmap,
+ .map_atomic = tegra_gem_prime_kmap_atomic,
+ .unmap_atomic = tegra_gem_prime_kunmap_atomic,
+ .map = tegra_gem_prime_kmap,
+ .unmap = tegra_gem_prime_kunmap,
.mmap = tegra_gem_prime_mmap,
.vmap = tegra_gem_prime_vmap,
.vunmap = tegra_gem_prime_vunmap,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 412240a3ba90..e44626a2e698 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1020,37 +1020,44 @@ out_unlock:
return ret;
}
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem,
- uint32_t *new_flags)
+static bool ttm_bo_places_compat(const struct ttm_place *places,
+ unsigned num_placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
- int i;
-
- for (i = 0; i < placement->num_placement; i++) {
- const struct ttm_place *heap = &placement->placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
+ unsigned i;
- *new_flags = heap->flags;
- if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM))
- return true;
- }
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
- for (i = 0; i < placement->num_busy_placement; i++) {
- const struct ttm_place *heap = &placement->busy_placement[i];
- if (mem->mm_node &&
- (mem->start < heap->fpfn ||
+ if (mem->mm_node && (mem->start < heap->fpfn ||
(heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
continue;
*new_flags = heap->flags;
if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
- (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM) &&
+ (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
return true;
}
+ return false;
+}
+
+bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
+{
+ if (ttm_bo_places_compat(placement->placement, placement->num_placement,
+ mem, new_flags))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_bo_places_compat(placement->busy_placement,
+ placement->num_busy_placement,
+ mem, new_flags))
+ return true;
return false;
}
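
The ttm_bo_mem_compat() rework above deduplicates the two placement loops into ttm_bo_places_compat() and additionally refuses to call memory compatible when the placement demands TTM_PL_FLAG_CONTIGUOUS but the existing allocation is not contiguous. A standalone sketch of that predicate with simplified stand-in flag values (not the real TTM constants):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the TTM placement flag groups. */
#define PL_MASK_CACHING	0x07
#define PL_MASK_MEM	0x70
#define PL_CONTIGUOUS	0x80

struct place { unsigned flags; };
struct mem   { unsigned placement; };

/* One loop serves both the preferred and the busy placement arrays, and
 * a request for contiguous memory only matches memory that was actually
 * allocated contiguously. */
static bool places_compat(const struct place *places, unsigned n,
			  const struct mem *mem, unsigned *new_flags)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		*new_flags = places[i].flags;
		if ((*new_flags & mem->placement & PL_MASK_CACHING) &&
		    (*new_flags & mem->placement & PL_MASK_MEM) &&
		    (!(*new_flags & PL_CONTIGUOUS) ||
		     (mem->placement & PL_CONTIGUOUS)))
			return true;
	}
	return false;
}

int main(void)
{
	struct place want_contig = { .flags = 0x11 | PL_CONTIGUOUS };
	struct mem scattered = { .placement = 0x11 };
	struct mem contig = { .placement = 0x11 | PL_CONTIGUOUS };
	unsigned flags;

	printf("%d\n", places_compat(&want_contig, 1, &scattered, &flags)); /* 0: rejected */
	printf("%d\n", places_compat(&want_contig, 1, &contig, &flags));    /* 1: compatible */
	return 0;
}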
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 35ffb3754feb..9f53df95f35c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -231,7 +231,7 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
*/
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
- pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
+ pfn = bdev->driver->io_mem_pfn(bo, page_offset);
else {
page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
@@ -324,6 +324,14 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
return bo;
}
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset)
+{
+ return ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT)
+ + page_offset;
+}
+EXPORT_SYMBOL(ttm_bo_default_io_mem_pfn);
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index fdb451e3ec01..26a7ad0f4789 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@ int ttm_base_object_init(struct ttm_object_file *tfile,
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -318,7 +318,8 @@ EXPORT_SYMBOL(ttm_ref_object_exists);
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
}
rcu_read_unlock();
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -449,10 +453,10 @@ void ttm_object_file_release(struct ttm_object_file **p_tfile)
ttm_ref_object_release(&ref->kref);
}
+ spin_unlock(&tfile->lock);
for (i = 0; i < TTM_REF_NUM; ++i)
drm_ht_remove(&tfile->ref_hash[i]);
- spin_unlock(&tfile->lock);
ttm_object_file_unref(&tfile);
}
EXPORT_SYMBOL(ttm_object_file_release);
@@ -529,9 +533,7 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
*p_tdev = NULL;
- spin_lock(&tdev->object_lock);
drm_ht_remove(&tdev->object_hash);
- spin_unlock(&tdev->object_lock);
kfree(tdev);
}
@@ -635,7 +637,7 @@ int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
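
ttm_ref_object_add() above gains a require_existed argument: when set, the call only succeeds if a reference already exists and otherwise fails with -EPERM instead of creating one, which the vmwgfx changes further down rely on. A toy sketch of that add-or-only-if-existing behaviour over a small reference table (not the real TTM hashing code):

#include <errno.h>
#include <stdio.h>

struct ref_entry {
	int handle;
	int count;
};

/* Toy stand-in for ttm_ref_object_add(): bump an existing reference,
 * and only create a new one when the caller does not insist that the
 * reference must already exist. */
static int ref_add(struct ref_entry *table, int n, int handle,
		   int require_existed)
{
	int i;

	for (i = 0; i < n; i++) {
		if (table[i].count && table[i].handle == handle) {
			table[i].count++;
			return 0;
		}
	}
	if (require_existed)
		return -EPERM;

	for (i = 0; i < n; i++) {
		if (!table[i].count) {
			table[i].handle = handle;
			table[i].count = 1;
			return 0;
		}
	}
	return -ENOMEM;
}

int main(void)
{
	struct ref_entry table[4] = { { 0 } };

	printf("%d\n", ref_add(table, 4, 42, 0));	/* 0: created */
	printf("%d\n", ref_add(table, 4, 42, 1));	/* 0: already referenced */
	printf("%d\n", ref_add(table, 4, 7, 1));	/* -1 (EPERM): refused */
	return 0;
}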
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
index ac90ffdb5912..ed0e636243b2 100644
--- a/drivers/gpu/drm/udl/udl_dmabuf.c
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -191,10 +191,10 @@ static struct dma_buf_ops udl_dmabuf_ops = {
.detach = udl_detach_dma_buf,
.map_dma_buf = udl_map_dma_buf,
.unmap_dma_buf = udl_unmap_dma_buf,
- .kmap = udl_dmabuf_kmap,
- .kmap_atomic = udl_dmabuf_kmap_atomic,
- .kunmap = udl_dmabuf_kunmap,
- .kunmap_atomic = udl_dmabuf_kunmap_atomic,
+ .map = udl_dmabuf_kmap,
+ .map_atomic = udl_dmabuf_kmap_atomic,
+ .unmap = udl_dmabuf_kunmap,
+ .unmap_atomic = udl_dmabuf_kunmap_atomic,
.mmap = udl_dmabuf_mmap,
.release = drm_gem_dmabuf_release,
};
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 917dcb978c2c..0c87b1ac6b68 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/prefetch.h>
+#include <asm/unaligned.h>
#include <drm/drmP.h>
#include "udl_drv.h"
@@ -163,7 +164,7 @@ static void udl_compress_hline16(
const u8 *const start = pixel;
const uint16_t repeating_pixel_val16 = pixel_val16;
- *(uint16_t *)cmd = cpu_to_be16(pixel_val16);
+ put_unaligned_be16(pixel_val16, cmd);
cmd += 2;
pixel += bpp;
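
The removed cast store above performed a potentially unaligned 16-bit write, which put_unaligned_be16() avoids while also keeping the big-endian byte order. A standalone sketch of an equivalent byte-wise big-endian store, using a local helper rather than the kernel's asm/unaligned.h:

#include <stdint.h>
#include <stdio.h>

/* Store a 16-bit value big-endian at an arbitrarily aligned address,
 * one byte at a time, so no unaligned 16-bit access is ever issued. */
static void store_be16(uint16_t val, uint8_t *p)
{
	p[0] = (uint8_t)(val >> 8);
	p[1] = (uint8_t)(val & 0xff);
}

int main(void)
{
	uint8_t cmd[5] = { 0 };

	/* Odd offset: a plain *(uint16_t *)&cmd[1] could fault or be slow
	 * on strict-alignment architectures. */
	store_be16(0xabcd, &cmd[1]);
	printf("%02x %02x\n", cmd[1], cmd[2]);	/* ab cd */
	return 0;
}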
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 70ec8ca8d9b1..4e8e27d50922 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -431,6 +431,7 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
.move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
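
With the fault handler now going through bdev->driver->io_mem_pfn, drivers such as virtio-gpu and vmwgfx simply plug in ttm_bo_default_io_mem_pfn, which reproduces the old base-plus-offset arithmetic. A toy sketch of the hook-plus-default pattern; the structs and the 4 KiB page size are assumptions of the demo, not TTM's definitions:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages for the demo */

struct bo {
	unsigned long bus_base;
	unsigned long bus_offset;
};

struct bo_driver {
	/* Per-driver hook; most drivers just plug in the default below. */
	unsigned long (*io_mem_pfn)(struct bo *bo, unsigned long page_offset);
};

static unsigned long default_io_mem_pfn(struct bo *bo, unsigned long page_offset)
{
	return ((bo->bus_base + bo->bus_offset) >> PAGE_SHIFT) + page_offset;
}

static const struct bo_driver demo_driver = {
	.io_mem_pfn = default_io_mem_pfn,
};

int main(void)
{
	struct bo bo = { .bus_base = 0xd0000000UL, .bus_offset = 0x2000UL };

	/* The fault handler asks the driver for the PFN of page 3. */
	printf("pfn = 0x%lx\n", demo_driver.io_mem_pfn(&bo, 3));
	return 0;
}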
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 4c7f24a67a2e..35bf781e418e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -859,4 +859,5 @@ struct ttm_bo_driver vmw_bo_driver = {
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
.io_mem_free = &vmw_ttm_io_mem_free,
+ .io_mem_pfn = ttm_bo_default_io_mem_pfn,
};
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index f6e936d90517..4a641555b960 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -254,7 +254,7 @@ module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
-module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, S_IRUSR | S_IWUSR);
+module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b399f03a988d..6b2708b4eafe 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -538,7 +538,7 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -701,6 +701,41 @@ void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -726,12 +761,9 @@ int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- pr_err("Wait invalid fence object handle 0x%08lx\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -770,12 +802,9 @@ int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- pr_err("Fence signaled invalid fence object handle 0x%08lx\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
@@ -1022,6 +1051,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1035,24 +1065,18 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
-
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ vmw_fence_obj_lookup(tfile, arg->handle);
+
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1095,8 +1119,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
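
vmw_fence_obj_lookup(), documented above, folds the repeated "look up the handle, then prove it is really a fence by comparing its release callback" sequence into one helper that returns an error pointer and drops its reference on a mismatch. A small self-contained sketch of the type-check-by-callback idea; it returns NULL where the kernel helper returns ERR_PTR(-EINVAL):

#include <stdio.h>

struct base_object {
	void (*release)(struct base_object *obj);
};

static void fence_release(struct base_object *obj)  { (void)obj; }
static void buffer_release(struct base_object *obj) { (void)obj; }

/* Accept the object only if its release callback proves its type; this
 * mirrors the refcount_release comparison in vmw_fence_obj_lookup(),
 * which additionally unreferences the object and returns an error
 * pointer on a mismatch. */
static struct base_object *fence_lookup(struct base_object *obj)
{
	if (!obj || obj->release != fence_release)
		return NULL;
	return obj;
}

int main(void)
{
	struct base_object fence = { .release = fence_release };
	struct base_object buf   = { .release = buffer_release };

	printf("%s %s\n",
	       fence_lookup(&fence) ? "fence:ok" : "fence:rejected",
	       fence_lookup(&buf)   ? "buf:ok"   : "buf:rejected");
	return 0;
}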
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03c8c54..5ec24fd801cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -186,7 +184,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
index 31fe32d8d65a..0d42a46521fc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
@@ -108,10 +108,10 @@ const struct dma_buf_ops vmw_prime_dmabuf_ops = {
.map_dma_buf = vmw_prime_map_dma_buf,
.unmap_dma_buf = vmw_prime_unmap_dma_buf,
.release = NULL,
- .kmap = vmw_prime_dmabuf_kmap,
- .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
- .kunmap = vmw_prime_dmabuf_kunmap,
- .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+ .map = vmw_prime_dmabuf_kmap,
+ .map_atomic = vmw_prime_dmabuf_kmap_atomic,
+ .unmap = vmw_prime_dmabuf_kunmap,
+ .unmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
.mmap = vmw_prime_dmabuf_mmap,
.vmap = vmw_prime_dmabuf_vmap,
.vunmap = vmw_prime_dmabuf_vunmap,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index fa1037ec8e5f..7d591f653dfa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -546,7 +546,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -730,7 +730,7 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 41b9d20d6ae7..7681341fe32b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -713,11 +713,14 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
@@ -890,17 +893,16 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
@@ -928,17 +930,14 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;