author    Daniel Vetter <daniel.vetter@ffwll.ch>    2014-12-19 15:33:24 +0100
committer Daniel Vetter <daniel.vetter@ffwll.ch>    2014-12-19 15:33:24 +0100
commit    98c3d67ba7b436f417d4356c924c33a34aae2eef (patch)
tree      be4e12ed39cdb750ad3afe1f16fabf19c6fa3117
parent    e07869d8faf5d0632d96f0d9876db5e6d7ef461d (diff)
parent    4e0cd68115620bc3236ff4e58e4c073948629b41 (diff)
Merge remote-tracking branch 'drm-upstream/drm-next' into drm-intel-nightly
-rw-r--r--  Documentation/DocBook/drm.tmpl | 39
-rw-r--r--  Documentation/devicetree/bindings/gpu/st,stih4xx.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/iommu/rockchip,iommu.txt | 26
-rw-r--r--  Documentation/devicetree/bindings/video/rockchip-drm.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/video/rockchip-vop.txt | 58
-rw-r--r--  MAINTAINERS | 38
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/s390/kernel/nmi.c | 8
-rw-r--r--  arch/x86/boot/compressed/Makefile | 2
-rw-r--r--  arch/x86/kernel/cpu/microcode/core.c | 2
-rw-r--r--  block/bio-integrity.c | 13
-rw-r--r--  drivers/acpi/video.c | 3
-rw-r--r--  drivers/ata/ahci.c | 4
-rw-r--r--  drivers/ata/sata_fsl.c | 2
-rw-r--r--  drivers/gpu/drm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 168
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 7
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 25
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 187
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 134
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 1
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_dp_mst.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 24
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 92
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_release.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/ci_dpm.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/cikd.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 26
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 130
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 107
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 32
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_uvd.c | 14
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vce.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_vm.c | 74
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/sid.h | 8
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 17
-rw-r--r--  drivers/gpu/drm/rockchip/Makefile | 8
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 551
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 68
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 201
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 28
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 210
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | 21
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 294
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 54
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 1455
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 201
-rw-r--r--  drivers/gpu/drm/sti/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/sti/Makefile | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.c | 20
-rw-r--r--  drivers/gpu/drm/sti/sti_compositor.h | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.c | 242
-rw-r--r--  drivers/gpu/drm/sti/sti_cursor.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_crtc.c | 23
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/sti/sti_drm_plane.c | 4
-rw-r--r--  drivers/gpu/drm/sti/sti_gdp.c | 62
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 84
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.h | 6
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.c | 1073
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_hqvdp_lut.h | 373
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.c | 18
-rw-r--r--  drivers/gpu/drm/sti/sti_layer.h | 12
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.c | 17
-rw-r--r--  drivers/gpu/drm/sti/sti_mixer.h | 3
-rw-r--r--  drivers/gpu/drm/sti/sti_tvout.c | 104
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 39
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-cadence.c | 11
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-omap.c | 10
-rw-r--r--  drivers/input/evdev.c | 2
-rw-r--r--  drivers/iommu/Kconfig | 12
-rw-r--r--  drivers/iommu/Makefile | 1
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 1038
-rw-r--r--  drivers/media/i2c/smiapp/smiapp-core.c | 2
-rw-r--r--  drivers/media/pci/cx23885/cx23885-core.c | 6
-rw-r--r--  drivers/media/pci/solo6x10/solo6x10-core.c | 10
-rw-r--r--  drivers/media/rc/ir-rc6-decoder.c | 2
-rw-r--r--  drivers/media/usb/s2255/s2255drv.c | 2
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 8
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 96
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 18
-rw-r--r--  drivers/net/xen-netfront.c | 5
-rw-r--r--  drivers/of/fdt.c | 2
-rw-r--r--  drivers/pci/host/pci-tegra.c | 28
-rw-r--r--  drivers/watchdog/s3c2410_wdt.c | 2
-rw-r--r--  fs/fat/namei_vfat.c | 20
-rw-r--r--  fs/jbd2/journal.c | 5
-rw-r--r--  include/drm/drm_crtc.h | 38
-rw-r--r--  include/drm/drm_displayid.h | 76
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 4
-rw-r--r--  include/drm/drm_edid.h | 2
-rw-r--r--  include/drm/drm_fb_helper.h | 6
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h | 9
-rw-r--r--  include/uapi/linux/Kbuild | 2
-rw-r--r--  ipc/sem.c | 15
-rw-r--r--  kernel/sched/core.c | 8
-rw-r--r--  lib/genalloc.c | 1
-rw-r--r--  lib/show_mem.c | 2
-rw-r--r--  mm/frontswap.c | 4
-rw-r--r--  mm/memory.c | 26
-rw-r--r--  mm/mmap.c | 10
-rw-r--r--  mm/rmap.c | 6
-rw-r--r--  mm/slab.c | 2
-rw-r--r--  mm/vmpressure.c | 8
-rw-r--r--  net/core/rtnetlink.c | 1
-rw-r--r--  security/keys/internal.h | 1
-rw-r--r--  security/keys/keyctl.c | 56
-rw-r--r--  security/keys/keyring.c | 10
-rw-r--r--  security/keys/request_key.c | 2
-rw-r--r--  security/keys/request_key_auth.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 2
148 files changed, 7627 insertions(+), 717 deletions(-)
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index bd1456ad8460..995dfb24bcb0 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -1947,10 +1947,16 @@ void intel_crt_init(struct drm_device *dev)
and then retrieves a list of modes by calling the connector
<methodname>get_modes</methodname> helper operation.
</para>
+ <para>
+ If the helper operation returns no mode, and if the connector status
+ is connector_status_connected, standard VESA DMT modes up to
+ 1024x768 are automatically added to the modes list by a call to
+ <function>drm_add_modes_noedid</function>.
+ </para>
<para>
- The function filters out modes larger than
+ The function then filters out modes larger than
<parameter>max_width</parameter> and <parameter>max_height</parameter>
- if specified. It then calls the optional connector
+ if specified. It finally calls the optional connector
<methodname>mode_valid</methodname> helper operation for each mode in
the probed list to check whether the mode is valid for the connector.
</para>
@@ -2090,12 +2096,20 @@ void intel_crt_init(struct drm_device *dev)
<synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
<para>
Fill the connector's <structfield>probed_modes</structfield> list
- by parsing EDID data with <function>drm_add_edid_modes</function> or
- calling <function>drm_mode_probed_add</function> directly for every
+ by parsing EDID data with <function>drm_add_edid_modes</function>,
+ adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
+ or calling <function>drm_mode_probed_add</function> directly for every
supported mode and return the number of modes it has detected. This
operation is mandatory.
</para>
<para>
+ Note that the caller function will automatically add standard VESA
+ DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
+ helper operation returns no mode and if the connector status is
+ connector_status_connected. There is no need to call
+ <function>drm_add_edid_modes</function> manually in that case.
+ </para>
+ <para>
When adding modes manually the driver creates each mode with a call to
<function>drm_mode_create</function> and must fill the following fields.
<itemizedlist>
@@ -2292,7 +2306,7 @@ void intel_crt_init(struct drm_device *dev)
<function>drm_helper_probe_single_connector_modes</function>.
</para>
<para>
- When parsing EDID data, <function>drm_add_edid_modes</function> fill the
+ When parsing EDID data, <function>drm_add_edid_modes</function> fills the
connector <structfield>display_info</structfield>
<structfield>width_mm</structfield> and
<structfield>height_mm</structfield> fields. When creating modes
@@ -2412,6 +2426,10 @@ void intel_crt_init(struct drm_device *dev)
!Edrivers/gpu/drm/drm_plane_helper.c
!Pdrivers/gpu/drm/drm_plane_helper.c overview
</sect2>
+ <sect2>
+ <title>Tile group</title>
+!Pdrivers/gpu/drm/drm_crtc.c Tile group
+ </sect2>
</sect1>
<!-- Internals: kms properties -->
@@ -2546,8 +2564,8 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Description/Restrictions</td>
</tr>
<tr>
- <td rowspan="23" valign="top" >DRM</td>
- <td rowspan="3" valign="top" >Generic</td>
+ <td rowspan="25" valign="top" >DRM</td>
+ <td rowspan="4" valign="top" >Generic</td>
<td valign="top" >“EDID”</td>
<td valign="top" >BLOB | IMMUTABLE</td>
<td valign="top" >0</td>
@@ -2569,6 +2587,13 @@ void intel_crt_init(struct drm_device *dev)
<td valign="top" >Contains topology path to a connector.</td>
</tr>
<tr>
+ <td valign="top" >“TILE”</td>
+ <td valign="top" >BLOB | IMMUTABLE</td>
+ <td valign="top" >0</td>
+ <td valign="top" >Connector</td>
+ <td valign="top" >Contains tiling information for a connector.</td>
+ </tr>
+ <tr>
<td rowspan="1" valign="top" >Plane</td>
<td valign="top" >“type”</td>
<td valign="top" >ENUM | IMMUTABLE</td>
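
As a hedged illustration of the get_modes contract documented in the hunks above (a sketch, not part of this patch; foo_device, connector_to_foo() and the ddc field are invented names), a driver-side implementation only parses what it can reach and returns the mode count, leaving the VESA DMT fallback to the probe helper:

static int foo_connector_get_modes(struct drm_connector *connector)
{
        struct foo_device *foo = connector_to_foo(connector); /* hypothetical */
        struct edid *edid;
        int count = 0;

        /* Parse EDID when available; each mode lands on probed_modes. */
        edid = drm_get_edid(connector, foo->ddc);
        if (edid) {
                drm_mode_connector_update_edid_property(connector, edid);
                count = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }

        /*
         * No drm_add_modes_noedid() fallback needed here: as documented
         * above, the caller adds DMT modes up to 1024x768 itself when
         * this returns 0 on a connected connector.
         */
        return count;
}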
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 2d150c311a05..c99eb34e640b 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -68,7 +68,7 @@ STMicroelectronics stih4xx platforms
number of clocks may depend of the SoC type.
- clock-names: names of the clocks listed in clocks property in the same
order.
- - hdmi,hpd-gpio: gpio id to detect if an hdmi cable is plugged or not.
+ - ddc: phandle of an I2C controller used for DDC EDID probing
sti-hda:
Required properties:
@@ -83,6 +83,22 @@ sti-hda:
- clock-names: names of the clocks listed in clocks property in the same
order.
+sti-hqvdp:
+ must be a child of sti-display-subsystem
+ Required properties:
+ - compatible: "st,stih<chip>-hqvdp"
+ - reg: Physical base address of the IP registers and length of memory mapped region.
+ - clocks: from common clock binding: handle hardware IP needed clocks, the
+ number of clocks may depend of the SoC type.
+ See ../clocks/clock-bindings.txt for details.
+ - clock-names: names of the clocks listed in clocks property in the same
+ order.
+ - resets: resets to be used by the device
+ See ../reset/reset.txt for details.
+ - reset-names: names of the resets listed in resets property in the same
+ order.
+ - st,vtg: phandle on vtg main device node.
+
Example:
/ {
@@ -173,7 +189,6 @@ Example:
interrupt-names = "irq";
clock-names = "pix", "tmds", "phy", "audio";
clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
- hdmi,hpd-gpio = <&PIO2 5>;
};
sti-hda@fe85a000 {
@@ -184,6 +199,16 @@ Example:
clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
};
};
+
+ sti-hqvdp@9c000000 {
+ compatible = "st,stih407-hqvdp";
+ reg = <0x9C00000 0x100000>;
+ clock-names = "hqvdp", "pix_main";
+ clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>;
+ reset-names = "hqvdp";
+ resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
+ st,vtg = <&vtg_main>;
+ };
};
...
};
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
new file mode 100644
index 000000000000..9a55ac3735e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -0,0 +1,26 @@
+Rockchip IOMMU
+==============
+
+A Rockchip DRM iommu translates io virtual addresses to physical addresses for
+its master device. Each slave device is bound to a single master device, and
+shares its clocks, power domain and irq.
+
+Required properties:
+- compatible : Should be "rockchip,iommu"
+- reg : Address space for the configuration registers
+- interrupts : Interrupt specifier for the IOMMU instance
+- interrupt-names : Interrupt name for the IOMMU instance
+- #iommu-cells : Should be <0>. This indicates the iommu is a
+ "single-master" device, and needs no additional information
+ to associate with its master device. See:
+ Documentation/devicetree/bindings/iommu/iommu.txt
+
+Example:
+
+ vopl_mmu: iommu@ff940300 {
+ compatible = "rockchip,iommu";
+ reg = <0xff940300 0x100>;
+ interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "vopl_mmu";
+ #iommu-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt
new file mode 100644
index 000000000000..7fff582495a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-drm.txt
@@ -0,0 +1,19 @@
+Rockchip DRM master device
+================================
+
+The Rockchip DRM master device is a virtual device needed to list all
+vop devices or other display interface nodes that comprise the
+graphics subsystem.
+
+Required properties:
+- compatible: Should be "rockchip,display-subsystem"
+- ports: Should contain a list of phandles pointing to display interface port
+ of vop devices. vop definitions as defined in
+ Documentation/devicetree/bindings/video/rockchip-vop.txt
+
+example:
+
+display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vopl_out>, <&vopb_out>;
+};
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt
new file mode 100644
index 000000000000..d15351f2313d
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-vop.txt
@@ -0,0 +1,58 @@
+device-tree bindings for rockchip soc display controller (vop)
+
+VOP (Visual Output Processor) is the Display Controller for the Rockchip
+series of SoCs which transfers the image data from a video memory
+buffer to an external LCD interface.
+
+Required properties:
+- compatible: value should be one of the following
+ "rockchip,rk3288-vop";
+
+- interrupts: should contain a list of all VOP IP block interrupts in the
+ order: VSYNC, LCD_SYSTEM. The interrupt specifier
+ format depends on the interrupt controller used.
+
+- clocks: must include clock specifiers corresponding to entries in the
+ clock-names property.
+
+- clock-names: Must contain
+ aclk_vop: for ddr buffer transfer.
+ hclk_vop: for ahb bus to R/W the phy regs.
+ dclk_vop: pixel clock.
+
+- resets: Must contain an entry for each entry in reset-names.
+ See ../reset/reset.txt for details.
+- reset-names: Must include the following entries:
+ - axi
+ - ahb
+ - dclk
+
+- iommus: required a iommu node
+
+- port: A port node with endpoint definitions as defined in
+ Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+SoC specific DT entry:
+ vopb: vopb@ff930000 {
+ compatible = "rockchip,rk3288-vop";
+ reg = <0xff930000 0x19c>;
+ interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
+ clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
+ resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
+ reset-names = "axi", "ahb", "dclk";
+ iommus = <&vopb_mmu>;
+ vopb_out: port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ vopb_out_edp: endpoint@0 {
+ reg = <0>;
+ remote-endpoint=<&edp_in_vopb>;
+ };
+ vopb_out_hdmi: endpoint@1 {
+ reg = <1>;
+ remote-endpoint=<&hdmi_in_vopb>;
+ };
+ };
+ };
diff --git a/MAINTAINERS b/MAINTAINERS
index 55d3e9b93338..296c02d39c29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1838,7 +1838,7 @@ F: include/net/ax25.h
F: net/ax25/
AZ6007 DVB DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -2208,7 +2208,7 @@ F: Documentation/filesystems/btrfs.txt
F: fs/btrfs/
BTTV VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -2729,7 +2729,7 @@ F: drivers/media/common/cx2341x*
F: include/media/cx2341x*
CX88 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -3419,7 +3419,7 @@ F: fs/ecryptfs/
EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com>
M: Borislav Petkov <bp@alien8.de>
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Supported
@@ -3468,7 +3468,7 @@ S: Maintained
F: drivers/edac/e7xxx_edac.c
EDAC-GHES
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
@@ -3496,21 +3496,21 @@ S: Maintained
F: drivers/edac/i5000_edac.c
EDAC-I5400
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i5400_edac.c
EDAC-I7300
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
F: drivers/edac/i7300_edac.c
EDAC-I7CORE
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
@@ -3553,7 +3553,7 @@ S: Maintained
F: drivers/edac/r82600_edac.c
EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Maintained
@@ -3613,7 +3613,7 @@ S: Maintained
F: drivers/net/ethernet/ibm/ehea/
EM28XX VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -5979,7 +5979,7 @@ S: Maintained
F: drivers/media/radio/radio-maxiradio*
MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
P: LinuxTV.org Project
L: linux-media@vger.kernel.org
W: http://linuxtv.org
@@ -8030,7 +8030,7 @@ S: Odd Fixes
F: drivers/media/i2c/saa6588*
SAA7134 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -8488,7 +8488,7 @@ S: Maintained
F: drivers/media/radio/si4713/radio-usb-si4713.c
SIANO DVB DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -8699,7 +8699,9 @@ S: Maintained
F: drivers/leds/leds-net48xx.c
SOFTLOGIC 6x10 MPEG CODEC
-M: Ismael Luceno <ismael.luceno@corp.bluecherry.net>
+M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
+M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
+M: Andrey Utkin <andrey.krieger.utkin@gmail.com>
L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/pci/solo6x10/
@@ -9173,7 +9175,7 @@ S: Maintained
F: drivers/media/i2c/tda9840*
TEA5761 TUNER DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -9181,7 +9183,7 @@ S: Odd fixes
F: drivers/media/tuners/tea5761.*
TEA5767 TUNER DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -9493,7 +9495,7 @@ F: include/linux/shmem_fs.h
F: mm/shmem.c
TM6000 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
@@ -10314,7 +10316,7 @@ S: Maintained
F: arch/x86/kernel/cpu/mcheck/*
XC2028/3028 TUNER DRIVER
-M: Mauro Carvalho Chehab <m.chehab@samsung.com>
+M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
L: linux-media@vger.kernel.org
W: http://linuxtv.org
T: git git://linuxtv.org/media_tree.git
diff --git a/Makefile b/Makefile
index ce70361f766e..fd80c6e9bc23 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
NAME = Diseased Newt
# *DOCUMENTATION*
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index dd1c24ceda50..3f51cf4e8f02 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
*/
local_irq_save(flags);
local_mcck_disable();
- /*
- * Ummm... Does this make sense at all? Copying the percpu struct
- * and then zapping it one statement later?
- */
- memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
- memset(&mcck, 0, sizeof(struct mcck_struct));
+ mcck = *this_cpu_ptr(&cpu_mcck);
+ memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable();
local_irq_restore(flags);
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index be1e07d4b596..45abc363dd3e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -76,7 +76,7 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4
-RUN_SIZE = $(shell objdump -h vmlinux | \
+RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
perl $(srctree)/arch/x86/tools/calc_run_size.pl)
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 2ce9051174e6..08fe6e8a726e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,6 +465,7 @@ static void mc_bp_resume(void)
if (uci->valid && uci->mc)
microcode_ops->apply_microcode(cpu);
+#ifdef CONFIG_X86_64
else if (!uci->mc)
/*
* We might resume and not have applied late microcode but still
@@ -473,6 +474,7 @@ static void mc_bp_resume(void)
* applying patches early on the APs.
*/
load_ucode_ap();
+#endif
}
static struct syscore_ops mc_syscore_ops = {
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232e429f..5cbd5d9ea61d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
{
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
struct blk_integrity_iter iter;
- struct bio_vec *bv;
+ struct bvec_iter bviter;
+ struct bio_vec bv;
struct bio_integrity_payload *bip = bio_integrity(bio);
- unsigned int i, ret = 0;
+ unsigned int ret = 0;
void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset;
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
iter.seed = bip_get_seed(bip);
iter.prot_buf = prot_buf;
- bio_for_each_segment_all(bv, bio, i) {
- void *kaddr = kmap_atomic(bv->bv_page);
+ bio_for_each_segment(bv, bio, bviter) {
+ void *kaddr = kmap_atomic(bv.bv_page);
- iter.data_buf = kaddr + bv->bv_offset;
- iter.data_size = bv->bv_len;
+ iter.data_buf = kaddr + bv.bv_offset;
+ iter.data_size = bv.bv_len;
ret = proc_fn(&iter);
if (ret) {
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 807a88a0f394..9d75ead2a1f9 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
return true;
for (i = 0; i < video->attached_count; i++) {
- if (video->attached_array[i].bind_info == device)
+ if ((video->attached_array[i].value.int_val & 0xfff) ==
+ (device->device_id & 0xfff))
return true;
}
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index e45f83789809..49f1e6890587 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -321,6 +321,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
{ PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+ { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -492,6 +495,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
* enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
*/
{ PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+ { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 07bc7e4dbd04..65071591b143 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->csr_base = csr_base;
irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
- if (irq < 0) {
+ if (!irq) {
dev_err(&ofdev->dev, "invalid irq from platform\n");
goto error_exit_with_cleanup;
}
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 24c2d7caedd5..c3413b6adb17 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -167,6 +167,8 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
+source "drivers/gpu/drm/rockchip/Kconfig"
+
source "drivers/gpu/drm/vmwgfx/Kconfig"
source "drivers/gpu/drm/gma500/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 47d89869c5df..66e40398b3d3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
obj-$(CONFIG_DRM_GMA500) += gma500/
obj-$(CONFIG_DRM_UDL) += udl/
obj-$(CONFIG_DRM_AST) += ast/
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 102cd36799b1..4f7b275f2f7b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -102,15 +102,26 @@ struct device *kfd_chardev(void)
static int kfd_open(struct inode *inode, struct file *filep)
{
struct kfd_process *process;
+ bool is_32bit_user_mode;
if (iminor(inode) != 0)
return -ENODEV;
+ is_32bit_user_mode = is_compat_task();
+
+ if (is_32bit_user_mode == true) {
+ dev_warn(kfd_device,
+ "Process %d (32-bit) failed to open /dev/kfd\n"
+ "32-bit processes are not supported by amdkfd\n",
+ current->pid);
+ return -EPERM;
+ }
+
process = kfd_create_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
- process->is_32bit_user_mode = is_compat_task();
+ process->is_32bit_user_mode = is_32bit_user_mode;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 9abac48de499..935071410724 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -221,8 +221,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
queue_size_dwords;
if (packet_size_in_dwords >= queue_size_dwords ||
- packet_size_in_dwords >= available_size)
+ packet_size_in_dwords >= available_size) {
+ /*
+ * make sure calling functions know
+ * acquire_packet_buffer() failed
+ */
+ *buffer_ptr = NULL;
return -ENOMEM;
+ }
if (wptr + packet_size_in_dwords >= queue_size_dwords) {
while (wptr > 0) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 2458ab7c0c6e..71699ad97d74 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -32,8 +32,7 @@ int kfd_pasid_init(void)
{
pasid_limit = max_num_of_processes;
- pasid_bitmap = kzalloc(DIV_ROUND_UP(pasid_limit, BITS_PER_BYTE),
- GFP_KERNEL);
+ pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b4f49ac13334..b85eb0b830b4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -196,7 +196,7 @@ static void kfd_process_destroy_delayed(struct rcu_head *rcu)
mmdrop(p->mm);
work = (struct kfd_process_release_work *)
- kmalloc(sizeof(struct kfd_process_release_work), GFP_KERNEL);
+ kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
if (work) {
INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 2985e3ff364f..4d0baa43de58 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -725,6 +725,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
if (crtc->state && crtc->funcs->atomic_destroy_state)
crtc->funcs->atomic_destroy_state(crtc, crtc->state);
+
+ memset(crtc, 0, sizeof(*crtc));
}
EXPORT_SYMBOL(drm_crtc_cleanup);
@@ -908,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *t;
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+
list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
drm_mode_remove(connector, mode);
@@ -927,6 +934,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
if (connector->state && connector->funcs->atomic_destroy_state)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+
+ memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -1068,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
list_del(&bridge->head);
dev->mode_config.num_bridge--;
drm_modeset_unlock_all(dev);
+
+ memset(bridge, 0, sizeof(*bridge));
}
EXPORT_SYMBOL(drm_bridge_cleanup);
@@ -1134,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
kfree(encoder->name);
- encoder->name = NULL;
list_del(&encoder->head);
dev->mode_config.num_encoder--;
drm_modeset_unlock_all(dev);
+
+ memset(encoder, 0, sizeof(*encoder));
}
EXPORT_SYMBOL(drm_encoder_cleanup);
@@ -1257,6 +1269,8 @@ void drm_plane_cleanup(struct drm_plane *plane)
WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
if (plane->state && plane->funcs->atomic_destroy_state)
plane->funcs->atomic_destroy_state(plane, plane->state);
+
+ memset(plane, 0, sizeof(*plane));
}
EXPORT_SYMBOL(drm_plane_cleanup);
@@ -1339,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
"PATH", 0);
dev->mode_config.path_property = dev_path;
+ dev->mode_config.tile_property = drm_property_create(dev,
+ DRM_MODE_PROP_BLOB |
+ DRM_MODE_PROP_IMMUTABLE,
+ "TILE", 0);
+
return 0;
}
@@ -3456,7 +3475,7 @@ void drm_fb_release(struct drm_file *priv)
/*
* When the file gets released that means no one else can access the fb
- * list any more, so no need to grab fpriv->fbs_lock. And we need to to
+ * list any more, so no need to grab fpriv->fbs_lock. And we need to
* avoid upsetting lockdep since the universal cursor code adds a
* framebuffer while holding mutex locks.
*
@@ -4095,6 +4114,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_mode_connector_set_path_property);
/**
+ * drm_mode_connector_set_tile_property - set tile property on connector
+ * @connector: connector to set property on.
+ *
+ * This looks up the tile information for a connector, and creates a
+ * property for userspace to parse if it exists. The property is of
+ * the form of 8 integers using ':' as a separator.
+ *
+ * Returns:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_connector_set_tile_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ int ret, size;
+ char tile[256];
+
+ if (connector->tile_blob_ptr)
+ drm_property_destroy_blob(dev, connector->tile_blob_ptr);
+
+ if (!connector->has_tile) {
+ connector->tile_blob_ptr = NULL;
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.tile_property, 0);
+ return ret;
+ }
+
+ snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
+ connector->tile_group->id, connector->tile_is_single_monitor,
+ connector->num_h_tile, connector->num_v_tile,
+ connector->tile_h_loc, connector->tile_v_loc,
+ connector->tile_h_size, connector->tile_v_size);
+ size = strlen(tile) + 1;
+
+ connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
+ size, tile);
+ if (!connector->tile_blob_ptr)
+ return -EINVAL;
+
+ ret = drm_object_property_set_value(&connector->base,
+ dev->mode_config.tile_property,
+ connector->tile_blob_ptr->base.id);
+ return ret;
+}
+EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
+
+/**
* drm_mode_connector_update_edid_property - update the edid property of a connector
* @connector: drm connector
* @edid: new value of the edid property
@@ -5164,6 +5229,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
+ idr_init(&dev->mode_config.tile_idr);
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
@@ -5251,6 +5317,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
+ idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
}
@@ -5273,3 +5340,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
supported_rotations);
}
EXPORT_SYMBOL(drm_mode_create_rotation_property);
+
+/**
+ * DOC: Tile group
+ *
+ * Tile groups are used to represent tiled monitors with a unique
+ * integer identifier. Tiled monitors using DisplayID v1.3 have
+ * a unique 8-byte handle, we store this in a tile group, so we
+ * have a common identifier for all tiles in a monitor group.
+ */
+static void drm_tile_group_free(struct kref *kref)
+{
+ struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
+ struct drm_device *dev = tg->dev;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_remove(&dev->mode_config.tile_idr, tg->id);
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ kfree(tg);
+}
+
+/**
+ * drm_mode_put_tile_group - drop a reference to a tile group.
+ * @dev: DRM device
+ * @tg: tile group to drop reference to.
+ *
+ * drop reference to tile group and free if 0.
+ */
+void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg)
+{
+ kref_put(&tg->refcount, drm_tile_group_free);
+}
+
+/**
+ * drm_mode_get_tile_group - get a reference to an existing tile group
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Use the unique bytes to get a reference to an existing tile group.
+ *
+ * RETURNS:
+ * tile group or NULL if not found.
+ */
+struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int id;
+ mutex_lock(&dev->mode_config.idr_mutex);
+ idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
+ if (!memcmp(tg->group_data, topology, 8)) {
+ if (!kref_get_unless_zero(&tg->refcount))
+ tg = NULL;
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+ }
+ }
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return NULL;
+}
+
+/**
+ * drm_mode_create_tile_group - create a tile group from a displayid description
+ * @dev: DRM device
+ * @topology: 8-bytes unique per monitor.
+ *
+ * Create a tile group for the unique monitor, and get a unique
+ * identifier for the tile group.
+ *
+ * RETURNS:
+ * new tile group or error.
+ */
+struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8])
+{
+ struct drm_tile_group *tg;
+ int ret;
+
+ tg = kzalloc(sizeof(*tg), GFP_KERNEL);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ kref_init(&tg->refcount);
+ memcpy(tg->group_data, topology, 8);
+ tg->dev = dev;
+
+ mutex_lock(&dev->mode_config.idr_mutex);
+ ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ tg->id = ret;
+ } else {
+ kfree(tg);
+ tg = ERR_PTR(ret);
+ }
+
+ mutex_unlock(&dev->mode_config.idr_mutex);
+ return tg;
+}
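
For userspace consumers, a minimal parsing sketch (an illustration, not code from this patch): the TILE blob created by drm_mode_connector_set_tile_property() above is a NUL-terminated string of 8 ':'-separated integers, in the same order the kernel snprintf() writes them:

#include <stdio.h>

struct tile_info {
        int group_id, is_single_monitor;
        int num_h_tile, num_v_tile;
        int h_loc, v_loc;
        int h_size, v_size;
};

/* Returns 0 on success, -1 if the blob does not match the format. */
static int parse_tile_blob(const char *blob, struct tile_info *t)
{
        return sscanf(blob, "%d:%d:%d:%d:%d:%d:%d:%d",
                      &t->group_id, &t->is_single_monitor,
                      &t->num_h_tile, &t->num_v_tile,
                      &t->h_loc, &t->v_loc,
                      &t->h_size, &t->v_size) == 8 ? 0 : -1;
}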
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 959e2074b0d4..79968e39c8d0 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -186,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
/*
* The specification doesn't give any recommendation on how often to
- * retry native transactions, so retry 7 times like for I2C-over-AUX
- * transactions.
+ * retry native transactions. We used to retry 7 times like for
+ * aux i2c transactions but real world devices this wasn't
+ * sufficient, bump to 32 which makes Dell 4k monitors happier.
*/
- for (retry = 0; retry < 7; retry++) {
+ for (retry = 0; retry < 32; retry++) {
mutex_lock(&aux->hw_mutex);
err = aux->transfer(aux, &msg);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 5682d7e9f1ec..9a5b68717ec8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
+ struct drm_dp_mst_branch *mstb;
+
switch (old_pdt) {
case DP_PEER_DEVICE_DP_LEGACY_CONV:
case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
drm_dp_mst_unregister_i2c_bus(&port->aux);
break;
case DP_PEER_DEVICE_MST_BRANCHING:
- drm_dp_put_mst_branch_device(port->mstb);
+ mstb = port->mstb;
port->mstb = NULL;
+ drm_dp_put_mst_branch_device(mstb);
break;
}
}
@@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref)
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
port->vcpi.num_slots = 0;
+
+ kfree(port->cached_edid);
if (port->connector)
(*port->mgr->cbs->destroy_connector)(mgr, port->connector);
drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1097,6 +1102,10 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
char proppath[255];
build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+
+ if (port->port_num >= 8) {
+ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ }
}
/* put reference to this port */
@@ -2167,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
* This returns the current connection state for a port. It validates the
* port pointer still exists so the caller doesn't require a reference
*/
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
enum drm_connector_status status = connector_status_disconnected;
@@ -2186,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
+ /* for logical ports - cache the EDID */
+ if (port->port_num >= 8 && !port->cached_edid) {
+ port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+ }
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@@ -2217,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (!port)
return NULL;
- edid = drm_get_edid(connector, &port->aux.ddc);
+ if (port->cached_edid)
+ edid = drm_edid_duplicate(port->cached_edid);
+ else
+ edid = drm_get_edid(connector, &port->aux.ddc);
+
+ drm_mode_connector_set_tile_property(connector);
drm_dp_put_port(port);
return edid;
}
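
The i915 hunk at the end of this patch shows the caller side of the new drm_dp_mst_detect_port() signature; as a hedged sketch (struct foo_mst_connector and to_foo_mst() are invented), a driver's detect callback now simply forwards its connector:

static enum drm_connector_status
foo_mst_detect(struct drm_connector *connector, bool force)
{
        struct foo_mst_connector *c = to_foo_mst(connector); /* hypothetical */

        /*
         * Passing the connector through lets the helper cache EDIDs for
         * logical ports (port_num >= 8) at detect time, as done above.
         */
        return drm_dp_mst_detect_port(connector, c->mgr, c->port);
}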
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index a7b5a71856a7..53bc7a628909 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -34,6 +34,7 @@
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
+#include <drm/drm_displayid.h>
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
@@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
+static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid);
+
+static int drm_edid_block_checksum(const u8 *raw_edid)
+{
+ int i;
+ u8 csum = 0;
+ for (i = 0; i < EDID_LENGTH; i++)
+ csum += raw_edid[i];
+
+ return csum;
+}
+
+static bool drm_edid_is_zero(const u8 *in_edid, int length)
+{
+ if (memchr_inv(in_edid, 0, length))
+ return false;
+
+ return true;
+}
+
/**
* drm_edid_block_valid - Sanity check the EDID block (base or extension)
* @raw_edid: pointer to raw EDID block
@@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup,
*/
bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
{
- int i;
- u8 csum = 0;
+ u8 csum;
struct edid *edid = (struct edid *)raw_edid;
if (WARN_ON(!raw_edid))
@@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
}
}
- for (i = 0; i < EDID_LENGTH; i++)
- csum += raw_edid[i];
+ csum = drm_edid_block_checksum(raw_edid);
if (csum) {
if (print_bad_edid) {
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
@@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
bad:
if (print_bad_edid) {
- printk(KERN_ERR "Raw EDID:\n");
- print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
+ if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
+ printk(KERN_ERR "EDID block is all zeroes\n");
+ } else {
+ printk(KERN_ERR "Raw EDID:\n");
+ print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
+ }
}
return false;
}
@@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid);
#define DDC_SEGMENT_ADDR 0x30
/**
* drm_do_probe_ddc_edid() - get EDID information via I2C
- * @adapter: I2C device adaptor
+ * @data: I2C device adapter
* @buf: EDID data buffer to be filled
* @block: 128 byte EDID block to start fetching from
* @len: EDID data buffer length to fetch
@@ -1176,14 +1200,6 @@ drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
return ret == xfers ? 0 : -1;
}
-static bool drm_edid_is_zero(u8 *in_edid, int length)
-{
- if (memchr_inv(in_edid, 0, length))
- return false;
-
- return true;
-}
-
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1308,10 +1324,15 @@ EXPORT_SYMBOL(drm_probe_ddc);
struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter)
{
+ struct edid *edid;
+
if (!drm_probe_ddc(adapter))
return NULL;
- return drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+ edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
+ if (edid)
+ drm_get_displayid(connector, edid);
+ return edid;
}
EXPORT_SYMBOL(drm_get_edid);
@@ -2406,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
/*
* Search EDID for CEA extension block.
*/
-static u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
{
u8 *edid_ext = NULL;
int i;
@@ -2418,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
/* Find CEA extension */
for (i = 0; i < edid->extensions; i++) {
edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == CEA_EXT)
+ if (edid_ext[0] == ext_id)
break;
}
@@ -2428,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
+static u8 *drm_find_cea_extension(struct edid *edid)
+{
+ return drm_find_edid_extension(edid, CEA_EXT);
+}
+
+static u8 *drm_find_displayid_extension(struct edid *edid)
+{
+ return drm_find_edid_extension(edid, DISPLAYID_EXT);
+}
+
/*
* Calculate the alternate clock for the CEA mode
* (60Hz vs. 59.94Hz etc.)
@@ -3888,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
return 0;
}
EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
+
+static int drm_parse_display_id(struct drm_connector *connector,
+ u8 *displayid, int length,
+ bool is_edid_extension)
+{
+ /* if this is an EDID extension the first byte will be 0x70 */
+ int idx = 0;
+ struct displayid_hdr *base;
+ struct displayid_block *block;
+ u8 csum = 0;
+ int i;
+
+ if (is_edid_extension)
+ idx = 1;
+
+ base = (struct displayid_hdr *)&displayid[idx];
+
+ DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
+ base->rev, base->bytes, base->prod_id, base->ext_count);
+
+ if (base->bytes + 5 > length - idx)
+ return -EINVAL;
+
+ for (i = idx; i <= base->bytes + 5; i++) {
+ csum += displayid[i];
+ }
+ if (csum) {
+ DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
+ return -EINVAL;
+ }
+
+ block = (struct displayid_block *)&displayid[idx + 4];
+ DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
+ block->tag, block->rev, block->num_bytes);
+
+ switch (block->tag) {
+ case DATA_BLOCK_TILED_DISPLAY: {
+ struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
+
+ u16 w, h;
+ u8 tile_v_loc, tile_h_loc;
+ u8 num_v_tile, num_h_tile;
+ struct drm_tile_group *tg;
+
+ w = tile->tile_size[0] | tile->tile_size[1] << 8;
+ h = tile->tile_size[2] | tile->tile_size[3] << 8;
+
+ num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
+ num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
+ tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
+ tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
+
+ connector->has_tile = true;
+ if (tile->tile_cap & 0x80)
+ connector->tile_is_single_monitor = true;
+
+ connector->num_h_tile = num_h_tile + 1;
+ connector->num_v_tile = num_v_tile + 1;
+ connector->tile_h_loc = tile_h_loc;
+ connector->tile_v_loc = tile_v_loc;
+ connector->tile_h_size = w + 1;
+ connector->tile_v_size = h + 1;
+
+ DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
+ DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
+ DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
+ num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
+ DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
+
+ tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
+ if (!tg) {
+ tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
+ }
+ if (!tg)
+ return -ENOMEM;
+
+ if (connector->tile_group != tg) {
+ /* if we haven't got a pointer,
+ take the reference, drop ref to old tile group */
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ }
+ connector->tile_group = tg;
+ } else
+ /* if same tile group, then release the ref we just took. */
+ drm_mode_put_tile_group(connector->dev, tg);
+ }
+ break;
+ default:
+ printk("unknown displayid tag %d\n", block->tag);
+ break;
+ }
+ return 0;
+}
+
+static void drm_get_displayid(struct drm_connector *connector,
+ struct edid *edid)
+{
+ void *displayid = NULL;
+ int ret;
+ connector->has_tile = false;
+ displayid = drm_find_displayid_extension(edid);
+ if (!displayid) {
+ /* drop reference to any tile group we had */
+ goto out_drop_ref;
+ }
+
+ ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
+ if (ret < 0)
+ goto out_drop_ref;
+ if (!connector->has_tile)
+ goto out_drop_ref;
+ return;
+out_drop_ref:
+ if (connector->tile_group) {
+ drm_mode_put_tile_group(connector->dev, connector->tile_group);
+ connector->tile_group = NULL;
+ }
+ return;
+}
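
To make the bit-unpacking in drm_parse_display_id() above concrete, a standalone worked example (all byte values invented) applying the same shifts to a hypothetical 2x1 array of 1920x2160 tiles:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Bytes the second tile of a 2x1, 1920x2160-per-tile array might report. */
        uint8_t topo[3]      = { 0x10, 0x10, 0x00 };
        uint8_t tile_size[4] = { 0x7f, 0x07, 0x6f, 0x08 };

        uint16_t w = tile_size[0] | tile_size[1] << 8;  /* 1919 */
        uint16_t h = tile_size[2] | tile_size[3] << 8;  /* 2159 */
        uint8_t num_v = (topo[0] & 0xf) | (topo[2] & 0x30);
        uint8_t num_h = (topo[0] >> 4) | ((topo[2] >> 2) & 0x30);
        uint8_t v_loc = (topo[1] & 0xf) | ((topo[2] & 0x3) << 4);
        uint8_t h_loc = (topo[1] >> 4) | (((topo[2] >> 2) & 0x3) << 4);

        /* Counts and sizes are stored as N-1, hence the +1 below. */
        printf("%ux%u tiles, this tile at (%u,%u), %ux%u per tile\n",
               num_h + 1, num_v + 1, h_loc, v_loc, w + 1, h + 1);
        return 0;       /* prints: 2x1 tiles, this tile at (1,0), 1920x2160 per tile */
}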
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 09d47e9ba026..52ce26d6b4fb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
bool ret;
+ bool do_delayed = false;
+
drm_modeset_lock_all(dev);
ret = restore_fbdev_mode(fb_helper);
+
+ do_delayed = fb_helper->delayed_hotplug;
+ if (do_delayed)
+ fb_helper->delayed_hotplug = false;
drm_modeset_unlock_all(dev);
+
+ if (do_delayed)
+ drm_fb_helper_hotplug_event(fb_helper);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
- if (fb_helper->delayed_hotplug) {
- fb_helper->delayed_hotplug = false;
- drm_fb_helper_hotplug_event(fb_helper);
- }
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
+ int x, y;
desired_mode = fb_helper->crtc_info[i].desired_mode;
-
+ x = fb_helper->crtc_info[i].x;
+ y = fb_helper->crtc_info[i].y;
if (desired_mode) {
if (gamma_size == 0)
gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
- if (desired_mode->hdisplay < sizes.fb_width)
- sizes.fb_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay < sizes.fb_height)
- sizes.fb_height = desired_mode->vdisplay;
- if (desired_mode->hdisplay > sizes.surface_width)
- sizes.surface_width = desired_mode->hdisplay;
- if (desired_mode->vdisplay > sizes.surface_height)
- sizes.surface_height = desired_mode->vdisplay;
+ if (desired_mode->hdisplay + x < sizes.fb_width)
+ sizes.fb_width = desired_mode->hdisplay + x;
+ if (desired_mode->vdisplay + y < sizes.fb_height)
+ sizes.fb_height = desired_mode->vdisplay + y;
+ if (desired_mode->hdisplay + x > sizes.surface_width)
+ sizes.surface_width = desired_mode->hdisplay + x;
+ if (desired_mode->vdisplay + y > sizes.surface_height)
+ sizes.surface_height = desired_mode->vdisplay + y;
crtc_count++;
}
}
@@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
int count, i, j;
@@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
return false;
}
+static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ int idx,
+ int h_idx, int v_idx)
+{
+ struct drm_fb_helper_connector *fb_helper_conn;
+ int i;
+ int hoffset = 0, voffset = 0;
+
+ for (i = 0; i < fb_helper->connector_count; i++) {
+ fb_helper_conn = fb_helper->connector_info[i];
+ if (!fb_helper_conn->connector->has_tile)
+ continue;
+
+ if (!modes[i] && (h_idx || v_idx)) {
+ DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
+ fb_helper_conn->connector->base.id);
+ continue;
+ }
+ if (fb_helper_conn->connector->tile_h_loc < h_idx)
+ hoffset += modes[i]->hdisplay;
+
+ if (fb_helper_conn->connector->tile_v_loc < v_idx)
+ voffset += modes[i]->vdisplay;
+ }
+ offsets[idx].x = hoffset;
+ offsets[idx].y = voffset;
+ DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
+ return 0;
+}
+
static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
-
+ uint64_t conn_configured = 0, mask;
+ int tile_pass = 0;
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
fb_helper_conn = fb_helper->connector_info[i];
- if (enabled[i] == false)
+ if (conn_configured & (1 << i))
continue;
+ if (enabled[i] == false) {
+ conn_configured |= (1 << i);
+ continue;
+ }
+
+ /* first pass over all the untiled connectors */
+ if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
+ continue;
+
+ if (tile_pass == 1) {
+ if (fb_helper_conn->connector->tile_h_loc != 0 ||
+ fb_helper_conn->connector->tile_v_loc != 0)
+ continue;
+
+ } else {
+ if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
+ fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
+ /* if this tile_pass doesn't cover any of the tiles - keep going */
+ continue;
+
+ /* find the tile offsets for this pass - need
+ to find all tiles left and above */
+ drm_get_tile_offsets(fb_helper, modes, offsets,
+ i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
+ }
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
fb_helper_conn->connector->base.id);
/* got for command line mode first */
modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
- fb_helper_conn->connector->base.id);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
+ fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
}
/* No preferred modes, pick one off the list */
@@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
}
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
"none");
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ tile_pass++;
+ goto retry;
}
return true;
}
@@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
struct drm_device *dev = fb_helper->dev;
struct drm_fb_helper_crtc **crtcs;
struct drm_display_mode **modes;
+ struct drm_fb_offset *offsets;
struct drm_mode_set *modeset;
bool *enabled;
int width, height;
@@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
modes = kcalloc(dev->mode_config.num_connector,
sizeof(struct drm_display_mode *), GFP_KERNEL);
+ offsets = kcalloc(dev->mode_config.num_connector,
+ sizeof(struct drm_fb_offset), GFP_KERNEL);
enabled = kcalloc(dev->mode_config.num_connector,
sizeof(bool), GFP_KERNEL);
- if (!crtcs || !modes || !enabled) {
+ if (!crtcs || !modes || !enabled || !offsets) {
DRM_ERROR("Memory allocation failed\n");
goto out;
}
@@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
if (!(fb_helper->funcs->initial_config &&
fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
+ offsets,
enabled, width, height))) {
memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
+ memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
- if (!drm_target_cloned(fb_helper,
- modes, enabled, width, height) &&
- !drm_target_preferred(fb_helper,
- modes, enabled, width, height))
+ if (!drm_target_cloned(fb_helper, modes, offsets,
+ enabled, width, height) &&
+ !drm_target_preferred(fb_helper, modes, offsets,
+ enabled, width, height))
DRM_ERROR("Unable to find initial modes\n");
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
@@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_display_mode *mode = modes[i];
struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
+ struct drm_fb_offset *offset = &offsets[i];
modeset = &fb_crtc->mode_set;
if (mode && fb_crtc) {
- DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
- mode->name, fb_crtc->mode_set.crtc->base.id);
+ DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
+ mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
fb_crtc->desired_mode = mode;
+ fb_crtc->x = offset->x;
+ fb_crtc->y = offset->y;
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
modeset->fb = fb_helper->fb;
+ modeset->x = offset->x;
+ modeset->y = offset->y;
}
}
@@ -1578,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
out:
kfree(crtcs);
kfree(modes);
+ kfree(offsets);
kfree(enabled);
}
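
The drm_fb_helper changes above configure connectors in multiple passes, recording progress in a bitmask so untiled connectors are handled first and tiled connectors follow, pass by pass, until every connector is marked. A minimal standalone sketch of that retry pattern follows; the struct conn descriptor and the mode-picking step are hypothetical stand-ins for illustration, not the kernel API, and the sketch assumes fewer than 64 connectors so the mask fits in a u64.

	#include <stdint.h>
	#include <stdbool.h>

	/* Hypothetical connector descriptor, for illustration only. */
	struct conn {
		bool enabled;
		bool has_tile;
		int tile_h_loc, tile_v_loc;
	};

	/* Configure untiled connectors on pass 0, then tiles on later
	 * passes, until the conn_configured bitmask covers everything.
	 */
	static void configure_all(struct conn *c, int count)
	{
		uint64_t conn_configured = 0, mask = (1ULL << count) - 1;
		int pass = 0;
		int i;

	retry:
		for (i = 0; i < count; i++) {
			if (conn_configured & (1ULL << i))
				continue;	/* already handled */
			if (!c[i].enabled) {
				conn_configured |= 1ULL << i;	/* nothing to do */
				continue;
			}
			if (pass == 0 && c[i].has_tile)
				continue;	/* untiled connectors first */
			/* pick a mode for c[i] here */
			conn_configured |= 1ULL << i;
		}
		if ((conn_configured & mask) != mask) {
			pass++;
			goto retry;
		}
	}

Each pass only touches connectors not yet marked, so the loop terminates once every bit in the mask is set.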
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 91e1105f2800..0b9514b6cd64 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -527,6 +527,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
if (copy_to_user(buffer + total,
e->event, e->event->length)) {
total = -EFAULT;
+ e->destroy(e);
break;
}
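
The drm_fops.c one-liner above plugs a leak: the event has already been dequeued when copy_to_user() fails, so it must be destroyed on that error path too. A kernel-style sketch of the pattern, not a standalone program; the struct ev type and read_events() wrapper are hypothetical simplifications of drm_read():

	/* Once an event has been dequeued it must be destroyed on every
	 * exit path, including a failed copy to userspace.
	 */
	struct ev {
		size_t length;
		void (*destroy)(struct ev *);
		char payload[];
	};

	static ssize_t read_events(char __user *buffer, size_t total, struct ev *e)
	{
		if (copy_to_user(buffer + total, e->payload, e->length)) {
			e->destroy(e);	/* the fix: free before bailing out */
			return -EFAULT;
		}
		total += e->length;
		e->destroy(e);
		return total;
	}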
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0e47df4ef24e..f5a5f18efa5b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
/*
- * If the vblank interrupt was already disbled update the count
+ * If the vblank interrupt was already disabled update the count
* and timestamp to maintain the appearance that the counter
* has been ticking all along until this time. This makes the
* count account for the entire time between drm_vblank_on() and
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index bfe359506377..7f8c6a66680a 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -283,7 +283,7 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port);
+ return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
}
static int
@@ -414,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
intel_dp_add_properties(intel_dp, connector);
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
+
drm_mode_connector_set_path_property(connector, pathprop);
drm_reinit_primary_mode_group(dev);
mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index f2183b554cbc..850cf7d6578c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height)
{
struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
+ uint64_t conn_configured = 0, mask;
+ int pass = 0;
save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
return false;
memcpy(save_enabled, enabled, dev->mode_config.num_connector);
-
+ mask = (1 << fb_helper->connector_count) - 1;
+retry:
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
fb_conn = fb_helper->connector_info[i];
connector = fb_conn->connector;
+ if (conn_configured & (1 << i))
+ continue;
+
+ if (pass == 0 && !connector->has_tile)
+ continue;
+
if (connector->status == connector_status_connected)
num_connectors_detected++;
if (!enabled[i]) {
DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
connector->name);
+ conn_configured |= (1 << i);
continue;
}
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
connector->name);
enabled[i] = false;
+ conn_configured |= (1 << i);
continue;
}
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
/* try for preferred next */
if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s\n",
- connector->name);
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
modes[i] = drm_has_preferred_mode(fb_conn, width,
height);
}
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
fallback = false;
+ conn_configured |= (1 << i);
+ }
+
+ if ((conn_configured & mask) != mask) {
+ pass++;
+ goto retry;
}
/*
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677ad4b7..72a40f95d048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
- device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43893b5..1931057f9962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
}
if (status & 0x40000000) {
- nouveau_fifo_uevent(&priv->base);
nv_wr32(priv, 0x002100, 0x40000000);
+ nouveau_fifo_uevent(&priv->base);
status &= ~0x40000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8fbda0c..074d434c3077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
u32 inte = nv_rd32(priv, 0x002628);
u32 unkn;
+ nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
+
for (unkn = 0; unkn < 8; unkn++) {
u32 ints = (intr >> (unkn * 0x04)) & inte;
if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
nv_mask(priv, 0x002628, ints, 0);
}
}
-
- nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
}
static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index fc9ef663f25a..6a8db7c80bd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -982,8 +982,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
}
if (stat & 0x80000000) {
- nve0_fifo_intr_engine(priv);
nv_wr32(priv, 0x002100, 0x80000000);
+ nve0_fifo_intr_engine(priv);
stat &= ~0x80000000;
}
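
The three nouveau FIFO hunks above (nv04, nvc0, nve0) all make the same reordering: the interrupt is acknowledged in hardware before the handler runs, so an event that fires while the handler executes re-asserts the status bit and raises a fresh interrupt instead of being cleared along with the old one. A generic sketch of the ack-then-handle pattern; struct priv, the register helpers, and the bit names are illustrative stand-ins:

	struct priv;
	u32  reg_read(struct priv *priv, u32 reg);
	void reg_write(struct priv *priv, u32 reg, u32 val);
	void handle_engine_event(struct priv *priv);

	#define INTR_STATUS	0x002100	/* mirrors the register in the hunks */
	#define ENGINE_EVENT	0x80000000

	static void fifo_intr(struct priv *priv)
	{
		u32 stat = reg_read(priv, INTR_STATUS);

		if (stat & ENGINE_EVENT) {
			reg_write(priv, INTR_STATUS, ENGINE_EVENT);	/* ack first */
			handle_engine_event(priv);			/* then service */
			stat &= ~ENGINE_EVENT;
		}
	}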
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index afb93bb72f97..65910e3aed0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -664,7 +664,6 @@ nouveau_pmops_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
- pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
@@ -732,6 +731,7 @@ nouveau_pmops_runtime_suspend(struct device *dev)
ret = nouveau_do_suspend(drm_dev, true);
pci_save_state(pdev);
pci_disable_device(pdev);
+ pci_ignore_hotplug(pdev);
pci_set_power_state(pdev, PCI_D3cold);
drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9aebb99..f32a434724e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}
-static void
+static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
+ int drop = 0;
+
fence_signal_locked(&fence->base);
list_del(&fence->head);
+ rcu_assign_pointer(fence->channel, NULL);
if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
if (!--fctx->notify_ref)
- nvif_notify_put(&fctx->notify);
+ drop = 1;
}
fence_put(&fence->base);
+ return drop;
}
static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence;
- nvif_notify_fini(&fctx->notify);
-
spin_lock_irq(&fctx->lock);
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- nouveau_fence_signal(fence);
- fence->channel = NULL;
+ if (nouveau_fence_signal(fence))
+ nvif_notify_put(&fctx->notify);
}
spin_unlock_irq(&fctx->lock);
+
+ nvif_notify_fini(&fctx->notify);
+ fctx->dead = 1;
+
+ /*
+ * Ensure that all accesses to fence->channel complete before freeing
+ * the channel.
+ */
+ synchronize_rcu();
}
static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}
-static void
+static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
struct nouveau_fence *fence;
-
+ int drop = 0;
u32 seq = fctx->read(chan);
while (!list_empty(&fctx->pending)) {
fence = list_entry(fctx->pending.next, typeof(*fence), head);
if ((int)(seq - fence->base.seqno) < 0)
- return;
+ break;
- nouveau_fence_signal(fence);
+ drop |= nouveau_fence_signal(fence);
}
+
+ return drop;
}
static int
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
struct nouveau_fence_chan *fctx =
container_of(notify, typeof(*fctx), notify);
unsigned long flags;
+ int ret = NVIF_NOTIFY_KEEP;
spin_lock_irqsave(&fctx->lock, flags);
if (!list_empty(&fctx->pending)) {
struct nouveau_fence *fence;
+ struct nouveau_channel *chan;
fence = list_entry(fctx->pending.next, typeof(*fence), head);
- nouveau_fence_update(fence->channel, fctx);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (nouveau_fence_update(chan, fctx))
+ ret = NVIF_NOTIFY_DROP;
}
spin_unlock_irqrestore(&fctx->lock, flags);
- /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */
- return NVIF_NOTIFY_KEEP;
+ return ret;
}
void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
if (!ret) {
fence_get(&fence->base);
spin_lock_irq(&fctx->lock);
- nouveau_fence_update(chan, fctx);
+
+ if (nouveau_fence_update(chan, fctx))
+ nvif_notify_put(&fctx->notify);
+
list_add_tail(&fence->head, &fctx->pending);
spin_unlock_irq(&fctx->lock);
}
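
Across these fence hunks, nouveau_fence_signal() no longer calls nvif_notify_put() itself; it returns whether the last notifier user went away, and nouveau_fence_update() ORs those results so each caller issues at most one put per batch of completed fences. Reassembled from the hunks above (without diff markers), the updated function reads roughly as:

	static int
	nouveau_fence_update(struct nouveau_channel *chan,
			     struct nouveau_fence_chan *fctx)
	{
		struct nouveau_fence *fence;
		int drop = 0;
		u32 seq = fctx->read(chan);

		while (!list_empty(&fctx->pending)) {
			fence = list_entry(fctx->pending.next, typeof(*fence), head);
			if ((int)(seq - fence->base.seqno) < 0)
				break;
			/* 1 when this fence released the last notify_ref */
			drop |= nouveau_fence_signal(fence);
		}
		return drop;
	}

Callers then do "if (nouveau_fence_update(chan, fctx)) nvif_notify_put(&fctx->notify);" under fctx->lock, as the emit and done paths above show.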
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
if (fence->base.ops == &nouveau_fence_ops_legacy ||
fence->base.ops == &nouveau_fence_ops_uevent) {
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
+ struct nouveau_channel *chan;
unsigned long flags;
if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
return true;
spin_lock_irqsave(&fctx->lock, flags);
- nouveau_fence_update(fence->channel, fctx);
+ chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
+ if (chan && nouveau_fence_update(chan, fctx))
+ nvif_notify_put(&fctx->notify);
spin_unlock_irqrestore(&fctx->lock, flags);
}
return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
struct nouveau_channel *prev = NULL;
+ bool must_wait = true;
f = nouveau_local_fence(fence, chan->drm);
- if (f)
- prev = f->channel;
+ if (f) {
+ rcu_read_lock();
+ prev = rcu_dereference(f->channel);
+ if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+ must_wait = false;
+ rcu_read_unlock();
+ }
- if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+ if (must_wait)
ret = fence_wait(fence, intr);
return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
for (i = 0; i < fobj->shared_count && !ret; ++i) {
struct nouveau_channel *prev = NULL;
+ bool must_wait = true;
fence = rcu_dereference_protected(fobj->shared[i],
reservation_object_held(resv));
f = nouveau_local_fence(fence, chan->drm);
- if (f)
- prev = f->channel;
+ if (f) {
+ rcu_read_lock();
+ prev = rcu_dereference(f->channel);
+ if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
+ must_wait = false;
+ rcu_read_unlock();
+ }
- if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan))))
+ if (must_wait)
ret = fence_wait(fence, intr);
-
- if (ret)
- break;
}
return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- return fence->channel ? fctx->name : "dead channel";
+ return !fctx->dead ? fctx->name : "dead channel";
}
/*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
{
struct nouveau_fence *fence = from_fence(f);
struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
- struct nouveau_channel *chan = fence->channel;
+ struct nouveau_channel *chan;
+ bool ret = false;
+
+ rcu_read_lock();
+ chan = rcu_dereference(fence->channel);
+ if (chan)
+ ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+ rcu_read_unlock();
- return (int)(fctx->read(chan) - fence->base.seqno) >= 0;
+ return ret;
}
static bool nouveau_fence_no_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b17b1fc..96e461c6f68f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@ struct nouveau_fence {
bool sysmem;
- struct nouveau_channel *channel;
+ struct nouveau_channel __rcu *channel;
unsigned long timeout;
};
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
char name[32];
struct nvif_notify notify;
- int notify_ref;
+ int notify_ref, dead;
};
struct nouveau_fence_priv {
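
Annotating the field "struct nouveau_channel __rcu *channel" documents the access rules the nouveau_fence.c hunks now follow: writers publish with rcu_assign_pointer(), lock-holders read with rcu_dereference_protected(), lockless readers bracket rcu_dereference() with rcu_read_lock()/rcu_read_unlock(), and synchronize_rcu() in teardown guarantees no reader still sees the stale channel before it is freed. A compact lifecycle sketch with simplified types (struct chan and struct fence are illustrative, the RCU calls are the real kernel API):

	struct chan;
	struct fence {
		struct chan __rcu *channel;
	};

	/* Writer: clear the pointer; concurrent readers see either the
	 * old channel (valid until synchronize_rcu() returns) or NULL.
	 */
	static void fence_retire(struct fence *f)
	{
		rcu_assign_pointer(f->channel, NULL);
	}

	/* Lockless reader. */
	static bool fence_has_channel(struct fence *f)
	{
		struct chan *chan;
		bool ret;

		rcu_read_lock();
		chan = rcu_dereference(f->channel);
		ret = chan != NULL;
		rcu_read_unlock();
		return ret;
	}

	/* Teardown: after synchronize_rcu() no reader can still hold a
	 * pointer obtained through f->channel, so freeing is safe.
	 */
	static void context_del(struct chan *chan)
	{
		synchronize_rcu();
		/* free chan here */
	}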
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 446e71ca36cb..d9b25684ac98 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
if (list_is_singular(&release->bos))
return 0;
- ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+ !no_intr, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 30d242b25078..d59ec491dbb9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
+ radeon_cursor_reset(crtc);
/* update the hw version for dpm */
radeon_crtc->hw_mode = *adjusted_mode;
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 3f898d020ae6..f373a81ba3d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -937,7 +937,7 @@ static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
tmp |= TMIN(0);
WREG32_SMC(CG_FDO_CTRL2, tmp);
- tmp = RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(mode);
WREG32_SMC(CG_FDO_CTRL2, tmp);
}
@@ -1162,7 +1162,7 @@ static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
tmp |= TARGET_PERIOD(tach_period);
WREG32_SMC(CG_TACH_CTRL, tmp);
- ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+ ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
return 0;
}
@@ -1178,7 +1178,7 @@ static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
WREG32_SMC(CG_FDO_CTRL2, tmp);
- tmp = RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK;
+ tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(pi->t_min);
WREG32_SMC(CG_FDO_CTRL2, tmp);
pi->fan_ctrl_is_in_default_mode = true;
@@ -5849,7 +5849,6 @@ int ci_dpm_init(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
pi->fan_ctrl_is_in_default_mode = true;
- rdev->pm.dpm.fan.ucode_fan_control = false;
return 0;
}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index e4e88ca8b82e..ba85986febea 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -213,18 +213,18 @@
#define CG_FDO_CTRL0 0xC0300064
#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x0000000F
+#define FDO_STATIC_DUTY_MASK 0x000000FF
#define FDO_STATIC_DUTY_SHIFT 0
#define CG_FDO_CTRL1 0xC0300068
#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x0000000F
+#define FMAX_DUTY100_MASK 0x000000FF
#define FMAX_DUTY100_SHIFT 0
#define CG_FDO_CTRL2 0xC030006C
#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x0000000F
+#define TMIN_MASK 0x000000FF
#define TMIN_SHIFT 0
#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (3 << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
#define FDO_PWM_MODE_SHIFT 11
#define TACH_PWM_RESP_RATE(x) ((x) << 25)
#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
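
The ci_dpm.c fixes above correct a classic read-modify-write mistake: updating a register field requires ANDing with the complement of its mask before ORing in the new value. "tmp = RREG32_SMC(reg) & FDO_PWM_MODE_MASK" keeps only the stale field and wipes the rest of the register. A minimal sketch of the corrected idiom; read32()/write32() and the field macros are illustrative stand-ins for the SMC accessors:

	#define FIELD_MASK	(7 << 11)
	#define FIELD(x)	((x) << 11)

	static u32  read32(void *reg);
	static void write32(void *reg, u32 val);

	/* Wrong: tmp = read32(reg) & FIELD_MASK;
	 *   keeps only the old field bits and zeroes everything else.
	 * Right: clear just the field, preserve the other bits.
	 */
	static void set_field(void *reg, unsigned int val)
	{
		u32 tmp = read32(reg) & ~FIELD_MASK;

		tmp |= FIELD(val);
		write32(reg, tmp);
	}

The cikd.h widening of the masks (0xFF duty values, a 3-bit PWM mode field) matters for the same reason: an undersized mask silently truncates the field on every update.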
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 5c8b358f9fba..924b1b7ab455 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -35,7 +35,7 @@
#define MIN(a,b) (((a)<(b))?(a):(b))
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc);
+ struct radeon_bo_list **cs_reloc);
struct evergreen_cs_track {
u32 group_size;
u32 nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 last_reg;
u32 m, i, tmp, *ib;
int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int evergreen_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct evergreen_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
**/
int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc;
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+ struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
u32 header, cmd, count, sub_cmd;
volatile u32 *ib = p->ib.ptr;
u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b53b31a7b76f..74f06d540591 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
int r;
u32 tile_flags = 0;
u32 tmp;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 value;
r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
int idx)
{
unsigned c, i;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
unsigned idx;
volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
}
if (r)
return r;
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 732d4938aab7..c70e6d5bcd19 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1bc4704034ce..064ad5569cca 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
if (r) {
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c47537a1ddba..acc1f99c84d9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
struct r600_cs_track *track = (struct r600_cs_track *)p->track;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
u32 m, i, tmp, *ib;
int r;
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
struct r600_cs_track *track;
volatile u32 *ib;
unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
p->track = NULL;
return r;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib.length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
return 0;
}
- p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
/* Copy the packet into the IB, the parser will read from the
* input memory (cached) and write to the IB (which can be
* uncached). */
- ib_chunk = &parser.chunks[parser.chunk_ib_idx];
+ ib_chunk = parser.chunk_ib;
parser.ib.length_dw = ib_chunk->length_dw;
*l = parser.ib.length_dw;
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
* GPU offset using the provided start.
**/
int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc)
+ struct radeon_bo_list **cs_reloc)
{
struct radeon_cs_chunk *relocs_chunk;
unsigned idx;
*cs_reloc = NULL;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
idx = p->dma_reloc_idx;
if (idx >= p->nrelocs) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, p->nrelocs);
return -EINVAL;
}
- *cs_reloc = p->relocs_ptr[idx];
+ *cs_reloc = &p->relocs[idx];
p->dma_reloc_idx++;
return 0;
}
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
**/
int r600_dma_cs_parse(struct radeon_cs_parser *p)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
- struct radeon_cs_reloc *src_reloc, *dst_reloc;
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
+ struct radeon_bo_list *src_reloc, *dst_reloc;
u32 header, cmd, count, tiled;
volatile u32 *ib = p->ib.ptr;
u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
#if 0
for (r = 0; r < p->ib->length_dw; r++) {
printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 3207bb60715e..54529b837afa 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -450,6 +450,15 @@ struct radeon_mman {
#endif
};
+struct radeon_bo_list {
+ struct radeon_bo *robj;
+ struct ttm_validate_buffer tv;
+ uint64_t gpu_offset;
+ unsigned prefered_domains;
+ unsigned allowed_domains;
+ uint32_t tiling_flags;
+};
+
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
@@ -920,6 +929,9 @@ struct radeon_vm {
struct rb_root va;
+ /* protecting invalidated and freed */
+ spinlock_t status_lock;
+
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
@@ -1044,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
/*
* CS.
*/
-struct radeon_cs_reloc {
- struct drm_gem_object *gobj;
- struct radeon_bo *robj;
- struct ttm_validate_buffer tv;
- uint64_t gpu_offset;
- unsigned prefered_domains;
- unsigned allowed_domains;
- uint32_t tiling_flags;
- uint32_t handle;
-};
-
struct radeon_cs_chunk {
- uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
void __user *user_ptr;
@@ -1074,16 +1074,15 @@ struct radeon_cs_parser {
unsigned idx;
/* relocations */
unsigned nrelocs;
- struct radeon_cs_reloc *relocs;
- struct radeon_cs_reloc **relocs_ptr;
- struct radeon_cs_reloc *vm_bos;
+ struct radeon_bo_list *relocs;
+ struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
- int chunk_ib_idx;
- int chunk_relocs_idx;
- int chunk_flags_idx;
- int chunk_const_ib_idx;
+ struct radeon_cs_chunk *chunk_ib;
+ struct radeon_cs_chunk *chunk_relocs;
+ struct radeon_cs_chunk *chunk_flags;
+ struct radeon_cs_chunk *chunk_const_ib;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
@@ -1097,7 +1096,7 @@ struct radeon_cs_parser {
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
- struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_chunk *ibc = p->chunk_ib;
if (ibc->kdata)
return ibc->kdata[idx];
@@ -2975,7 +2974,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
@@ -3089,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc,
+ struct radeon_bo_list **cs_reloc,
int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 75f22e5e999f..c830863bc98a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
struct drm_device *ddev = p->rdev->ddev;
struct radeon_cs_chunk *chunk;
struct radeon_cs_buckets buckets;
- unsigned i, j;
- bool duplicate, need_mmap_lock = false;
+ unsigned i;
+ bool need_mmap_lock = false;
int r;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
return 0;
}
- chunk = &p->chunks[p->chunk_relocs_idx];
+ chunk = p->chunk_relocs;
p->dma_reloc_idx = 0;
/* FIXME: we assume that each reloc uses 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
- if (p->relocs_ptr == NULL) {
- return -ENOMEM;
- }
- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
+ p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
for (i = 0; i < p->nrelocs; i++) {
struct drm_radeon_cs_reloc *r;
+ struct drm_gem_object *gobj;
unsigned priority;
- duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < i; j++) {
- if (r->handle == p->relocs[j].handle) {
- p->relocs_ptr[i] = &p->relocs[j];
- duplicate = true;
- break;
- }
- }
- if (duplicate) {
- p->relocs[i].handle = 0;
- continue;
- }
-
- p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
- r->handle);
- if (p->relocs[i].gobj == NULL) {
+ gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
+ if (gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
return -ENOENT;
}
- p->relocs_ptr[i] = &p->relocs[i];
- p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
+ p->relocs[i].robj = gem_to_radeon_bo(gobj);
/* The userspace buffer priorities are from 0 to 15. A higher
* number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
p->relocs[i].tv.shared = !r->write_domain;
- p->relocs[i].handle = r->handle;
radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
priority);
@@ -251,22 +232,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
- int i, r = 0;
+ struct radeon_bo_list *reloc;
+ int r;
- for (i = 0; i < p->nrelocs; i++) {
+ list_for_each_entry(reloc, &p->validated, tv.head) {
struct reservation_object *resv;
- if (!p->relocs[i].robj)
- continue;
-
- resv = p->relocs[i].robj->tbo.resv;
+ resv = reloc->robj->tbo.resv;
r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
- p->relocs[i].tv.shared);
-
+ reloc->tv.shared);
if (r)
- break;
+ return r;
}
- return r;
+ return 0;
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -286,10 +264,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->idx = 0;
p->ib.sa_bo = NULL;
p->const_ib.sa_bo = NULL;
- p->chunk_ib_idx = -1;
- p->chunk_relocs_idx = -1;
- p->chunk_flags_idx = -1;
- p->chunk_const_ib_idx = -1;
+ p->chunk_ib = NULL;
+ p->chunk_relocs = NULL;
+ p->chunk_flags = NULL;
+ p->chunk_const_ib = NULL;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -316,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].chunk_id = user_chunk.chunk_id;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
- p->chunk_relocs_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
+ p->chunk_relocs = &p->chunks[i];
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
- p->chunk_ib_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
+ p->chunk_ib = &p->chunks[i];
/* zero length IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
- p->chunk_const_ib_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
+ p->chunk_const_ib = &p->chunks[i];
/* zero length CONST IB isn't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- p->chunk_flags_idx = i;
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags = &p->chunks[i];
/* zero length flags aren't useful */
if (p->chunks[i].length_dw == 0)
return -EINVAL;
@@ -342,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
size = p->chunks[i].length_dw;
cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
p->chunks[i].user_ptr = cdata;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
continue;
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
continue;
}
@@ -358,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
return -EFAULT;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
p->cs_flags = p->chunks[i].kdata[0];
if (p->chunks[i].length_dw > 1)
ring = p->chunks[i].kdata[1];
@@ -399,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
static int cmp_size_smaller_first(void *priv, struct list_head *a,
struct list_head *b)
{
- struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
- struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);
+ struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
+ struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
/* Sort A before B if A is smaller. */
return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -441,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
if (parser->relocs != NULL) {
for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj)
- drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+ struct radeon_bo *bo = parser->relocs[i].robj;
+ if (bo == NULL)
+ continue;
+
+ drm_gem_object_unreference_unlocked(&bo->gem_base);
}
}
kfree(parser->track);
kfree(parser->relocs);
- kfree(parser->relocs_ptr);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
@@ -462,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
{
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -505,9 +484,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
if (r)
return r;
- radeon_sync_resv(p->rdev, &p->ib.sync, vm->page_directory->tbo.resv,
- true);
-
r = radeon_vm_clear_freed(rdev, vm);
if (r)
return r;
@@ -525,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
for (i = 0; i < p->nrelocs; i++) {
struct radeon_bo *bo;
- /* ignore duplicates */
- if (p->relocs_ptr[i] != &p->relocs[i])
- continue;
-
bo = p->relocs[i].robj;
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
@@ -553,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
struct radeon_vm *vm = &fpriv->vm;
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
return 0;
@@ -587,7 +559,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
}
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
+ (parser->chunk_const_ib != NULL)) {
r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
} else {
r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -614,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
struct radeon_vm *vm = NULL;
int r;
- if (parser->chunk_ib_idx == -1)
+ if (parser->chunk_ib == NULL)
return 0;
if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -622,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
vm = &fpriv->vm;
if ((rdev->family >= CHIP_TAHITI) &&
- (parser->chunk_const_ib_idx != -1)) {
- ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
+ (parser->chunk_const_ib != NULL)) {
+ ib_chunk = parser->chunk_const_ib;
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
@@ -642,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
return -EFAULT;
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ ib_chunk = parser->chunk_ib;
if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
return -EINVAL;
}
}
- ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ ib_chunk = parser->chunk_ib;
r = radeon_ib_get(rdev, parser->ring, &parser->ib,
vm, ib_chunk->length_dw * 4);
@@ -740,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx)
{
- struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
struct radeon_device *rdev = p->rdev;
uint32_t header;
@@ -834,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
* GPU offset using the provided start.
**/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
- struct radeon_cs_reloc **cs_reloc,
+ struct radeon_bo_list **cs_reloc,
int nomm)
{
struct radeon_cs_chunk *relocs_chunk;
@@ -842,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
unsigned idx;
int r;
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
*cs_reloc = NULL;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
if (r)
return r;
@@ -873,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
(u64)relocs_chunk->kdata[idx + 3] << 32;
(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
} else
- *cs_reloc = p->relocs_ptr[(idx / 4)];
+ *cs_reloc = &p->relocs[(idx / 4)];
return 0;
}
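
The radeon_cs.c rework replaces chunk indices ("chunk_ib_idx == -1" checks plus "&p->chunks[idx]" lookups) with direct struct radeon_cs_chunk pointers, and drops the relocs_ptr indirection array now that duplicate handles are left for TTM to deduplicate. The essence of the refactor, reduced to one access path (surrounding parser code elided):

	/* Before: an index that is -1 when absent and must be
	 * dereferenced through the chunks array on every use:
	 *	if (p->chunk_ib_idx == -1)
	 *		return 0;
	 *	ib_chunk = &p->chunks[p->chunk_ib_idx];
	 *
	 * After: a pointer that is NULL when absent and usable directly.
	 */
	static int ib_fill(struct radeon_cs_parser *p)
	{
		struct radeon_cs_chunk *ib_chunk = p->chunk_ib;

		if (ib_chunk == NULL)
			return 0;
		/* use ib_chunk->length_dw, ib_chunk->user_ptr, ... */
		return 0;
	}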
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 85f38ee11888..45e54060ee97 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -227,11 +227,24 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
return ret;
}
-static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
- uint64_t gpu_addr, int hot_x, int hot_y)
+static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
+ struct radeon_bo *robj = gem_to_radeon_bo(obj);
+ uint64_t gpu_addr;
+ int ret;
+
+ ret = radeon_bo_reserve(robj, false);
+ if (unlikely(ret != 0))
+ goto fail;
+ /* Only 27 bit offset for legacy cursor */
+ ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
+ ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
+ &gpu_addr);
+ radeon_bo_unreserve(robj);
+ if (ret)
+ goto fail;
if (ASIC_IS_DCE4(rdev)) {
WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
@@ -253,18 +266,12 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
}
- if (hot_x != radeon_crtc->cursor_hot_x ||
- hot_y != radeon_crtc->cursor_hot_y) {
- int x, y;
-
- x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
- y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+ return 0;
- radeon_cursor_move_locked(crtc, x, y);
+fail:
+ drm_gem_object_unreference_unlocked(obj);
- radeon_crtc->cursor_hot_x = hot_x;
- radeon_crtc->cursor_hot_y = hot_y;
- }
+ return ret;
}
int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -276,10 +283,7 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct radeon_device *rdev = crtc->dev->dev_private;
struct drm_gem_object *obj;
- struct radeon_bo *robj;
- uint64_t gpu_addr;
int ret;
if (!handle) {
@@ -301,41 +305,76 @@ int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
return -ENOENT;
}
- robj = gem_to_radeon_bo(obj);
- ret = radeon_bo_reserve(robj, false);
- if (unlikely(ret != 0))
- goto fail;
- /* Only 27 bit offset for legacy cursor */
- ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
- ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
- &gpu_addr);
- radeon_bo_unreserve(robj);
- if (ret)
- goto fail;
-
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
radeon_lock_cursor(crtc, true);
- radeon_set_cursor(crtc, obj, gpu_addr, hot_x, hot_y);
- radeon_show_cursor(crtc);
+
+ if (hot_x != radeon_crtc->cursor_hot_x ||
+ hot_y != radeon_crtc->cursor_hot_y) {
+ int x, y;
+
+ x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
+ y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
+
+ radeon_cursor_move_locked(crtc, x, y);
+
+ radeon_crtc->cursor_hot_x = hot_x;
+ radeon_crtc->cursor_hot_y = hot_y;
+ }
+
+ ret = radeon_set_cursor(crtc, obj);
+
+ if (ret)
+ DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
+ ret);
+ else
+ radeon_show_cursor(crtc);
+
radeon_lock_cursor(crtc, false);
unpin:
if (radeon_crtc->cursor_bo) {
- robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
+ struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
ret = radeon_bo_reserve(robj, false);
if (likely(ret == 0)) {
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
}
- drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
+ if (radeon_crtc->cursor_bo != obj)
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
return 0;
-fail:
- drm_gem_object_unreference_unlocked(obj);
+}
- return ret;
+/**
+ * radeon_cursor_reset - Re-set the current cursor, if any.
+ *
+ * @crtc: drm crtc
+ *
+ * If the CRTC passed in currently has a cursor assigned, this function
+ * makes sure it's visible.
+ */
+void radeon_cursor_reset(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ int ret;
+
+ if (radeon_crtc->cursor_bo) {
+ radeon_lock_cursor(crtc, true);
+
+ radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
+ radeon_crtc->cursor_y);
+
+ ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
+ if (ret)
+ DRM_ERROR("radeon_set_cursor returned %d, not showing "
+ "cursor\n", ret);
+ else
+ radeon_show_cursor(crtc);
+
+ radeon_lock_cursor(crtc, false);
+ }
}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0ea1db83d573..29b9220ec399 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,10 +48,40 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
+/**
+ * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
+ *
+ * @info: fbdev info
+ *
+ * This function hides the cursor on all CRTCs used by fbdev.
+ */
+static int radeon_fb_helper_set_par(struct fb_info *info)
+{
+ int ret;
+
+ ret = drm_fb_helper_set_par(info);
+
+ /* XXX: with universal plane support fbdev will automatically disable
+ * all non-primary planes (including the cursor)
+ */
+ if (ret == 0) {
+ struct drm_fb_helper *fb_helper = info->par;
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++) {
+ struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
+
+ radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ }
+ }
+
+ return ret;
+}
+
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
+ .fb_set_par = radeon_fb_helper_set_par,
.fb_fillrect = cfb_fillrect,
.fb_copyarea = cfb_copyarea,
.fb_imageblit = cfb_imageblit,
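
The radeon_fb.c hook above is a thin wrapper: it delegates to the generic drm_fb_helper_set_par() and layers the driver-specific cleanup (hiding the hardware cursor) only on success. Wrapping a core helper in a driver op this way is the usual pattern when a driver needs one extra step; a hedged skeleton with hypothetical "mydrv" names:

	/* Call the core implementation first, then do the driver-specific
	 * follow-up only if it succeeded.
	 */
	static int mydrv_fb_set_par(struct fb_info *info)
	{
		int ret = drm_fb_helper_set_par(info);

		if (ret == 0) {
			/* driver-specific step, e.g. hiding HW cursors */
		}
		return ret;
	}

	static struct fb_ops mydrv_fb_ops = {
		.owner      = THIS_MODULE,
		.fb_set_par = mydrv_fb_set_par,
		/* remaining ops stay on the cfb_* / drm_fb_helper_* defaults */
	};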
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 12cfaeac1205..fe48f229043e 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -548,7 +548,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
struct ttm_validate_buffer tv, *entry;
- struct radeon_cs_reloc *vm_bos;
+ struct radeon_bo_list *vm_bos;
struct ww_acquire_ctx ticket;
struct list_head list;
unsigned domain;
@@ -564,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
if (r)
goto error_free;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f4dd26ae33e5..3cf9c1fa6475 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -800,6 +800,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
/* Get associated drm_crtc: */
drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+ if (!drmcrtc)
+ return -EINVAL;
/* Helper routine in DRM core does all the work: */
return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cafb1ccf2ec3..678b4386540d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
}
}
+ radeon_cursor_reset(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index f3d87cdd5c9d..390db897f322 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -818,6 +818,7 @@ extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y);
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern void radeon_cursor_reset(struct drm_crtc *crtc);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
unsigned int flags,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 87b00d902bf7..7d68223eb469 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -233,6 +233,13 @@ int radeon_bo_create(struct radeon_device *rdev,
if (!(rdev->flags & RADEON_IS_PCIE))
bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+#ifdef CONFIG_X86_32
+ /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
+ * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
+ */
+ bo->flags &= ~RADEON_GEM_GTT_WC;
+#endif
+
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
down_read(&rdev->pm.mclk_lock);
@@ -502,19 +509,20 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
- struct radeon_cs_reloc *lobj;
- struct radeon_bo *bo;
+ struct radeon_bo_list *lobj;
+ struct list_head duplicates;
int r;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
- r = ttm_eu_reserve_buffers(ticket, head, true);
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
if (unlikely(r != 0)) {
return r;
}
list_for_each_entry(lobj, head, tv.head) {
- bo = lobj->robj;
+ struct radeon_bo *bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
u32 allowed = lobj->allowed_domains;
@@ -562,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
}
+
+ list_for_each_entry(lobj, &duplicates, tv.head) {
+ lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
+ lobj->tiling_flags = lobj->robj->tiling_flags;
+ }
+
return 0;
}
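
With the new ttm_eu_reserve_buffers() signature used here, callers pass a second list that receives duplicate entries (the same BO referenced more than once). Duplicates are not validated again since their BO is already reserved through the primary entry; only their cached GPU offset and tiling flags are refreshed afterwards, as the hunk above does. A sketch of the calling convention; validate_bos() and the elided validation body are illustrative:

	/* Duplicates come back on their own list: skip re-validation,
	 * just copy the derived fields from the already-validated BO.
	 */
	static int validate_bos(struct radeon_device *rdev,
				struct ww_acquire_ctx *ticket,
				struct list_head *head)
	{
		struct list_head duplicates;
		int r;

		INIT_LIST_HEAD(&duplicates);
		r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
		if (unlikely(r != 0))
			return r;

		/* validate every entry on @head ... */

		/* ... then refresh gpu_offset/tiling_flags for @duplicates */
		return 0;
	}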
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9db74a96ef61..ce075cb08cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
TP_fast_assign(
__entry->ring = p->ring;
- __entry->dw = p->chunks[p->chunk_ib_idx].length_dw;
+ __entry->dw = p->chunk_ib->length_dw;
__entry->fences = radeon_fence_count_emitted(
p->rdev, p->ring);
),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index cbe7b32d181c..d02aa1d0f588 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -196,7 +196,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+ if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 11b662469253..c10b2aec6450 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
unsigned idx, cmd, offset;
uint64_t start, end;
int r;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
offset = radeon_get_ib_value(p, data0);
idx = radeon_get_ib_value(p, data1);
if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- reloc = p->relocs_ptr[(idx / 4)];
+ reloc = &p->relocs[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
[0x00000003] = 2048,
};
- if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
+ if (p->chunk_ib->length_dw % 16) {
DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
- p->chunks[p->chunk_ib_idx].length_dw);
+ p->chunk_ib->length_dw);
return -EINVAL;
}
- if (p->chunk_relocs_idx == -1) {
+ if (p->chunk_relocs == NULL) {
DRM_ERROR("No relocation chunk !\n");
return -EINVAL;
}
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
return -EINVAL;
}
- } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+ } while (p->idx < p->chunk_ib->length_dw);
if (!has_msg_cmd) {
DRM_ERROR("UVD-IBs need a msg command!\n");
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 9e85757d5599..976fe432f4e2 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
unsigned size)
{
struct radeon_cs_chunk *relocs_chunk;
- struct radeon_cs_reloc *reloc;
+ struct radeon_bo_list *reloc;
uint64_t start, end, offset;
unsigned idx;
- relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ relocs_chunk = p->chunk_relocs;
offset = radeon_get_ib_value(p, lo);
idx = radeon_get_ib_value(p, hi);
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
return -EINVAL;
}
- reloc = p->relocs_ptr[(idx / 4)];
+ reloc = &p->relocs[(idx / 4)];
start = reloc->gpu_offset;
end = start + radeon_bo_size(reloc->robj);
start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
uint32_t *size = &tmp;
int i, r;
- while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
+ while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 0b10f3a03ce2..cde48c42b30a 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
* Add the page directory to the list of BOs to
* validate for command submission (cayman+).
*/
-struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
+struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head)
{
- struct radeon_cs_reloc *list;
+ struct radeon_bo_list *list;
unsigned i, idx;
list = drm_malloc_ab(vm->max_pde_used + 2,
- sizeof(struct radeon_cs_reloc));
+ sizeof(struct radeon_bo_list));
if (!list)
return NULL;
/* add the vm page table to the list */
- list[0].gobj = NULL;
list[0].robj = vm->page_directory;
list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
list[0].tiling_flags = 0;
- list[0].handle = 0;
list_add(&list[0].tv.head, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
continue;
- list[idx].gobj = NULL;
list[idx].robj = vm->page_tables[i].bo;
list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;
list[idx].tiling_flags = 0;
- list[idx].handle = 0;
list_add(&list[idx++].tv.head, head);
}
@@ -491,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
tmp->vm = vm;
tmp->addr = bo_va->addr;
tmp->bo = radeon_bo_ref(bo_va->bo);
+ spin_lock(&vm->status_lock);
list_add(&tmp->vm_status, &vm->freed);
+ spin_unlock(&vm->status_lock);
}
interval_tree_remove(&bo_va->it, &vm->va);
@@ -802,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
*
* Global and local mutex must be locked!
*/
-static void radeon_vm_update_ptes(struct radeon_device *rdev,
- struct radeon_vm *vm,
- struct radeon_ib *ib,
- uint64_t start, uint64_t end,
- uint64_t dst, uint32_t flags)
+static int radeon_vm_update_ptes(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_ib *ib,
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint32_t flags)
{
uint64_t mask = RADEON_VM_PTE_COUNT - 1;
uint64_t last_pte = ~0, last_dst = ~0;
@@ -819,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
unsigned nptes;
uint64_t pte;
+ int r;
radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
+ r = reservation_object_reserve_shared(pt->tbo.resv);
+ if (r)
+ return r;
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
@@ -854,6 +856,8 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
last_pte + 8 * count,
last_dst, flags);
}
+
+ return 0;
}
/**
@@ -878,7 +882,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
end >>= radeon_vm_block_size;
for (i = start; i <= end; ++i)
- radeon_bo_fence(vm->page_tables[i].bo, fence, false);
+ radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}
/**
@@ -911,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
return -EINVAL;
}
+ spin_lock(&vm->status_lock);
list_del_init(&bo_va->vm_status);
+ spin_unlock(&vm->status_lock);
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -987,9 +993,13 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
}
- radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
- bo_va->it.last + 1, addr,
- radeon_vm_page_flags(bo_va->flags));
+ r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
+ bo_va->it.last + 1, addr,
+ radeon_vm_page_flags(bo_va->flags));
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
radeon_asic_vm_pad_ib(rdev, &ib);
WARN_ON(ib.length_dw > ndw);
@@ -1022,17 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
int radeon_vm_clear_freed(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va, *tmp;
+ struct radeon_bo_va *bo_va;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->freed)) {
+ bo_va = list_first_entry(&vm->freed,
+ struct radeon_bo_va, vm_status);
+ spin_unlock(&vm->status_lock);
+
r = radeon_vm_bo_update(rdev, bo_va, NULL);
radeon_bo_unref(&bo_va->bo);
radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
if (r)
return r;
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
return 0;
}
@@ -1051,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
int radeon_vm_clear_invalids(struct radeon_device *rdev,
struct radeon_vm *vm)
{
- struct radeon_bo_va *bo_va, *tmp;
+ struct radeon_bo_va *bo_va;
int r;
- list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
+ spin_lock(&vm->status_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated,
+ struct radeon_bo_va, vm_status);
+ spin_unlock(&vm->status_lock);
+
r = radeon_vm_bo_update(rdev, bo_va, NULL);
if (r)
return r;
+
+ spin_lock(&vm->status_lock);
}
+ spin_unlock(&vm->status_lock);
+
return 0;
}
@@ -1081,6 +1108,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
mutex_lock(&vm->mutex);
interval_tree_remove(&bo_va->it, &vm->va);
+ spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
if (bo_va->addr) {
@@ -1090,6 +1118,7 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
radeon_fence_unref(&bo_va->last_pt_update);
kfree(bo_va);
}
+ spin_unlock(&vm->status_lock);
mutex_unlock(&vm->mutex);
}
@@ -1110,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->addr) {
- mutex_lock(&bo_va->vm->mutex);
+ spin_lock(&bo_va->vm->status_lock);
list_del(&bo_va->vm_status);
list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
- mutex_unlock(&bo_va->vm->mutex);
+ spin_unlock(&bo_va->vm->status_lock);
}
}
}
@@ -1141,6 +1170,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
}
mutex_init(&vm->mutex);
vm->va = RB_ROOT;
+ spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->invalidated);
INIT_LIST_HEAD(&vm->freed);
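The clear_freed/clear_invalids rework above is a standard list-drain pattern: hold the new status_lock only around list manipulation and drop it across the potentially blocking radeon_vm_bo_update() call. A self-contained userspace sketch of the same shape (pthread spinlock and a hand-rolled singly linked list stand in for the kernel primitives; simplified in that the entry is unlinked before the lock is dropped):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    struct vm_like {
            pthread_spinlock_t status_lock;  /* stands in for vm->status_lock */
            struct node *freed;              /* stands in for vm->freed */
    };

    /* may block: must never be called with the spinlock held */
    static int update_entry(struct node *n) { free(n); return 0; }

    static int clear_freed(struct vm_like *vm)
    {
            pthread_spin_lock(&vm->status_lock);
            while (vm->freed) {
                    struct node *n = vm->freed;

                    vm->freed = n->next;
                    /* drop the lock across the blocking call */
                    pthread_spin_unlock(&vm->status_lock);
                    if (update_entry(n))
                            return -1;
                    pthread_spin_lock(&vm->status_lock);
            }
            pthread_spin_unlock(&vm->status_lock);
            return 0;
    }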
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index cf4c420b5572..32e354b8b0ab 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -5893,7 +5893,7 @@ static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
tmp |= TMIN(0);
WREG32(CG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
+ tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
tmp |= FDO_PWM_MODE(mode);
WREG32(CG_FDO_CTRL2, tmp);
}
@@ -6098,7 +6098,7 @@ static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
tmp |= TARGET_PERIOD(tach_period);
WREG32(CG_TACH_CTRL, tmp);
- si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
+ si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
return 0;
}
@@ -6114,7 +6114,7 @@ static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
WREG32(CG_FDO_CTRL2, tmp);
- tmp = RREG32(CG_FDO_CTRL2) & TMIN_MASK;
+ tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
tmp |= TMIN(si_pi->t_min);
WREG32(CG_FDO_CTRL2, tmp);
si_pi->fan_ctrl_is_in_default_mode = true;
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index c549c16a4fe4..4069be89e585 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -208,18 +208,18 @@
#define CG_FDO_CTRL0 0x754
#define FDO_STATIC_DUTY(x) ((x) << 0)
-#define FDO_STATIC_DUTY_MASK 0x0000000F
+#define FDO_STATIC_DUTY_MASK 0x000000FF
#define FDO_STATIC_DUTY_SHIFT 0
#define CG_FDO_CTRL1 0x758
#define FMAX_DUTY100(x) ((x) << 0)
-#define FMAX_DUTY100_MASK 0x0000000F
+#define FMAX_DUTY100_MASK 0x000000FF
#define FMAX_DUTY100_SHIFT 0
#define CG_FDO_CTRL2 0x75C
#define TMIN(x) ((x) << 0)
-#define TMIN_MASK 0x0000000F
+#define TMIN_MASK 0x000000FF
#define TMIN_SHIFT 0
#define FDO_PWM_MODE(x) ((x) << 11)
-#define FDO_PWM_MODE_MASK (3 << 11)
+#define FDO_PWM_MODE_MASK (7 << 11)
#define FDO_PWM_MODE_SHIFT 11
#define TACH_PWM_RESP_RATE(x) ((x) << 25)
#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
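The si_dpm.c hunks above fix a classic read-modify-write mistake: masking with `reg & FIELD_MASK` keeps only the stale field and wipes every other bit, while the intended `reg & ~FIELD_MASK` clears just the field being rewritten. A standalone illustration using the corrected FDO_PWM_MODE definitions:

    #include <stdint.h>

    #define FDO_PWM_MODE(x)    ((uint32_t)(x) << 11)
    #define FDO_PWM_MODE_MASK  (7u << 11)

    /* clear only the 3-bit mode field, then OR in the new value */
    static uint32_t set_pwm_mode(uint32_t reg, uint32_t mode)
    {
            reg &= ~FDO_PWM_MODE_MASK;
            reg |= FDO_PWM_MODE(mode);
            return reg;
    }

The sid.h hunk widens the masks themselves: TMIN, FDO_STATIC_DUTY and FMAX_DUTY100 are 8-bit fields and FDO_PWM_MODE a 3-bit one, so the old 4-bit and 2-bit masks truncated valid values.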
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
new file mode 100644
index 000000000000..ca9f085efa92
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -0,0 +1,17 @@
+config DRM_ROCKCHIP
+ tristate "DRM Support for Rockchip"
+ depends on DRM && ROCKCHIP_IOMMU
+ select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
+ select DRM_PANEL
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ select VIDEOMODE_HELPERS
+ help
+	  Choose this option if you have a Rockchip SoC.
+ This driver provides kernel mode setting and buffer
+ management to userspace. This driver does not provide
+ 2D or 3D acceleration; acceleration is performed by other
+ IP found on the SoC.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
new file mode 100644
index 000000000000..2cb0672f57ed
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the rockchip drm device driver. This driver provides
+# kernel mode setting and GEM buffer management.
+
+rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
+ rockchip_drm_gem.o
+
+obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
new file mode 100644
index 000000000000..a798c7c71f91
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/dma-iommu.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_graph.h>
+#include <linux/component.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_fbdev.h"
+#include "rockchip_drm_gem.h"
+
+#define DRIVER_NAME "rockchip"
+#define DRIVER_DESC	"RockChip SoC DRM"
+#define DRIVER_DATE "20140818"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+/*
+ * Attach a (component) device to the shared drm dma mapping of the master
+ * drm device. This is used by the VOPs to map GEM buffers into a common
+ * DMA mapping.
+ */
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
+ int ret;
+
+ ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ return arm_iommu_attach_device(dev, mapping);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
+
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ arm_iommu_detach_device(dev);
+}
+EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+ const struct rockchip_crtc_funcs *crtc_funcs,
+ int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ if (pipe >= ROCKCHIP_MAX_CRTC)
+ return -EINVAL;
+
+ priv->crtc_funcs[pipe] = crtc_funcs;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
+
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ if (pipe >= ROCKCHIP_MAX_CRTC)
+ return;
+
+ priv->crtc_funcs[pipe] = NULL;
+}
+EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
+
+static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
+ int pipe)
+{
+ struct drm_crtc *crtc;
+ int i = 0;
+
+ list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
+ if (i++ == pipe)
+ return crtc;
+
+ return NULL;
+}
+
+static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+ if (crtc && priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->enable_vblank)
+ return priv->crtc_funcs[pipe]->enable_vblank(crtc);
+
+ return 0;
+}
+
+static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
+
+ if (crtc && priv->crtc_funcs[pipe] &&
+ priv->crtc_funcs[pipe]->disable_vblank)
+ priv->crtc_funcs[pipe]->disable_vblank(crtc);
+}
+
+static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
+{
+ struct rockchip_drm_private *private;
+ struct dma_iommu_mapping *mapping;
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
+
+ drm_dev->dev_private = private;
+
+ drm_mode_config_init(drm_dev);
+
+ rockchip_drm_mode_config_init(drm_dev);
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms) {
+ ret = -ENOMEM;
+ goto err_config_cleanup;
+ }
+
+ /* TODO(djkurtz): fetch the mapping start/size from somewhere */
+ mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
+ SZ_2G);
+ if (IS_ERR(mapping)) {
+ ret = PTR_ERR(mapping);
+ goto err_config_cleanup;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ goto err_release_mapping;
+
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+ ret = arm_iommu_attach_device(dev, mapping);
+ if (ret)
+ goto err_release_mapping;
+
+ /* Try to bind all sub drivers. */
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ goto err_detach_device;
+
+ /* init kms poll for handling hpd */
+ drm_kms_helper_poll_init(drm_dev);
+
+ /*
+ * Enable drm irq mode: with irq_enabled = true, the vblank
+ * machinery can be used.
+ */
+ drm_dev->irq_enabled = true;
+
+ ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
+ if (ret)
+ goto err_kms_helper_poll_fini;
+
+ /*
+ * With vblank_disable_allowed = true, the vblank interrupt is
+ * disabled by the drm core's timer once the last user gives up
+ * its reference (i.e. after drm_vblank_put() is called).
+ */
+ drm_dev->vblank_disable_allowed = true;
+
+ ret = rockchip_drm_fbdev_init(drm_dev);
+ if (ret)
+ goto err_vblank_cleanup;
+
+ return 0;
+err_vblank_cleanup:
+ drm_vblank_cleanup(drm_dev);
+err_kms_helper_poll_fini:
+ drm_kms_helper_poll_fini(drm_dev);
+ component_unbind_all(dev, drm_dev);
+err_detach_device:
+ arm_iommu_detach_device(dev);
+err_release_mapping:
+ arm_iommu_release_mapping(dev->archdata.mapping);
+err_config_cleanup:
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev->dev_private = NULL;
+ return ret;
+}
+
+static int rockchip_drm_unload(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ rockchip_drm_fbdev_fini(drm_dev);
+ drm_vblank_cleanup(drm_dev);
+ drm_kms_helper_poll_fini(drm_dev);
+ component_unbind_all(dev, drm_dev);
+ arm_iommu_detach_device(dev);
+ arm_iommu_release_mapping(dev->archdata.mapping);
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev->dev_private = NULL;
+
+ return 0;
+}
+
+void rockchip_drm_lastclose(struct drm_device *dev)
+{
+ struct rockchip_drm_private *priv = dev->dev_private;
+
+ drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
+}
+
+static const struct file_operations rockchip_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = rockchip_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .release = drm_release,
+};
+
+const struct vm_operations_struct rockchip_drm_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static struct drm_driver rockchip_drm_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
+ .load = rockchip_drm_load,
+ .unload = rockchip_drm_unload,
+ .lastclose = rockchip_drm_lastclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = rockchip_drm_crtc_enable_vblank,
+ .disable_vblank = rockchip_drm_crtc_disable_vblank,
+ .gem_vm_ops = &rockchip_drm_vm_ops,
+ .gem_free_object = rockchip_gem_free_object,
+ .dumb_create = rockchip_gem_dumb_create,
+ .dumb_map_offset = rockchip_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
+ .gem_prime_vmap = rockchip_gem_prime_vmap,
+ .gem_prime_vunmap = rockchip_gem_prime_vunmap,
+ .gem_prime_mmap = rockchip_gem_mmap_buf,
+ .fops = &rockchip_drm_driver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_drm_sys_suspend(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_connector *connector;
+
+ if (!drm)
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int old_dpms = connector->dpms;
+
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
+
+ /* Set the old mode back to the connector for resume */
+ connector->dpms = old_dpms;
+ }
+ drm_modeset_unlock_all(drm);
+
+ return 0;
+}
+
+static int rockchip_drm_sys_resume(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+ struct drm_connector *connector;
+ enum drm_connector_status status;
+ bool changed = false;
+
+ if (!drm)
+ return 0;
+
+ drm_modeset_lock_all(drm);
+ list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
+ int desired_mode = connector->dpms;
+
+ /*
+ * At suspend time the desired dpms mode was saved in
+ * connector->dpms while the hardware was switched off,
+ * so the connector's actual state at this point is
+ * DRM_MODE_DPMS_OFF.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+
+ /*
+ * If the connector has been disconnected during suspend,
+ * disconnect it from the encoder and leave it off. We'll notify
+ * userspace at the end.
+ */
+ if (desired_mode == DRM_MODE_DPMS_ON) {
+ status = connector->funcs->detect(connector, true);
+ if (status == connector_status_disconnected) {
+ connector->encoder = NULL;
+ connector->status = status;
+ changed = true;
+ continue;
+ }
+ }
+ if (connector->funcs->dpms)
+ connector->funcs->dpms(connector, desired_mode);
+ }
+ drm_modeset_unlock_all(drm);
+
+ drm_helper_resume_force_mode(drm);
+
+ if (changed)
+ drm_kms_helper_hotplug_event(drm);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops rockchip_drm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
+ rockchip_drm_sys_resume)
+};
+
+/*
+ * rockchip_drm_encoder_get_mux_id - look up the mux id feeding an encoder
+ * @node: device tree node containing encoder input ports
+ * @encoder: drm_encoder
+ *
+ * Returns the endpoint id of the port linking @node to the encoder's crtc,
+ * or a negative error code.
+ */
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder)
+{
+ struct device_node *ep = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct of_endpoint endpoint;
+ struct device_node *port;
+ int ret;
+
+ if (!node || !crtc)
+ return -EINVAL;
+
+ do {
+ ep = of_graph_get_next_endpoint(node, ep);
+ if (!ep)
+ break;
+
+ port = of_graph_get_remote_port(ep);
+ of_node_put(port);
+ if (port == crtc->port) {
+ ret = of_graph_parse_endpoint(ep, &endpoint);
+ return ret ?: endpoint.id;
+ }
+ } while (ep);
+
+ return -EINVAL;
+}
+
+static int compare_of(struct device *dev, void *data)
+{
+ struct device_node *np = data;
+
+ return dev->of_node == np;
+}
+
+static void rockchip_add_endpoints(struct device *dev,
+ struct component_match **match,
+ struct device_node *port)
+{
+ struct device_node *ep, *remote;
+
+ for_each_child_of_node(port, ep) {
+ remote = of_graph_get_remote_port_parent(ep);
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
+ continue;
+ } else if (!of_device_is_available(remote->parent)) {
+ dev_warn(dev, "parent device of %s is not available\n",
+ remote->full_name);
+ of_node_put(remote);
+ continue;
+ }
+
+ component_match_add(dev, match, compare_of, remote);
+ of_node_put(remote);
+ }
+}
+
+static int rockchip_drm_bind(struct device *dev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&rockchip_drm_driver, dev);
+ if (!drm)
+ return -ENOMEM;
+
+ ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_free;
+
+ dev_set_drvdata(dev, drm);
+
+ return 0;
+
+err_free:
+ drm_dev_unref(drm);
+ return ret;
+}
+
+static void rockchip_drm_unbind(struct device *dev)
+{
+ struct drm_device *drm = dev_get_drvdata(dev);
+
+ drm_dev_unregister(drm);
+ drm_dev_unref(drm);
+ dev_set_drvdata(dev, NULL);
+}
+
+static const struct component_master_ops rockchip_drm_ops = {
+ .bind = rockchip_drm_bind,
+ .unbind = rockchip_drm_unbind,
+};
+
+static int rockchip_drm_platform_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct component_match *match = NULL;
+ struct device_node *np = dev->of_node;
+ struct device_node *port;
+ int i;
+
+ if (!np)
+ return -ENODEV;
+ /*
+ * Bind the crtc ports first, so that
+ * drm_of_find_possible_crtcs called from encoder .bind callbacks
+ * works as expected.
+ */
+ for (i = 0;; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ if (!of_device_is_available(port->parent)) {
+ of_node_put(port);
+ continue;
+ }
+
+ component_match_add(dev, &match, compare_of, port->parent);
+ of_node_put(port);
+ }
+
+ if (i == 0) {
+ dev_err(dev, "missing 'ports' property\n");
+ return -ENODEV;
+ }
+
+ if (!match) {
+ dev_err(dev, "No available vop found for display-subsystem.\n");
+ return -ENODEV;
+ }
+ /*
+ * For each bound crtc, bind the encoders attached to its
+ * remote endpoint.
+ */
+ for (i = 0;; i++) {
+ port = of_parse_phandle(np, "ports", i);
+ if (!port)
+ break;
+
+ if (!of_device_is_available(port->parent)) {
+ of_node_put(port);
+ continue;
+ }
+
+ rockchip_add_endpoints(dev, &match, port);
+ of_node_put(port);
+ }
+
+ return component_master_add_with_match(dev, &rockchip_drm_ops, match);
+}
+
+static int rockchip_drm_platform_remove(struct platform_device *pdev)
+{
+ component_master_del(&pdev->dev, &rockchip_drm_ops);
+
+ return 0;
+}
+
+static const struct of_device_id rockchip_drm_dt_ids[] = {
+ { .compatible = "rockchip,display-subsystem", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
+
+static struct platform_driver rockchip_drm_platform_driver = {
+ .probe = rockchip_drm_platform_probe,
+ .remove = rockchip_drm_platform_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "rockchip-drm",
+ .of_match_table = rockchip_drm_dt_ids,
+ .pm = &rockchip_drm_pm_ops,
+ },
+};
+
+module_platform_driver(rockchip_drm_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP DRM Driver");
+MODULE_LICENSE("GPL v2");
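The vblank hooks above dispatch through the per-pipe rockchip_crtc_funcs table that crtc implementations register at bind time. A hypothetical sketch of the crtc side (only the two rockchip_* functions are the driver's real API; everything prefixed my_ is invented for illustration):

    static int my_vop_enable_vblank(struct drm_crtc *crtc)
    {
            /* unmask the frame-start interrupt in hardware here */
            return 0;
    }

    static void my_vop_disable_vblank(struct drm_crtc *crtc)
    {
            /* mask the interrupt again */
    }

    static const struct rockchip_crtc_funcs my_vop_crtc_funcs = {
            .enable_vblank  = my_vop_enable_vblank,
            .disable_vblank = my_vop_disable_vblank,
    };

    /* in the component's bind callback, once the crtc exists:
     *      rockchip_register_crtc_funcs(drm_dev, &my_vop_crtc_funcs, pipe);
     * with rockchip_unregister_crtc_funcs() in the unbind path. */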
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
new file mode 100644
index 000000000000..dc4e5f03ac79
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * based on exynos_drm_drv.h
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_DRV_H
+#define _ROCKCHIP_DRM_DRV_H
+
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <linux/module.h>
+#include <linux/component.h>
+
+#define ROCKCHIP_MAX_FB_BUFFER 3
+#define ROCKCHIP_MAX_CONNECTOR 2
+#define ROCKCHIP_MAX_CRTC 2
+
+struct drm_device;
+struct drm_connector;
+
+/*
+ * Rockchip drm private crtc funcs.
+ * @enable_vblank: enable crtc vblank irq.
+ * @disable_vblank: disable crtc vblank irq.
+ */
+struct rockchip_crtc_funcs {
+ int (*enable_vblank)(struct drm_crtc *crtc);
+ void (*disable_vblank)(struct drm_crtc *crtc);
+};
+
+/*
+ * Rockchip drm private structure.
+ *
+ * @fbdev_helper: fbdev emulation state.
+ * @fbdev_bo: gem object backing the fbdev framebuffer.
+ * @crtc_funcs: per-pipe vblank callbacks registered by the crtc drivers.
+ */
+struct rockchip_drm_private {
+ struct drm_fb_helper fbdev_helper;
+ struct drm_gem_object *fbdev_bo;
+ const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
+};
+
+int rockchip_register_crtc_funcs(struct drm_device *dev,
+ const struct rockchip_crtc_funcs *crtc_funcs,
+ int pipe);
+void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe);
+int rockchip_drm_encoder_get_mux_id(struct device_node *node,
+ struct drm_encoder *encoder);
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
+ int out_mode);
+int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
+ struct device *dev);
+void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
+ struct device *dev);
+
+#endif /* _ROCKCHIP_DRM_DRV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
new file mode 100644
index 000000000000..77d52893d40f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
+
+struct rockchip_drm_fb {
+ struct drm_framebuffer fb;
+ struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
+};
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane)
+{
+ struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
+
+ if (plane >= ROCKCHIP_MAX_FB_BUFFER)
+ return NULL;
+
+ return rk_fb->obj[plane];
+}
+EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
+
+static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+ struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+ struct drm_gem_object *obj;
+ int i;
+
+ for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
+ obj = rockchip_fb->obj[i];
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ drm_framebuffer_cleanup(fb);
+ kfree(rockchip_fb);
+}
+
+static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
+
+ return drm_gem_handle_create(file_priv,
+ rockchip_fb->obj[0], handle);
+}
+
+static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
+ .destroy = rockchip_drm_fb_destroy,
+ .create_handle = rockchip_drm_fb_create_handle,
+};
+
+static struct rockchip_drm_fb *
+rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object **obj, unsigned int num_planes)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+ int ret;
+ int i;
+
+ rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
+ if (!rockchip_fb)
+ return ERR_PTR(-ENOMEM);
+
+ drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
+
+ for (i = 0; i < num_planes; i++)
+ rockchip_fb->obj[i] = obj[i];
+
+ ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
+ &rockchip_drm_fb_funcs);
+ if (ret) {
+ dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
+ ret);
+ kfree(rockchip_fb);
+ return ERR_PTR(ret);
+ }
+
+ return rockchip_fb;
+}
+
+static struct drm_framebuffer *
+rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+ struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
+ struct drm_gem_object *obj;
+ unsigned int hsub;
+ unsigned int vsub;
+ int num_planes;
+ int ret;
+ int i;
+
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+ num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
+ ROCKCHIP_MAX_FB_BUFFER);
+
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ dev_err(dev->dev, "Failed to lookup GEM object\n");
+ ret = -ENXIO;
+ goto err_gem_object_unreference;
+ }
+
+ min_size = (height - 1) * mode_cmd->pitches[i] +
+ mode_cmd->offsets[i] +
+ width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+ if (obj->size < min_size) {
+ drm_gem_object_unreference_unlocked(obj);
+ ret = -EINVAL;
+ goto err_gem_object_unreference;
+ }
+ objs[i] = obj;
+ }
+
+ rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
+ if (IS_ERR(rockchip_fb)) {
+ ret = PTR_ERR(rockchip_fb);
+ goto err_gem_object_unreference;
+ }
+
+ return &rockchip_fb->fb;
+
+err_gem_object_unreference:
+ for (i--; i >= 0; i--)
+ drm_gem_object_unreference_unlocked(objs[i]);
+ return ERR_PTR(ret);
+}
+
+static void rockchip_drm_output_poll_changed(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *fb_helper = &private->fbdev_helper;
+
+ drm_fb_helper_hotplug_event(fb_helper);
+}
+
+static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
+ .fb_create = rockchip_user_fb_create,
+ .output_poll_changed = rockchip_drm_output_poll_changed,
+};
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ struct rockchip_drm_fb *rockchip_fb;
+
+ rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
+ if (IS_ERR(rockchip_fb))
+ return NULL;
+
+ return &rockchip_fb->fb;
+}
+
+void rockchip_drm_mode_config_init(struct drm_device *dev)
+{
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * Set the default maximum width and height (4096x4096); these
+ * limits are used by drm_mode_addfb() to validate framebuffer
+ * sizes.
+ */
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+
+ dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
+}
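rockchip_user_fb_create() above validates each plane's GEM object against the smallest buffer that could back it: the offset of the last scanline plus one full row of pixels. The same check as a standalone helper (illustrative, not the driver's code):

    #include <stdint.h>

    /* minimum size for plane i: plane offset, plus (height - 1) rows of
     * `pitch` bytes, plus one row of `width * cpp` bytes of pixels */
    static uint64_t fb_plane_min_size(uint32_t width, uint32_t height,
                                      uint32_t pitch, uint32_t offset,
                                      uint32_t cpp)
    {
            return (uint64_t)(height - 1) * pitch + offset +
                   (uint64_t)width * cpp;
    }

For planes other than 0, width and height are first divided by the format's chroma subsampling factors, as in the loop above.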
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
new file mode 100644
index 000000000000..09574d48226f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_FB_H
+#define _ROCKCHIP_DRM_FB_H
+
+struct drm_framebuffer *
+rockchip_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
+
+void rockchip_drm_mode_config_init(struct drm_device *dev);
+
+struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
+ unsigned int plane);
+#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
new file mode 100644
index 000000000000..a5d889a8716b
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+
+#define PREFERRED_BPP 32
+#define to_drm_private(x) \
+ container_of(x, struct rockchip_drm_private, fbdev_helper)
+
+static int rockchip_fbdev_mmap(struct fb_info *info,
+ struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct rockchip_drm_private *private = to_drm_private(helper);
+
+ return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
+}
+
+static struct fb_ops rockchip_drm_fbdev_ops = {
+ .owner = THIS_MODULE,
+ .fb_mmap = rockchip_fbdev_mmap,
+ .fb_fillrect = cfb_fillrect,
+ .fb_copyarea = cfb_copyarea,
+ .fb_imageblit = cfb_imageblit,
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct rockchip_drm_private *private = to_drm_private(helper);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_device *dev = helper->dev;
+ struct rockchip_gem_object *rk_obj;
+ struct drm_framebuffer *fb;
+ unsigned int bytes_per_pixel;
+ unsigned long offset;
+ struct fb_info *fbi;
+ size_t size;
+ int ret;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+
+ rk_obj = rockchip_gem_create_object(dev, size);
+ if (IS_ERR(rk_obj))
+ return PTR_ERR(rk_obj);
+
+ private->fbdev_bo = &rk_obj->base;
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
+ ret = -ENOMEM;
+ goto err_rockchip_gem_free_object;
+ }
+
+ helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
+ private->fbdev_bo);
+ if (IS_ERR(helper->fb)) {
+ dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
+ ret = PTR_ERR(helper->fb);
+ goto err_framebuffer_release;
+ }
+
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_FLAG_DEFAULT;
+ fbi->fbops = &rockchip_drm_fbdev_ops;
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ dev_err(dev->dev, "Failed to allocate color map.\n");
+ goto err_drm_framebuffer_unref;
+ }
+
+ fb = helper->fb;
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
+
+ offset = fbi->var.xoffset * bytes_per_pixel;
+ offset += fbi->var.yoffset * fb->pitches[0];
+
+ dev->mode_config.fb_base = 0;
+ fbi->screen_base = rk_obj->kvaddr + offset;
+ fbi->screen_size = rk_obj->base.size;
+ fbi->fix.smem_len = rk_obj->base.size;
+
+ DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n",
+ fb->width, fb->height, fb->depth, rk_obj->kvaddr,
+ offset, size);
+ return 0;
+
+err_drm_framebuffer_unref:
+ drm_framebuffer_unreference(helper->fb);
+err_framebuffer_release:
+ framebuffer_release(fbi);
+err_rockchip_gem_free_object:
+ rockchip_gem_free_object(&rk_obj->base);
+ return ret;
+}
+
+static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
+ .fb_probe = rockchip_drm_fbdev_create,
+};
+
+int rockchip_drm_fbdev_init(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+ unsigned int num_crtc;
+ int ret;
+
+ if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ return -EINVAL;
+
+ num_crtc = dev->mode_config.num_crtc;
+
+ helper = &private->fbdev_helper;
+
+ drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
+
+ ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
+ ret);
+ return ret;
+ }
+
+ ret = drm_fb_helper_single_add_all_connectors(helper);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
+ goto err_drm_fb_helper_fini;
+ }
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+ if (ret < 0) {
+ dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
+ ret);
+ goto err_drm_fb_helper_fini;
+ }
+
+ return 0;
+
+err_drm_fb_helper_fini:
+ drm_fb_helper_fini(helper);
+ return ret;
+}
+
+void rockchip_drm_fbdev_fini(struct drm_device *dev)
+{
+ struct rockchip_drm_private *private = dev->dev_private;
+ struct drm_fb_helper *helper;
+
+ helper = &private->fbdev_helper;
+
+ if (helper->fbdev) {
+ struct fb_info *info;
+ int ret;
+
+ info = helper->fbdev;
+ ret = unregister_framebuffer(info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
+ ret);
+
+ if (info->cmap.len)
+ fb_dealloc_cmap(&info->cmap);
+
+ framebuffer_release(info);
+ }
+
+ if (helper->fb)
+ drm_framebuffer_unreference(helper->fb);
+
+ drm_fb_helper_fini(helper);
+}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
new file mode 100644
index 000000000000..50432e9b5b37
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_FBDEV_H
+#define _ROCKCHIP_DRM_FBDEV_H
+
+int rockchip_drm_fbdev_init(struct drm_device *dev);
+void rockchip_drm_fbdev_fini(struct drm_device *dev);
+
+#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
new file mode 100644
index 000000000000..bc98a227dc76
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_vma_manager.h>
+
+#include <linux/dma-attrs.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+
+static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+
+ init_dma_attrs(&rk_obj->dma_attrs);
+ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+
+ /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
+ rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
+ &rk_obj->dma_addr, GFP_KERNEL,
+ &rk_obj->dma_attrs);
+ if (IS_ERR(rk_obj->kvaddr)) {
+ int ret = PTR_ERR(rk_obj->kvaddr);
+
+ DRM_ERROR("failed to allocate %#x byte dma buffer, %d",
+ obj->size, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
+{
+ struct drm_gem_object *obj = &rk_obj->base;
+ struct drm_device *drm = obj->dev;
+
+ dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
+ &rk_obj->dma_attrs);
+}
+
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ unsigned long vm_size;
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+ vm_size = vma->vm_end - vma->vm_start;
+
+ if (vm_size > obj->size)
+ return -EINVAL;
+
+ return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
+ obj->size, &rk_obj->dma_attrs);
+}
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->minor->dev;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
+ int ret;
+
+ if (drm_device_is_unplugged(dev))
+ return -ENODEV;
+
+ mutex_lock(&dev->struct_mutex);
+
+ node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+ vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
+ mutex_unlock(&dev->struct_mutex);
+ DRM_ERROR("failed to find vma node.\n");
+ return -EINVAL;
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
+ }
+
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = rockchip_gem_mmap_buf(obj, vma);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+struct rockchip_gem_object *
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
+{
+ struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+
+ rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
+ if (!rk_obj)
+ return ERR_PTR(-ENOMEM);
+
+ obj = &rk_obj->base;
+
+ drm_gem_private_object_init(drm, obj, size);
+
+ ret = rockchip_gem_alloc_buf(rk_obj);
+ if (ret)
+ goto err_free_rk_obj;
+
+ return rk_obj;
+
+err_free_rk_obj:
+ kfree(rk_obj);
+ return ERR_PTR(ret);
+}
+
+/*
+ * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void rockchip_gem_free_object(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj;
+
+ drm_gem_free_mmap_offset(obj);
+
+ rk_obj = to_rockchip_obj(obj);
+
+ rockchip_gem_free_buf(rk_obj);
+
+ kfree(rk_obj);
+}
+
+/*
+ * rockchip_gem_create_with_handle - allocate an object with the given
+ * size and create a gem handle on it
+ *
+ * Returns a struct rockchip_gem_object * on success or an ERR_PTR-encoded
+ * error on failure.
+ */
+static struct rockchip_gem_object *
+rockchip_gem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *drm, unsigned int size,
+ unsigned int *handle)
+{
+ struct rockchip_gem_object *rk_obj;
+ struct drm_gem_object *obj;
+ int ret;
+
+ rk_obj = rockchip_gem_create_object(drm, size);
+ if (IS_ERR(rk_obj))
+ return ERR_CAST(rk_obj);
+
+ obj = &rk_obj->base;
+
+ /*
+ * Allocate an id in the idr table where the object is registered;
+ * the returned handle is the id userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ goto err_handle_create;
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return rk_obj;
+
+err_handle_create:
+ rockchip_gem_free_object(obj);
+
+ return ERR_PTR(ret);
+}
+
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object.\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+ DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
+
+out:
+ drm_gem_object_unreference(obj);
+unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+/*
+ * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
+ * function
+ *
+ * This aligns the pitch and size arguments to the minimum required. Wrap
+ * this into your own function if you need bigger alignment.
+ */
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct rockchip_gem_object *rk_obj;
+ int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+
+ /*
+ * align to 64 bytes since Mali requires it.
+ */
+ min_pitch = ALIGN(min_pitch, 64);
+
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+
+ rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
+ &args->handle);
+
+ return PTR_ERR_OR_ZERO(rk_obj);
+}
+
+/*
+ * Allocate a sg_table for this GEM object.
+ * Note: Both the table's contents and the sg_table itself must be freed by
+ * the caller.
+ * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
+ */
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+ struct drm_device *drm = obj->dev;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
+ rk_obj->dma_addr, obj->size,
+ &rk_obj->dma_attrs);
+ if (ret) {
+ DRM_ERROR("failed to allocate sgt, %d\n", ret);
+ kfree(sgt);
+ return ERR_PTR(ret);
+ }
+
+ return sgt;
+}
+
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
+
+ return rk_obj->kvaddr;
+}
+
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ /* Nothing to do */
+}
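From userspace, the dumb_create and dumb_map_offset callbacks above are reached through the generic DRM ioctls. A minimal libdrm sketch (error handling trimmed; assumes an fd opened on the card node):

    #include <stdint.h>
    #include <sys/mman.h>
    #include <xf86drm.h>

    static void *map_dumb_buffer(int fd, uint32_t w, uint32_t h,
                                 uint32_t *handle, uint32_t *pitch)
    {
            struct drm_mode_create_dumb create = {
                    .width = w, .height = h, .bpp = 32,
            };
            struct drm_mode_map_dumb map = { 0 };

            /* ends up in rockchip_gem_dumb_create() */
            if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
                    return MAP_FAILED;
            *handle = create.handle;
            *pitch = create.pitch;  /* 64-byte aligned by the driver */

            /* ends up in rockchip_gem_dumb_map_offset() */
            map.handle = create.handle;
            if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
                    return MAP_FAILED;

            return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, fd, map.offset);
    }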
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
new file mode 100644
index 000000000000..67bcebe90003
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_GEM_H
+#define _ROCKCHIP_DRM_GEM_H
+
+#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
+
+struct rockchip_gem_object {
+ struct drm_gem_object base;
+ unsigned int flags;
+
+ void *kvaddr;
+ dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
+};
+
+struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *
+rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
+ struct sg_table *sgt);
+void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
+void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+
+/* drm driver mmap file operations */
+int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/* mmap a gem object to userspace. */
+int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
+ struct vm_area_struct *vma);
+
+struct rockchip_gem_object *
+ rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
+
+void rockchip_gem_free_object(struct drm_gem_object *obj);
+
+int rockchip_gem_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
new file mode 100644
index 000000000000..e7ca25b3fb38
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -0,0 +1,1455 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/component.h>
+
+#include <linux/reset.h>
+#include <linux/delay.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_gem.h"
+#include "rockchip_drm_fb.h"
+#include "rockchip_drm_vop.h"
+
+#define VOP_REG(off, _mask, s) \
+ {.offset = off, \
+ .mask = _mask, \
+ .shift = s,}
+
+#define __REG_SET_RELAXED(x, off, mask, shift, v) \
+ vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
+#define __REG_SET_NORMAL(x, off, mask, shift, v) \
+ vop_mask_write(x, off, (mask) << shift, (v) << shift)
+
+#define REG_SET(x, base, reg, v, mode) \
+ __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
+
+#define VOP_WIN_SET(x, win, name, v) \
+ REG_SET(x, win->base, win->phy->name, v, RELAXED)
+#define VOP_CTRL_SET(x, name, v) \
+ REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
+
+#define VOP_WIN_GET(x, win, name) \
+ vop_read_reg(x, win->base, &win->phy->name)
+
+#define VOP_WIN_GET_YRGBADDR(vop, win) \
+ vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
+
+#define to_vop(x) container_of(x, struct vop, crtc)
+#define to_vop_win(x) container_of(x, struct vop_win, base)
+
+struct vop_win_state {
+ struct list_head head;
+ struct drm_framebuffer *fb;
+ dma_addr_t yrgb_mst;
+ struct drm_pending_vblank_event *event;
+};
+
+struct vop_win {
+ struct drm_plane base;
+ const struct vop_win_data *data;
+ struct vop *vop;
+
+ struct list_head pending;
+ struct vop_win_state *active;
+};
+
+struct vop {
+ struct drm_crtc crtc;
+ struct device *dev;
+ struct drm_device *drm_dev;
+ unsigned int dpms;
+
+ int connector_type;
+ int connector_out_mode;
+
+ /* protects the vsync work state */
+ struct mutex vsync_mutex;
+ bool vsync_work_pending;
+
+ const struct vop_data *data;
+
+ uint32_t *regsbak;
+ void __iomem *regs;
+
+ /* length of the mapped vop register region */
+ uint32_t len;
+
+ /* only one context at a time may configure the registers */
+ spinlock_t reg_lock;
+ /* protects the vop irq registers */
+ spinlock_t irq_lock;
+
+ unsigned int irq;
+
+ /* vop AHB clk */
+ struct clk *hclk;
+ /* vop dclk (pixel clock) */
+ struct clk *dclk;
+ /* vop shared memory clk */
+ struct clk *aclk;
+
+ /* vop dclk reset */
+ struct reset_control *dclk_rst;
+
+ int pipe;
+
+ struct vop_win win[];
+};
+
+enum vop_data_format {
+ VOP_FMT_ARGB8888 = 0,
+ VOP_FMT_RGB888,
+ VOP_FMT_RGB565,
+ VOP_FMT_YUV420SP = 4,
+ VOP_FMT_YUV422SP,
+ VOP_FMT_YUV444SP,
+};
+
+struct vop_reg_data {
+ uint32_t offset;
+ uint32_t value;
+};
+
+struct vop_reg {
+ uint32_t offset;
+ uint32_t shift;
+ uint32_t mask;
+};
+
+struct vop_ctrl {
+ struct vop_reg standby;
+ struct vop_reg data_blank;
+ struct vop_reg gate_en;
+ struct vop_reg mmu_en;
+ struct vop_reg rgb_en;
+ struct vop_reg edp_en;
+ struct vop_reg hdmi_en;
+ struct vop_reg mipi_en;
+ struct vop_reg out_mode;
+ struct vop_reg dither_down;
+ struct vop_reg dither_up;
+ struct vop_reg pin_pol;
+
+ struct vop_reg htotal_pw;
+ struct vop_reg hact_st_end;
+ struct vop_reg vtotal_pw;
+ struct vop_reg vact_st_end;
+ struct vop_reg hpost_st_end;
+ struct vop_reg vpost_st_end;
+};
+
+struct vop_win_phy {
+ const uint32_t *data_formats;
+ uint32_t nformats;
+
+ struct vop_reg enable;
+ struct vop_reg format;
+ struct vop_reg act_info;
+ struct vop_reg dsp_info;
+ struct vop_reg dsp_st;
+ struct vop_reg yrgb_mst;
+ struct vop_reg uv_mst;
+ struct vop_reg yrgb_vir;
+ struct vop_reg uv_vir;
+
+ struct vop_reg dst_alpha_ctl;
+ struct vop_reg src_alpha_ctl;
+};
+
+struct vop_win_data {
+ uint32_t base;
+ const struct vop_win_phy *phy;
+ enum drm_plane_type type;
+};
+
+struct vop_data {
+ const struct vop_reg_data *init_table;
+ unsigned int table_size;
+ const struct vop_ctrl *ctrl;
+ const struct vop_win_data *win;
+ unsigned int win_size;
+};
+
+static const uint32_t formats_01[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV24,
+};
+
+static const uint32_t formats_234[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGB565,
+};
+
+static const struct vop_win_phy win01_data = {
+ .data_formats = formats_01,
+ .nformats = ARRAY_SIZE(formats_01),
+ .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
+ .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
+ .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
+ .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
+ .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy win23_data = {
+ .data_formats = formats_234,
+ .nformats = ARRAY_SIZE(formats_234),
+ .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
+ .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
+ .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
+ .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
+ .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
+};
+
+static const struct vop_win_phy cursor_data = {
+ .data_formats = formats_234,
+ .nformats = ARRAY_SIZE(formats_234),
+ .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
+ .format = VOP_REG(HWC_CTRL0, 0x7, 1),
+ .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
+};
+
+static const struct vop_ctrl ctrl_data = {
+ .standby = VOP_REG(SYS_CTRL, 0x1, 22),
+ .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
+ .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
+ .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
+ .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
+ .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
+ .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
+ .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
+ .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
+ .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
+ .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
+ .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
+ .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
+ .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
+ .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
+ .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
+ .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
+ .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
+};
+
+static const struct vop_reg_data vop_init_reg_table[] = {
+ {SYS_CTRL, 0x00c00000},
+ {DSP_CTRL0, 0x00000000},
+ {WIN0_CTRL0, 0x00000080},
+ {WIN1_CTRL0, 0x00000080},
+};
+
+/*
+ * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
+ * special support to get alpha blending working. For now, just use overlay
+ * window 1 for the drm cursor.
+ */
+static const struct vop_win_data rk3288_vop_win_data[] = {
+ { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
+ { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
+};
+
+static const struct vop_data rk3288_vop = {
+ .init_table = vop_init_reg_table,
+ .table_size = ARRAY_SIZE(vop_init_reg_table),
+ .ctrl = &ctrl_data,
+ .win = rk3288_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3288_vop_win_data),
+};
+
+static const struct of_device_id vop_driver_dt_match[] = {
+ { .compatible = "rockchip,rk3288-vop",
+ .data = &rk3288_vop },
+ {},
+};
+
+static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
+{
+ writel(v, vop->regs + offset);
+ vop->regsbak[offset >> 2] = v;
+}
+
+static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
+{
+ return readl(vop->regs + offset);
+}
+
+static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
+ const struct vop_reg *reg)
+{
+ return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
+}
+
+static inline void vop_cfg_done(struct vop *vop)
+{
+ writel(0x01, vop->regs + REG_CFG_DONE);
+}
+
+static inline void vop_mask_write(struct vop *vop, uint32_t offset,
+ uint32_t mask, uint32_t v)
+{
+ if (mask) {
+ uint32_t cached_val = vop->regsbak[offset >> 2];
+
+ cached_val = (cached_val & ~mask) | v;
+ writel(cached_val, vop->regs + offset);
+ vop->regsbak[offset >> 2] = cached_val;
+ }
+}
+
+static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
+ uint32_t mask, uint32_t v)
+{
+ if (mask) {
+ uint32_t cached_val = vop->regsbak[offset >> 2];
+
+ cached_val = (cached_val & ~mask) | v;
+ writel_relaxed(cached_val, vop->regs + offset);
+ vop->regsbak[offset >> 2] = cached_val;
+ }
+}
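+
+/*
+ * Usage sketch: vop_mask_write() does its read-modify-write against the
+ * cached copy in regsbak instead of reading the register back, so updates
+ * stay correct even if hardware readback does not reflect pending writes.
+ * The VOP_*_SET() macros are assumed to expand to a pre-shifted call along
+ * the lines of:
+ *
+ *	vop_mask_write(vop, SYS_CTRL, 0x1 << 22, 1 << 22);  (standby = 1)
+ */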
+
+static enum vop_data_format vop_convert_format(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return VOP_FMT_ARGB8888;
+ case DRM_FORMAT_RGB888:
+ return VOP_FMT_RGB888;
+ case DRM_FORMAT_RGB565:
+ return VOP_FMT_RGB565;
+ case DRM_FORMAT_NV12:
+ return VOP_FMT_YUV420SP;
+ case DRM_FORMAT_NV16:
+ return VOP_FMT_YUV422SP;
+ case DRM_FORMAT_NV24:
+ return VOP_FMT_YUV444SP;
+ default:
+ DRM_ERROR("unsupport format[%08x]\n", format);
+ return -EINVAL;
+ }
+}
+
+static bool is_alpha_support(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB8888:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void vop_enable(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ int ret;
+
+ ret = clk_enable(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
+ return;
+ }
+
+ ret = clk_enable(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ goto err_disable_hclk;
+ }
+
+ ret = clk_enable(vop->aclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
+ goto err_disable_dclk;
+ }
+
+ /*
+ * Slave iommu shares power, irq and clock with vop. It was associated
+ * automatically with this master device via common driver code.
+ * Now that we have enabled the clock we attach it to the shared drm
+ * mapping.
+ */
+ ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
+ if (ret) {
+ dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
+ goto err_disable_aclk;
+ }
+
+ spin_lock(&vop->reg_lock);
+
+ VOP_CTRL_SET(vop, standby, 0);
+
+ spin_unlock(&vop->reg_lock);
+
+ enable_irq(vop->irq);
+
+ drm_vblank_on(vop->drm_dev, vop->pipe);
+
+ return;
+
+err_disable_aclk:
+ clk_disable(vop->aclk);
+err_disable_dclk:
+ clk_disable(vop->dclk);
+err_disable_hclk:
+ clk_disable(vop->hclk);
+}
+
+static void vop_disable(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+
+ drm_vblank_off(crtc->dev, vop->pipe);
+
+ disable_irq(vop->irq);
+
+ /*
+ * TODO: Since standby doesn't take effect until the next vblank,
+ * when we turn off dclk below, the vop is probably still active.
+ */
+ spin_lock(&vop->reg_lock);
+
+ VOP_CTRL_SET(vop, standby, 1);
+
+ spin_unlock(&vop->reg_lock);
+ /*
+	 * disable dclk to stop the frame scan, so that the iommu can be
+	 * detached safely.
+ */
+ clk_disable(vop->dclk);
+
+ rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
+
+ clk_disable(vop->aclk);
+ clk_disable(vop->hclk);
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
+{
+ struct vop_win_state *last;
+ struct vop_win_state *active = vop_win->active;
+
+ if (list_empty(&vop_win->pending))
+ return active ? active->fb : NULL;
+
+ last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
+ return last ? last->fb : NULL;
+}
+
+/*
+ * Caller must hold vsync_mutex.
+ */
+static int vop_win_queue_fb(struct vop_win *vop_win,
+ struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
+ struct drm_pending_vblank_event *event)
+{
+ struct vop_win_state *state;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->fb = fb;
+ state->yrgb_mst = yrgb_mst;
+ state->event = event;
+
+ list_add_tail(&state->head, &vop_win->pending);
+
+ return 0;
+}
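+
+/*
+ * Lifecycle sketch, as implemented below: every plane update queues a
+ * vop_win_state on vop_win->pending; the threaded interrupt handler walks
+ * the list on frame-start interrupts, and a state counts as active once
+ * the hardware reports its yrgb_mst address (or its disable) as latched.
+ * Only then are earlier, skipped states completed and their framebuffer
+ * references dropped.
+ */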
+
+static int vop_update_plane_event(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x,
+ int crtc_y, unsigned int crtc_w,
+ unsigned int crtc_h, uint32_t src_x,
+ uint32_t src_y, uint32_t src_w,
+ uint32_t src_h,
+ struct drm_pending_vblank_event *event)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ struct vop *vop = to_vop(crtc);
+ struct drm_gem_object *obj;
+ struct rockchip_gem_object *rk_obj;
+ unsigned long offset;
+ unsigned int actual_w;
+ unsigned int actual_h;
+ unsigned int dsp_stx;
+ unsigned int dsp_sty;
+ unsigned int y_vir_stride;
+ dma_addr_t yrgb_mst;
+ enum vop_data_format format;
+ uint32_t val;
+ bool is_alpha;
+ bool visible;
+ int ret;
+ struct drm_rect dest = {
+ .x1 = crtc_x,
+ .y1 = crtc_y,
+ .x2 = crtc_x + crtc_w,
+ .y2 = crtc_y + crtc_h,
+ };
+ struct drm_rect src = {
+ /* 16.16 fixed point */
+ .x1 = src_x,
+ .y1 = src_y,
+ .x2 = src_x + src_w,
+ .y2 = src_y + src_h,
+ };
+ const struct drm_rect clip = {
+ .x2 = crtc->mode.hdisplay,
+ .y2 = crtc->mode.vdisplay,
+ };
+ bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
+
+ ret = drm_plane_helper_check_update(plane, crtc, fb,
+ &src, &dest, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, false, &visible);
+ if (ret)
+ return ret;
+
+ if (!visible)
+ return 0;
+
+ is_alpha = is_alpha_support(fb->pixel_format);
+ format = vop_convert_format(fb->pixel_format);
+ if (format < 0)
+ return format;
+
+ obj = rockchip_fb_get_gem_obj(fb, 0);
+ if (!obj) {
+ DRM_ERROR("fail to get rockchip gem object from framebuffer\n");
+ return -EINVAL;
+ }
+
+ rk_obj = to_rockchip_obj(obj);
+
+ actual_w = (src.x2 - src.x1) >> 16;
+ actual_h = (src.y2 - src.y1) >> 16;
+ crtc_x = max(0, crtc_x);
+ crtc_y = max(0, crtc_y);
+
+ dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
+ dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
+
+ offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
+ offset += (src.y1 >> 16) * fb->pitches[0];
+ yrgb_mst = rk_obj->dma_addr + offset;
+
+ y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
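+
+	/*
+	 * Worked example (illustrative): for a 1920x1080@60 CEA mode
+	 * (htotal 2200, hsync_start 2008, vtotal 1125, vsync_start 1084)
+	 * and crtc_x = crtc_y = 0, dsp_stx = 2200 - 2008 = 192 and
+	 * dsp_sty = 1125 - 1084 = 41: display coordinates are offset by
+	 * sync length plus back porch.  For an XRGB8888 fb,
+	 * offset = src_x * 4 + src_y * pitch, and y_vir_stride is the
+	 * pitch expressed in pixels.
+	 */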
+
+ /*
+ * If this plane update changes the plane's framebuffer, (or more
+ * precisely, if this update has a different framebuffer than the last
+ * update), enqueue it so we can track when it completes.
+ *
+	 * Only when we discover that this update has completed can we
+	 * unreference any previous framebuffers.
+ */
+ mutex_lock(&vop->vsync_mutex);
+ if (fb != vop_win_last_pending_fb(vop_win)) {
+ ret = drm_vblank_get(plane->dev, vop->pipe);
+ if (ret) {
+ DRM_ERROR("failed to get vblank, %d\n", ret);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ drm_framebuffer_reference(fb);
+
+ ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
+ if (ret) {
+ drm_vblank_put(plane->dev, vop->pipe);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ vop->vsync_work_pending = true;
+ }
+ mutex_unlock(&vop->vsync_mutex);
+
+ spin_lock(&vop->reg_lock);
+
+ VOP_WIN_SET(vop, win, format, format);
+ VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
+ VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
+ val = (actual_h - 1) << 16;
+ val |= (actual_w - 1) & 0xffff;
+ VOP_WIN_SET(vop, win, act_info, val);
+ VOP_WIN_SET(vop, win, dsp_info, val);
+ val = (dsp_sty - 1) << 16;
+ val |= (dsp_stx - 1) & 0xffff;
+ VOP_WIN_SET(vop, win, dsp_st, val);
+
+ if (is_alpha) {
+ VOP_WIN_SET(vop, win, dst_alpha_ctl,
+ DST_FACTOR_M0(ALPHA_SRC_INVERSE));
+ val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
+ SRC_ALPHA_M0(ALPHA_STRAIGHT) |
+ SRC_BLEND_M0(ALPHA_PER_PIX) |
+ SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
+ SRC_FACTOR_M0(ALPHA_ONE);
+ VOP_WIN_SET(vop, win, src_alpha_ctl, val);
+ } else {
+ VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
+ }
+
+ VOP_WIN_SET(vop, win, enable, 1);
+
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ return 0;
+}
+
+static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y, uint32_t src_w,
+ uint32_t src_h)
+{
+ return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
+ crtc_h, src_x, src_y, src_w, src_h,
+ NULL);
+}
+
+static int vop_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *event)
+{
+ unsigned int crtc_w, crtc_h;
+
+ crtc_w = crtc->primary->fb->width - crtc->x;
+ crtc_h = crtc->primary->fb->height - crtc->y;
+
+ return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
+ 0, 0, crtc_w, crtc_h, crtc->x << 16,
+ crtc->y << 16, crtc_w << 16,
+ crtc_h << 16, event);
+}
+
+static int vop_disable_plane(struct drm_plane *plane)
+{
+ struct vop_win *vop_win = to_vop_win(plane);
+ const struct vop_win_data *win = vop_win->data;
+ struct vop *vop;
+ int ret;
+
+ if (!plane->crtc)
+ return 0;
+
+ vop = to_vop(plane->crtc);
+
+ ret = drm_vblank_get(plane->dev, vop->pipe);
+ if (ret) {
+ DRM_ERROR("failed to get vblank, %d\n", ret);
+ return ret;
+ }
+
+ mutex_lock(&vop->vsync_mutex);
+
+ ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
+ if (ret) {
+ drm_vblank_put(plane->dev, vop->pipe);
+ mutex_unlock(&vop->vsync_mutex);
+ return ret;
+ }
+
+ vop->vsync_work_pending = true;
+ mutex_unlock(&vop->vsync_mutex);
+
+ spin_lock(&vop->reg_lock);
+ VOP_WIN_SET(vop, win, enable, 0);
+ vop_cfg_done(vop);
+ spin_unlock(&vop->reg_lock);
+
+ return 0;
+}
+
+static void vop_plane_destroy(struct drm_plane *plane)
+{
+ vop_disable_plane(plane);
+ drm_plane_cleanup(plane);
+}
+
+static const struct drm_plane_funcs vop_plane_funcs = {
+ .update_plane = vop_update_plane,
+ .disable_plane = vop_disable_plane,
+ .destroy = vop_plane_destroy,
+};
+
+int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
+ int connector_type,
+ int out_mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ vop->connector_type = connector_type;
+ vop->connector_out_mode = out_mode;
+
+ return 0;
+}
+
+static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long flags;
+
+ if (vop->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
+ spin_lock_irqsave(&vop->irq_lock, flags);
+
+ vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
+
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ return 0;
+}
+
+static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct vop *vop = to_vop(crtc);
+ unsigned long flags;
+
+ if (vop->dpms != DRM_MODE_DPMS_ON)
+ return;
+ spin_lock_irqsave(&vop->irq_lock, flags);
+ vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+}
+
+static const struct rockchip_crtc_funcs private_crtc_funcs = {
+ .enable_vblank = vop_crtc_enable_vblank,
+ .disable_vblank = vop_crtc_disable_vblank,
+};
+
+static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct vop *vop = to_vop(crtc);
+
+ DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+ if (vop->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ vop_enable(crtc);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ vop_disable(crtc);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ vop->dpms = mode;
+}
+
+static void vop_crtc_prepare(struct drm_crtc *crtc)
+{
+ vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
+ return false;
+
+ return true;
+}
+
+static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ int ret;
+
+ crtc->x = x;
+ crtc->y = y;
+
+ ret = vop_update_primary_plane(crtc, NULL);
+ if (ret < 0) {
+ DRM_ERROR("fail to update plane\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vop_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ struct vop *vop = to_vop(crtc);
+ u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
+ u16 hdisplay = adjusted_mode->hdisplay;
+ u16 htotal = adjusted_mode->htotal;
+ u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
+ u16 hact_end = hact_st + hdisplay;
+ u16 vdisplay = adjusted_mode->vdisplay;
+ u16 vtotal = adjusted_mode->vtotal;
+ u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
+ u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
+ u16 vact_end = vact_st + vdisplay;
+ int ret;
+ uint32_t val;
+
+ /*
+	 * disable dclk to stop the frame scan, so that the mode can be
+	 * configured safely and the iommu enabled.
+ */
+ clk_disable(vop->dclk);
+
+ switch (vop->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ VOP_CTRL_SET(vop, rgb_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_eDP:
+ VOP_CTRL_SET(vop, edp_en, 1);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ VOP_CTRL_SET(vop, hdmi_en, 1);
+ break;
+ default:
+ DRM_ERROR("unsupport connector_type[%d]\n",
+ vop->connector_type);
+ return -EINVAL;
+ };
+ VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
+
+ val = 0x8;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
+ VOP_CTRL_SET(vop, pin_pol, val);
+
+ VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
+ val = hact_st << 16;
+ val |= hact_end;
+ VOP_CTRL_SET(vop, hact_st_end, val);
+ VOP_CTRL_SET(vop, hpost_st_end, val);
+
+ VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
+ val = vact_st << 16;
+ val |= vact_end;
+ VOP_CTRL_SET(vop, vact_st_end, val);
+ VOP_CTRL_SET(vop, vpost_st_end, val);
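+
+	/*
+	 * Example (illustrative), again for a 1920x1080@60 CEA mode:
+	 * hsync_len = 2052 - 2008 = 44, hact_st = 2200 - 2008 = 192,
+	 * hact_end = 192 + 1920 = 2112; vsync_len = 1089 - 1084 = 5,
+	 * vact_st = 1125 - 1084 = 41, vact_end = 41 + 1080 = 1121.
+	 * The post st/end registers simply mirror the active window here
+	 * since no post scaling is configured.
+	 */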
+
+ ret = vop_crtc_mode_set_base(crtc, x, y, fb);
+ if (ret)
+ return ret;
+
+ /*
+	 * reset dclk so that the whole mode configuration takes effect and
+	 * the clock starts on a correct frame.
+ */
+ reset_control_assert(vop->dclk_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(vop->dclk_rst);
+
+ clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
+ ret = clk_enable(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vop_crtc_commit(struct drm_crtc *crtc)
+{
+}
+
+static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
+ .dpms = vop_crtc_dpms,
+ .prepare = vop_crtc_prepare,
+ .mode_fixup = vop_crtc_mode_fixup,
+ .mode_set = vop_crtc_mode_set,
+ .mode_set_base = vop_crtc_mode_set_base,
+ .commit = vop_crtc_commit,
+};
+
+static int vop_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct vop *vop = to_vop(crtc);
+ struct drm_framebuffer *old_fb = crtc->primary->fb;
+ int ret;
+
+ /* when the page flip is requested, crtc's dpms should be on */
+ if (vop->dpms > DRM_MODE_DPMS_ON) {
+ DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
+ return 0;
+ }
+
+ crtc->primary->fb = fb;
+
+ ret = vop_update_primary_plane(crtc, event);
+ if (ret)
+ crtc->primary->fb = old_fb;
+
+ return ret;
+}
+
+static void vop_win_state_complete(struct vop_win *vop_win,
+ struct vop_win_state *state)
+{
+ struct vop *vop = vop_win->vop;
+ struct drm_crtc *crtc = &vop->crtc;
+ struct drm_device *drm = crtc->dev;
+ unsigned long flags;
+
+ if (state->event) {
+ spin_lock_irqsave(&drm->event_lock, flags);
+ drm_send_vblank_event(drm, -1, state->event);
+ spin_unlock_irqrestore(&drm->event_lock, flags);
+ }
+
+ list_del(&state->head);
+ drm_vblank_put(crtc->dev, vop->pipe);
+}
+
+static void vop_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+}
+
+static const struct drm_crtc_funcs vop_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .page_flip = vop_crtc_page_flip,
+ .destroy = vop_crtc_destroy,
+};
+
+static bool vop_win_state_is_active(struct vop_win *vop_win,
+ struct vop_win_state *state)
+{
+ bool active = false;
+
+ if (state->fb) {
+ dma_addr_t yrgb_mst;
+
+ /* check yrgb_mst to tell if pending_fb is now front */
+ yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
+
+ active = (yrgb_mst == state->yrgb_mst);
+ } else {
+ bool enabled;
+
+ /* if enable bit is clear, plane is now disabled */
+ enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
+
+ active = (enabled == 0);
+ }
+
+ return active;
+}
+
+static void vop_win_state_destroy(struct vop_win_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+
+ if (fb)
+ drm_framebuffer_unreference(fb);
+
+ kfree(state);
+}
+
+static void vop_win_update_state(struct vop_win *vop_win)
+{
+ struct vop_win_state *state, *n, *new_active = NULL;
+
+ /* Check if any pending states are now active */
+ list_for_each_entry(state, &vop_win->pending, head)
+ if (vop_win_state_is_active(vop_win, state)) {
+ new_active = state;
+ break;
+ }
+
+ if (!new_active)
+ return;
+
+ /*
+ * Destroy any 'skipped' pending states - states that were queued
+ * before the newly active state.
+ */
+ list_for_each_entry_safe(state, n, &vop_win->pending, head) {
+ if (state == new_active)
+ break;
+ vop_win_state_complete(vop_win, state);
+ vop_win_state_destroy(state);
+ }
+
+ vop_win_state_complete(vop_win, new_active);
+
+ if (vop_win->active)
+ vop_win_state_destroy(vop_win->active);
+ vop_win->active = new_active;
+}
+
+static bool vop_win_has_pending_state(struct vop_win *vop_win)
+{
+ return !list_empty(&vop_win->pending);
+}
+
+static irqreturn_t vop_isr_thread(int irq, void *data)
+{
+ struct vop *vop = data;
+ const struct vop_data *vop_data = vop->data;
+ unsigned int i;
+
+ mutex_lock(&vop->vsync_mutex);
+
+ if (!vop->vsync_work_pending)
+ goto done;
+
+ vop->vsync_work_pending = false;
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+
+ vop_win_update_state(vop_win);
+ if (vop_win_has_pending_state(vop_win))
+ vop->vsync_work_pending = true;
+ }
+
+done:
+ mutex_unlock(&vop->vsync_mutex);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t vop_isr(int irq, void *data)
+{
+ struct vop *vop = data;
+ uint32_t intr0_reg, active_irqs;
+ unsigned long flags;
+
+ /*
+	 * INTR_CTRL0 has interrupt status, enable and clear bits; we must
+	 * hold irq_lock to avoid a race with enable/disable_vblank().
+ */
+ spin_lock_irqsave(&vop->irq_lock, flags);
+ intr0_reg = vop_readl(vop, INTR_CTRL0);
+ active_irqs = intr0_reg & INTR_MASK;
+ /* Clear all active interrupt sources */
+ if (active_irqs)
+ vop_writel(vop, INTR_CTRL0,
+ intr0_reg | (active_irqs << INTR_CLR_SHIFT));
+ spin_unlock_irqrestore(&vop->irq_lock, flags);
+
+ /* This is expected for vop iommu irqs, since the irq is shared */
+ if (!active_irqs)
+ return IRQ_NONE;
+
+ /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
+ if (!(active_irqs & FS_INTR)) {
+ DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
+ return IRQ_NONE;
+ }
+
+ drm_handle_vblank(vop->drm_dev, vop->pipe);
+
+ return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
+}
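+
+/*
+ * Interrupt flow sketch: vop_isr() runs in hard-irq context, clears the
+ * latched sources and signals the vblank; vop_isr_thread() then runs in
+ * process context (it takes vsync_mutex) to retire completed plane updates.
+ * IRQ_WAKE_THREAD is only returned while a flip is still outstanding.
+ */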
+
+static int vop_create_crtc(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ struct device *dev = vop->dev;
+ struct drm_device *drm_dev = vop->drm_dev;
+ struct drm_plane *primary = NULL, *cursor = NULL, *plane;
+ struct drm_crtc *crtc = &vop->crtc;
+ struct device_node *port;
+ int ret;
+ int i;
+
+ /*
+ * Create drm_plane for primary and cursor planes first, since we need
+ * to pass them to drm_crtc_init_with_planes, which sets the
+ * "possible_crtcs" to the newly initialized crtc.
+ */
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = vop_win->data;
+
+ if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
+ win_data->type != DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+ 0, &vop_plane_funcs,
+ win_data->phy->data_formats,
+ win_data->phy->nformats,
+ win_data->type);
+ if (ret) {
+ DRM_ERROR("failed to initialize plane\n");
+ goto err_cleanup_planes;
+ }
+
+ plane = &vop_win->base;
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ primary = plane;
+ else if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ cursor = plane;
+ }
+
+ ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
+ &vop_crtc_funcs);
+	if (ret)
+		goto err_cleanup_planes;
+
+ drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
+
+ /*
+ * Create drm_planes for overlay windows with possible_crtcs restricted
+ * to the newly created crtc.
+ */
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = vop_win->data;
+ unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
+
+ if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
+ continue;
+
+ ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
+ possible_crtcs,
+ &vop_plane_funcs,
+ win_data->phy->data_formats,
+ win_data->phy->nformats,
+ win_data->type);
+ if (ret) {
+ DRM_ERROR("failed to initialize overlay plane\n");
+ goto err_cleanup_crtc;
+ }
+ }
+
+ port = of_get_child_by_name(dev->of_node, "port");
+	if (!port) {
+		DRM_ERROR("no port node found in %s\n",
+			  dev->of_node->full_name);
+		ret = -ENOENT;
+		goto err_cleanup_crtc;
+	}
+
+ crtc->port = port;
+ vop->pipe = drm_crtc_index(crtc);
+ rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
+
+ return 0;
+
+err_cleanup_crtc:
+ drm_crtc_cleanup(crtc);
+err_cleanup_planes:
+ list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
+ drm_plane_cleanup(plane);
+ return ret;
+}
+
+static void vop_destroy_crtc(struct vop *vop)
+{
+ struct drm_crtc *crtc = &vop->crtc;
+
+ rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
+ of_node_put(crtc->port);
+ drm_crtc_cleanup(crtc);
+}
+
+static int vop_initial(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ const struct vop_reg_data *init_table = vop_data->init_table;
+ struct reset_control *ahb_rst;
+ int i, ret;
+
+ vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
+ if (IS_ERR(vop->hclk)) {
+ dev_err(vop->dev, "failed to get hclk source\n");
+ return PTR_ERR(vop->hclk);
+ }
+ vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
+ if (IS_ERR(vop->aclk)) {
+ dev_err(vop->dev, "failed to get aclk source\n");
+ return PTR_ERR(vop->aclk);
+ }
+ vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
+ if (IS_ERR(vop->dclk)) {
+ dev_err(vop->dev, "failed to get dclk source\n");
+ return PTR_ERR(vop->dclk);
+ }
+
+ ret = clk_prepare(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare hclk\n");
+ return ret;
+ }
+
+ ret = clk_prepare(vop->dclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare dclk\n");
+ goto err_unprepare_hclk;
+ }
+
+ ret = clk_prepare(vop->aclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare aclk\n");
+ goto err_unprepare_dclk;
+ }
+
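+	/*
+	 * Note: clk_prepare() may sleep, so it is done once here; the
+	 * matching atomic-safe clk_enable()/clk_disable() pairs are then
+	 * toggled at runtime from vop_enable()/vop_disable().
+	 */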
+ /*
+	 * enable hclk, so that the vop registers can be configured.
+ */
+ ret = clk_enable(vop->hclk);
+ if (ret < 0) {
+ dev_err(vop->dev, "failed to prepare aclk\n");
+ goto err_unprepare_aclk;
+ }
+ /*
+	 * do an hclk (ahb) reset to reset all vop registers.
+ */
+ ahb_rst = devm_reset_control_get(vop->dev, "ahb");
+ if (IS_ERR(ahb_rst)) {
+ dev_err(vop->dev, "failed to get ahb reset\n");
+ ret = PTR_ERR(ahb_rst);
+ goto err_disable_hclk;
+ }
+ reset_control_assert(ahb_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(ahb_rst);
+
+ memcpy(vop->regsbak, vop->regs, vop->len);
+
+ for (i = 0; i < vop_data->table_size; i++)
+ vop_writel(vop, init_table[i].offset, init_table[i].value);
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ const struct vop_win_data *win = &vop_data->win[i];
+
+ VOP_WIN_SET(vop, win, enable, 0);
+ }
+
+ vop_cfg_done(vop);
+
+ /*
+	 * do a dclk reset to let the whole configuration take effect.
+ */
+ vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
+ if (IS_ERR(vop->dclk_rst)) {
+ dev_err(vop->dev, "failed to get dclk reset\n");
+ ret = PTR_ERR(vop->dclk_rst);
+ goto err_unprepare_aclk;
+ }
+ reset_control_assert(vop->dclk_rst);
+ usleep_range(10, 20);
+ reset_control_deassert(vop->dclk_rst);
+
+ clk_disable(vop->hclk);
+
+ vop->dpms = DRM_MODE_DPMS_OFF;
+
+ return 0;
+
+err_disable_hclk:
+ clk_disable(vop->hclk);
+err_unprepare_aclk:
+ clk_unprepare(vop->aclk);
+err_unprepare_dclk:
+ clk_unprepare(vop->dclk);
+err_unprepare_hclk:
+ clk_unprepare(vop->hclk);
+ return ret;
+}
+
+/*
+ * Initialize the vop->win array elements.
+ */
+static void vop_win_init(struct vop *vop)
+{
+ const struct vop_data *vop_data = vop->data;
+ unsigned int i;
+
+ for (i = 0; i < vop_data->win_size; i++) {
+ struct vop_win *vop_win = &vop->win[i];
+ const struct vop_win_data *win_data = &vop_data->win[i];
+
+ vop_win->data = win_data;
+ vop_win->vop = vop;
+ INIT_LIST_HEAD(&vop_win->pending);
+ }
+}
+
+static int vop_bind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ const struct of_device_id *of_id;
+ const struct vop_data *vop_data;
+ struct drm_device *drm_dev = data;
+ struct vop *vop;
+ struct resource *res;
+ size_t alloc_size;
+ int ret;
+
+ of_id = of_match_device(vop_driver_dt_match, dev);
+ vop_data = of_id->data;
+ if (!vop_data)
+ return -ENODEV;
+
+ /* Allocate vop struct and its vop_win array */
+ alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
+ vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ if (!vop)
+ return -ENOMEM;
+
+ vop->dev = dev;
+ vop->data = vop_data;
+ vop->drm_dev = drm_dev;
+ dev_set_drvdata(dev, vop);
+
+ vop_win_init(vop);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	vop->regs = devm_ioremap_resource(dev, res);	/* validates res */
+	if (IS_ERR(vop->regs))
+		return PTR_ERR(vop->regs);
+	vop->len = resource_size(res);
+
+ vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
+ if (!vop->regsbak)
+ return -ENOMEM;
+
+ ret = vop_initial(vop);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "cannot initial vop dev - err %d\n", ret);
+ return ret;
+ }
+
+ vop->irq = platform_get_irq(pdev, 0);
+ if (vop->irq < 0) {
+ dev_err(dev, "cannot find irq for vop\n");
+ return vop->irq;
+ }
+
+ spin_lock_init(&vop->reg_lock);
+ spin_lock_init(&vop->irq_lock);
+
+ mutex_init(&vop->vsync_mutex);
+
+ ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
+ IRQF_SHARED, dev_name(dev), vop);
+ if (ret)
+ return ret;
+
+	/* IRQ is initially disabled; it gets enabled in vop_enable() */
+ disable_irq(vop->irq);
+
+ ret = vop_create_crtc(vop);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(&pdev->dev);
+ return 0;
+}
+
+static void vop_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct vop *vop = dev_get_drvdata(dev);
+
+ pm_runtime_disable(dev);
+ vop_destroy_crtc(vop);
+}
+
+static const struct component_ops vop_component_ops = {
+ .bind = vop_bind,
+ .unbind = vop_unbind,
+};
+
+static int vop_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ if (!dev->of_node) {
+ dev_err(dev, "can't find vop devices\n");
+ return -ENODEV;
+ }
+
+ return component_add(dev, &vop_component_ops);
+}
+
+static int vop_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &vop_component_ops);
+
+ return 0;
+}
+
+struct platform_driver vop_platform_driver = {
+ .probe = vop_probe,
+ .remove = vop_remove,
+ .driver = {
+ .name = "rockchip-vop",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(vop_driver_dt_match),
+ },
+};
+
+module_platform_driver(vop_platform_driver);
+
+MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
new file mode 100644
index 000000000000..63e9b3a084c5
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author: Mark Yao <mark.yao@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ROCKCHIP_DRM_VOP_H
+#define _ROCKCHIP_DRM_VOP_H
+
+/* register definition */
+#define REG_CFG_DONE 0x0000
+#define VERSION_INFO 0x0004
+#define SYS_CTRL 0x0008
+#define SYS_CTRL1 0x000c
+#define DSP_CTRL0 0x0010
+#define DSP_CTRL1 0x0014
+#define DSP_BG 0x0018
+#define MCU_CTRL 0x001c
+#define INTR_CTRL0 0x0020
+#define INTR_CTRL1 0x0024
+#define WIN0_CTRL0 0x0030
+#define WIN0_CTRL1 0x0034
+#define WIN0_COLOR_KEY 0x0038
+#define WIN0_VIR 0x003c
+#define WIN0_YRGB_MST 0x0040
+#define WIN0_CBR_MST 0x0044
+#define WIN0_ACT_INFO 0x0048
+#define WIN0_DSP_INFO 0x004c
+#define WIN0_DSP_ST 0x0050
+#define WIN0_SCL_FACTOR_YRGB 0x0054
+#define WIN0_SCL_FACTOR_CBR 0x0058
+#define WIN0_SCL_OFFSET 0x005c
+#define WIN0_SRC_ALPHA_CTRL 0x0060
+#define WIN0_DST_ALPHA_CTRL 0x0064
+#define WIN0_FADING_CTRL 0x0068
+/* win1 register */
+#define WIN1_CTRL0 0x0070
+#define WIN1_CTRL1 0x0074
+#define WIN1_COLOR_KEY 0x0078
+#define WIN1_VIR 0x007c
+#define WIN1_YRGB_MST 0x0080
+#define WIN1_CBR_MST 0x0084
+#define WIN1_ACT_INFO 0x0088
+#define WIN1_DSP_INFO 0x008c
+#define WIN1_DSP_ST 0x0090
+#define WIN1_SCL_FACTOR_YRGB 0x0094
+#define WIN1_SCL_FACTOR_CBR 0x0098
+#define WIN1_SCL_OFFSET 0x009c
+#define WIN1_SRC_ALPHA_CTRL 0x00a0
+#define WIN1_DST_ALPHA_CTRL 0x00a4
+#define WIN1_FADING_CTRL 0x00a8
+/* win2 register */
+#define WIN2_CTRL0 0x00b0
+#define WIN2_CTRL1 0x00b4
+#define WIN2_VIR0_1 0x00b8
+#define WIN2_VIR2_3 0x00bc
+#define WIN2_MST0 0x00c0
+#define WIN2_DSP_INFO0 0x00c4
+#define WIN2_DSP_ST0 0x00c8
+#define WIN2_COLOR_KEY 0x00cc
+#define WIN2_MST1 0x00d0
+#define WIN2_DSP_INFO1 0x00d4
+#define WIN2_DSP_ST1 0x00d8
+#define WIN2_SRC_ALPHA_CTRL 0x00dc
+#define WIN2_MST2 0x00e0
+#define WIN2_DSP_INFO2 0x00e4
+#define WIN2_DSP_ST2 0x00e8
+#define WIN2_DST_ALPHA_CTRL 0x00ec
+#define WIN2_MST3 0x00f0
+#define WIN2_DSP_INFO3 0x00f4
+#define WIN2_DSP_ST3 0x00f8
+#define WIN2_FADING_CTRL 0x00fc
+/* win3 register */
+#define WIN3_CTRL0 0x0100
+#define WIN3_CTRL1 0x0104
+#define WIN3_VIR0_1 0x0108
+#define WIN3_VIR2_3 0x010c
+#define WIN3_MST0 0x0110
+#define WIN3_DSP_INFO0 0x0114
+#define WIN3_DSP_ST0 0x0118
+#define WIN3_COLOR_KEY 0x011c
+#define WIN3_MST1 0x0120
+#define WIN3_DSP_INFO1 0x0124
+#define WIN3_DSP_ST1 0x0128
+#define WIN3_SRC_ALPHA_CTRL 0x012c
+#define WIN3_MST2 0x0130
+#define WIN3_DSP_INFO2 0x0134
+#define WIN3_DSP_ST2 0x0138
+#define WIN3_DST_ALPHA_CTRL 0x013c
+#define WIN3_MST3 0x0140
+#define WIN3_DSP_INFO3 0x0144
+#define WIN3_DSP_ST3 0x0148
+#define WIN3_FADING_CTRL 0x014c
+/* hwc register */
+#define HWC_CTRL0 0x0150
+#define HWC_CTRL1 0x0154
+#define HWC_MST 0x0158
+#define HWC_DSP_ST 0x015c
+#define HWC_SRC_ALPHA_CTRL 0x0160
+#define HWC_DST_ALPHA_CTRL 0x0164
+#define HWC_FADING_CTRL 0x0168
+/* post process register */
+#define POST_DSP_HACT_INFO 0x0170
+#define POST_DSP_VACT_INFO 0x0174
+#define POST_SCL_FACTOR_YRGB 0x0178
+#define POST_SCL_CTRL 0x0180
+#define POST_DSP_VACT_INFO_F1 0x0184
+#define DSP_HTOTAL_HS_END 0x0188
+#define DSP_HACT_ST_END 0x018c
+#define DSP_VTOTAL_VS_END 0x0190
+#define DSP_VACT_ST_END 0x0194
+#define DSP_VS_ST_END_F1 0x0198
+#define DSP_VACT_ST_END_F1 0x019c
+/* register definition end */
+
+/* interrupt define */
+#define DSP_HOLD_VALID_INTR (1 << 0)
+#define FS_INTR (1 << 1)
+#define LINE_FLAG_INTR (1 << 2)
+#define BUS_ERROR_INTR (1 << 3)
+
+#define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \
+ LINE_FLAG_INTR | BUS_ERROR_INTR)
+
+#define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4)
+#define FS_INTR_EN(x) ((x) << 5)
+#define LINE_FLAG_INTR_EN(x) ((x) << 6)
+#define BUS_ERROR_INTR_EN(x) ((x) << 7)
+#define DSP_HOLD_VALID_INTR_MASK (1 << 4)
+#define FS_INTR_MASK (1 << 5)
+#define LINE_FLAG_INTR_MASK (1 << 6)
+#define BUS_ERROR_INTR_MASK (1 << 7)
+
+#define INTR_CLR_SHIFT 8
+#define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0))
+#define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1))
+#define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2))
+#define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3))
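+
+/*
+ * INTR_CTRL0 layout, from the definitions above: bits 0-3 are the latched
+ * status bits, bits 4-7 the matching enables, and bits 8-11 the
+ * write-to-clear bits - which is why vop_isr() acknowledges a source by
+ * writing (active_irqs << INTR_CLR_SHIFT) back into the register.
+ */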
+
+#define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12)
+#define DSP_LINE_NUM_MASK (0x1fff << 12)
+
+/* src alpha ctrl define */
+#define SRC_FADING_VALUE(x) (((x) & 0xff) << 24)
+#define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16)
+#define SRC_FACTOR_M0(x) (((x) & 0x7) << 6)
+#define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5)
+#define SRC_BLEND_M0(x) (((x) & 0x3) << 3)
+#define SRC_ALPHA_M0(x) (((x) & 0x1) << 2)
+#define SRC_COLOR_M0(x) (((x) & 0x1) << 1)
+#define SRC_ALPHA_EN(x) (((x) & 0x1) << 0)
+/* dst alpha ctrl define */
+#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
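+
+/*
+ * Example, mirroring the per-pixel alpha setup in rockchip_drm_vop.c:
+ *
+ *	val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
+ *	      SRC_ALPHA_M0(ALPHA_STRAIGHT) | SRC_BLEND_M0(ALPHA_PER_PIX) |
+ *	      SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) | SRC_FACTOR_M0(ALPHA_ONE);
+ */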
+
+/*
+ * display output interface supported by rockchip lcdc
+ */
+#define ROCKCHIP_OUT_MODE_P888 0
+#define ROCKCHIP_OUT_MODE_P666 1
+#define ROCKCHIP_OUT_MODE_P565 2
+/* for a special output interface */
+#define ROCKCHIP_OUT_MODE_AAAA 15
+
+enum alpha_mode {
+ ALPHA_STRAIGHT,
+ ALPHA_INVERSE,
+};
+
+enum global_blend_mode {
+ ALPHA_GLOBAL,
+ ALPHA_PER_PIX,
+ ALPHA_PER_PIX_GLOBAL,
+};
+
+enum alpha_cal_mode {
+ ALPHA_SATURATION,
+ ALPHA_NO_SATURATION,
+};
+
+enum color_mode {
+ ALPHA_SRC_PRE_MUL,
+ ALPHA_SRC_NO_PRE_MUL,
+};
+
+enum factor_mode {
+ ALPHA_ZERO,
+ ALPHA_ONE,
+ ALPHA_SRC,
+ ALPHA_SRC_INVERSE,
+ ALPHA_SRC_GLOBAL,
+};
+
+#endif /* _ROCKCHIP_DRM_VOP_H */
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index ae8850f3e63b..d6d6b705b8c1 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -5,6 +5,7 @@ config DRM_STI
select DRM_KMS_HELPER
select DRM_GEM_CMA_HELPER
select DRM_KMS_CMA_HELPER
+ select FW_LOADER_USER_HELPER_FALLBACK
help
Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index 04ac2ceef27f..6ba9d27c1b90 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -3,6 +3,7 @@ sticompositor-y := \
sti_mixer.o \
sti_gdp.o \
sti_vid.o \
+ sti_cursor.o \
sti_compositor.o \
sti_drm_crtc.o \
sti_drm_plane.o
@@ -18,4 +19,5 @@ obj-$(CONFIG_DRM_STI) = \
sti_hda.o \
sti_tvout.o \
sticompositor.o \
- sti_drm_drv.o \ No newline at end of file
+ sti_hqvdp.o \
+ sti_drm_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 390d93e9a06c..c5cf4aea9694 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -24,14 +24,16 @@
* stiH407 compositor properties
*/
struct sti_compositor_data stih407_compositor_data = {
- .nb_subdev = 6,
+ .nb_subdev = 8,
.subdev_desc = {
+ {STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
{STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
{STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
{STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
{STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
{STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
- {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00}
+ {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
+ {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
},
};
@@ -67,11 +69,11 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
break;
case STI_GPD_SUBDEV:
case STI_VID_SUBDEV:
+ case STI_CURSOR_SUBDEV:
compo->layer[layer_id++] =
sti_layer_create(compo->dev, desc[i].id,
compo->regs + desc[i].offset);
break;
- /* case STI_CURSOR_SUBDEV : TODO */
default:
DRM_ERROR("Unknow subdev compoment type\n");
return 1;
@@ -102,33 +104,35 @@ static int sti_compositor_bind(struct device *dev, struct device *master,
enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
- if (compo->mixer[crtc])
+ if (crtc < compo->nb_mixers)
plane_type = DRM_PLANE_TYPE_PRIMARY;
switch (type) {
case STI_CUR:
cursor = sti_drm_plane_init(drm_dev,
compo->layer[i],
- (1 << crtc) - 1,
- DRM_PLANE_TYPE_CURSOR);
+ 1, DRM_PLANE_TYPE_CURSOR);
break;
case STI_GDP:
case STI_VID:
primary = sti_drm_plane_init(drm_dev,
compo->layer[i],
- (1 << crtc) - 1, plane_type);
+ (1 << compo->nb_mixers) - 1,
+ plane_type);
plane++;
break;
case STI_BCK:
+ case STI_VDP:
break;
}
/* The first planes are reserved for primary planes*/
- if (compo->mixer[crtc]) {
+ if (crtc < compo->nb_mixers && primary) {
sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
primary, cursor);
crtc++;
cursor = NULL;
+ primary = NULL;
}
}
}
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 3ea19db72e0f..019eb44c62cc 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -64,7 +64,6 @@ struct sti_compositor_data {
* @layer: array of layers
* @nb_mixers: number of mixers for this compositor
* @nb_layers: number of layers (GDP,VID,...) for this compositor
- * @enable: true if compositor is enable else false
* @vtg_vblank_nb: callback for VTG VSYNC notification
*/
struct sti_compositor {
@@ -83,7 +82,6 @@ struct sti_compositor {
struct sti_layer *layer[STI_MAX_LAYER];
int nb_mixers;
int nb_layers;
- bool enable;
struct notifier_block vtg_vblank_nb;
};
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
new file mode 100644
index 000000000000..010eaee60bf7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Vincent Abriou <vincent.abriou@st.com>
+ * Fabien Dessenne <fabien.dessenne@st.com>
+ * for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+#include <drm/drmP.h>
+
+#include "sti_cursor.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+/* Registers */
+#define CUR_CTL 0x00
+#define CUR_VPO 0x0C
+#define CUR_PML 0x14
+#define CUR_PMP 0x18
+#define CUR_SIZE 0x1C
+#define CUR_CML 0x20
+#define CUR_AWS 0x28
+#define CUR_AWE 0x2C
+
+#define CUR_CTL_CLUT_UPDATE BIT(1)
+
+#define STI_CURS_MIN_SIZE 1
+#define STI_CURS_MAX_SIZE 128
+
+/**
+ * pixmap dma buffer structure
+ *
+ * @paddr: physical address
+ * @size: buffer size
+ * @base: virtual address
+ */
+struct dma_pixmap {
+ dma_addr_t paddr;
+ size_t size;
+ void *base;
+};
+
+/**
+ * STI Cursor structure
+ *
+ * @layer: layer structure
+ * @width: cursor width
+ * @height: cursor height
+ * @clut: color look up table
+ * @clut_paddr: color look up table physical address
+ * @pixmap: pixmap dma buffer (clut8-format cursor)
+ */
+struct sti_cursor {
+ struct sti_layer layer;
+ unsigned int width;
+ unsigned int height;
+ unsigned short *clut;
+ dma_addr_t clut_paddr;
+ struct dma_pixmap pixmap;
+};
+
+static const uint32_t cursor_supported_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
+
+static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
+{
+ return cursor_supported_formats;
+}
+
+static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(cursor_supported_formats);
+}
+
+static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ u32 *src = layer->vaddr;
+ u8 *dst = cursor->pixmap.base;
+ unsigned int i, j;
+ u32 a, r, g, b;
+
+ for (i = 0; i < cursor->height; i++) {
+ for (j = 0; j < cursor->width; j++) {
+			/* Pick the 2 most significant bits of each component */
+ a = (*src >> 30) & 3;
+ r = (*src >> 22) & 3;
+ g = (*src >> 14) & 3;
+ b = (*src >> 6) & 3;
+ *dst = a << 6 | r << 4 | g << 2 | b;
+ src++;
+ dst++;
+ }
+ }
+}
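+
+/*
+ * Worked example (illustrative): an opaque red ARGB8888 pixel 0xffff0000
+ * truncates to a = 3, r = 3, g = 0, b = 0, giving the CLUT8 index
+ * (3 << 6) | (3 << 4) = 0xf0.
+ */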
+
+static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ struct drm_display_mode *mode = layer->mode;
+ u32 y, x;
+ u32 val;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ if (layer->src_w < STI_CURS_MIN_SIZE ||
+ layer->src_h < STI_CURS_MIN_SIZE ||
+ layer->src_w > STI_CURS_MAX_SIZE ||
+ layer->src_h > STI_CURS_MAX_SIZE) {
+ DRM_ERROR("Invalid cursor size (%dx%d)\n",
+ layer->src_w, layer->src_h);
+ return -EINVAL;
+ }
+
+	/* If the cursor size has changed, re-allocate the pixmap */
+ if (!cursor->pixmap.base ||
+ (cursor->width != layer->src_w) ||
+ (cursor->height != layer->src_h)) {
+ cursor->width = layer->src_w;
+ cursor->height = layer->src_h;
+
+ if (cursor->pixmap.base)
+ dma_free_writecombine(layer->dev,
+ cursor->pixmap.size,
+ cursor->pixmap.base,
+ cursor->pixmap.paddr);
+
+ cursor->pixmap.size = cursor->width * cursor->height;
+
+ cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
+ cursor->pixmap.size,
+ &cursor->pixmap.paddr,
+ GFP_KERNEL | GFP_DMA);
+ if (!cursor->pixmap.base) {
+ DRM_ERROR("Failed to allocate memory for pixmap\n");
+ return -ENOMEM;
+ }
+ }
+
+ /* Convert ARGB8888 to CLUT8 */
+ sti_cursor_argb8888_to_clut8(layer);
+
+ /* AWS and AWE depend on the mode */
+ y = sti_vtg_get_line_number(*mode, 0);
+ x = sti_vtg_get_pixel_number(*mode, 0);
+ val = y << 16 | x;
+ writel(val, layer->regs + CUR_AWS);
+ y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
+ x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
+ val = y << 16 | x;
+ writel(val, layer->regs + CUR_AWE);
+
+ if (first_prepare) {
+ /* Set and fetch CLUT */
+ writel(cursor->clut_paddr, layer->regs + CUR_CML);
+ writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
+ }
+
+ return 0;
+}
+
+static int sti_cursor_commit_layer(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ struct drm_display_mode *mode = layer->mode;
+ u32 ydo, xdo;
+
+ dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ /* Set memory location, size, and position */
+ writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
+ writel(cursor->width, layer->regs + CUR_PMP);
+ writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
+
+ ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
+	xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
+ writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
+
+ return 0;
+}
+
+static int sti_cursor_disable_layer(struct sti_layer *layer)
+{
+ return 0;
+}
+
+static void sti_cursor_init(struct sti_layer *layer)
+{
+ struct sti_cursor *cursor = to_sti_cursor(layer);
+ unsigned short *base = cursor->clut;
+ unsigned int a, r, g, b;
+
+ /* Assign CLUT values, ARGB444 format */
+ for (a = 0; a < 4; a++)
+ for (r = 0; r < 4; r++)
+ for (g = 0; g < 4; g++)
+ for (b = 0; b < 4; b++)
+ *base++ = (a * 5) << 12 |
+ (r * 5) << 8 |
+ (g * 5) << 4 |
+ (b * 5);
+}
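+
+/*
+ * Note: the "* 5" factor expands each 2-bit component (0..3) to the 4-bit
+ * range 0, 5, 10, 15, so 3 maps to full intensity 0xf in ARGB4444.
+ */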
+
+static const struct sti_layer_funcs cursor_ops = {
+ .get_formats = sti_cursor_get_formats,
+ .get_nb_formats = sti_cursor_get_nb_formats,
+ .init = sti_cursor_init,
+ .prepare = sti_cursor_prepare_layer,
+ .commit = sti_cursor_commit_layer,
+ .disable = sti_cursor_disable_layer,
+};
+
+struct sti_layer *sti_cursor_create(struct device *dev)
+{
+ struct sti_cursor *cursor;
+
+ cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
+ if (!cursor) {
+ DRM_ERROR("Failed to allocate memory for cursor\n");
+ return NULL;
+ }
+
+ /* Allocate clut buffer */
+ cursor->clut = dma_alloc_writecombine(dev,
+ 0x100 * sizeof(unsigned short),
+ &cursor->clut_paddr,
+ GFP_KERNEL | GFP_DMA);
+
+ if (!cursor->clut) {
+ DRM_ERROR("Failed to allocate memory for cursor clut\n");
+ devm_kfree(dev, cursor);
+ return NULL;
+ }
+
+ cursor->layer.ops = &cursor_ops;
+
+ return (struct sti_layer *)cursor;
+}
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
new file mode 100644
index 000000000000..3c9827404f27
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2013
+ * Authors: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_CURSOR_H_
+#define _STI_CURSOR_H_
+
+struct sti_layer *sti_cursor_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index 36a1ad3c4823..4c651c200f20 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -28,7 +28,7 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
struct device *dev = mixer->dev;
struct sti_compositor *compo = dev_get_drvdata(dev);
- compo->enable = true;
+ mixer->enabled = true;
/* Prepare and enable the compo IP clock */
if (mixer->id == STI_MIXER_MAIN) {
@@ -38,6 +38,8 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
if (clk_prepare_enable(compo->clk_compo_aux))
DRM_INFO("Failed to prepare/enable compo_aux clk\n");
}
+
+ sti_mixer_clear_all_layers(mixer);
}
static void sti_drm_crtc_commit(struct drm_crtc *crtc)
@@ -62,6 +64,8 @@ static void sti_drm_crtc_commit(struct drm_crtc *crtc)
/* Enable layer on mixer */
if (sti_mixer_set_layer_status(mixer, layer, true))
DRM_ERROR("Can not enable layer at mixer\n");
+
+ drm_crtc_vblank_on(crtc);
}
static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -144,7 +148,8 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
w = crtc->primary->fb->width - x;
h = crtc->primary->fb->height - y;
- return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ return sti_layer_prepare(layer, crtc,
+ crtc->primary->fb, &crtc->mode,
mixer->id, 0, 0, w, h, x, y, w, h);
}
@@ -171,7 +176,8 @@ static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
w = crtc->primary->fb->width - crtc->x;
h = crtc->primary->fb->height - crtc->y;
- ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode,
+ ret = sti_layer_prepare(layer, crtc,
+ crtc->primary->fb, &crtc->mode,
mixer->id, 0, 0, w, h,
crtc->x, crtc->y, w, h);
if (ret) {
@@ -196,7 +202,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
struct sti_compositor *compo = dev_get_drvdata(dev);
struct sti_layer *layer;
- if (!compo->enable)
+ if (!mixer->enabled)
return;
DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
@@ -222,7 +228,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
/* Then disable layer itself */
sti_layer_disable(layer);
- drm_vblank_off(crtc->dev, mixer->id);
+ drm_crtc_vblank_off(crtc);
/* Disable pixel clock and compo IP clocks */
if (mixer->id == STI_MIXER_MAIN) {
@@ -233,7 +239,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
clk_disable_unprepare(compo->clk_compo_aux);
}
- compo->enable = false;
+ mixer->enabled = false;
}
static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
@@ -364,7 +370,6 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
struct sti_drm_private *priv = dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
- unsigned long flags;
DRM_DEBUG_DRIVER("\n");
@@ -373,13 +378,10 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
/* free the resources of the pending requests */
- spin_lock_irqsave(&dev->event_lock, flags);
if (compo->mixer[crtc]->pending_event) {
drm_vblank_put(dev, crtc);
compo->mixer[crtc]->pending_event = NULL;
}
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
}
EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
@@ -399,6 +401,7 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
return false;
}
+EXPORT_SYMBOL(sti_drm_crtc_is_main);
int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor)
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index 223d93c3a05d..9ce7574ac873 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -67,8 +67,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
sti_drm_mode_config_init(dev);
ret = component_bind_all(dev->dev, dev);
- if (ret)
+ if (ret) {
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ kfree(private);
return ret;
+ }
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index f4118d4cac22..bb6a29339e10 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -45,7 +45,8 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
/* src_x are in 16.16 format. */
- res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id,
+ res = sti_layer_prepare(layer, crtc, fb,
+ &crtc->mode, mixer->id,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x >> 16, src_y >> 16,
src_w >> 16, src_h >> 16);
@@ -193,3 +194,4 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
return &layer->plane;
}
+EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 4e30b74559f5..32448d1d1e8f 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -73,7 +73,9 @@ struct sti_gdp_node {
struct sti_gdp_node_list {
struct sti_gdp_node *top_field;
+ dma_addr_t top_field_paddr;
struct sti_gdp_node *btm_field;
+ dma_addr_t btm_field_paddr;
};
/**
@@ -81,6 +83,8 @@ struct sti_gdp_node_list {
*
* @layer: layer structure
* @clk_pix: pixel clock for the current gdp
+ * @clk_main_parent: gdp parent clock if main path used
+ * @clk_aux_parent: gdp parent clock if aux path used
* @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
* @is_curr_top: true if the current node processed is the top field
* @node_list: array of node list
@@ -88,6 +92,8 @@ struct sti_gdp_node_list {
struct sti_gdp {
struct sti_layer layer;
struct clk *clk_pix;
+ struct clk *clk_main_parent;
+ struct clk *clk_aux_parent;
struct notifier_block vtg_field_nb;
bool is_curr_top;
struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
@@ -168,7 +174,6 @@ static int sti_gdp_get_alpharange(int format)
static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
{
int hw_nvn;
- void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
@@ -176,11 +181,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
if (!hw_nvn)
goto end;
- virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
-
for (i = 0; i < GDP_NODE_NB_BANK; i++)
- if ((virt_nvn != gdp->node_list[i].btm_field) &&
- (virt_nvn != gdp->node_list[i].top_field))
+ if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
+ (hw_nvn != gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
/* in hazardious cases restart with the first node */
@@ -204,7 +207,6 @@ static
struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
{
int hw_nvn;
- void *virt_nvn;
struct sti_gdp *gdp = to_sti_gdp(layer);
unsigned int i;
@@ -212,11 +214,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
if (!hw_nvn)
goto end;
- virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
-
for (i = 0; i < GDP_NODE_NB_BANK; i++)
- if ((virt_nvn == gdp->node_list[i].btm_field) ||
- (virt_nvn == gdp->node_list[i].top_field))
+ if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
+ (hw_nvn == gdp->node_list[i].top_field_paddr))
return &gdp->node_list[i];
end:
@@ -292,8 +292,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Same content and chained together */
memcpy(btm_field, top_field, sizeof(*btm_field));
- top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field);
- btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field);
+ top_field->gam_gdp_nvn = list->btm_field_paddr;
+ btm_field->gam_gdp_nvn = list->top_field_paddr;
/* Interlaced mode */
if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -311,6 +311,17 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
/* Set and enable gdp clock */
if (gdp->clk_pix) {
+ struct clk *clkp;
+			/*
+			 * Depending on the mixer used, the gdp pixel clock
+			 * needs a different parent clock.
+			 */
+ if (layer->mixer_id == STI_MIXER_MAIN)
+ clkp = gdp->clk_main_parent;
+ else
+ clkp = gdp->clk_aux_parent;
+
+ if (clkp)
+ clk_set_parent(gdp->clk_pix, clkp);
+
res = clk_set_rate(gdp->clk_pix, rate);
if (res < 0) {
DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
@@ -349,8 +360,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
struct sti_gdp_node *updated_top_node = updated_list->top_field;
struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
struct sti_gdp *gdp = to_sti_gdp(layer);
- u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node);
- u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node);
+ u32 dma_updated_top = updated_list->top_field_paddr;
+ u32 dma_updated_btm = updated_list->btm_field_paddr;
struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
@@ -461,16 +472,16 @@ static void sti_gdp_init(struct sti_layer *layer)
{
struct sti_gdp *gdp = to_sti_gdp(layer);
struct device_node *np = layer->dev->of_node;
- dma_addr_t dma;
+ dma_addr_t dma_addr;
void *base;
unsigned int i, size;
/* Allocate all the nodes within a single memory page */
size = sizeof(struct sti_gdp_node) *
GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
-
base = dma_alloc_writecombine(layer->dev,
- size, &dma, GFP_KERNEL | GFP_DMA);
+ size, &dma_addr, GFP_KERNEL | GFP_DMA);
+
if (!base) {
DRM_ERROR("Failed to allocate memory for GDP node\n");
return;
@@ -478,21 +489,26 @@ static void sti_gdp_init(struct sti_layer *layer)
memset(base, 0, size);
for (i = 0; i < GDP_NODE_NB_BANK; i++) {
- if (virt_to_dma(layer->dev, base) & 0xF) {
+ if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].top_field = base;
+ gdp->node_list[i].top_field_paddr = dma_addr;
+
DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
+ dma_addr += sizeof(struct sti_gdp_node);
- if (virt_to_dma(layer->dev, base) & 0xF) {
+ if (dma_addr & 0xF) {
DRM_ERROR("Mem alignment failed\n");
return;
}
gdp->node_list[i].btm_field = base;
+ gdp->node_list[i].btm_field_paddr = dma_addr;
DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
base += sizeof(struct sti_gdp_node);
+ dma_addr += sizeof(struct sti_gdp_node);
}
if (of_device_is_compatible(np, "st,stih407-compositor")) {
@@ -520,6 +536,14 @@ static void sti_gdp_init(struct sti_layer *layer)
gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
if (IS_ERR(gdp->clk_pix))
DRM_ERROR("Cannot get %s clock\n", clk_name);
+
+ gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
+ if (IS_ERR(gdp->clk_main_parent))
+ DRM_ERROR("Cannot get main_parent clock\n");
+
+ gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
+ if (IS_ERR(gdp->clk_aux_parent))
+ DRM_ERROR("Cannot get aux_parent clock\n");
}
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index b22968c08d1f..d032e024b0b8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -130,8 +130,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
/* Hot plug/unplug IRQ */
if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
- /* read gpio to get the status */
- hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
+ hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
if (hdmi->drm_dev)
drm_helper_hpd_irq_event(hdmi->drm_dev);
}
@@ -273,31 +272,32 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
/* Infoframe header */
- val = buffer[0x0];
- val |= buffer[0x1] << 8;
- val |= buffer[0x2] << 16;
+ val = buffer[0];
+ val |= buffer[1] << 8;
+ val |= buffer[2] << 16;
hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
/* Infoframe packet bytes */
- val = frame[0x0];
- val |= frame[0x1] << 8;
- val |= frame[0x2] << 16;
- val |= frame[0x3] << 24;
+ val = buffer[3];
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
- val = frame[0x4];
- val |= frame[0x5] << 8;
- val |= frame[0x6] << 16;
- val |= frame[0x7] << 24;
+ val = *(frame++);
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
- val = frame[0x8];
- val |= frame[0x9] << 8;
- val |= frame[0xA] << 16;
- val |= frame[0xB] << 24;
+ val = *(frame++);
+ val |= *(frame++) << 8;
+ val |= *(frame++) << 16;
+ val |= *(frame++) << 24;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
- val = frame[0xC];
+ val = *(frame++);
+ val |= *(frame) << 8;
hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
/* Enable transmission slot for AVI infoframe
@@ -480,17 +480,15 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
{
- struct i2c_adapter *i2c_adap;
+ struct sti_hdmi_connector *hdmi_connector
+ = to_sti_hdmi_connector(connector);
+ struct sti_hdmi *hdmi = hdmi_connector->hdmi;
struct edid *edid;
int count;
DRM_DEBUG_DRIVER("\n");
- i2c_adap = i2c_get_adapter(1);
- if (!i2c_adap)
- goto fail;
-
- edid = drm_get_edid(connector, i2c_adap);
+ edid = drm_get_edid(connector, hdmi->ddc_adapt);
if (!edid)
goto fail;
@@ -603,29 +601,38 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
struct sti_hdmi_connector *connector;
struct drm_connector *drm_connector;
struct drm_bridge *bridge;
- struct i2c_adapter *i2c_adap;
+ struct device_node *ddc;
int err;
- i2c_adap = i2c_get_adapter(1);
- if (!i2c_adap)
- return -EPROBE_DEFER;
+ ddc = of_parse_phandle(dev->of_node, "ddc", 0);
+ if (ddc) {
+ hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+ if (!hdmi->ddc_adapt)
+ return -EPROBE_DEFER;
+ }
/* Set the drm device handle */
hdmi->drm_dev = drm_dev;
encoder = sti_hdmi_find_encoder(drm_dev);
if (!encoder)
- return -ENOMEM;
+ goto err_adapt;
connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
if (!connector)
- return -ENOMEM;
+ goto err_adapt;
+
connector->hdmi = hdmi;
bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return -ENOMEM;
+ goto err_adapt;
bridge->driver_private = hdmi;
drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
@@ -662,6 +669,8 @@ err_sysfs:
err_connector:
drm_bridge_cleanup(bridge);
drm_connector_cleanup(drm_connector);
+err_adapt:
+ if (hdmi->ddc_adapt)
+ put_device(&hdmi->ddc_adapt->dev);
return -EINVAL;
}
@@ -757,13 +766,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
return PTR_ERR(hdmi->clk_audio);
}
- hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0);
- if (hdmi->hpd_gpio < 0) {
- DRM_ERROR("Failed to get hdmi hpd-gpio\n");
- return -EIO;
- }
-
- hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
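+ /* read the initial HPD state from the HDMI status register */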
+ hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
init_waitqueue_head(&hdmi->wait_event);
@@ -788,6 +791,11 @@ static int sti_hdmi_probe(struct platform_device *pdev)
static int sti_hdmi_remove(struct platform_device *pdev)
{
+ struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
+
+ if (hdmi->ddc_adapt)
+ put_device(&hdmi->ddc_adapt->dev);
+
component_del(&pdev->dev, &sti_hdmi_ops);
return 0;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 61bec6557ceb..3d22390e1f3b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -14,6 +14,9 @@
#define HDMI_STA 0x0010
#define HDMI_STA_DLL_LCK BIT(5)
+#define HDMI_STA_HOT_PLUG_SHIFT 4
+#define HDMI_STA_HOT_PLUG BIT(HDMI_STA_HOT_PLUG_SHIFT)
+
struct sti_hdmi;
struct hdmi_phy_ops {
@@ -37,7 +40,6 @@ struct hdmi_phy_ops {
* @irq_status: interrupt status register
* @phy_ops: phy start/stop operations
* @enabled: true if hdmi is enabled else false
- * @hpd_gpio: hdmi hot plug detect gpio number
* @hpd: hot plug detect status
* @wait_event: wait event
* @event_received: wait event status
@@ -57,11 +59,11 @@ struct sti_hdmi {
u32 irq_status;
struct hdmi_phy_ops *phy_ops;
bool enabled;
- int hpd_gpio;
bool hpd;
wait_queue_head_t wait_event;
bool event_received;
struct reset_control *reset;
+ struct i2c_adapter *ddc_adapt;
};
u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
new file mode 100644
index 000000000000..f3db05dab0ab
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -0,0 +1,1073 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/component.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+#include <drm/drmP.h>
+
+#include "sti_drm_plane.h"
+#include "sti_hqvdp.h"
+#include "sti_hqvdp_lut.h"
+#include "sti_layer.h"
+#include "sti_vtg.h"
+
+/* Firmware name */
+#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
+
+/* Register offsets */
+#define HQVDP_DMEM 0x00000000 /* 0x00000000 */
+#define HQVDP_PMEM 0x00040000 /* 0x00040000 */
+#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
+#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
+#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
+#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
+#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
+#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
+#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
+#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
+#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
+#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
+#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
+#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
+#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
+#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
+#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
+#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
+#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
+#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
+#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
+#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
+#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
+#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
+#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
+#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
+#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
+#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
+#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
+#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
+
+/* Plugs config */
+#define PLUG_CONTROL_ENABLE 0x00000001
+#define PLUG_PAGE_SIZE_256 0x00000002
+#define PLUG_MIN_OPC_8 0x00000003
+#define PLUG_MAX_OPC_64 0x00000006
+#define PLUG_MAX_CHK_2X 0x00000001
+#define PLUG_MAX_MSG_1X 0x00000000
+#define PLUG_MIN_SPACE_1 0x00000000
+
+/* SW reset CTRL */
+#define SW_RESET_CTRL_FULL BIT(0)
+#define SW_RESET_CTRL_CORE BIT(1)
+
+/* Startup ctrl 1 */
+#define STARTUP_CTRL1_RST_DONE BIT(0)
+#define STARTUP_CTRL1_AUTH_IDLE BIT(2)
+
+/* Startup ctrl 2 */
+#define STARTUP_CTRL2_FETCH_EN BIT(1)
+
+/* Info xP70 */
+#define INFO_XP70_FW_READY BIT(15)
+#define INFO_XP70_FW_PROCESSING BIT(14)
+#define INFO_XP70_FW_INITQUEUES BIT(13)
+
+/* SOFT_VSYNC */
+#define SOFT_VSYNC_HW 0x00000000
+#define SOFT_VSYNC_SW_CMD 0x00000001
+#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
+
+/* Reset & boot poll config */
+#define POLL_MAX_ATTEMPT 50
+#define POLL_DELAY_MS 20
+
+#define SCALE_FACTOR 8192
+#define SCALE_MAX_FOR_LEG_LUT_F 4096
+#define SCALE_MAX_FOR_LEG_LUT_E 4915
+#define SCALE_MAX_FOR_LEG_LUT_D 6654
+#define SCALE_MAX_FOR_LEG_LUT_C 8192
+
+enum sti_hvsrc_orient {
+ HVSRC_HORI,
+ HVSRC_VERT
+};
+
+/* Command structures */
+struct sti_hqvdp_top {
+ u32 config;
+ u32 mem_format;
+ u32 current_luma;
+ u32 current_enh_luma;
+ u32 current_right_luma;
+ u32 current_enh_right_luma;
+ u32 current_chroma;
+ u32 current_enh_chroma;
+ u32 current_right_chroma;
+ u32 current_enh_right_chroma;
+ u32 output_luma;
+ u32 output_chroma;
+ u32 luma_src_pitch;
+ u32 luma_enh_src_pitch;
+ u32 luma_right_src_pitch;
+ u32 luma_enh_right_src_pitch;
+ u32 chroma_src_pitch;
+ u32 chroma_enh_src_pitch;
+ u32 chroma_right_src_pitch;
+ u32 chroma_enh_right_src_pitch;
+ u32 luma_processed_pitch;
+ u32 chroma_processed_pitch;
+ u32 input_frame_size;
+ u32 input_viewport_ori;
+ u32 input_viewport_ori_right;
+ u32 input_viewport_size;
+ u32 left_view_border_width;
+ u32 right_view_border_width;
+ u32 left_view_3d_offset_width;
+ u32 right_view_3d_offset_width;
+ u32 side_stripe_color;
+ u32 crc_reset_ctrl;
+};
+
+/* Configs for interlaced: no IT, no pass thru, 3 fields */
+#define TOP_CONFIG_INTER_BTM 0x00000000
+#define TOP_CONFIG_INTER_TOP 0x00000002
+
+/* Config for progressive: no IT, no pass thru, 3 fields */
+#define TOP_CONFIG_PROGRESSIVE 0x00000001
+
+/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
+#define TOP_MEM_FORMAT_DFLT 0x00018060
+
+/* Min/Max size */
+#define MAX_WIDTH 0x1FFF
+#define MAX_HEIGHT 0x0FFF
+#define MIN_WIDTH 0x0030
+#define MIN_HEIGHT 0x0010
+
+struct sti_hqvdp_vc1re {
+ u32 ctrl_prv_csdi;
+ u32 ctrl_cur_csdi;
+ u32 ctrl_nxt_csdi;
+ u32 ctrl_cur_fmd;
+ u32 ctrl_nxt_fmd;
+};
+
+struct sti_hqvdp_fmd {
+ u32 config;
+ u32 viewport_ori;
+ u32 viewport_size;
+ u32 next_next_luma;
+ u32 next_next_right_luma;
+ u32 next_next_next_luma;
+ u32 next_next_next_right_luma;
+ u32 threshold_scd;
+ u32 threshold_rfd;
+ u32 threshold_move;
+ u32 threshold_cfd;
+};
+
+struct sti_hqvdp_csdi {
+ u32 config;
+ u32 config2;
+ u32 dcdi_config;
+ u32 prev_luma;
+ u32 prev_enh_luma;
+ u32 prev_right_luma;
+ u32 prev_enh_right_luma;
+ u32 next_luma;
+ u32 next_enh_luma;
+ u32 next_right_luma;
+ u32 next_enh_right_luma;
+ u32 prev_chroma;
+ u32 prev_enh_chroma;
+ u32 prev_right_chroma;
+ u32 prev_enh_right_chroma;
+ u32 next_chroma;
+ u32 next_enh_chroma;
+ u32 next_right_chroma;
+ u32 next_enh_right_chroma;
+ u32 prev_motion;
+ u32 prev_right_motion;
+ u32 cur_motion;
+ u32 cur_right_motion;
+ u32 next_motion;
+ u32 next_right_motion;
+};
+
+/* Config for progressive: bypass */
+#define CSDI_CONFIG_PROG 0x00000000
+/* Config for directional deinterlacing without motion */
+#define CSDI_CONFIG_INTER_DIR 0x00000016
+/* Additional configs for the fader, blender, motion, etc. deinterlace algorithms */
+#define CSDI_CONFIG2_DFLT 0x000001B3
+#define CSDI_DCDI_CONFIG_DFLT 0x00203803
+
+struct sti_hqvdp_hvsrc {
+ u32 hor_panoramic_ctrl;
+ u32 output_picture_size;
+ u32 init_horizontal;
+ u32 init_vertical;
+ u32 param_ctrl;
+ u32 yh_coef[NB_COEF];
+ u32 ch_coef[NB_COEF];
+ u32 yv_coef[NB_COEF];
+ u32 cv_coef[NB_COEF];
+ u32 hori_shift;
+ u32 vert_shift;
+};
+
+/* Default ParamCtrl: all controls enabled */
+#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF
+
+struct sti_hqvdp_iqi {
+ u32 config;
+ u32 demo_wind_size;
+ u32 pk_config;
+ u32 coeff0_coeff1;
+ u32 coeff2_coeff3;
+ u32 coeff4;
+ u32 pk_lut;
+ u32 pk_gain;
+ u32 pk_coring_level;
+ u32 cti_config;
+ u32 le_config;
+ u32 le_lut[64];
+ u32 con_bri;
+ u32 sat_gain;
+ u32 pxf_conf;
+ u32 default_color;
+};
+
+/* Default Config: IQI bypassed */
+#define IQI_CONFIG_DFLT 0x00000001
+/* Default Contrast & Brightness gain = 256 */
+#define IQI_CON_BRI_DFLT 0x00000100
+/* Default Saturation gain = 256 */
+#define IQI_SAT_GAIN_DFLT 0x00000100
+/* Default PxfConf: P2I bypassed */
+#define IQI_PXF_CONF_DFLT 0x00000001
+
+struct sti_hqvdp_top_status {
+ u32 processing_time;
+ u32 input_y_crc;
+ u32 input_uv_crc;
+};
+
+struct sti_hqvdp_fmd_status {
+ u32 fmd_repeat_move_status;
+ u32 fmd_scene_count_status;
+ u32 cfd_sum;
+ u32 field_sum;
+ u32 next_y_fmd_crc;
+ u32 next_next_y_fmd_crc;
+ u32 next_next_next_y_fmd_crc;
+};
+
+struct sti_hqvdp_csdi_status {
+ u32 prev_y_csdi_crc;
+ u32 cur_y_csdi_crc;
+ u32 next_y_csdi_crc;
+ u32 prev_uv_csdi_crc;
+ u32 cur_uv_csdi_crc;
+ u32 next_uv_csdi_crc;
+ u32 y_csdi_crc;
+ u32 uv_csdi_crc;
+ u32 uv_cup_crc;
+ u32 mot_csdi_crc;
+ u32 mot_cur_csdi_crc;
+ u32 mot_prev_csdi_crc;
+};
+
+struct sti_hqvdp_hvsrc_status {
+ u32 y_hvsrc_crc;
+ u32 u_hvsrc_crc;
+ u32 v_hvsrc_crc;
+};
+
+struct sti_hqvdp_iqi_status {
+ u32 pxf_it_status;
+ u32 y_iqi_crc;
+ u32 u_iqi_crc;
+ u32 v_iqi_crc;
+};
+
+/* Main commands. We use 2 commands: one being processed by the firmware,
+ * one ready to be fetched upon next Vsync */
+#define NB_VDP_CMD 2
+
+struct sti_hqvdp_cmd {
+ struct sti_hqvdp_top top;
+ struct sti_hqvdp_vc1re vc1re;
+ struct sti_hqvdp_fmd fmd;
+ struct sti_hqvdp_csdi csdi;
+ struct sti_hqvdp_hvsrc hvsrc;
+ struct sti_hqvdp_iqi iqi;
+ struct sti_hqvdp_top_status top_status;
+ struct sti_hqvdp_fmd_status fmd_status;
+ struct sti_hqvdp_csdi_status csdi_status;
+ struct sti_hqvdp_hvsrc_status hvsrc_status;
+ struct sti_hqvdp_iqi_status iqi_status;
+};
+
+/*
+ * STI HQVDP structure
+ *
+ * @dev: driver device
+ * @drm_dev: the drm device
+ * @regs: registers
+ * @layer: layer structure for hqvdp itself
+ * @vid_plane: VID plug used as link with compositor IP
+ * @clk: IP clock
+ * @clk_pix_main: pix main clock
+ * @reset: reset control
+ * @vtg_nb: notifier to handle VTG Vsync
+ * @btm_field_pending: true if a bottom field (interlaced frame) is waiting to be displayed
+ * @curr_field_count: number of field updates
+ * @last_field_count: number of field updates since last fps measure
+ * @hqvdp_cmd: buffer of commands
+ * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
+ * @vtg: vtg for main data path
+ */
+struct sti_hqvdp {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ void __iomem *regs;
+ struct sti_layer layer;
+ struct drm_plane *vid_plane;
+ struct clk *clk;
+ struct clk *clk_pix_main;
+ struct reset_control *reset;
+ struct notifier_block vtg_nb;
+ bool btm_field_pending;
+ unsigned int curr_field_count;
+ unsigned int last_field_count;
+ void *hqvdp_cmd;
+ dma_addr_t hqvdp_cmd_paddr;
+ struct sti_vtg *vtg;
+};
+
+#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
+
+static const uint32_t hqvdp_supported_formats[] = {
+ DRM_FORMAT_NV12,
+};
+
+static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
+{
+ return hqvdp_supported_formats;
+}
+
+static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
+{
+ return ARRAY_SIZE(hqvdp_supported_formats);
+}
+
+/**
+ * sti_hqvdp_get_free_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
+ *
+ * RETURNS:
+ * the offset of the command to be used.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
+{
+ int curr_cmd, next_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ int i;
+
+ curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+ next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
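+ /* A free slot is one that neither the CURRENT_CMD nor the
+ * NEXT_CMD mailbox register points at */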
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if ((cmd != curr_cmd) && (cmd != next_cmd))
+ return i * sizeof(struct sti_hqvdp_cmd);
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
+
+/**
+ * sti_hqvdp_get_curr_cmd
+ * @hqvdp: hqvdp structure
+ *
+ * Look for the hqvdp_cmd that is being used by the FW.
+ *
+ * RETURNS:
+ * the offset of the command currently used by the firmware.
+ * -1 in error cases
+ */
+static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
+{
+ int curr_cmd;
+ dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+ unsigned int i;
+
+ curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+
+ for (i = 0; i < NB_VDP_CMD; i++) {
+ if (cmd == curr_cmd)
+ return i * sizeof(struct sti_hqvdp_cmd);
+
+ cmd += sizeof(struct sti_hqvdp_cmd);
+ }
+
+ return -1;
+}
+
+/**
+ * sti_hqvdp_update_hvsrc
+ * @orient: horizontal or vertical
+ * @scale: scaling/zoom factor
+ * @hvsrc: the structure containing the LUT coefficients
+ *
+ * Update the Y and C LUT coefficients, as well as the shift parameters
+ *
+ * RETURNS:
+ * None.
+ */
+static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
+ struct sti_hqvdp_hvsrc *hvsrc)
+{
+ const int *coef_c, *coef_y;
+ int shift_c, shift_y;
+
+ /* Get the appropriate coef tables */
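+ /* scale is SCALE_FACTOR * output / input: 8192 means 1:1,
+ * smaller values mean downscale, larger values upscale */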
+ if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
+ coef_y = coef_lut_f_y_legacy;
+ coef_c = coef_lut_f_c_legacy;
+ shift_y = SHIFT_LUT_F_Y_LEGACY;
+ shift_c = SHIFT_LUT_F_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
+ coef_y = coef_lut_e_y_legacy;
+ coef_c = coef_lut_e_c_legacy;
+ shift_y = SHIFT_LUT_E_Y_LEGACY;
+ shift_c = SHIFT_LUT_E_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
+ coef_y = coef_lut_d_y_legacy;
+ coef_c = coef_lut_d_c_legacy;
+ shift_y = SHIFT_LUT_D_Y_LEGACY;
+ shift_c = SHIFT_LUT_D_C_LEGACY;
+ } else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
+ coef_y = coef_lut_c_y_legacy;
+ coef_c = coef_lut_c_c_legacy;
+ shift_y = SHIFT_LUT_C_Y_LEGACY;
+ shift_c = SHIFT_LUT_C_C_LEGACY;
+ } else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
+ coef_y = coef_c = coef_lut_b;
+ shift_y = shift_c = SHIFT_LUT_B;
+ } else {
+ coef_y = coef_c = coef_lut_a_legacy;
+ shift_y = shift_c = SHIFT_LUT_A_LEGACY;
+ }
+
+ if (orient == HVSRC_HORI) {
+ hvsrc->hori_shift = (shift_c << 16) | shift_y;
+ memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
+ memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
+ } else {
+ hvsrc->vert_shift = (shift_c << 16) | shift_y;
+ memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
+ memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
+ }
+}
+
+/**
+ * sti_hqvdp_check_hw_scaling
+ * @layer: hqvdp layer
+ *
+ * Check if the HW is able to perform the scaling request
+ * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
+ * Zy = OutputHeight / InputHeight
+ * LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
+ * Tx : Total video mode horizontal resolution
+ * IPClock : HQVDP IP clock (MHz)
+ * MaxNbCycles: max(InputWidth, OutputWidth)
+ * Cp: Video mode pixel clock (MHz)
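+ *
+ * Worked example (illustrative figures): for a 1080p mode
+ * (Tx = 2200, Cp = 148.5 MHz) and an assumed 400 MHz IP clock,
+ * FLOOR(LFW) = FLOOR((2200 * 400) / (1920 * 148.5)) = 3, so a
+ * 1080 -> 360 vertical downscale (CEIL(1/Zy) = 3) is accepted
+ * while 1080 -> 270 (CEIL(1/Zy) = 4) is refused.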
+ *
+ * RETURNS:
+ * True if the HW can scale.
+ */
+static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ unsigned long lfw;
+ unsigned int inv_zy;
+
+ lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
+ lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
+
+ inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
+
+ return inv_zy <= lfw;
+}
+
+/**
+ * sti_hqvdp_prepare_layer
+ * @layer: hqvdp layer
+ * @first_prepare: true if it is the first time this function is called
+ *
+ * Prepares a command for the firmware
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ struct sti_hqvdp_cmd *cmd;
+ int scale_h, scale_v;
+ int cmd_offset;
+
+ dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ /* prepare and commit VID plane */
+ hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
+ layer->crtc, layer->fb,
+ layer->dst_x, layer->dst_y,
+ layer->dst_w, layer->dst_h,
+ layer->src_x, layer->src_y,
+ layer->src_w, layer->src_h);
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return -EBUSY;
+ }
+ cmd = hqvdp->hqvdp_cmd + cmd_offset;
+
+ if (!sti_hqvdp_check_hw_scaling(layer)) {
+ DRM_ERROR("Scaling beyond HW capabilities\n");
+ return -EINVAL;
+ }
+
+ /* Static parameters, defaulting to progressive mode */
+ cmd->top.config = TOP_CONFIG_PROGRESSIVE;
+ cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
+ cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
+ cmd->csdi.config = CSDI_CONFIG_PROG;
+
+ /* VC1RE, FMD bypassed: keep everything set to 0
+ * IQI/P2I bypassed */
+ cmd->iqi.config = IQI_CONFIG_DFLT;
+ cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
+ cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
+ cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
+
+ /* Buffer planes address */
+ cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
+ cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
+
+ /* Pitches */
+ cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
+ layer->pitches[0];
+ cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
+ layer->pitches[1];
+
+ /* Input / output size
+ * Align to upper even value */
+ layer->dst_w = ALIGN(layer->dst_w, 2);
+ layer->dst_h = ALIGN(layer->dst_h, 2);
+
+ if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
+ (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
+ (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
+ (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
+ DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+ layer->src_w, layer->src_h,
+ layer->dst_w, layer->dst_h);
+ return -EINVAL;
+ }
+ cmd->top.input_viewport_size = cmd->top.input_frame_size =
+ layer->src_h << 16 | layer->src_w;
+ cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
+ cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
+
+ /* Handle interlaced */
+ if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
+ /* Top field to display */
+ cmd->top.config = TOP_CONFIG_INTER_TOP;
+
+ /* Update pitches and vert size: each field holds every other
+ * line, so halve the height and double the pitches */
+ cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
+ layer->src_w;
+ cmd->top.luma_processed_pitch *= 2;
+ cmd->top.luma_src_pitch *= 2;
+ cmd->top.chroma_processed_pitch *= 2;
+ cmd->top.chroma_src_pitch *= 2;
+
+ /* Enable directional deinterlacing processing */
+ cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
+ cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
+ cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
+ }
+
+ /* Update hvsrc lut coef */
+ scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
+ sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
+
+ scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
+ sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
+
+ if (first_prepare) {
+ /* Prevent VTG shutdown */
+ if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Failed to prepare/enable pix main clk\n");
+ return -ENXIO;
+ }
+
+ /* Register VTG Vsync callback to handle bottom fields */
+ if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
+ sti_vtg_register_client(hqvdp->vtg,
+ &hqvdp->vtg_nb, layer->mixer_id)) {
+ DRM_ERROR("Cannot register VTG notifier\n");
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static int sti_hqvdp_commit_layer(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int cmd_offset;
+
+ dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
+
+ cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ if (cmd_offset == -1) {
+ DRM_ERROR("No available hqvdp_cmd now\n");
+ return -EBUSY;
+ }
+
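+ /* Post the command address; the xP70 fetches it at the next Vsync */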
+ writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+
+ /* Interlaced: get ready to display the bottom field at next Vsync */
+ if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
+ hqvdp->btm_field_pending = true;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
+
+ return 0;
+}
+
+static int sti_hqvdp_disable_layer(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int i;
+
+ DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
+
+ /* Unregister VTG Vsync callback */
+ if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
+ sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
+ DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
+
+ /* Set next cmd to NULL */
+ writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
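+ /* Wait for the firmware to complete the current command and go idle */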
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
+ & INFO_XP70_FW_READY)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+
+ /* VTG can stop now */
+ clk_disable_unprepare(hqvdp->clk_pix_main);
+
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("XP70 could not revert to idle\n");
+ return -ENXIO;
+ }
+
+ /* disable VID plane */
+ hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
+
+ return 0;
+}
+
+/**
+ * sti_hqvdp_vtg_cb
+ * @nb: notifier block
+ * @evt: event message
+ * @data: private data
+ *
+ * Handle VTG Vsync event, display pending bottom field
+ *
+ * RETURNS:
+ * 0 on success.
+ */
+int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
+{
+ struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
+ int btm_cmd_offset, top_cmd_offset;
+ struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
+
+ if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
+ DRM_DEBUG_DRIVER("Unknown event\n");
+ return 0;
+ }
+
+ if (hqvdp->btm_field_pending) {
+ /* Create the btm field command from the current one */
+ btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
+ top_cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+ if ((btm_cmd_offset == -1) || (top_cmd_offset == -1)) {
+ DRM_ERROR("Cannot get cmds, skip btm field\n");
+ return -EBUSY;
+ }
+
+ btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
+ top_cmd = hqvdp->hqvdp_cmd + top_cmd_offset;
+
+ memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
+
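+ /* The bottom field sits one frame line below the top field:
+ * offset the buffers by half the (doubled) field pitch */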
+ btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
+ btm_cmd->top.current_luma +=
+ btm_cmd->top.luma_src_pitch / 2;
+ btm_cmd->top.current_chroma +=
+ btm_cmd->top.chroma_src_pitch / 2;
+
+ /* Post the command to mailbox */
+ writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
+ hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ hqvdp->curr_field_count++;
+ hqvdp->btm_field_pending = false;
+
+ dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
+ __func__, hqvdp->hqvdp_cmd_paddr + btm_cmd_offset);
+ }
+
+ return 0;
+}
+
+static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
+{
+ struct drm_plane *plane;
+
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ struct sti_layer *layer = to_sti_layer(plane);
+
+ if (layer->desc == id)
+ return plane;
+ }
+
+ return NULL;
+}
+
+static void sti_hqvdp_init(struct sti_layer *layer)
+{
+ struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
+ int size;
+
+ /* find the plane matching vid 0 */
+ hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
+ if (!hqvdp->vid_plane) {
+ DRM_ERROR("Cannot find Main video layer\n");
+ return;
+ }
+
+ hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
+
+ /* Allocate memory for the VDP commands */
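+ /* The firmware fetches commands by physical address, so use a
+ * contiguous DMA allocation */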
+ size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
+ hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
+ &hqvdp->hqvdp_cmd_paddr,
+ GFP_KERNEL | GFP_DMA);
+ if (!hqvdp->hqvdp_cmd) {
+ DRM_ERROR("Failed to allocate memory for VDP cmd\n");
+ return;
+ }
+
+ memset(hqvdp->hqvdp_cmd, 0, size);
+}
+
+static const struct sti_layer_funcs hqvdp_ops = {
+ .get_formats = sti_hqvdp_get_formats,
+ .get_nb_formats = sti_hqvdp_get_nb_formats,
+ .init = sti_hqvdp_init,
+ .prepare = sti_hqvdp_prepare_layer,
+ .commit = sti_hqvdp_commit_layer,
+ .disable = sti_hqvdp_disable_layer,
+};
+
+struct sti_layer *sti_hqvdp_create(struct device *dev)
+{
+ struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+
+ hqvdp->layer.ops = &hqvdp_ops;
+
+ return &hqvdp->layer;
+}
+EXPORT_SYMBOL(sti_hqvdp_create);
+
+static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
+{
+ /* Configure Plugs (same for RD & WR) */
+ writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
+ writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
+ writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
+ writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
+ writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
+ writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
+ writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);
+
+ writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
+ writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
+ writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
+ writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
+ writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
+ writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
+ writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
+}
+
+/**
+ * sti_hqvdp_start_xp70
+ * @firmware: firmware found
+ * @ctxt: hqvdp structure
+ *
+ * Run the xP70 initialization sequence
+ */
+static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
+{
+ struct sti_hqvdp *hqvdp = ctxt;
+ u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
+ u8 *data;
+ int i;
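+ /* The firmware blob starts with this header, followed by the
+ * RD plug, WR plug, PMEM and DMEM sections, in that order */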
+ struct fw_header {
+ int rd_size;
+ int wr_size;
+ int pmem_size;
+ int dmem_size;
+ } *header;
+
+ DRM_DEBUG_DRIVER("\n");
+ /* Check firmware parts */
+ if (!firmware) {
+ DRM_ERROR("Firmware not available\n");
+ return;
+ }
+
+ header = (struct fw_header *) firmware->data;
+ if (firmware->size < sizeof(*header)) {
+ DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
+ goto out;
+ }
+ if ((sizeof(*header) + header->rd_size + header->wr_size +
+ header->pmem_size + header->dmem_size) != firmware->size) {
+ DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
+ sizeof(*header), header->rd_size, header->wr_size,
+ header->pmem_size, header->dmem_size,
+ firmware->size);
+ goto out;
+ }
+
+ data = (u8 *) firmware->data;
+ data += sizeof(*header);
+ fw_rd_plug = (void *) data;
+ data += header->rd_size;
+ fw_wr_plug = (void *) data;
+ data += header->wr_size;
+ fw_pmem = (void *) data;
+ data += header->pmem_size;
+ fw_dmem = (void *) data;
+
+ /* Enable clock */
+ if (clk_prepare_enable(hqvdp->clk))
+ DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
+
+ /* Reset */
+ writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
+
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+ & STARTUP_CTRL1_RST_DONE)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("Could not reset\n");
+ goto out;
+ }
+
+ /* Init Read & Write plugs */
+ for (i = 0; i < header->rd_size / 4; i++)
+ writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
+ for (i = 0; i < header->wr_size / 4; i++)
+ writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
+
+ sti_hqvdp_init_plugs(hqvdp);
+
+ /* Authorize Idle Mode */
+ writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
+
+ /* Prevent VTG interruption during the boot */
+ writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
+ writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+
+ /* Download PMEM & DMEM */
+ for (i = 0; i < header->pmem_size / 4; i++)
+ writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
+ for (i = 0; i < header->dmem_size / 4; i++)
+ writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
+
+ /* Enable fetch */
+ writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
+
+ /* Wait end of boot */
+ for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
+ if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
+ & INFO_XP70_FW_READY)
+ break;
+ msleep(POLL_DELAY_MS);
+ }
+ if (i == POLL_MAX_ATTEMPT) {
+ DRM_ERROR("Could not boot\n");
+ goto out;
+ }
+
+ /* Launch Vsync */
+ writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
+
+ DRM_INFO("HQVDP XP70 started\n");
+out:
+ release_firmware(firmware);
+}
+
+int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
+{
+ struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct sti_layer *layer;
+ int err;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hqvdp->drm_dev = drm_dev;
+
+ /* Request for firmware */
+ err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ HQVDP_FMW_NAME, hqvdp->dev,
+ GFP_KERNEL, hqvdp, sti_hqvdp_start_xp70);
+ if (err) {
+ DRM_ERROR("Can't get HQVDP firmware\n");
+ return err;
+ }
+
+ layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
+ if (!layer) {
+ DRM_ERROR("Can't create HQVDP plane\n");
+ return -ENOMEM;
+ }
+
+ sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
+
+ return 0;
+}
+
+static void sti_hqvdp_unbind(struct device *dev,
+ struct device *master, void *data)
+{
+ /* do nothing */
+}
+
+static const struct component_ops sti_hqvdp_ops = {
+ .bind = sti_hqvdp_bind,
+ .unbind = sti_hqvdp_unbind,
+};
+
+static int sti_hqvdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *vtg_np;
+ struct sti_hqvdp *hqvdp;
+ struct resource *res;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
+ if (!hqvdp) {
+ DRM_ERROR("Failed to allocate HQVDP context\n");
+ return -ENOMEM;
+ }
+
+ hqvdp->dev = dev;
+
+ /* Get Memory resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ DRM_ERROR("Get memory resource failed\n");
+ return -ENXIO;
+ }
+ hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (hqvdp->regs == NULL) {
+ DRM_ERROR("Register mapping failed\n");
+ return -ENXIO;
+ }
+
+ /* Get clock resources */
+ hqvdp->clk = devm_clk_get(dev, "hqvdp");
+ hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
+ if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
+ DRM_ERROR("Cannot get clocks\n");
+ return -ENXIO;
+ }
+
+ /* Get reset resources */
+ hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
+ if (!IS_ERR(hqvdp->reset))
+ reset_control_deassert(hqvdp->reset);
+
+ vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
+ if (vtg_np)
+ hqvdp->vtg = of_vtg_find(vtg_np);
+
+ platform_set_drvdata(pdev, hqvdp);
+
+ return component_add(&pdev->dev, &sti_hqvdp_ops);
+}
+
+static int sti_hqvdp_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &sti_hqvdp_ops);
+ return 0;
+}
+
+static const struct of_device_id hqvdp_of_match[] = {
+ { .compatible = "st,stih407-hqvdp", },
+ { /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, hqvdp_of_match);
+
+struct platform_driver sti_hqvdp_driver = {
+ .driver = {
+ .name = "sti-hqvdp",
+ .owner = THIS_MODULE,
+ .of_match_table = hqvdp_of_match,
+ },
+ .probe = sti_hqvdp_probe,
+ .remove = sti_hqvdp_remove,
+};
+
+module_platform_driver(sti_hqvdp_driver);
+
+MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
new file mode 100644
index 000000000000..cd5ecd0a6dea
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.h
@@ -0,0 +1,12 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HQVDP_H_
+#define _STI_HQVDP_H_
+
+struct sti_layer *sti_hqvdp_create(struct device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_hqvdp_lut.h b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
new file mode 100644
index 000000000000..619af7f4384e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) STMicroelectronics SA 2014
+ * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#ifndef _STI_HQVDP_LUT_H_
+#define _STI_HQVDP_LUT_H_
+
+#define NB_COEF 128
+
+#define SHIFT_LUT_A_LEGACY 8
+#define SHIFT_LUT_B 8
+#define SHIFT_LUT_C_Y_LEGACY 8
+#define SHIFT_LUT_C_C_LEGACY 8
+#define SHIFT_LUT_D_Y_LEGACY 8
+#define SHIFT_LUT_D_C_LEGACY 8
+#define SHIFT_LUT_E_Y_LEGACY 8
+#define SHIFT_LUT_E_C_LEGACY 8
+#define SHIFT_LUT_F_Y_LEGACY 8
+#define SHIFT_LUT_F_C_LEGACY 8
+
+static const u32 coef_lut_a_legacy[NB_COEF] = {
+ 0x0000ffff, 0x00010000, 0x000100ff, 0x00000000,
+ 0x00000000, 0x00050000, 0xfffc00ff, 0x00000000,
+ 0x00000000, 0x00090000, 0xfff900fe, 0x00000000,
+ 0x00000000, 0x0010ffff, 0xfff600fb, 0x00000000,
+ 0x00000000, 0x0017fffe, 0xfff400f7, 0x00000000,
+ 0x00000000, 0x001ffffd, 0xfff200f2, 0x00000000,
+ 0x00000000, 0x0027fffc, 0xfff100ec, 0x00000000,
+ 0x00000000, 0x0030fffb, 0xfff000e5, 0x00000000,
+ 0x00000000, 0x003afffa, 0xffee00de, 0x00000000,
+ 0x00000000, 0x0044fff9, 0xffed00d6, 0x00000000,
+ 0x00000000, 0x004efff8, 0xffed00cd, 0x00000000,
+ 0x00000000, 0x0059fff6, 0xffed00c4, 0x00000000,
+ 0x00000000, 0x0064fff5, 0xffed00ba, 0x00000000,
+ 0x00000000, 0x006ffff3, 0xffee00b0, 0x00000000,
+ 0x00000000, 0x007afff2, 0xffee00a6, 0x00000000,
+ 0x00000000, 0x0085fff1, 0xffef009b, 0x00000000,
+ 0x00000000, 0x0090fff0, 0xfff00090, 0x00000000,
+ 0x00000000, 0x009bffef, 0xfff10085, 0x00000000,
+ 0x00000000, 0x00a6ffee, 0xfff2007a, 0x00000000,
+ 0x00000000, 0x00b0ffee, 0xfff3006f, 0x00000000,
+ 0x00000000, 0x00baffed, 0xfff50064, 0x00000000,
+ 0x00000000, 0x00c4ffed, 0xfff60059, 0x00000000,
+ 0x00000000, 0x00cdffed, 0xfff8004e, 0x00000000,
+ 0x00000000, 0x00d6ffed, 0xfff90044, 0x00000000,
+ 0x00000000, 0x00deffee, 0xfffa003a, 0x00000000,
+ 0x00000000, 0x00e5fff0, 0xfffb0030, 0x00000000,
+ 0x00000000, 0x00ecfff1, 0xfffc0027, 0x00000000,
+ 0x00000000, 0x00f2fff2, 0xfffd001f, 0x00000000,
+ 0x00000000, 0x00f7fff4, 0xfffe0017, 0x00000000,
+ 0x00000000, 0x00fbfff6, 0xffff0010, 0x00000000,
+ 0x00000000, 0x00fefff9, 0x00000009, 0x00000000,
+ 0x00000000, 0x00fffffc, 0x00000005, 0x00000000
+};
+
+static const u32 coef_lut_b[NB_COEF] = {
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000100, 0x00000000
+};
+
+static const u32 coef_lut_c_y_legacy[NB_COEF] = {
+ 0x00060004, 0x0038ffe1, 0x003800be, 0x0006ffe1,
+ 0x00050005, 0x0042ffe1, 0x003800b3, 0x0007ffe1,
+ 0x00040006, 0x0046ffe1, 0x003300b2, 0x0008ffe2,
+ 0x00030007, 0x004cffe1, 0x002e00b1, 0x0008ffe2,
+ 0x00020006, 0x0051ffe2, 0x002900b0, 0x0009ffe3,
+ 0x00010008, 0x0056ffe2, 0x002400ae, 0x0009ffe4,
+ 0xffff0008, 0x005cffe3, 0x001f00ad, 0x000affe4,
+ 0xfffe0008, 0x0062ffe4, 0x001a00ab, 0x000affe5,
+ 0xfffd000a, 0x0066ffe5, 0x001500a8, 0x000bffe6,
+ 0xfffc0009, 0x006bffe7, 0x001100a5, 0x000bffe8,
+ 0xfffa000a, 0x0070ffe8, 0x000d00a3, 0x000bffe9,
+ 0xfff9000b, 0x0076ffea, 0x0008009f, 0x000bffea,
+ 0xfff7000b, 0x007affec, 0x0005009b, 0x000cffec,
+ 0xfff6000b, 0x007effef, 0x00010098, 0x000cffed,
+ 0xfff4000b, 0x0084fff1, 0xfffd0095, 0x000cffee,
+ 0xfff3000b, 0x0088fff4, 0xfffa0090, 0x000cfff0,
+ 0xfff1000b, 0x008dfff7, 0xfff7008d, 0x000bfff1,
+ 0xfff0000c, 0x0090fffa, 0xfff40088, 0x000bfff3,
+ 0xffee000c, 0x0095fffd, 0xfff10084, 0x000bfff4,
+ 0xffed000c, 0x00980001, 0xffef007e, 0x000bfff6,
+ 0xffec000c, 0x009b0005, 0xffec007a, 0x000bfff7,
+ 0xffea000b, 0x009f0008, 0xffea0076, 0x000bfff9,
+ 0xffe9000b, 0x00a3000d, 0xffe80070, 0x000afffa,
+ 0xffe8000b, 0x00a50011, 0xffe7006b, 0x0009fffc,
+ 0xffe6000b, 0x00a80015, 0xffe50066, 0x000afffd,
+ 0xffe5000a, 0x00ab001a, 0xffe40062, 0x0008fffe,
+ 0xffe4000a, 0x00ad001f, 0xffe3005c, 0x0008ffff,
+ 0xffe40009, 0x00ae0024, 0xffe20056, 0x00080001,
+ 0xffe30009, 0x00b00029, 0xffe20051, 0x00060002,
+ 0xffe20008, 0x00b1002e, 0xffe1004c, 0x00070003,
+ 0xffe20008, 0x00b20033, 0xffe10046, 0x00060004,
+ 0xffe10007, 0x00b30038, 0xffe10042, 0x00050005
+};
+
+static const u32 coef_lut_c_c_legacy[NB_COEF] = {
+ 0x0001fff3, 0x003afffb, 0x003a00a1, 0x0001fffb,
+ 0x0001fff5, 0x0041fffb, 0x0038009a, 0x0001fffb,
+ 0x0001fff5, 0x0046fffb, 0x00340099, 0x0001fffb,
+ 0x0001fff7, 0x0049fffb, 0x00300098, 0x0001fffb,
+ 0x0001fff9, 0x004cfffb, 0x002d0096, 0x0001fffb,
+ 0x0001fffa, 0x004ffffc, 0x00290095, 0x0001fffb,
+ 0x0001fff9, 0x0054fffd, 0x00250093, 0x0001fffc,
+ 0x0001fffa, 0x0058fffd, 0x00220092, 0x0000fffc,
+ 0x0001fffb, 0x005bfffe, 0x001f0090, 0x0000fffc,
+ 0x0001fffd, 0x005effff, 0x001c008c, 0x0000fffd,
+ 0x0001fffd, 0x00620000, 0x0019008a, 0x0000fffd,
+ 0x0001fffe, 0x00660001, 0x00160088, 0xfffffffd,
+ 0x0000fffe, 0x006a0003, 0x00130085, 0xfffffffe,
+ 0x0000fffe, 0x006e0004, 0x00100083, 0xfffffffe,
+ 0x0000fffe, 0x00710006, 0x000e007f, 0xffffffff,
+ 0x0000fffe, 0x00750008, 0x000c007c, 0xfffeffff,
+ 0xfffffffe, 0x0079000a, 0x000a0079, 0xfffeffff,
+ 0xfffffffe, 0x007c000c, 0x00080075, 0xfffe0000,
+ 0xffffffff, 0x007f000e, 0x00060071, 0xfffe0000,
+ 0xfffeffff, 0x00830010, 0x0004006e, 0xfffe0000,
+ 0xfffeffff, 0x00850013, 0x0003006a, 0xfffe0000,
+ 0xfffdffff, 0x00880016, 0x00010066, 0xfffe0001,
+ 0xfffd0000, 0x008a0019, 0x00000062, 0xfffd0001,
+ 0xfffd0000, 0x008c001c, 0xffff005e, 0xfffd0001,
+ 0xfffc0000, 0x0090001f, 0xfffe005b, 0xfffb0001,
+ 0xfffc0000, 0x00920022, 0xfffd0058, 0xfffa0001,
+ 0xfffc0001, 0x00930025, 0xfffd0054, 0xfff90001,
+ 0xfffb0001, 0x00950029, 0xfffc004f, 0xfffa0001,
+ 0xfffb0001, 0x0096002d, 0xfffb004c, 0xfff90001,
+ 0xfffb0001, 0x00980030, 0xfffb0049, 0xfff70001,
+ 0xfffb0001, 0x00990034, 0xfffb0046, 0xfff50001,
+ 0xfffb0001, 0x009a0038, 0xfffb0041, 0xfff50001
+};
+
+static const u32 coef_lut_d_y_legacy[NB_COEF] = {
+ 0xfff80009, 0x0046ffec, 0x004600a3, 0xfff8ffec,
+ 0xfff70009, 0x004effed, 0x0044009d, 0xfff9ffeb,
+ 0xfff6000a, 0x0052ffee, 0x003f009d, 0xfffaffea,
+ 0xfff50009, 0x0057ffef, 0x003b009d, 0xfffbffe9,
+ 0xfff50008, 0x005bfff0, 0x0037009c, 0xfffcffe9,
+ 0xfff40008, 0x005ffff2, 0x0033009b, 0xfffcffe9,
+ 0xfff30007, 0x0064fff3, 0x002f009b, 0xfffdffe8,
+ 0xfff20007, 0x0068fff5, 0x002b0099, 0xfffeffe8,
+ 0xfff10008, 0x006bfff7, 0x00270097, 0xffffffe8,
+ 0xfff00007, 0x006ffff9, 0x00230097, 0xffffffe8,
+ 0xffef0006, 0x0073fffb, 0x00200095, 0x0000ffe8,
+ 0xffee0005, 0x0077fffe, 0x001c0093, 0x0000ffe9,
+ 0xffee0005, 0x007a0000, 0x00180091, 0x0001ffe9,
+ 0xffed0005, 0x007d0003, 0x0015008e, 0x0002ffe9,
+ 0xffec0005, 0x00800006, 0x0012008b, 0x0002ffea,
+ 0xffeb0004, 0x00840008, 0x000e008a, 0x0003ffea,
+ 0xffeb0003, 0x0087000b, 0x000b0087, 0x0003ffeb,
+ 0xffea0003, 0x008a000e, 0x00080084, 0x0004ffeb,
+ 0xffea0002, 0x008b0012, 0x00060080, 0x0005ffec,
+ 0xffe90002, 0x008e0015, 0x0003007d, 0x0005ffed,
+ 0xffe90001, 0x00910018, 0x0000007a, 0x0005ffee,
+ 0xffe90000, 0x0093001c, 0xfffe0077, 0x0005ffee,
+ 0xffe80000, 0x00950020, 0xfffb0073, 0x0006ffef,
+ 0xffe8ffff, 0x00970023, 0xfff9006f, 0x0007fff0,
+ 0xffe8ffff, 0x00970027, 0xfff7006b, 0x0008fff1,
+ 0xffe8fffe, 0x0099002b, 0xfff50068, 0x0007fff2,
+ 0xffe8fffd, 0x009b002f, 0xfff30064, 0x0007fff3,
+ 0xffe9fffc, 0x009b0033, 0xfff2005f, 0x0008fff4,
+ 0xffe9fffc, 0x009c0037, 0xfff0005b, 0x0008fff5,
+ 0xffe9fffb, 0x009d003b, 0xffef0057, 0x0009fff5,
+ 0xffeafffa, 0x009d003f, 0xffee0052, 0x000afff6,
+ 0xffebfff9, 0x009d0044, 0xffed004e, 0x0009fff7
+};
+
+static const u32 coef_lut_d_c_legacy[NB_COEF] = {
+ 0xfffeffff, 0x003fffff, 0x003f0089, 0xfffeffff,
+ 0xfffe0000, 0x00460000, 0x0042007d, 0xfffffffe,
+ 0xfffe0000, 0x00490001, 0x003f007d, 0xfffffffd,
+ 0xfffd0001, 0x004b0002, 0x003c007d, 0x0000fffc,
+ 0xfffd0001, 0x004e0003, 0x0039007c, 0x0000fffc,
+ 0xfffc0001, 0x00510005, 0x0036007c, 0x0000fffb,
+ 0xfffc0001, 0x00540006, 0x0033007b, 0x0001fffa,
+ 0xfffc0003, 0x00550008, 0x00310078, 0x0001fffa,
+ 0xfffb0003, 0x00580009, 0x002e0078, 0x0001fffa,
+ 0xfffb0002, 0x005b000b, 0x002b0077, 0x0002fff9,
+ 0xfffa0003, 0x005e000d, 0x00280075, 0x0002fff9,
+ 0xfffa0002, 0x0060000f, 0x00260074, 0x0002fff9,
+ 0xfffa0004, 0x00610011, 0x00230072, 0x0002fff9,
+ 0xfffa0004, 0x00640013, 0x00200070, 0x0002fff9,
+ 0xfff90004, 0x00660015, 0x001e006e, 0x0003fff9,
+ 0xfff90004, 0x00680017, 0x001c006c, 0x0003fff9,
+ 0xfff90003, 0x006b0019, 0x0019006b, 0x0003fff9,
+ 0xfff90003, 0x006c001c, 0x00170068, 0x0004fff9,
+ 0xfff90003, 0x006e001e, 0x00150066, 0x0004fff9,
+ 0xfff90002, 0x00700020, 0x00130064, 0x0004fffa,
+ 0xfff90002, 0x00720023, 0x00110061, 0x0004fffa,
+ 0xfff90002, 0x00740026, 0x000f0060, 0x0002fffa,
+ 0xfff90002, 0x00750028, 0x000d005e, 0x0003fffa,
+ 0xfff90002, 0x0077002b, 0x000b005b, 0x0002fffb,
+ 0xfffa0001, 0x0078002e, 0x00090058, 0x0003fffb,
+ 0xfffa0001, 0x00780031, 0x00080055, 0x0003fffc,
+ 0xfffa0001, 0x007b0033, 0x00060054, 0x0001fffc,
+ 0xfffb0000, 0x007c0036, 0x00050051, 0x0001fffc,
+ 0xfffc0000, 0x007c0039, 0x0003004e, 0x0001fffd,
+ 0xfffc0000, 0x007d003c, 0x0002004b, 0x0001fffd,
+ 0xfffdffff, 0x007d003f, 0x00010049, 0x0000fffe,
+ 0xfffeffff, 0x007d0042, 0x00000046, 0x0000fffe
+};
+
+static const u32 coef_lut_e_y_legacy[NB_COEF] = {
+ 0xfff10001, 0x00490004, 0x00490083, 0xfff10004,
+ 0xfff10000, 0x00500006, 0x004b007b, 0xfff10002,
+ 0xfff10000, 0x00530007, 0x0048007b, 0xfff10001,
+ 0xfff10000, 0x00550009, 0x0046007a, 0xfff10000,
+ 0xfff1fffe, 0x0058000b, 0x0043007b, 0xfff2fffe,
+ 0xfff1ffff, 0x005a000d, 0x0040007a, 0xfff2fffd,
+ 0xfff1fffd, 0x005d000f, 0x003e007a, 0xfff2fffc,
+ 0xfff1fffd, 0x005f0011, 0x003b0079, 0xfff3fffb,
+ 0xfff1fffc, 0x00610013, 0x00390079, 0xfff3fffa,
+ 0xfff1fffb, 0x00640015, 0x00360079, 0xfff3fff9,
+ 0xfff1fffa, 0x00660017, 0x00340078, 0xfff4fff8,
+ 0xfff1fffb, 0x00680019, 0x00310077, 0xfff4fff7,
+ 0xfff2fff9, 0x006a001b, 0x002f0076, 0xfff5fff6,
+ 0xfff2fff9, 0x006c001e, 0x002c0075, 0xfff5fff5,
+ 0xfff2fff9, 0x006d0020, 0x002a0073, 0xfff6fff5,
+ 0xfff3fff7, 0x00700022, 0x00270073, 0xfff6fff4,
+ 0xfff3fff7, 0x00710025, 0x00250071, 0xfff7fff3,
+ 0xfff4fff6, 0x00730027, 0x00220070, 0xfff7fff3,
+ 0xfff5fff6, 0x0073002a, 0x0020006d, 0xfff9fff2,
+ 0xfff5fff5, 0x0075002c, 0x001e006c, 0xfff9fff2,
+ 0xfff6fff5, 0x0076002f, 0x001b006a, 0xfff9fff2,
+ 0xfff7fff4, 0x00770031, 0x00190068, 0xfffbfff1,
+ 0xfff8fff4, 0x00780034, 0x00170066, 0xfffafff1,
+ 0xfff9fff3, 0x00790036, 0x00150064, 0xfffbfff1,
+ 0xfffafff3, 0x00790039, 0x00130061, 0xfffcfff1,
+ 0xfffbfff3, 0x0079003b, 0x0011005f, 0xfffdfff1,
+ 0xfffcfff2, 0x007a003e, 0x000f005d, 0xfffdfff1,
+ 0xfffdfff2, 0x007a0040, 0x000d005a, 0xfffffff1,
+ 0xfffefff2, 0x007b0043, 0x000b0058, 0xfffefff1,
+ 0x0000fff1, 0x007a0046, 0x00090055, 0x0000fff1,
+ 0x0001fff1, 0x007b0048, 0x00070053, 0x0000fff1,
+ 0x0002fff1, 0x007b004b, 0x00060050, 0x0000fff1
+};
+
+static const u32 coef_lut_e_c_legacy[NB_COEF] = {
+ 0xfffa0001, 0x003f0010, 0x003f006d, 0xfffa0010,
+ 0xfffb0002, 0x00440011, 0x00440062, 0xfffa000e,
+ 0xfffb0001, 0x00460013, 0x00420062, 0xfffa000d,
+ 0xfffb0000, 0x00480014, 0x00410062, 0xfffa000c,
+ 0xfffb0001, 0x00490015, 0x003f0061, 0xfffb000b,
+ 0xfffb0000, 0x004b0017, 0x003d0061, 0xfffb000a,
+ 0xfffb0000, 0x004d0018, 0x003b0062, 0xfffb0008,
+ 0xfffcffff, 0x004f001a, 0x00390061, 0xfffb0007,
+ 0xfffc0000, 0x004f001c, 0x00380060, 0xfffb0006,
+ 0xfffcffff, 0x0052001d, 0x00360060, 0xfffb0005,
+ 0xfffdfffe, 0x0053001f, 0x00340060, 0xfffb0004,
+ 0xfffdfffe, 0x00540021, 0x0032005e, 0xfffc0004,
+ 0xfffeffff, 0x00550022, 0x0030005d, 0xfffc0003,
+ 0xfffeffff, 0x00560024, 0x002f005c, 0xfffc0002,
+ 0xfffffffd, 0x00580026, 0x002d005c, 0xfffc0001,
+ 0xfffffffd, 0x005a0027, 0x002b005c, 0xfffc0000,
+ 0x0000fffd, 0x005a0029, 0x0029005a, 0xfffd0000,
+ 0x0000fffc, 0x005c002b, 0x0027005a, 0xfffdffff,
+ 0x0001fffc, 0x005c002d, 0x00260058, 0xfffdffff,
+ 0x0002fffc, 0x005c002f, 0x00240056, 0xfffffffe,
+ 0x0003fffc, 0x005d0030, 0x00220055, 0xfffffffe,
+ 0x0004fffc, 0x005e0032, 0x00210054, 0xfffefffd,
+ 0x0004fffb, 0x00600034, 0x001f0053, 0xfffefffd,
+ 0x0005fffb, 0x00600036, 0x001d0052, 0xfffffffc,
+ 0x0006fffb, 0x00600038, 0x001c004f, 0x0000fffc,
+ 0x0007fffb, 0x00610039, 0x001a004f, 0xfffffffc,
+ 0x0008fffb, 0x0062003b, 0x0018004d, 0x0000fffb,
+ 0x000afffb, 0x0061003d, 0x0017004b, 0x0000fffb,
+ 0x000bfffb, 0x0061003f, 0x00150049, 0x0001fffb,
+ 0x000cfffa, 0x00620041, 0x00140048, 0x0000fffb,
+ 0x000dfffa, 0x00620042, 0x00130046, 0x0001fffb,
+ 0x000efffa, 0x00620044, 0x00110044, 0x0002fffb
+};
+
+static const u32 coef_lut_f_y_legacy[NB_COEF] = {
+ 0xfff6fff0, 0x00490012, 0x0049006e, 0xfff60012,
+ 0xfff7fff1, 0x004e0013, 0x00490068, 0xfff60010,
+ 0xfff7fff2, 0x004f0015, 0x00470067, 0xfff6000f,
+ 0xfff7fff5, 0x004f0017, 0x00450065, 0xfff6000e,
+ 0xfff8fff5, 0x00500018, 0x00440065, 0xfff6000c,
+ 0xfff8fff6, 0x0051001a, 0x00420064, 0xfff6000b,
+ 0xfff8fff6, 0x0052001c, 0x00400064, 0xfff6000a,
+ 0xfff9fff6, 0x0054001d, 0x003e0064, 0xfff60008,
+ 0xfff9fff8, 0x0054001f, 0x003c0063, 0xfff60007,
+ 0xfffafff8, 0x00550021, 0x003a0062, 0xfff60006,
+ 0xfffbfff7, 0x00560022, 0x00390062, 0xfff60005,
+ 0xfffbfff8, 0x00570024, 0x00370061, 0xfff60004,
+ 0xfffcfff8, 0x00580026, 0x00350060, 0xfff60003,
+ 0xfffdfff8, 0x00590028, 0x0033005f, 0xfff60002,
+ 0xfffdfff7, 0x005b002a, 0x0031005f, 0xfff60001,
+ 0xfffefff7, 0x005c002c, 0x002f005e, 0xfff60000,
+ 0xfffffff6, 0x005e002d, 0x002d005e, 0xfff6ffff,
+ 0x0000fff6, 0x005e002f, 0x002c005c, 0xfff7fffe,
+ 0x0001fff6, 0x005f0031, 0x002a005b, 0xfff7fffd,
+ 0x0002fff6, 0x005f0033, 0x00280059, 0xfff8fffd,
+ 0x0003fff6, 0x00600035, 0x00260058, 0xfff8fffc,
+ 0x0004fff6, 0x00610037, 0x00240057, 0xfff8fffb,
+ 0x0005fff6, 0x00620039, 0x00220056, 0xfff7fffb,
+ 0x0006fff6, 0x0062003a, 0x00210055, 0xfff8fffa,
+ 0x0007fff6, 0x0063003c, 0x001f0054, 0xfff8fff9,
+ 0x0008fff6, 0x0064003e, 0x001d0054, 0xfff6fff9,
+ 0x000afff6, 0x00640040, 0x001c0052, 0xfff6fff8,
+ 0x000bfff6, 0x00640042, 0x001a0051, 0xfff6fff8,
+ 0x000cfff6, 0x00650044, 0x00180050, 0xfff5fff8,
+ 0x000efff6, 0x00650045, 0x0017004f, 0xfff5fff7,
+ 0x000ffff6, 0x00670047, 0x0015004f, 0xfff2fff7,
+ 0x0010fff6, 0x00680049, 0x0013004e, 0xfff1fff7
+};
+
+static const u32 coef_lut_f_c_legacy[NB_COEF] = {
+ 0x0000fffb, 0x003a001a, 0x003a005d, 0x0000001a,
+ 0x0001fffb, 0x003f001b, 0x00400051, 0x00000019,
+ 0x0001fffc, 0x0040001c, 0x003f0051, 0x00000017,
+ 0x0002fffb, 0x0042001d, 0x003e0051, 0xffff0016,
+ 0x0002fffb, 0x0043001e, 0x003d0051, 0xffff0015,
+ 0x0003fffc, 0x00430020, 0x003b0050, 0xffff0014,
+ 0x0003fffb, 0x00450021, 0x003a0051, 0xfffe0013,
+ 0x0004fffc, 0x00450022, 0x00390050, 0xfffe0012,
+ 0x0005fffc, 0x00460023, 0x0038004f, 0xfffe0011,
+ 0x0005fffb, 0x00480025, 0x00360050, 0xfffd0010,
+ 0x0006fffc, 0x00480026, 0x0035004f, 0xfffd000f,
+ 0x0006fffc, 0x00490027, 0x0034004f, 0xfffd000e,
+ 0x0007fffd, 0x00490028, 0x0033004e, 0xfffd000d,
+ 0x0008fffc, 0x004a002a, 0x0031004d, 0xfffd000d,
+ 0x0009fffd, 0x004a002b, 0x0030004d, 0xfffc000c,
+ 0x0009fffc, 0x004c002c, 0x002f004d, 0xfffc000b,
+ 0x000afffc, 0x004c002e, 0x002e004c, 0xfffc000a,
+ 0x000bfffc, 0x004d002f, 0x002c004c, 0xfffc0009,
+ 0x000cfffc, 0x004d0030, 0x002b004a, 0xfffd0009,
+ 0x000dfffd, 0x004d0031, 0x002a004a, 0xfffc0008,
+ 0x000dfffd, 0x004e0033, 0x00280049, 0xfffd0007,
+ 0x000efffd, 0x004f0034, 0x00270049, 0xfffc0006,
+ 0x000ffffd, 0x004f0035, 0x00260048, 0xfffc0006,
+ 0x0010fffd, 0x00500036, 0x00250048, 0xfffb0005,
+ 0x0011fffe, 0x004f0038, 0x00230046, 0xfffc0005,
+ 0x0012fffe, 0x00500039, 0x00220045, 0xfffc0004,
+ 0x0013fffe, 0x0051003a, 0x00210045, 0xfffb0003,
+ 0x0014ffff, 0x0050003b, 0x00200043, 0xfffc0003,
+ 0x0015ffff, 0x0051003d, 0x001e0043, 0xfffb0002,
+ 0x0016ffff, 0x0051003e, 0x001d0042, 0xfffb0002,
+ 0x00170000, 0x0051003f, 0x001c0040, 0xfffc0001,
+ 0x00190000, 0x00510040, 0x001b003f, 0xfffb0001
+};
+
+#endif
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
index 06a587c4f1bb..899104f9d4bc 100644
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -11,7 +11,9 @@
#include <drm/drm_fb_cma_helper.h>
#include "sti_compositor.h"
+#include "sti_cursor.h"
#include "sti_gdp.h"
+#include "sti_hqvdp.h"
#include "sti_layer.h"
#include "sti_vid.h"
@@ -32,10 +34,13 @@ const char *sti_layer_to_str(struct sti_layer *layer)
return "VID1";
case STI_CURSOR:
return "CURSOR";
+ case STI_HQVDP_0:
+ return "HQVDP0";
default:
return "<UNKNOWN LAYER>";
}
}
+EXPORT_SYMBOL(sti_layer_to_str);
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr)
@@ -50,6 +55,12 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
case STI_VID:
layer = sti_vid_create(dev);
break;
+ case STI_CUR:
+ layer = sti_cursor_create(dev);
+ break;
+ case STI_VDP:
+ layer = sti_hqvdp_create(dev);
+ break;
}
if (!layer) {
@@ -67,8 +78,11 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
return layer;
}
+EXPORT_SYMBOL(sti_layer_create);
-int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+int sti_layer_prepare(struct sti_layer *layer,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_display_mode *mode, int mixer_id,
int dest_x, int dest_y, int dest_w, int dest_h,
int src_x, int src_y, int src_w, int src_h)
@@ -88,6 +102,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
return 1;
}
+ layer->crtc = crtc;
layer->fb = fb;
layer->mode = mode;
layer->mixer_id = mixer_id;
@@ -100,6 +115,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
layer->src_w = src_w;
layer->src_h = src_h;
layer->format = fb->pixel_format;
+ layer->vaddr = cma_obj->vaddr;
layer->paddr = cma_obj->paddr;
for (i = 0; i < 4; i++) {
layer->pitches[i] = fb->pitches[i];
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
index 198c3774cc12..ceff497f557e 100644
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -22,7 +22,8 @@ enum sti_layer_type {
STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
- STI_BCK = 4 << STI_LAYER_TYPE_SHIFT
+ STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
+ STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
};
enum sti_layer_id_of_type {
@@ -39,6 +40,7 @@ enum sti_layer_desc {
STI_GDP_3 = STI_GDP | STI_ID_3,
STI_VID_0 = STI_VID | STI_ID_0,
STI_VID_1 = STI_VID | STI_ID_1,
+ STI_HQVDP_0 = STI_VDP | STI_ID_0,
STI_CURSOR = STI_CUR,
STI_BACK = STI_BCK
};
@@ -67,6 +69,7 @@ struct sti_layer_funcs {
*
* @plane: drm plane it is bound to (if any)
* @fb: drm fb it is bound to
+ * @crtc: crtc it is bound to
* @mode: display mode
* @desc: layer type & id
* @device: driver device
@@ -82,11 +85,13 @@ struct sti_layer_funcs {
* @format: format
* @pitches: pitch of 'planes' (eg: Y, U, V)
* @offsets: offset of 'planes'
+ * @vaddr: virtual address of the input buffer
* @paddr: physical address of the input buffer
*/
struct sti_layer {
struct drm_plane plane;
struct drm_framebuffer *fb;
+ struct drm_crtc *crtc;
struct drm_display_mode *mode;
enum sti_layer_desc desc;
struct device *dev;
@@ -102,12 +107,15 @@ struct sti_layer {
uint32_t format;
unsigned int pitches[4];
unsigned int offsets[4];
+ void *vaddr;
dma_addr_t paddr;
};
struct sti_layer *sti_layer_create(struct device *dev, int desc,
void __iomem *baseaddr);
-int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
+int sti_layer_prepare(struct sti_layer *layer,
+ struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_display_mode *mode,
int mixer_id,
int dest_x, int dest_y,
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 79f369db9fb6..13a4b84deab6 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -45,6 +45,7 @@ static const u32 mixerColorSpaceMatIdentity[] = {
#define GAM_CTL_GDP1_MASK BIT(4)
#define GAM_CTL_GDP2_MASK BIT(5)
#define GAM_CTL_GDP3_MASK BIT(6)
+#define GAM_CTL_CURSOR_MASK BIT(9)
const char *sti_mixer_to_str(struct sti_mixer *mixer)
{
@@ -122,11 +123,15 @@ int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
layer_id = GAM_DEPTH_GDP3_ID;
break;
case STI_VID_0:
+ case STI_HQVDP_0:
layer_id = GAM_DEPTH_VID0_ID;
break;
case STI_VID_1:
layer_id = GAM_DEPTH_VID1_ID;
break;
+ case STI_CURSOR:
+ /* no need to set depth for cursor */
+ return 0;
default:
DRM_ERROR("Unknown layer %d\n", layer->desc);
return 1;
@@ -185,9 +190,12 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
case STI_GDP_3:
return GAM_CTL_GDP3_MASK;
case STI_VID_0:
+ case STI_HQVDP_0:
return GAM_CTL_VID0_MASK;
case STI_VID_1:
return GAM_CTL_VID1_MASK;
+ case STI_CURSOR:
+ return GAM_CTL_CURSOR_MASK;
default:
return 0;
}
@@ -215,6 +223,15 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
return 0;
}
+void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
+{
+ u32 val;
+
+ DRM_DEBUG_DRIVER("%s clear all layers\n", sti_mixer_to_str(mixer));
+ val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
+ sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
+}
+
void sti_mixer_set_matrix(struct sti_mixer *mixer)
{
unsigned int i;
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 874372102e52..b97282182908 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -23,6 +23,7 @@
* @id: id of the mixer
* @drm_crtc: crtc object link to the mixer
* @pending_event: set if a flip event is pending on crtc
+ * @enabled: true when the mixer is active
*/
struct sti_mixer {
struct device *dev;
@@ -30,6 +31,7 @@ struct sti_mixer {
int id;
struct drm_crtc drm_crtc;
struct drm_pending_vblank_event *pending_event;
+ bool enabled;
};
const char *sti_mixer_to_str(struct sti_mixer *mixer);
@@ -39,6 +41,7 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id,
int sti_mixer_set_layer_status(struct sti_mixer *mixer,
struct sti_layer *layer, bool status);
+void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
int sti_mixer_active_video_area(struct sti_mixer *mixer,
struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index b8afe490356a..cb924aa2b321 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,6 +16,8 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include "sti_drm_crtc.h"
+
/* glue registers */
#define TVO_CSC_MAIN_M0 0x000
#define TVO_CSC_MAIN_M1 0x004
@@ -96,7 +98,7 @@
#define TVO_SYNC_HD_DCS_SHIFT 8
-#define ENCODER_MAIN_CRTC_MASK BIT(0)
+#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
/* enum listing the supported output data format */
enum sti_tvout_video_out_type {
@@ -149,14 +151,15 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
* Set the color channel order of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @cr_r:
* @y_g:
* @cb_b:
*/
-static void tvout_vip_set_color_order(struct sti_tvout *tvout,
+static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
u32 cr_r, u32 y_g, u32 cb_b)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
@@ -165,52 +168,58 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout,
val |= y_g << TVO_VIP_REORDER_G_SHIFT;
val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Set the clipping mode of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @range: clipping range
*/
-static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, u32 range)
+static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
val |= range << TVO_VIP_CLIP_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Set the rounded value of a VIP
*
* @tvout: tvout structure
+ * @reg: register to set
* @rnd: rounded val per component
*/
-static void tvout_vip_set_rnd(struct sti_tvout *tvout, u32 rnd)
+static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
val |= rnd << TVO_VIP_RND_SHIFT;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Select the VIP input
*
* @tvout: tvout structure
+ * @reg: register to set
+ * @main_path: main or auxiliary path
+ * @sel_input_logic_inverted: need to invert the logic
* @sel_input: selected_input (main/aux + conv)
*/
static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
+ int reg,
bool main_path,
bool sel_input_logic_inverted,
enum sti_tvout_video_out_type video_out)
{
u32 sel_input;
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
if (main_path)
sel_input = TVO_VIP_SEL_INPUT_MAIN;
@@ -232,22 +241,24 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
val &= ~TVO_VIP_SEL_INPUT_MASK;
val |= sel_input;
- tvout_write(tvout, val, TVO_VIP_HDMI);
+ tvout_write(tvout, val, reg);
}
/**
* Select the input video signed or unsigned
*
* @tvout: tvout structure
+ * @reg: register to set
* @in_vid_signed: used video input format
*/
-static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt)
+static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
+ int reg, u32 in_vid_fmt)
{
- u32 val = tvout_read(tvout, TVO_VIP_HDMI);
+ u32 val = tvout_read(tvout, reg);
val &= ~TVO_IN_FMT_SIGNED;
val |= in_vid_fmt;
- tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT);
+ tvout_write(tvout, val, reg);
}
/**
@@ -261,6 +272,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
{
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
dev_dbg(tvout->dev, "%s\n", __func__);
@@ -268,33 +280,36 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
DRM_DEBUG_DRIVER("main vip for hdmi\n");
/* select the input sync for hdmi = VTG set 1 */
tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
} else {
DRM_DEBUG_DRIVER("aux vip for hdmi\n");
/* select the input sync for hdmi = VTG set 1 */
tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
/* set color channel order */
- tvout_vip_set_color_order(tvout,
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
/* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
+ TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
/* set round mode (rounded to 8-bit per component) */
- tvout_vip_set_rnd(tvout, TVO_VIP_RND_8BIT_ROUNDED);
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
if (of_device_is_compatible(node, "st,stih407-tvout")) {
/* set input video format */
- tvout_vip_set_in_vid_fmt(tvout->regs + TVO_MAIN_IN_VID_FORMAT,
- TVO_IN_FMT_SIGNED);
+ tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
+ TVO_IN_FMT_SIGNED);
sel_input_logic_inverted = true;
}
/* input selection */
- tvout_vip_set_sel_input(tvout, main_path,
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
}
@@ -309,48 +324,47 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
{
struct device_node *node = tvout->dev->of_node;
bool sel_input_logic_inverted = false;
+ u32 tvo_in_vid_format;
+ int val;
dev_dbg(tvout->dev, "%s\n", __func__);
- if (!main_path) {
- DRM_ERROR("HD Analog on aux not implemented\n");
- return;
+ if (main_path) {
+ val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_MAIN_VTG_SET_3;
+ tvout_write(tvout, val, TVO_HD_SYNC_SEL);
+ tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
+ } else {
+ val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
+ val |= TVO_SYNC_AUX_VTG_SET_3;
+ tvout_write(tvout, val, TVO_HD_SYNC_SEL);
+ tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
}
- DRM_DEBUG_DRIVER("main vip for HDF\n");
-
/* set color channel order */
- tvout_vip_set_color_order(tvout->regs + TVO_VIP_HDF,
+ tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
TVO_VIP_REORDER_CR_R_SEL,
TVO_VIP_REORDER_Y_G_SEL,
TVO_VIP_REORDER_CB_B_SEL);
- /* set clipping mode (Limited range RGB/Y) */
- tvout_vip_set_clip_mode(tvout->regs + TVO_VIP_HDF,
- TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
+ /* set clipping mode (EAV/SAV clipping) */
+ tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
/* set round mode (rounded to 10-bit per component) */
- tvout_vip_set_rnd(tvout->regs + TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
+ tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
if (of_device_is_compatible(node, "st,stih407-tvout")) {
/* set input video format */
- tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED);
+ tvout_vip_set_in_vid_fmt(tvout,
+ tvo_in_vid_format, TVO_IN_FMT_SIGNED);
sel_input_logic_inverted = true;
}
/* Input selection */
- tvout_vip_set_sel_input(tvout->regs + TVO_VIP_HDF,
- main_path,
+ tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
sel_input_logic_inverted,
STI_TVOUT_VIDEO_OUT_YUV);
- /* select the input sync for HD analog = VTG set 3
- * and HD DCS = VTG set 2 */
- tvout_write(tvout,
- (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
- | TVO_SYNC_MAIN_VTG_SET_3,
- TVO_HD_SYNC_SEL);
-
/* power up HD DAC */
tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
}
@@ -392,7 +406,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hda_start(tvout, true);
+ tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
}
static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -429,7 +443,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
drm_encoder = (struct drm_encoder *) encoder;
- drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder->possible_clones = 1 << 0;
drm_encoder_init(dev, drm_encoder,
@@ -444,7 +458,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
{
struct sti_tvout *tvout = to_sti_tvout(encoder);
- tvout_hdmi_start(tvout, true);
+ tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
}
static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -478,7 +492,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
drm_encoder = (struct drm_encoder *) encoder;
- drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK;
+ drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
drm_encoder->possible_clones = 1 << 1;
drm_encoder_init(dev, drm_encoder,
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 740d6e347a62..9564f2568e2c 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -51,10 +51,19 @@
#define VTG_TOP_V_HD_3 0x010C
#define VTG_BOT_V_HD_3 0x0110
+#define VTG_H_HD_4 0x0120
+#define VTG_TOP_V_VD_4 0x0124
+#define VTG_BOT_V_VD_4 0x0128
+#define VTG_TOP_V_HD_4 0x012c
+#define VTG_BOT_V_HD_4 0x0130
+
#define VTG_IRQ_BOTTOM BIT(0)
#define VTG_IRQ_TOP BIT(1)
#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
+/* Delay introduced by the HDMI, in number of pixels */
+#define HDMI_DELAY (6)
+
/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
#define AWG_DELAY_HD (-9)
#define AWG_DELAY_ED (-8)
@@ -133,10 +142,10 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(tmp, vtg->regs + VTG_VID_TFS);
writel(tmp, vtg->regs + VTG_VID_BFS);
- /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */
- tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ /* prepare VTG set 1 for HDMI */
+ tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
+ tmp |= HDMI_DELAY;
writel(tmp, vtg->regs + VTG_H_HD_1);
- writel(tmp, vtg->regs + VTG_H_HD_2);
tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
tmp |= 1;
@@ -146,6 +155,11 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(0, vtg->regs + VTG_BOT_V_HD_1);
/* prepare VTG set 2 for HD DCS */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_2);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
writel(0, vtg->regs + VTG_TOP_V_HD_2);
@@ -166,6 +180,17 @@ static void vtg_set_mode(struct sti_vtg *vtg,
writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
+ /* Prepare VTG set 4 for DVO */
+ tmp = (mode->hsync_end - mode->hsync_start) << 16;
+ writel(tmp, vtg->regs + VTG_H_HD_4);
+
+ tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
+ tmp |= 1;
+ writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
+ writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
+ writel(0, vtg->regs + VTG_TOP_V_HD_4);
+ writel(0, vtg->regs + VTG_BOT_V_HD_4);
+
/* mode */
writel(type, vtg->regs + VTG_MODE);
}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8ce508e76208..3820ae97a030 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
*/
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr)
+ struct list_head *list, bool intr,
+ struct list_head *dups)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
__ttm_bo_unreserve(bo);
ret = -EBUSY;
+
+ } else if (ret == -EALREADY && dups) {
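+ /* this buffer is an already-reserved duplicate: park it on @dups and keep reserving the rest */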
+ struct ttm_validate_buffer *safe = entry;
+ entry = list_prev_entry(entry, head);
+ list_del(&safe->head);
+ list_add(&safe->head, dups);
+ continue;
}
if (!ret) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index db7621828bc7..7b5d22110f25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1062,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
vmaster = vmw_master_check(dev, file_priv, flags);
if (unlikely(IS_ERR(vmaster))) {
- DRM_INFO("IOCTL ERROR %d\n", nr);
- return PTR_ERR(vmaster);
+ ret = PTR_ERR(vmaster);
+
+ if (ret != -ERESTARTSYS)
+ DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
+ nr, ret);
+ return ret;
}
ret = ioctl_func(filp, cmd, arg);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 596cd6dafd33..33176d05db35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_err_nores;
- ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+ ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+ true, NULL);
if (unlikely(ret != 0))
goto out_err;
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
query_val.shared = false;
list_add_tail(&query_val.head, &validate_list);
- ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+ ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+ false, NULL);
if (unlikely(ret != 0)) {
vmw_execbuf_unpin_panic(dev_priv);
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 197164fd7803..b7594cb758af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
- struct vmw_fence_manager *fman = fman_from_fence(fence);
-
fence_free(&fence->base);
-
- /*
- * Free kernel space accounting.
- */
- ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
- fman->fence_size);
}
int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
struct vmw_fence_obj **p_fence)
{
- struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
struct vmw_fence_obj *fence;
int ret;
- ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (unlikely(fence == NULL)) {
- ret = -ENOMEM;
- goto out_no_object;
- }
+ if (unlikely(fence == NULL))
+ return -ENOMEM;
ret = vmw_fence_obj_init(fman, fence, seqno,
vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
out_err_init:
kfree(fence);
-out_no_object:
- ttm_mem_global_free(mem_glob, fman->fence_size);
return ret;
}
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
if (ret != 0)
goto out_no_queue;
+ return 0;
+
out_no_queue:
event->base.destroy(&event->base);
out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
BUG_ON(fence == NULL);
- if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
- else
- ret = vmw_event_fence_action_create(file_priv, fence,
- arg->flags,
- arg->user_data,
- true);
-
+ ret = vmw_event_fence_action_create(file_priv, fence,
+ arg->flags,
+ arg->user_data,
+ true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to attach event to fence.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 026de7cea0f6..210ef15b1d09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
val_buf->bo = ttm_bo_reference(&res->backup->base);
val_buf->shared = false;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
if (unlikely(ret != 0))
goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 8719fb3cccc9..6a4584a43aa6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->mem.start;
- cmd->body.offsetInBytes = 0;
+ cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 63f3f03ecc9b..c604f4c3ac0d 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -111,6 +111,8 @@
#define CDNS_I2C_DIVA_MAX 4
#define CDNS_I2C_DIVB_MAX 64
+#define CDNS_I2C_TIMEOUT_MAX 0xFF
+
#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
goto err_clk_dis;
}
+ /*
+ * The Cadence I2C controller has a bug wherein it generates an
+ * invalid read transaction after a HW timeout in master receiver mode.
+ * HW timeout is not used by this driver and the interrupt is disabled.
+ * But the feature itself cannot be disabled. Hence maximum value
+ * is written to this register to reduce the chances of error.
+ */
+ cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
+
dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index d15b7c9b9219..01f0cd87a4a5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -407,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
return msg->len;
- if (stop) {
- w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
- w |= DAVINCI_I2C_MDR_STP;
- davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
- }
+ w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+ w |= DAVINCI_I2C_MDR_STP;
+ davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
return -EREMOTEIO;
}
return -EIO;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index edca99dbba23..23628b7bfb8d 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -359,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
}
/* Configure Tx/Rx FIFO threshold levels */
- dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL);
+ dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
dw_writel(dev, 0, DW_IC_RX_TL);
/* configure the i2c master */
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 26942c159de1..277a2288d4a8 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -922,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
if (stat & OMAP_I2C_STAT_NACK) {
err |= OMAP_I2C_STAT_NACK;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
- break;
}
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
- break;
}
/*
@@ -954,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
if (dev->fifo_size)
num_bytes = dev->buf_len;
- omap_i2c_receive_data(dev, num_bytes, true);
-
- if (dev->errata & I2C_OMAP_ERRATA_I207)
+ if (dev->errata & I2C_OMAP_ERRATA_I207) {
i2c_omap_errata_i207(dev, stat);
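+ /* re-read the pending RX byte count from BUFSTAT bits 13:8 */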
+ num_bytes = (omap_i2c_read_reg(dev,
+ OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+ }
+ omap_i2c_receive_data(dev, num_bytes, true);
omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
continue;
}
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index bc203485716d..8afa28e4570e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
err_free_client:
evdev_detach_client(evdev, client);
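+ /* client may have come from kzalloc or vzalloc; kvfree() frees either */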
- kfree(client);
+ kvfree(client);
return error;
}
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index dd5112265cc9..d0a1261eb1ba 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -152,6 +152,18 @@ config OMAP_IOMMU_DEBUG
Say N unless you know you need this.
+config ROCKCHIP_IOMMU
+ bool "Rockchip IOMMU Support"
+ depends on ARCH_ROCKCHIP
+ select IOMMU_API
+ select ARM_DMA_USE_IOMMU
+ help
+ Support for IOMMUs found on Rockchip rk32xx SoCs.
+ These IOMMUs allow virtualization of the address space used by most
+ cores within the multimedia subsystem.
+ Say Y here if you are using a Rockchip SoC that includes an IOMMU
+ device.
+
config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support"
depends on ARCH_TEGRA_2x_SOC
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..3e47ef35a35f 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
+obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644
index 000000000000..b2023af384b9
--- /dev/null
+++ b/drivers/iommu/rockchip-iommu.c
@@ -0,0 +1,1038 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/cacheflush.h>
+#include <asm/pgtable.h>
+#include <linux/compiler.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iommu.h>
+#include <linux/jiffies.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/** MMU register offsets */
+#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
+#define RK_MMU_STATUS 0x04
+#define RK_MMU_COMMAND 0x08
+#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
+#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shootdown one IOTLB entry */
+#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
+#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
+#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
+#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
+#define RK_MMU_AUTO_GATING 0x24
+
+#define DTE_ADDR_DUMMY 0xCAFEBABE
+#define FORCE_RESET_TIMEOUT 100 /* ms */
+
+/* RK_MMU_STATUS fields */
+#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define RK_MMU_STATUS_IDLE BIT(3)
+#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
+
+/* RK_MMU_COMMAND command values */
+#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
+#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
+#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
+#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enables paging */
+#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
+#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
+#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
+
+/* RK_MMU_INT_* register fields */
+#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
+#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
+#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
+
+#define NUM_DT_ENTRIES 1024
+#define NUM_PT_ENTRIES 1024
+
+#define SPAGE_ORDER 12
+#define SPAGE_SIZE (1 << SPAGE_ORDER)
+
+ /*
+ * Support mapping any size that fits in one page table:
+ * 4 KiB to 4 MiB
+ */
+#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
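+
+/*
+ * Illustrative: 0x007ff000 has bits 12..22 set, so every power-of-two size
+ * from 2^12 (4 KiB) up to 2^22 (4 MiB) is a supported mapping size.
+ */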
+
+#define IOMMU_REG_POLL_COUNT_FAST 1000
+
+struct rk_iommu_domain {
+ struct list_head iommus;
+ u32 *dt; /* page directory table */
+ spinlock_t iommus_lock; /* lock for iommus list */
+ spinlock_t dt_lock; /* lock for modifying page directory table */
+};
+
+struct rk_iommu {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+ struct list_head node; /* entry in rk_iommu_domain.iommus */
+ struct iommu_domain *domain; /* domain to which iommu is attached */
+};
+
+static inline void rk_table_flush(u32 *va, unsigned int count)
+{
+ phys_addr_t pa_start = virt_to_phys(va);
+ phys_addr_t pa_end = virt_to_phys(va + count);
+ size_t size = pa_end - pa_start;
+
+ __cpuc_flush_dcache_area(va, size);
+ outer_flush_range(pa_start, pa_end);
+}
+
+/**
+ * Inspired by _wait_for in intel_drv.h
+ * This is NOT safe for use in interrupt context.
+ *
+ * Note that it's important that we check the condition again after having
+ * timed out, since the timeout could be due to preemption or similar and
+ * we've never had a chance to check the condition before the timeout.
+ */
+#define rk_wait_for(COND, MS) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
+ int ret__ = 0; \
+ while (!(COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = (COND) ? 0 : -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(50, 100); \
+ } \
+ ret__; \
+})
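+
+/*
+ * Typical use, as in the stall/paging helpers below (illustrative):
+ *
+ *   ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ *   if (ret)
+ *           dev_err(iommu->dev, "stall request timed out\n");
+ */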
+
+/*
+ * The Rockchip rk3288 iommu uses a 2-level page table.
+ * The first level is the "Directory Table" (DT).
+ * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
+ * to a "Page Table".
+ * The second level is the 1024 Page Tables (PT).
+ * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
+ * a 4 KB page of physical memory.
+ *
+ * The DT and each PT fit in a single 4 KB page (4 bytes * 1024 entries).
+ * Each iommu device has an MMU_DTE_ADDR register that contains the physical
+ * address of the start of the DT page.
+ *
+ * The structure of the page table is as follows:
+ *
+ *                   DT
+ * MMU_DTE_ADDR -> +-----+
+ *                 |     |
+ *                 +-----+     PT
+ *                 | DTE | -> +-----+
+ *                 +-----+    |     |     Memory
+ *                 |     |    +-----+     Page
+ *                 |     |    | PTE | -> +-----+
+ *                 +-----+    +-----+    |     |
+ *                 |     |    |     |    |     |
+ *                 |     |    |     |    |     |
+ *                 +-----+    |     |    |     |
+ *                            |     |    |     |
+ *                            |     |    |     |
+ *                            +-----+
+ */
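+
+/*
+ * Illustrative arithmetic: 1024 DTEs x 1024 PTEs x 4 KiB pages spans the
+ * full 4 GiB (32-bit) iova space, using at most one 4 KiB DT page plus
+ * 1024 4 KiB PT pages.
+ */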
+
+/*
+ * Each DTE has a PT address and a valid bit:
+ * +---------------------+-----------+-+
+ * |      PT address     | Reserved  |V|
+ * +---------------------+-----------+-+
+ * 31:12 - PT address (PTs always start on a 4 KB boundary)
+ * 11: 1 - Reserved
+ *     0 - 1 if PT @ PT address is valid
+ */
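+
+/* Illustrative: a PT page at physical 0x3fdff000 encodes as DTE 0x3fdff001. */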
+#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
+#define RK_DTE_PT_VALID BIT(0)
+
+static inline phys_addr_t rk_dte_pt_address(u32 dte)
+{
+ return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
+}
+
+static inline bool rk_dte_is_pt_valid(u32 dte)
+{
+ return dte & RK_DTE_PT_VALID;
+}
+
+static u32 rk_mk_dte(u32 *pt)
+{
+ phys_addr_t pt_phys = virt_to_phys(pt);
+ return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
+}
+
+/*
+ * Each PTE has a Page address, some flags and a valid bit:
+ * +---------------------+---+-------+-+
+ * |      Page address   |Rsv| Flags |V|
+ * +---------------------+---+-------+-+
+ * 31:12 - Page address (Pages always start on a 4 KB boundary)
+ * 11: 9 - Reserved
+ *  8: 1 - Flags
+ *      8 - Read allocate - allocate cache space on read misses
+ *      7 - Read cache - enable cache & prefetch of data
+ *      6 - Write buffer - enable delaying writes on their way to memory
+ *      5 - Write allocate - allocate cache space on write misses
+ *      4 - Write cache - different writes can be merged together
+ *      3 - Override cache attributes
+ *          if 1, bits 4-8 control cache attributes
+ *          if 0, the system bus defaults are used
+ *      2 - Writable
+ *      1 - Readable
+ *      0 - 1 if Page @ Page address is valid
+ */
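+
+/*
+ * Illustrative: mapping page 0x3fc00000 with read+write permission yields
+ * PTE 0x3fc00007 (page address | writable | readable | valid).
+ */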
+#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
+#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
+#define RK_PTE_PAGE_WRITABLE BIT(2)
+#define RK_PTE_PAGE_READABLE BIT(1)
+#define RK_PTE_PAGE_VALID BIT(0)
+
+static inline phys_addr_t rk_pte_page_address(u32 pte)
+{
+ return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
+}
+
+static inline bool rk_pte_is_page_valid(u32 pte)
+{
+ return pte & RK_PTE_PAGE_VALID;
+}
+
+/* TODO: set cache flags per prot IOMMU_CACHE */
+static u32 rk_mk_pte(phys_addr_t page, int prot)
+{
+ u32 flags = 0;
+ flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
+ flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
+ page &= RK_PTE_PAGE_ADDRESS_MASK;
+ return page | flags | RK_PTE_PAGE_VALID;
+}
+
+static u32 rk_mk_pte_invalid(u32 pte)
+{
+ return pte & ~RK_PTE_PAGE_VALID;
+}
+
+/*
+ * rk3288 iova (IOMMU Virtual Address) format
+ *  31       22.21       12.11          0
+ * +-----------+-----------+-------------+
+ * | DTE index | PTE index | Page offset |
+ * +-----------+-----------+-------------+
+ * 31:22 - DTE index   - index of DTE in DT
+ * 21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
+ * 11: 0 - Page offset - offset into page @ PTE.page_address
+ */
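+
+/*
+ * Worked example (illustrative): iova 0x12345678 decodes to DTE index
+ * 0x048 (iova >> 22), PTE index 0x345 ((iova >> 12) & 0x3ff) and page
+ * offset 0x678 (iova & 0xfff).
+ */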
+#define RK_IOVA_DTE_MASK 0xffc00000
+#define RK_IOVA_DTE_SHIFT 22
+#define RK_IOVA_PTE_MASK 0x003ff000
+#define RK_IOVA_PTE_SHIFT 12
+#define RK_IOVA_PAGE_MASK 0x00000fff
+#define RK_IOVA_PAGE_SHIFT 0
+
+static u32 rk_iova_dte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
+}
+
+static u32 rk_iova_pte_index(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
+}
+
+static u32 rk_iova_page_offset(dma_addr_t iova)
+{
+ return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
+}
+
+static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
+{
+ return readl(iommu->base + offset);
+}
+
+static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
+{
+ writel(value, iommu->base + offset);
+}
+
+static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
+{
+ writel(command, iommu->base + RK_MMU_COMMAND);
+}
+
+static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
+ size_t size)
+{
+ dma_addr_t iova_end = iova + size;
+ /*
+ * TODO(djkurtz): Figure out when it is more efficient to shoot down the
+ * entire iotlb rather than iterate over individual iovas.
+ */
+ for (; iova < iova_end; iova += SPAGE_SIZE)
+ rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
+}
+
+static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
+}
+
+static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
+{
+ return rk_iommu_read(iommu, RK_MMU_STATUS) &
+ RK_MMU_STATUS_PAGING_ENABLED;
+}
+
+static int rk_iommu_enable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ /* Stall can only be enabled if paging is enabled */
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
+
+ ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_stall(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_stall_active(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
+
+ ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_enable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
+
+ ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_disable_paging(struct rk_iommu *iommu)
+{
+ int ret;
+
+ if (!rk_iommu_is_paging_enabled(iommu))
+ return 0;
+
+ rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
+
+ ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
+ if (ret)
+ dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
+ rk_iommu_read(iommu, RK_MMU_STATUS));
+
+ return ret;
+}
+
+static int rk_iommu_force_reset(struct rk_iommu *iommu)
+{
+ int ret;
+ u32 dte_addr;
+
+ /*
+ * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
+ * and verifying that the upper 5 nybbles are read back.
+ */
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
+
+ dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
+ dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
+ return -EFAULT;
+ }
+
+ rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
+
+ ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
+ FORCE_RESET_TIMEOUT);
+ if (ret)
+ dev_err(iommu->dev, "FORCE_RESET command timed out\n");
+
+ return ret;
+}
+
+static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
+{
+ u32 dte_index, pte_index, page_offset;
+ u32 mmu_dte_addr;
+ phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
+ u32 *dte_addr;
+ u32 dte;
+ phys_addr_t pte_addr_phys = 0;
+ u32 *pte_addr = NULL;
+ u32 pte = 0;
+ phys_addr_t page_addr_phys = 0;
+ u32 page_flags = 0;
+
+ dte_index = rk_iova_dte_index(iova);
+ pte_index = rk_iova_pte_index(iova);
+ page_offset = rk_iova_page_offset(iova);
+
+ mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
+ mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
+
+ dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
+ dte_addr = phys_to_virt(dte_addr_phys);
+ dte = *dte_addr;
+
+ if (!rk_dte_is_pt_valid(dte))
+ goto print_it;
+
+ pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
+ pte_addr = phys_to_virt(pte_addr_phys);
+ pte = *pte_addr;
+
+ if (!rk_pte_is_page_valid(pte))
+ goto print_it;
+
+ page_addr_phys = rk_pte_page_address(pte) + page_offset;
+ page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
+
+print_it:
+ dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
+ &iova, dte_index, pte_index, page_offset);
+ dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
+ &mmu_dte_addr_phys, &dte_addr_phys, dte,
+ rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
+ rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
+}
+
+static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
+{
+ struct rk_iommu *iommu = dev_id;
+ u32 status;
+ u32 int_status;
+ dma_addr_t iova;
+
+ int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
+ if (int_status == 0)
+ return IRQ_NONE;
+
+ iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
+
+ if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
+ int flags;
+
+ status = rk_iommu_read(iommu, RK_MMU_STATUS);
+ flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
+ IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+
+ dev_err(iommu->dev, "Page fault at %pad of type %s\n",
+ &iova,
+ (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
+
+ log_iova(iommu, iova);
+
+ /*
+ * Report page fault to any installed handlers.
+ * Ignore the return code, though, since we always zap cache
+ * and clear the page fault anyway.
+ */
+ if (iommu->domain)
+ report_iommu_fault(iommu->domain, iommu->dev, iova,
+ flags);
+ else
+ dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
+
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
+ }
+
+ if (int_status & RK_MMU_IRQ_BUS_ERROR)
+ dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
+
+ if (int_status & ~RK_MMU_IRQ_MASK)
+ dev_err(iommu->dev, "unexpected int_status: %#08x\n",
+ int_status);
+
+ rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
+
+ return IRQ_HANDLED;
+}
+
+static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ phys_addr_t pt_phys, phys = 0;
+ u32 dte, pte;
+ u32 *page_table;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ if (!rk_dte_is_pt_valid(dte))
+ goto out;
+
+ pt_phys = rk_dte_pt_address(dte);
+ page_table = (u32 *)phys_to_virt(pt_phys);
+ pte = page_table[rk_iova_pte_index(iova)];
+ if (!rk_pte_is_page_valid(pte))
+ goto out;
+
+ phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
+out:
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return phys;
+}
+
+static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova, size_t size)
+{
+ struct list_head *pos;
+ unsigned long flags;
+
+ /* shootdown these iova from all iommus using this domain */
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_for_each(pos, &rk_domain->iommus) {
+ struct rk_iommu *iommu;
+ iommu = list_entry(pos, struct rk_iommu, node);
+ rk_iommu_zap_lines(iommu, iova, size);
+ }
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+}
+
+static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
+ dma_addr_t iova)
+{
+ u32 *page_table, *dte_addr;
+ u32 dte;
+ phys_addr_t pt_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
+ dte = *dte_addr;
+ if (rk_dte_is_pt_valid(dte))
+ goto done;
+
+ page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
+ if (!page_table)
+ return ERR_PTR(-ENOMEM);
+
+ dte = rk_mk_dte(page_table);
+ *dte_addr = dte;
+
+ rk_table_flush(page_table, NUM_PT_ENTRIES);
+ rk_table_flush(dte_addr, 1);
+
+ /*
+ * Zap the first iova of the newly allocated page table so the iommu
+ * evicts the old cached value of the new dte from the iotlb.
+ */
+ rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
+
+done:
+ pt_phys = rk_dte_pt_address(dte);
+ return (u32 *)phys_to_virt(pt_phys);
+}
+
+static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
+ u32 *pte_addr, dma_addr_t iova, size_t size)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+ if (!rk_pte_is_page_valid(pte))
+ break;
+
+ pte_addr[pte_count] = rk_mk_pte_invalid(pte);
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return pte_count * SPAGE_SIZE;
+}
+
+static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
+ dma_addr_t iova, phys_addr_t paddr, size_t size,
+ int prot)
+{
+ unsigned int pte_count;
+ unsigned int pte_total = size / SPAGE_SIZE;
+ phys_addr_t page_phys;
+
+ assert_spin_locked(&rk_domain->dt_lock);
+
+ for (pte_count = 0; pte_count < pte_total; pte_count++) {
+ u32 pte = pte_addr[pte_count];
+
+ if (rk_pte_is_page_valid(pte))
+ goto unwind;
+
+ pte_addr[pte_count] = rk_mk_pte(paddr, prot);
+
+ paddr += SPAGE_SIZE;
+ }
+
+ rk_table_flush(pte_addr, pte_count);
+
+ return 0;
+unwind:
+ /* Unmap the range of iovas that we just mapped */
+ rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
+
+ iova += pte_count * SPAGE_SIZE;
+ page_phys = rk_pte_page_address(pte_addr[pte_count]);
+ pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
+ &iova, &page_phys, &paddr, prot);
+
+ return -EADDRINUSE;
+}
+
+static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
+ phys_addr_t paddr, size_t size, int prot)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ u32 *page_table, *pte_addr;
+ int ret;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So, size will always be 4096 <= size <= 4194304.
+ * Since iommu_map() guarantees that both iova and size will be
+ * aligned, we will always only be mapping from a single dte here.
+ */
+ page_table = rk_dte_get_page_table(rk_domain, iova);
+ if (IS_ERR(page_table)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return PTR_ERR(page_table);
+ }
+
+ pte_addr = &page_table[rk_iova_pte_index(iova)];
+ ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ return ret;
+}
+
+static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
+ size_t size)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ dma_addr_t iova = (dma_addr_t)_iova;
+ phys_addr_t pt_phys;
+ u32 dte;
+ u32 *pte_addr;
+ size_t unmap_size;
+
+ spin_lock_irqsave(&rk_domain->dt_lock, flags);
+
+ /*
+ * pgsize_bitmap specifies iova sizes that fit in one page table
+ * (1024 4-KiB pages = 4 MiB).
+ * So, size will always be 4096 <= size <= 4194304.
+ * Since iommu_unmap() guarantees that both iova and size will be
+ * aligned, we will always only be unmapping from a single dte here.
+ */
+ dte = rk_domain->dt[rk_iova_dte_index(iova)];
+ /* Just return 0 if iova is unmapped */
+ if (!rk_dte_is_pt_valid(dte)) {
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+ return 0;
+ }
+
+ pt_phys = rk_dte_pt_address(dte);
+ pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
+ unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
+
+ spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
+
+ /* Shootdown iotlb entries for iova range that was just unmapped */
+ rk_iommu_zap_iova(rk_domain, iova, unmap_size);
+
+ return unmap_size;
+}
+
+static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
+{
+ struct iommu_group *group;
+ struct device *iommu_dev;
+ struct rk_iommu *rk_iommu;
+
+ group = iommu_group_get(dev);
+ if (!group)
+ return NULL;
+ iommu_dev = iommu_group_get_iommudata(group);
+ rk_iommu = dev_get_drvdata(iommu_dev);
+ iommu_group_put(group);
+
+ return rk_iommu;
+}
+
+static int rk_iommu_attach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+ int ret;
+ phys_addr_t dte_addr;
+
+ /*
+ * Allow 'virtual devices' (e.g., drm) to attach to domain.
+ * Such a device does not belong to an iommu group.
+ */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return 0;
+
+ ret = rk_iommu_enable_stall(iommu);
+ if (ret)
+ return ret;
+
+ ret = rk_iommu_force_reset(iommu);
+ if (ret)
+ return ret;
+
+ iommu->domain = domain;
+
+ ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
+ IRQF_SHARED, dev_name(dev), iommu);
+ if (ret)
+ return ret;
+
+ dte_addr = virt_to_phys(rk_domain->dt);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
+ rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
+
+ ret = rk_iommu_enable_paging(iommu);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_add_tail(&iommu->node, &rk_domain->iommus);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ dev_info(dev, "Attached to iommu domain\n");
+
+ rk_iommu_disable_stall(iommu);
+
+ return 0;
+}
+
+static void rk_iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct rk_iommu *iommu;
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ unsigned long flags;
+
+ /* Allow 'virtual devices' (e.g., drm) to detach from domain */
+ iommu = rk_iommu_from_dev(dev);
+ if (!iommu)
+ return;
+
+ spin_lock_irqsave(&rk_domain->iommus_lock, flags);
+ list_del_init(&iommu->node);
+ spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
+
+ /* Ignore error while disabling, just keep going */
+ rk_iommu_enable_stall(iommu);
+ rk_iommu_disable_paging(iommu);
+ rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
+ rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
+ rk_iommu_disable_stall(iommu);
+
+ devm_free_irq(dev, iommu->irq, iommu);
+
+ iommu->domain = NULL;
+
+ dev_info(dev, "Detached from iommu domain\n");
+}
+
+static int rk_iommu_domain_init(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain;
+
+ rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
+ if (!rk_domain)
+ return -ENOMEM;
+
+ /*
+ * rk32xx iommus use a 2-level page table.
+ * Each level 1 (dt) and level 2 (pt) table has 1024 4-byte entries.
+ * Allocate one 4 KiB page for each table.
+ */
+ rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
+ if (!rk_domain->dt)
+ goto err_dt;
+
+ rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
+
+ spin_lock_init(&rk_domain->iommus_lock);
+ spin_lock_init(&rk_domain->dt_lock);
+ INIT_LIST_HEAD(&rk_domain->iommus);
+
+ domain->priv = rk_domain;
+
+ return 0;
+err_dt:
+ kfree(rk_domain);
+ return -ENOMEM;
+}
+
+static void rk_iommu_domain_destroy(struct iommu_domain *domain)
+{
+ struct rk_iommu_domain *rk_domain = domain->priv;
+ int i;
+
+ WARN_ON(!list_empty(&rk_domain->iommus));
+
+ for (i = 0; i < NUM_DT_ENTRIES; i++) {
+ u32 dte = rk_domain->dt[i];
+ if (rk_dte_is_pt_valid(dte)) {
+ phys_addr_t pt_phys = rk_dte_pt_address(dte);
+ u32 *page_table = phys_to_virt(pt_phys);
+ free_page((unsigned long)page_table);
+ }
+ }
+
+ free_page((unsigned long)rk_domain->dt);
+ kfree(domain->priv);
+ domain->priv = NULL;
+}
+
+static bool rk_iommu_is_dev_iommu_master(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
+ return (ret > 0);
+}
+
+static int rk_iommu_group_set_iommudata(struct iommu_group *group,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct platform_device *pd;
+ int ret;
+ struct of_phandle_args args;
+
+ /*
+ * An iommu master has an iommus property containing a list of phandles
+ * to iommu nodes, each with an #iommu-cells property with value 0.
+ */
+ ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
+ &args);
+ if (ret) {
+ dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
+ np->full_name, ret);
+ return ret;
+ }
+ if (args.args_count != 0) {
+ dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
+ args.np->full_name, args.args_count);
+ return -EINVAL;
+ }
+
+ pd = of_find_device_by_node(args.np);
+ if (!pd) {
+ dev_err(dev, "iommu %s not found\n", args.np->full_name);
+ of_node_put(args.np);
+ return -EPROBE_DEFER;
+ }
+ of_node_put(args.np);
+
+ /* TODO(djkurtz): handle multiple slave iommus for a single master */
+ iommu_group_set_iommudata(group, &pd->dev, NULL);
+
+ return 0;
+}
+
+static int rk_iommu_add_device(struct device *dev)
+{
+ struct iommu_group *group;
+ int ret;
+
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return -ENODEV;
+
+ group = iommu_group_get(dev);
+ if (!group) {
+ group = iommu_group_alloc();
+ if (IS_ERR(group)) {
+ dev_err(dev, "Failed to allocate IOMMU group\n");
+ return PTR_ERR(group);
+ }
+ }
+
+ ret = iommu_group_add_device(group, dev);
+ if (ret)
+ goto err_put_group;
+
+ ret = rk_iommu_group_set_iommudata(group, dev);
+ if (ret)
+ goto err_remove_device;
+
+ iommu_group_put(group);
+
+ return 0;
+
+err_remove_device:
+ iommu_group_remove_device(dev);
+err_put_group:
+ iommu_group_put(group);
+ return ret;
+}
+
+static void rk_iommu_remove_device(struct device *dev)
+{
+ if (!rk_iommu_is_dev_iommu_master(dev))
+ return;
+
+ iommu_group_remove_device(dev);
+}
+
+static const struct iommu_ops rk_iommu_ops = {
+ .domain_init = rk_iommu_domain_init,
+ .domain_destroy = rk_iommu_domain_destroy,
+ .attach_dev = rk_iommu_attach_device,
+ .detach_dev = rk_iommu_detach_device,
+ .map = rk_iommu_map,
+ .unmap = rk_iommu_unmap,
+ .add_device = rk_iommu_add_device,
+ .remove_device = rk_iommu_remove_device,
+ .iova_to_phys = rk_iommu_iova_to_phys,
+ .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
+};
+
+static int rk_iommu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rk_iommu *iommu;
+ struct resource *res;
+
+ iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
+ if (!iommu)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, iommu);
+ iommu->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ iommu->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(iommu->base))
+ return PTR_ERR(iommu->base);
+
+ iommu->irq = platform_get_irq(pdev, 0);
+ if (iommu->irq < 0) {
+ dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
+static int rk_iommu_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id rk_iommu_dt_ids[] = {
+ { .compatible = "rockchip,iommu" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
+#endif
+
+static struct platform_driver rk_iommu_driver = {
+ .probe = rk_iommu_probe,
+ .remove = rk_iommu_remove,
+ .driver = {
+ .name = "rk_iommu",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+ },
+};
+
+static int __init rk_iommu_init(void)
+{
+ int ret;
+
+ ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
+ if (ret)
+ return ret;
+
+ return platform_driver_register(&rk_iommu_driver);
+}
+static void __exit rk_iommu_exit(void)
+{
+ platform_driver_unregister(&rk_iommu_driver);
+}
+
+subsys_initcall(rk_iommu_init);
+module_exit(rk_iommu_exit);
+
+MODULE_DESCRIPTION("IOMMU API for Rockchip");
+MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
+MODULE_ALIAS("platform:rockchip-iommu");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 932ed9be9ff3..b10aaeda2bb4 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2190,7 +2190,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
ret = smiapp_set_compose(subdev, fh, sel);
break;
default:
- BUG();
+ ret = -EINVAL;
}
mutex_unlock(&sensor->mutex);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 331eddac7222..3bd386c371f7 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
for (line = 0; line < lines; line++) {
while (offset && offset >= sg_dma_len(sg)) {
offset -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= (sg_dma_len(sg)-offset);
offset = 0;
- sg++;
+ sg = sg_next(sg);
while (todo > sg_dma_len(sg)) {
*(rp++) = cpu_to_le32(RISC_WRITE|
sg_dma_len(sg));
*(rp++) = cpu_to_le32(sg_dma_address(sg));
*(rp++) = cpu_to_le32(0); /* bits 63-32 */
todo -= sg_dma_len(sg);
- sg++;
+ sg = sg_next(sg);
}
*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
*(rp++) = cpu_to_le32(sg_dma_address(sg));
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 172583d736fe..8cbe6b49f4c2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
if (!status)
return IRQ_NONE;
- if (status & ~solo_dev->irq_mask) {
- solo_reg_write(solo_dev, SOLO_IRQ_STAT,
- status & ~solo_dev->irq_mask);
- status &= solo_dev->irq_mask;
- }
+ /* Acknowledge all interrupts immediately */
+ solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
if (status & SOLO_IRQ_PCI_ERR)
solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
if (status & SOLO_IRQ_G723)
solo_g723_isr(solo_dev);
- /* Clear all interrupts handled */
- solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
-
return IRQ_HANDLED;
}
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f1f098e22f7e..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -259,8 +259,8 @@ again:
case 32:
if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
protocol = RC_TYPE_RC6_MCE;
- scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
+ scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
} else {
protocol = RC_BIT_RC6_6A_32;
toggle = 0;
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index ccc00099b261..1c0dbf428a3a 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -632,7 +632,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
break;
case V4L2_PIX_FMT_JPEG:
case V4L2_PIX_FMT_MJPEG:
- buf->vb.v4l2_buf.length = jpgsize;
+ vb2_set_plane_payload(&buf->vb, 0, jpgsize);
memcpy(vbuf, tmpbuf, jpgsize);
break;
case V4L2_PIX_FMT_YUV422P:
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c13d83e15ace..45f09a66e6c9 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
bond_option_arp_ip_targets_clear(bond);
nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
- __be32 target = nla_get_be32(attr);
+ __be32 target;
+
+ if (nla_len(attr) < sizeof(target))
+ return -EINVAL;
+
+ target = nla_get_be32(attr);
bond_opt_initval(&newval, (__force u64)target);
err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8520d5529df8..279873cb6e3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2442,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
SUPPORTED_10000baseKX4_Full;
else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
+ type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
v |= SUPPORTED_FIBRE;
- else if (type == FW_PORT_TYPE_BP40_BA)
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA)
v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 60e9c2cd051e..b5db6b3f939f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
return ret;
}
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
- int reserve;
+ uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
- reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
if (reserve)
- skb_reserve(skb, reserve);
+ skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
}
-#else
-static void sh_eth_set_receive_align(struct sk_buff *skb)
-{
- skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
-}
-#endif
/* CPU <-> EDMAC endian convert */
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
struct sh_eth_txdesc *txdesc = NULL;
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
for (i = 0; i < mdp->num_rx_ring; i++) {
/* skb */
mdp->rx_skbuff[i] = NULL;
- skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+ skb = netdev_alloc_skb(ndev, skbuff_size);
mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
- dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
+ /* The size of the buffer is a multiple of 16 bytes. */
+ rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
+ dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
- /* The size of the buffer is 16 byte boundary. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
/* Rx descriptor address set */
if (i == 0) {
sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
struct sk_buff *skb;
u16 pkt_len = 0;
u32 desc_status;
+ int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
rxdesc = &mdp->rx_ring[entry];
while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
- mdp->rx_buf_sz,
+ ALIGN(mdp->rx_buf_sz, 16),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
if (mdp->rx_skbuff[entry] == NULL) {
- skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
+ skb = netdev_alloc_skb(ndev, skbuff_size);
mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
- dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
- DMA_FROM_DEVICE);
sh_eth_set_receive_align(skb);
+ dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length, DMA_FROM_DEVICE);
skb_checksum_none_assert(skb);
rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
+ mdp->is_opened = 1;
+
return ret;
out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+
+ if (sh_eth_is_rz_fast_ether(mdp))
+ return &ndev->stats;
+
+ if (!mdp->is_opened)
+ return &ndev->stats;
+
+ ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
+ sh_eth_write(ndev, 0, TROCR); /* (write clear) */
+ ndev->stats.collisions += sh_eth_read(ndev, CDCR);
+ sh_eth_write(ndev, 0, CDCR); /* (write clear) */
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
+ sh_eth_write(ndev, 0, LCCR); /* (write clear) */
+
+ if (sh_eth_is_gether(mdp)) {
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
+ sh_eth_write(ndev, 0, CERCR); /* (write clear) */
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
+ sh_eth_write(ndev, 0, CEECR); /* (write clear) */
+ } else {
+ ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
+ sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
+ }
+
+ return &ndev->stats;
+}
+
/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
sh_eth_write(ndev, 0, EDTRR);
sh_eth_write(ndev, 0, EDRRR);
+ sh_eth_get_stats(ndev);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
pm_runtime_put_sync(&mdp->pdev->dev);
- return 0;
-}
-
-static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- if (sh_eth_is_rz_fast_ether(mdp))
- return &ndev->stats;
+ mdp->is_opened = 0;
- pm_runtime_get_sync(&mdp->pdev->dev);
-
- ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
- sh_eth_write(ndev, 0, TROCR); /* (write clear) */
- ndev->stats.collisions += sh_eth_read(ndev, CDCR);
- sh_eth_write(ndev, 0, CDCR); /* (write clear) */
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
- sh_eth_write(ndev, 0, LCCR); /* (write clear) */
- if (sh_eth_is_gether(mdp)) {
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
- sh_eth_write(ndev, 0, CERCR); /* (write clear) */
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
- sh_eth_write(ndev, 0, CEECR); /* (write clear) */
- } else {
- ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
- sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
- }
- pm_runtime_put_sync(&mdp->pdev->dev);
-
- return &ndev->stats;
+ return 0;
}
/* ioctl to device function */
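
The reworked sh_eth_set_receive_align() computes how far skb->data sits past the previous SH_ETH_RX_ALIGN boundary and reserves only the bytes needed to reach the next one, which is why the skb is now allocated SH_ETH_RX_ALIGN - 1 bytes larger. A worked example of the arithmetic, assuming 32-byte alignment and a data pointer ending in 0x14:

	/* (uintptr_t)skb->data == ...0x14, SH_ETH_RX_ALIGN == 32 */
	uintptr_t misalign = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);	/* 0x14 */

	if (misalign)
		skb_reserve(skb, SH_ETH_RX_ALIGN - misalign);	/* +0x0c -> ...0x20 */
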
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index b37c427144ee..22301bf9c21d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -162,9 +162,9 @@ enum {
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-#define SH4_SKB_RX_ALIGN 32
+#define SH_ETH_RX_ALIGN 32
#else
-#define SH2_SH3_SKB_RX_ALIGN 2
+#define SH_ETH_RX_ALIGN 2
#endif
/* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
+ unsigned is_opened:1;
};
static inline void sh_eth_soft_swap(char *src, int len)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5b0da3986216..58a1a0a423d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -265,6 +265,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
plat_dat = dev_get_platdata(&pdev->dev);
+ if (!plat_dat)
+ plat_dat = devm_kzalloc(&pdev->dev,
+ sizeof(struct plat_stmmacenet_data),
+ GFP_KERNEL);
+ if (!plat_dat) {
+ pr_err("%s: ERROR: no memory", __func__);
+ return -ENOMEM;
+ }
+
/* Set default value for multicast hash bins */
plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -272,15 +281,6 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
plat_dat->unicast_filter_entries = 1;
if (pdev->dev.of_node) {
- if (!plat_dat)
- plat_dat = devm_kzalloc(&pdev->dev,
- sizeof(struct plat_stmmacenet_data),
- GFP_KERNEL);
- if (!plat_dat) {
- pr_err("%s: ERROR: no memory", __func__);
- return -ENOMEM;
- }
-
ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
if (ret) {
pr_err("%s: main dt probe failed", __func__);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cca871346a0f..ece8d1804d13 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
len = skb_frag_size(frag);
offset = frag->page_offset;
- /* Data must not cross a page boundary. */
- BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
while (len > 0) {
unsigned long bytes;
- BUG_ON(offset >= PAGE_SIZE);
-
bytes = PAGE_SIZE - offset;
if (bytes > len)
bytes = len;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 30e97bcc4f88..d134710de96d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
phys_addr_t size, bool nomap)
{
- if (memblock_is_region_reserved(base, size))
- return -EBUSY;
if (nomap)
return memblock_remove(base, size);
return memblock_reserve(base, size);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 3d43874319be..19bb19c7db4a 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -276,6 +276,7 @@ struct tegra_pcie {
struct resource all;
struct resource io;
+ struct resource pio;
struct resource mem;
struct resource prefetch;
struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct tegra_pcie *pcie = sys_to_pcie(sys);
int err;
- phys_addr_t io_start;
err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
if (err)
return err;
- io_start = pci_pio_to_address(pcie->io.start);
-
pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
pci_add_resource_offset(&sys->resources, &pcie->prefetch,
sys->mem_offset);
pci_add_resource(&sys->resources, &pcie->busn);
- pci_ioremap_io(nr * SZ_64K, io_start);
+ pci_ioremap_io(pcie->pio.start, pcie->io.start);
return 1;
}
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
u32 fpci_bar, size, axi_address;
- phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
/* Bar 0: type 1 extended configuration space */
fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
/* Bar 1: downstream IO bar */
fpci_bar = 0xfdfc0000;
size = resource_size(&pcie->io);
- axi_address = io_start;
+ axi_address = pcie->io.start;
afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
switch (res.flags & IORESOURCE_TYPE_BITS) {
case IORESOURCE_IO:
- memcpy(&pcie->io, &res, sizeof(res));
- pcie->io.name = np->full_name;
+ memcpy(&pcie->pio, &res, sizeof(res));
+ pcie->pio.name = np->full_name;
+
+ /*
+ * The Tegra PCIe host bridge uses this to program the
+ * mapping of the I/O space to the physical address,
+ * so we override the .start and .end fields that
+ * of_pci_range_to_resource() converted to I/O space.
+ * We also set the IORESOURCE_MEM type to clarify that
+ * the resource is in the physical memory space.
+ */
+ pcie->io.start = range.cpu_addr;
+ pcie->io.end = range.cpu_addr + range.size - 1;
+ pcie->io.flags = IORESOURCE_MEM;
+ pcie->io.name = "I/O";
+
+ memcpy(&res, &pcie->io, sizeof(res));
break;
case IORESOURCE_MEM:
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 8532c3e2aea7..1626dc66e763 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -161,7 +161,7 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
static const struct s3c2410_wdt_variant drv_data_exynos7 = {
.disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
.mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
- .mask_bit = 0,
+ .mask_bit = 23,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 23, /* A57 WDTRESET */
.quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6df8d3d885e5..b8b92c2f9683 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
}
alias = d_find_alias(inode);
- if (alias && !vfat_d_anon_disconn(alias)) {
+ /*
+ * Checking "alias->d_parent == dentry->d_parent" to make sure
+ * FS is not corrupted (especially double linked dir).
+ */
+ if (alias && alias->d_parent == dentry->d_parent &&
+ !vfat_d_anon_disconn(alias)) {
/*
* This inode has non anonymous-DCACHE_DISCONNECTED
* dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
- dentry->d_time = dentry->d_parent->d_inode->i_version;
- dentry = d_splice_alias(inode, dentry);
- if (dentry)
- dentry->d_time = dentry->d_parent->d_inode->i_version;
- return dentry;
-
+ if (!inode)
+ dentry->d_time = dir->i_version;
+ return d_splice_alias(inode, dentry);
error:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
- dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
+ dentry->d_time = dir->i_version;
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
+ dentry->d_time = dir->i_version;
out:
mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
- dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
mutex_unlock(&MSDOS_SB(sb)->s_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e4dc74713a43..1df94fabe4eb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
journal->j_chksum_driver = NULL;
return 0;
}
- }
- /* Precompute checksum seed for all metadata */
- if (jbd2_journal_has_csum_v2or3(journal))
+ /* Precompute checksum seed for all metadata */
journal->j_csum_seed = jbd2_chksum(journal, ~0,
sb->s_uuid,
sizeof(sb->s_uuid));
+ }
}
/* If enabling v1 checksums, downgrade superblock */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 969da0f1ded8..d21135dc7a22 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -137,6 +137,14 @@ struct drm_display_info {
u8 cea_rev;
};
+/* data corresponds to displayid vend/prod/serial */
+struct drm_tile_group {
+ struct kref refcount;
+ struct drm_device *dev;
+ int id;
+ u8 group_data[8];
+};
+
struct drm_framebuffer_funcs {
/* note: use drm_framebuffer_remove() */
void (*destroy)(struct drm_framebuffer *framebuffer);
@@ -599,6 +607,15 @@ struct drm_encoder {
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
+ * @has_tile: is this connector connected to a tiled monitor
+ * @tile_group: tile group for the connected monitor
+ * @tile_is_single_monitor: whether the tile group is housed in one physical monitor
+ * @num_h_tile: number of horizontal tiles in the tile group
+ * @num_v_tile: number of vertical tiles in the tile group
+ * @tile_h_loc: horizontal location of this tile
+ * @tile_v_loc: vertical location of this tile
+ * @tile_h_size: horizontal size of this tile.
+ * @tile_v_size: vertical size of this tile.
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -634,6 +651,8 @@ struct drm_connector {
struct drm_property_blob *path_blob_ptr;
+ struct drm_property_blob *tile_blob_ptr;
+
uint8_t polled; /* DRM_CONNECTOR_POLL_* */
/* requested DPMS state */
@@ -661,6 +680,15 @@ struct drm_connector {
struct dentry *debugfs_entry;
struct drm_connector_state *state;
+
+ /* DisplayID bits */
+ bool has_tile;
+ struct drm_tile_group *tile_group;
+ bool tile_is_single_monitor;
+
+ uint8_t num_h_tile, num_v_tile;
+ uint8_t tile_h_loc, tile_v_loc;
+ uint16_t tile_h_size, tile_v_size;
};
/**
@@ -978,6 +1006,7 @@ struct drm_mode_config {
struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
struct mutex idr_mutex; /* for IDR management */
struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
+ struct idr tile_idr; /* use this idr for allocating tile group IDs */
/* this is limited to one for now */
struct mutex fb_lock; /* protects global and per-file fb lists */
@@ -1021,6 +1050,7 @@ struct drm_mode_config {
struct drm_property *edid_property;
struct drm_property *dpms_property;
struct drm_property *path_property;
+ struct drm_property *tile_property;
struct drm_property *plane_type_property;
struct drm_property *rotation_property;
@@ -1192,6 +1222,7 @@ extern void drm_mode_config_cleanup(struct drm_device *dev);
extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
const char *path);
+int drm_mode_connector_set_tile_property(struct drm_connector *connector);
extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
@@ -1328,6 +1359,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
extern int drm_edid_header_is_valid(const u8 *raw_edid);
extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
extern bool drm_edid_is_valid(struct edid *edid);
+
+extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
+ char topology[8]);
+extern void drm_mode_put_tile_group(struct drm_device *dev,
+ struct drm_tile_group *tg);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
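
Tile groups are kref-counted objects keyed by the 8-byte DisplayID topology ID and allocated out of the new tile_idr. A hedged sketch of the lookup-or-create usage the declarations above suggest, with error handling elided:

	struct drm_tile_group *tg;

	/* reuse an existing group for this topology, else create one */
	tg = drm_mode_get_tile_group(dev, topology_id);	/* takes a reference */
	if (!tg)
		tg = drm_mode_create_tile_group(dev, topology_id);

	connector->has_tile = true;
	connector->tile_group = tg;	/* the connector now holds the kref */

	/* ... on connector teardown: */
	drm_mode_put_tile_group(dev, connector->tile_group);
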
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 000000000000..623b4e98e748
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright © 2014 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef DRM_DISPLAYID_H
+#define DRM_DISPLAYID_H
+
+#define DATA_BLOCK_PRODUCT_ID 0x00
+#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
+#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
+#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
+#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
+#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
+#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
+#define DATA_BLOCK_VESA_TIMING 0x07
+#define DATA_BLOCK_CEA_TIMING 0x08
+#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
+#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
+#define DATA_BLOCK_GP_ASCII_STRING 0x0b
+#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
+#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
+#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
+#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
+#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
+#define DATA_BLOCK_TILED_DISPLAY 0x12
+
+#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
+
+#define PRODUCT_TYPE_EXTENSION 0
+#define PRODUCT_TYPE_TEST 1
+#define PRODUCT_TYPE_PANEL 2
+#define PRODUCT_TYPE_MONITOR 3
+#define PRODUCT_TYPE_TV 4
+#define PRODUCT_TYPE_REPEATER 5
+#define PRODUCT_TYPE_DIRECT_DRIVE 6
+
+struct displayid_hdr {
+ u8 rev;
+ u8 bytes;
+ u8 prod_id;
+ u8 ext_count;
+} __packed;
+
+struct displayid_block {
+ u8 tag;
+ u8 rev;
+ u8 num_bytes;
+} __packed;
+
+struct displayid_tiled_block {
+ struct displayid_block base;
+ u8 tile_cap;
+ u8 topo[3];
+ u8 tile_size[4];
+ u8 tile_pixel_bezel[5];
+ u8 topology_id[8];
+} __packed;
+
+#endif
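
The tiled-display block packs two 16-bit little-endian dimensions into tile_size[]. A small decoding sketch; the size-minus-one encoding follows the DisplayID tiled-display definition and should be checked against the spec:

	static void decode_tile_size(const struct displayid_tiled_block *tile,
				     u16 *w, u16 *h)
	{
		/* little-endian 16-bit fields, stored as (size - 1) */
		*w = (tile->tile_size[0] | (tile->tile_size[1] << 8)) + 1;
		*h = (tile->tile_size[2] | (tile->tile_size[3] << 8)) + 1;
	}
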
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index cec6383bbdb8..00c1da927245 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
struct drm_dp_vcpi vcpi;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
+
+ struct edid *cached_edid; /* for DP logical ports - make tiling work */
};
/**
@@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index d59240ffb1f7..87d85e81d3a7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -27,12 +27,14 @@
#define EDID_LENGTH 128
#define DDC_ADDR 0x50
+#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
#define CEA_EXT 0x02
#define VTB_EXT 0x10
#define DI_EXT 0x40
#define LS_EXT 0x50
#define MI_EXT 0x60
+#define DISPLAYID_EXT 0x70
struct est_timings {
u8 t1;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f4ad254e3488..b597068103aa 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,9 +34,14 @@ struct drm_fb_helper;
#include <linux/kgdb.h>
+struct drm_fb_offset {
+ int x, y;
+};
+
struct drm_fb_helper_crtc {
struct drm_mode_set mode_set;
struct drm_display_mode *desired_mode;
+ int x, y;
};
struct drm_fb_helper_surface_size {
@@ -72,6 +77,7 @@ struct drm_fb_helper_funcs {
bool (*initial_config)(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_crtc **crtcs,
struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
bool *enabled, int width, int height);
};
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 460441714413..b620c317c772 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* non-blocking reserves should be tried.
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
+ * @dups: [out] optional list of duplicates.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
* calling process receives a signal while waiting. In that case, no
* buffers on the list will be reserved upon return.
*
+ * If dups is non-NULL, all buffers already reserved by the current thread
+ * (i.e. duplicates) are added to this list; otherwise -EALREADY is returned
+ * on the first already-reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
* Buffers reserved by this function should be unreserved by
* a call to either ttm_eu_backoff_reservation() or
* ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
*/
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
- struct list_head *list, bool intr);
+ struct list_head *list, bool intr,
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
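
With the extra parameter, a caller that can legitimately encounter the same BO twice in its list passes a second list head; passing NULL keeps the old -EALREADY behaviour. A hedged caller sketch:

	struct ww_acquire_ctx ticket;
	LIST_HEAD(list);
	LIST_HEAD(duplicates);
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
	if (ret)
		return ret;	/* nothing left reserved on error */

	/* ... validation / command submission; entries moved to
	 * &duplicates were already reserved by this thread ... */

	ttm_eu_backoff_reservation(&ticket, &list);
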
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 4c94f31a8c99..8523f9bb72f2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -427,7 +427,7 @@ header-y += virtio_net.h
header-y += virtio_pci.h
header-y += virtio_ring.h
header-y += virtio_rng.h
-header=y += vm_sockets.h
+header-y += vm_sockets.h
header-y += vt.h
header-y += wait.h
header-y += wanrouter.h
diff --git a/ipc/sem.c b/ipc/sem.c
index 454f6c6020a8..53c3310f41c6 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
return retval;
}
- id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
- if (id < 0) {
- ipc_rcu_putref(sma, sem_rcu_free);
- return id;
- }
- ns->used_sems += nsems;
-
sma->sem_base = (struct sem *) &sma[1];
for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
+
+ id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+ if (id < 0) {
+ ipc_rcu_putref(sma, sem_rcu_free);
+ return id;
+ }
+ ns->used_sems += nsems;
+
sem_unlock(sma, -1);
rcu_read_unlock();
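
ipc_addid() is the publication point: once it succeeds, the semaphore set is visible in the namespace IDR and other tasks can look it up, so every field must be initialized before the call; the hunk moves publication after initialization. The same init-then-publish rule in a generic, hypothetical form:

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->nitems = nitems;		/* finish all initialization ... */
	INIT_LIST_HEAD(&obj->list);

	id = idr_alloc(&ids, obj, 0, 0, GFP_KERNEL);	/* ... then publish */
	if (id < 0) {
		kfree(obj);
		return id;
	}
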
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 24beb9bb4c3e..89e7283015a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2874,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
* or we have been woken up remotely but the IPI has not yet arrived,
* we haven't yet exited the RCU idle mode. Do it here manually until
* we find a better solution.
+ *
+ * NB: There are buggy callers of this function. Ideally we
+ * should warn if prev_state != IN_USER, but that will trigger
+ * too frequently to make sense yet.
*/
- user_exit();
+ enum ctx_state prev_state = exception_enter();
schedule();
- user_enter();
+ exception_exit(prev_state);
}
#endif
diff --git a/lib/genalloc.c b/lib/genalloc.c
index cce4dd68c40d..2e65d206b01c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
return pool;
}
+EXPORT_SYMBOL(devm_gen_pool_create);
/**
* dev_get_gen_pool - Obtain the gen_pool (if any) for a device
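
The export lets modular drivers create a device-managed pool that is torn down automatically on unbind. A hedged usage sketch, with hypothetical vaddr/region_size backing memory:

	struct gen_pool *pool;
	unsigned long addr;

	pool = devm_gen_pool_create(dev, ilog2(64), dev_to_node(dev));
	if (!pool)
		return -ENOMEM;

	/* hand the pool a chunk of driver-owned memory to carve up */
	if (gen_pool_add(pool, (unsigned long)vaddr, region_size, -1))
		return -ENOMEM;

	addr = gen_pool_alloc(pool, 256);	/* rounded to 64-byte chunks */
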
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 09225796991a..5e256271b47b 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
continue;
total += zone->present_pages;
- reserved = zone->present_pages - zone->managed_pages;
+ reserved += zone->present_pages - zone->managed_pages;
if (is_highmem_idx(zoneid))
highmem += zone->present_pages;
diff --git a/mm/frontswap.c b/mm/frontswap.c
index c30eec536f03..f2a3571c6e22 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
the (older) page from frontswap
*/
inc_frontswap_failed_stores();
- if (dup)
+ if (dup) {
__frontswap_clear(sis, offset);
+ frontswap_ops->invalidate_page(type, offset);
+ }
}
if (frontswap_writethrough_enabled)
/* report failure so swap also writes to swap device */
diff --git a/mm/memory.c b/mm/memory.c
index 655fd3d34bb0..d3cb2ef66ee2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -816,20 +816,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (!pte_file(pte)) {
swp_entry_t entry = pte_to_swp_entry(pte);
- if (swap_duplicate(entry) < 0)
- return entry.val;
-
- /* make sure dst_mm is on swapoff's mmlist. */
- if (unlikely(list_empty(&dst_mm->mmlist))) {
- spin_lock(&mmlist_lock);
- if (list_empty(&dst_mm->mmlist))
- list_add(&dst_mm->mmlist,
- &src_mm->mmlist);
- spin_unlock(&mmlist_lock);
- }
- if (likely(!non_swap_entry(entry)))
+ if (likely(!non_swap_entry(entry))) {
+ if (swap_duplicate(entry) < 0)
+ return entry.val;
+
+ /* make sure dst_mm is on swapoff's mmlist. */
+ if (unlikely(list_empty(&dst_mm->mmlist))) {
+ spin_lock(&mmlist_lock);
+ if (list_empty(&dst_mm->mmlist))
+ list_add(&dst_mm->mmlist,
+ &src_mm->mmlist);
+ spin_unlock(&mmlist_lock);
+ }
rss[MM_SWAPENTS]++;
- else if (is_migration_entry(entry)) {
+ } else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
if (PageAnon(page))
diff --git a/mm/mmap.c b/mm/mmap.c
index 87e82b38453c..ae919891a087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again: remove_next = 1 + (end > next->vm_end);
* shrinking vma had, to cover any anon pages imported.
*/
if (exporter && exporter->anon_vma && !importer->anon_vma) {
- if (anon_vma_clone(importer, exporter))
- return -ENOMEM;
+ int error;
+
+ error = anon_vma_clone(importer, exporter);
+ if (error)
+ return error;
importer->anon_vma = exporter->anon_vma;
}
}
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (err)
goto out_free_vma;
- if (anon_vma_clone(new, vma))
+ err = anon_vma_clone(new, vma);
+ if (err)
goto out_free_mpol;
if (new->vm_file)
diff --git a/mm/rmap.c b/mm/rmap.c
index d3eb1e02d1c6..a2a1eab077b0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
struct anon_vma_chain *avc;
struct anon_vma *anon_vma;
+ int error;
/* Don't bother if the parent process has no anon_vma here. */
if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
*/
- if (anon_vma_clone(vma, pvma))
- return -ENOMEM;
+ error = anon_vma_clone(vma, pvma);
+ if (error)
+ return error;
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
diff --git a/mm/slab.c b/mm/slab.c
index eb2b2ea30130..f34e053ec46e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
void *obj;
int x;
- VM_BUG_ON(nodeid > num_online_nodes());
+ VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
n = get_node(cachep, nodeid);
BUG_ON(!n);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index d4042e75f7c7..c5afd573d7da 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
unsigned long scanned;
unsigned long reclaimed;
+ spin_lock(&vmpr->sr_lock);
/*
* Several contexts might be calling vmpressure(), so it is
* possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
* here. No need for any locks here since we don't care if
* vmpr->reclaimed is in sync.
*/
- if (!vmpr->scanned)
+ scanned = vmpr->scanned;
+ if (!scanned) {
+ spin_unlock(&vmpr->sr_lock);
return;
+ }
- spin_lock(&vmpr->sr_lock);
- scanned = vmpr->scanned;
reclaimed = vmpr->reclaimed;
vmpr->scanned = 0;
vmpr->reclaimed = 0;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b9b7dfaf202b..76321ea442c3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
goto errout;
}
if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+ put_net(net);
err = -EPERM;
goto errout;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index b8960c4959a5..200e37867336 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -117,6 +117,7 @@ struct keyring_search_context {
#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */
#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
+#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
int (*iterator)(const void *object, void *iterator_data);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index eff88a5f5d40..4743d71e4aa6 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -26,6 +26,8 @@
#include <asm/uaccess.h>
#include "internal.h"
+#define KEY_MAX_DESC_SIZE 4096
+
static int key_get_type_from_user(char *type,
const char __user *_type,
unsigned len)
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
description = NULL;
if (_description) {
- description = strndup_user(_description, PAGE_SIZE);
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
goto error;
/* pull the description into kernel space */
- description = strndup_user(_description, PAGE_SIZE);
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name)
/* fetch the name from userspace */
name = NULL;
if (_name) {
- name = strndup_user(_name, PAGE_SIZE);
+ name = strndup_user(_name, KEY_MAX_DESC_SIZE);
if (IS_ERR(name)) {
ret = PTR_ERR(name);
goto error;
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid,
{
struct key *key, *instkey;
key_ref_t key_ref;
- char *tmpbuf;
+ char *infobuf;
long ret;
+ int desclen, infolen;
key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid,
}
okay:
- /* calculate how much description we're going to return */
- ret = -ENOMEM;
- tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!tmpbuf)
- goto error2;
-
key = key_ref_to_ptr(key_ref);
+ desclen = strlen(key->description);
- ret = snprintf(tmpbuf, PAGE_SIZE - 1,
- "%s;%d;%d;%08x;%s",
- key->type->name,
- from_kuid_munged(current_user_ns(), key->uid),
- from_kgid_munged(current_user_ns(), key->gid),
- key->perm,
- key->description ?: "");
-
- /* include a NUL char at the end of the data */
- if (ret > PAGE_SIZE - 1)
- ret = PAGE_SIZE - 1;
- tmpbuf[ret] = 0;
- ret++;
+ /* calculate how much information we're going to return */
+ ret = -ENOMEM;
+ infobuf = kasprintf(GFP_KERNEL,
+ "%s;%d;%d;%08x;",
+ key->type->name,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
+ key->perm);
+ if (!infobuf)
+ goto error2;
+ infolen = strlen(infobuf);
+ ret = infolen + desclen + 1;
/* consider returning the data */
- if (buffer && buflen > 0) {
- if (buflen > ret)
- buflen = ret;
-
- if (copy_to_user(buffer, tmpbuf, buflen) != 0)
+ if (buffer && buflen >= ret) {
+ if (copy_to_user(buffer, infobuf, infolen) != 0 ||
+ copy_to_user(buffer + infolen, key->description,
+ desclen + 1) != 0)
ret = -EFAULT;
}
- kfree(tmpbuf);
+ kfree(infobuf);
error2:
key_ref_put(key_ref);
error:
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid,
if (ret < 0)
goto error;
- description = strndup_user(_description, PAGE_SIZE);
+ description = strndup_user(_description, KEY_MAX_DESC_SIZE);
if (IS_ERR(description)) {
ret = PTR_ERR(description);
goto error;
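
strndup_user() fails with -EINVAL when the user string (including its NUL) exceeds the given limit, so swapping PAGE_SIZE for an explicit KEY_MAX_DESC_SIZE caps key descriptions at 4096 bytes regardless of the architecture's page size. The resulting pattern at each call site:

	char *desc;

	/* ERR_PTR(-EINVAL) if the string is longer than the cap */
	desc = strndup_user(_description, KEY_MAX_DESC_SIZE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);
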
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8177010174f7..e72548b5897e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
}
if (key->expiry && ctx->now.tv_sec >= key->expiry) {
- ctx->result = ERR_PTR(-EKEYEXPIRED);
+ if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
kleave(" = %d [expire]", ctx->skipped_ret);
goto skipped;
}
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring,
ctx->index_key.type->name,
ctx->index_key.description);
+#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
+ BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
+ (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
+
if (ctx->index_key.description)
ctx->index_key.desc_len = strlen(ctx->index_key.description);
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring,
if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
keyring_compare_object(keyring, &ctx->index_key)) {
ctx->skipped_ret = 2;
- ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
case 1:
goto found;
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring,
}
ctx->skipped_ret = 0;
- if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
- ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
/* Start processing a new keyring */
descend_to_keyring:
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index bb4337c7ae1b..0c7aea4dea54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type,
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
+ KEYRING_SEARCH_SKIP_EXPIRED),
};
struct key *key;
key_ref_t key_ref;
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6639e2cb8853..5d672f7580dd 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
.match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .flags = KEYRING_SEARCH_DO_STATE_CHECK,
};
struct key *authkey;
key_ref_t authkey_ref;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 14f16be3f374..b118a5be18df 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4790,6 +4790,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),