853 files changed, 23817 insertions, 17042 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
index 7ac7d7262bb7..3c3514815cd5 100644
--- a/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
+++ b/Documentation/ABI/testing/sysfs-platform-i2c-demux-pinctrl
@@ -1,23 +1,18 @@
-What:		/sys/devices/platform/<i2c-demux-name>/cur_master
+What:		/sys/devices/platform/<i2c-demux-name>/available_masters
 Date:		January 2016
 KernelVersion:	4.6
 Contact:	Wolfram Sang <wsa@the-dreams.de>
 Description:
+		Reading the file will give you a list of masters which can be
+		selected for a demultiplexed bus. The format is
+		"<index>:<name>". Example from a Renesas Lager board:
-This file selects the active I2C master for a demultiplexed bus.
+		0:/i2c@e6500000 1:/i2c@e6508000
-Write 0 there for the first master, 1 for the second etc. Reading the file will
-give you a list with the active master marked. Example from a Renesas Lager
-board:
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-* 0 - /i2c@9
-  1 - /i2c@e6520000
-  2 - /i2c@e6530000
-
-root@Lager:~# echo 2 > /sys/devices/platform/i2c@8/cur_master
-
-root@Lager:~# cat /sys/devices/platform/i2c@8/cur_master
-  0 - /i2c@9
-  1 - /i2c@e6520000
-* 2 - /i2c@e6530000
+What:		/sys/devices/platform/<i2c-demux-name>/current_master
+Date:		January 2016
+KernelVersion:	4.6
+Contact:	Wolfram Sang <wsa@the-dreams.de>
+Description:
+		This file selects/shows the active I2C master for a demultiplexed
+		bus. It uses the <index> value from the file 'available_masters'.
diff --git a/Documentation/DocBook/gpu.tmpl b/Documentation/DocBook/gpu.tmpl
index 1692c4dd5487..1464fb2f3c46 100644
--- a/Documentation/DocBook/gpu.tmpl
+++ b/Documentation/DocBook/gpu.tmpl
@@ -2153,7 +2153,11 @@ void intel_crt_init(struct drm_device *dev)
 	<td valign="top" >ENUM</td>
 	<td valign="top" >{ "Automatic", "Full", "Limited 16:235" }</td>
 	<td valign="top" >Connector</td>
-	<td valign="top" >TBD</td>
+	<td valign="top" >When this property is set to Limited 16:235
+	and CTM is set, the hardware will be programmed with the
+	result of the multiplication of CTM by the limited range
+	matrix to ensure the pixels normally in the range 0..1.0 are
+	remapped to the range 16/255..235/255.</td>
 </tr>
 <tr>
 	<td valign="top" >“audio”</td>
@@ -3334,7 +3338,7 @@ int num_ioctls;</synopsis>
 	<title>Video BIOS Table (VBT)</title>
!Pdrivers/gpu/drm/i915/intel_bios.c Video BIOS Table (VBT)
!Idrivers/gpu/drm/i915/intel_bios.c
-!Idrivers/gpu/drm/i915/intel_bios.h
+!Idrivers/gpu/drm/i915/intel_vbt_defs.h
 </sect2>
</sect1>
diff --git a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
index e0fc2c11dd00..241fb0545b9e 100644
--- a/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
+++ b/Documentation/devicetree/bindings/clock/qca,ath79-pll.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9XXX PLL controller
 
 The PLL controller provides the 3 main clocks of the SoC: CPU, DDR and AHB.
 Required Properties:
-- compatible: has to be "qca,<soctype>-cpu-intc" and one of the following
+- compatible: has to be "qca,<soctype>-pll" and one of the following
   fallbacks:
   - "qca,ar7100-pll"
   - "qca,ar7240-pll"
@@ -21,8 +21,8 @@ Optional properties:
 
 Example:
 
-	memory-controller@18050000 {
-		compatible = "qca,ar9132-ppl", "qca,ar9130-pll";
+	pll-controller@18050000 {
+		compatible = "qca,ar9132-pll", "qca,ar9130-pll";
 		reg = <0x18050000 0x20>;
 
 		clock-names = "ref";
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
new file mode 100644
index 000000000000..4f2ba8c13d92
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
@@ -0,0 +1,52 @@
+Analogix Display Port bridge bindings
+
+Required properties for dp-controller:
+	-compatible:
+		platform specific such as:
+		 * "samsung,exynos5-dp"
+		 * "rockchip,rk3288-dp"
+	-reg:
+		physical base address of the controller and length
+		of memory mapped region.
+	-interrupts:
+		interrupt combiner values.
+	-clocks:
+		from common clock binding: handle to dp clock.
+	-clock-names:
+		from common clock binding: Shall be "dp".
+	-interrupt-parent:
+		phandle to Interrupt combiner node.
+	-phys:
+		from general PHY binding: the phandle for the PHY device.
+	-phy-names:
+		from general PHY binding: Should be "dp".
+
+Optional properties for dp-controller:
+	-force-hpd:
+		Indicates that the driver should force HPD when HPD detection
+		fails; this is used for some eDP screens which don't have an
+		HPD signal.
+	-hpd-gpios:
+		Hotplug detect GPIO.
+		Indicates which GPIO should be used for hotplug detection
+	-port@[X]: SoC specific port nodes with endpoint definitions as defined
+		in Documentation/devicetree/bindings/media/video-interfaces.txt,
+		please refer to the SoC specific binding document:
+		* Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+		* Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
+
+[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+-------------------------------------------------------------------------------
+
+Example:
+
+	dp-controller {
+		compatible = "samsung,exynos5-dp";
+		reg = <0x145b0000 0x10000>;
+		interrupts = <10 3>;
+		interrupt-parent = <&combiner>;
+		clocks = <&clock 342>;
+		clock-names = "dp";
+
+		phys = <&dp_phy>;
+		phy-names = "dp";
+	};
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
index fe4a7a2dea9c..ade5d8eebf85 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dp.txt
@@ -1,20 +1,3 @@
-Device-Tree bindings for Samsung Exynos Embedded DisplayPort Transmitter(eDP)
-
-DisplayPort is industry standard to accommodate the growing board adoption
-of digital display technology within the PC and CE industries.
-It consolidates the internal and external connection methods to reduce device
-complexity and cost. It also supports necessary features for important cross
-industry applications and provides performance scalability to enable the next
-generation of displays that feature higher color depths, refresh rates, and
-display resolutions.
-
-eDP (embedded display port) device is compliant with Embedded DisplayPort
-standard as follows,
-- DisplayPort standard 1.1a for Exynos5250 and Exynos5260.
-- DisplayPort standard 1.3 for Exynos5422s and Exynos5800.
-
-eDP resides between FIMD and panel or FIMD and bridge such as LVDS.
-
 The Exynos display port interface should be configured based on
 the type of panel connected to it.
 
@@ -48,26 +31,6 @@ Required properties for dp-controller:
 		from general PHY binding: the phandle for the PHY device.
 	-phy-names:
 		from general PHY binding: Should be "dp".
-	-samsung,color-space:
-		input video data format.
-			COLOR_RGB = 0, COLOR_YCBCR422 = 1, COLOR_YCBCR444 = 2
-	-samsung,dynamic-range:
-		dynamic range for input video data.
-			VESA = 0, CEA = 1
-	-samsung,ycbcr-coeff:
-		YCbCr co-efficients for input video.
-			COLOR_YCBCR601 = 0, COLOR_YCBCR709 = 1
-	-samsung,color-depth:
-		number of bits per colour component.
-			COLOR_6 = 0, COLOR_8 = 1, COLOR_10 = 2, COLOR_12 = 3
-	-samsung,link-rate:
-		link rate supported by the panel.
-			LINK_RATE_1_62GBPS = 0x6, LINK_RATE_2_70GBPS = 0x0A
-	-samsung,lane-count:
-		number of lanes supported by the panel.
-			LANE_COUNT1 = 1, LANE_COUNT2 = 2, LANE_COUNT4 = 4
-	- display-timings: timings for the connected panel as described by
-		Documentation/devicetree/bindings/display/display-timing.txt
 
 Optional properties for dp-controller:
 	-interlaced:
@@ -83,17 +46,31 @@ Optional properties for dp-controller:
 		Hotplug detect GPIO.
 		Indicates which GPIO should be used for hotplug detection
-Video interfaces:
-  Device node can contain video interface port nodes according to [1].
-  The following are properties specific to those nodes:
-
-  endpoint node connected to bridge or panel node:
-   - remote-endpoint: specifies the endpoint in panel or bridge node.
-		      This node is required in all kinds of exynos dp
-		      to represent the connection between dp and bridge
-		      or dp and panel.
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
+	-video interfaces: Device node can contain video interface port
+		nodes according to [1].
+	- display-timings: timings for the connected panel as described by
+		Documentation/devicetree/bindings/display/panel/display-timing.txt
+
+For the below properties, please refer to Analogix DP binding document:
+ * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+	-phys (required)
+	-phy-names (required)
+	-hpd-gpios (optional)
+	-force-hpd (optional)
+
+Deprecated properties for DisplayPort:
+-interlaced: deprecated prop that can be parsed from drm_display_mode.
+-vsync-active-high: deprecated prop that can be parsed from drm_display_mode.
+-hsync-active-high: deprecated prop that can be parsed from drm_display_mode.
+-samsung,ycbcr-coeff: deprecated prop that can be parsed from drm_display_mode.
+-samsung,dynamic-range: deprecated prop that can be parsed from drm_display_mode.
+-samsung,color-space: deprecated prop that can be parsed from drm_display_info.
+-samsung,color-depth: deprecated prop that can be parsed from drm_display_info.
+-samsung,link-rate: deprecated prop that can be read from the monitor by dpcd method.
+-samsung,lane-count: deprecated prop that can be read from the monitor by dpcd method.
+-samsung,hpd-gpio: deprecated name for hpd-gpios.
+
+-------------------------------------------------------------------------------
 
 Example:
 
@@ -112,13 +89,6 @@ SOC specific portion:
 
 Board Specific portion:
 	dp-controller {
-		samsung,color-space = <0>;
-		samsung,dynamic-range = <0>;
-		samsung,ycbcr-coeff = <0>;
-		samsung,color-depth = <1>;
-		samsung,link-rate = <0x0a>;
-		samsung,lane-count = <4>;
-
 		display-timings {
 			native-mode = <&lcd_timing>;
 			lcd_timing: 1366x768 {
@@ -135,18 +105,9 @@ Board Specific portion:
 	};
 
 	ports {
-		port {
+		port@0 {
 			dp_out: endpoint {
-				remote-endpoint = <&dp_in>;
-			};
-		};
-	};
-
-	panel {
-		...
-		port {
-			dp_in: endpoint {
-				remote-endpoint = <&dp_out>;
+				remote-endpoint = <&bridge_in>;
 			};
 		};
 	};
diff --git a/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
new file mode 100644
index 000000000000..e832ff98fd61
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/analogix_dp-rockchip.txt
@@ -0,0 +1,92 @@
+Rockchip RK3288 specific extensions to the Analogix Display Port
+================================
+
+Required properties:
+- compatible: "rockchip,rk3288-edp";
+
+- reg: physical base address of the controller and length
+  of memory mapped region.
+
+- clocks: from common clock binding: handle to dp clock.
+
+- clock-names: from common clock binding:
+  Required elements: "dp" "pclk"
+
+- resets: Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+
+- pinctrl-names: Names corresponding to the chip hotplug pinctrl states.
+- pinctrl-0: pin-control mode. should be <&edp_hpd>
+
+- reset-names: Must include the name "dp"
+
+- rockchip,grf: phandle to the General Register Files (GRF) syscon,
+  which this SoC needs for eDP configuration.
+
+- ports: there are 2 port nodes with endpoint definitions as defined in
+  Documentation/devicetree/bindings/media/video-interfaces.txt.
+  Port 0: contains 2 endpoints, connecting to the output of vop.
+  Port 1: contains 1 endpoint, connecting to the input of panel.
+
+For the below properties, please refer to Analogix DP binding document:
+ * Documentation/devicetree/bindings/display/bridge/analogix_dp.txt
+- phys (required)
+- phy-names (required)
+- hpd-gpios (optional)
+- force-hpd (optional)
+-------------------------------------------------------------------------------
+
+Example:
+	dp-controller: dp@ff970000 {
+		compatible = "rockchip,rk3288-dp";
+		reg = <0xff970000 0x4000>;
+		interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
+		clocks = <&cru SCLK_EDP>, <&cru PCLK_EDP_CTRL>;
+		clock-names = "dp", "pclk";
+		phys = <&dp_phy>;
+		phy-names = "dp";
+
+		rockchip,grf = <&grf>;
+		resets = <&cru 111>;
+		reset-names = "dp";
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&edp_hpd>;
+
+		status = "disabled";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			edp_in: port@0 {
+				reg = <0>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				edp_in_vopb: endpoint@0 {
+					reg = <0>;
+					remote-endpoint = <&vopb_out_edp>;
+				};
+				edp_in_vopl: endpoint@1 {
+					reg = <1>;
+					remote-endpoint = <&vopl_out_edp>;
+				};
+			};
+
+			edp_out: port@1 {
+				reg = <1>;
+				#address-cells = <1>;
+				#size-cells = <0>;
+				edp_out_panel: endpoint {
+					reg = <0>;
+					remote-endpoint = <&panel_in_edp>;
+				};
+			};
+		};
+	};
+
+	pinctrl {
+		edp {
+			edp_hpd: edp-hpd {
+				rockchip,pins = <7 11 RK_FUNC_2 &pcfg_pull_none>;
+			};
+		};
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
index 08a4a32c8eb0..0326154c7925 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,pistachio-pinctrl.txt
@@ -134,12 +134,12 @@ mfio80 ddr_debug, mips_trace_data, mips_debug
 mfio81 dreq0, mips_trace_data, eth_debug
 mfio82 dreq1, mips_trace_data, eth_debug
 mfio83 mips_pll_lock, mips_trace_data, usb_debug
-mfio84 sys_pll_lock, mips_trace_data, usb_debug
-mfio85 wifi_pll_lock, mips_trace_data, sdhost_debug
-mfio86 bt_pll_lock, mips_trace_data, sdhost_debug
-mfio87 rpu_v_pll_lock, dreq2, socif_debug
-mfio88 rpu_l_pll_lock, dreq3, socif_debug
-mfio89 audio_pll_lock, dreq4, dreq5
+mfio84 audio_pll_lock, mips_trace_data, usb_debug
+mfio85 rpu_v_pll_lock, mips_trace_data, sdhost_debug
+mfio86 rpu_l_pll_lock, mips_trace_data, sdhost_debug
+mfio87 sys_pll_lock, dreq2, socif_debug
+mfio88 wifi_pll_lock, dreq3, socif_debug
+mfio89 bt_pll_lock, dreq4, dreq5
 tck
 trstn
 tdi
diff --git a/Documentation/filesystems/cramfs.txt b/Documentation/filesystems/cramfs.txt
index 31f53f0ab957..4006298f6707 100644
--- a/Documentation/filesystems/cramfs.txt
+++ b/Documentation/filesystems/cramfs.txt
@@ -38,7 +38,7 @@ the update lasts only as long as the inode is cached in memory, after
 which the timestamp reverts to 1970, i.e. moves backwards in time.
 
 Currently, cramfs must be written and read with architectures of the
-same endianness, and can be read only by kernels with PAGE_CACHE_SIZE
+same endianness, and can be read only by kernels with PAGE_SIZE
 == 4096. At least the latter of these is a bug, but it hasn't been
 decided what the best fix is. For the moment if you have larger pages
 you can just change the #define in mkcramfs.c, so long as you don't
diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt
index d392e1505f17..d9c11d25bf02 100644
--- a/Documentation/filesystems/tmpfs.txt
+++ b/Documentation/filesystems/tmpfs.txt
@@ -60,7 +60,7 @@ size: The limit of allocated bytes for this tmpfs instance.
The default is half of your physical RAM without swap. If you oversize your tmpfs instances the machine will deadlock since the OOM handler will not be able to free that memory. -nr_blocks: The same as size, but in blocks of PAGE_CACHE_SIZE. +nr_blocks: The same as size, but in blocks of PAGE_SIZE. nr_inodes: The maximum number of inodes for this instance. The default is half of the number of your physical RAM pages, or (on a machine with highmem) the number of lowmem RAM pages, diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt index b02a7d598258..4164bd6397a2 100644 --- a/Documentation/filesystems/vfs.txt +++ b/Documentation/filesystems/vfs.txt @@ -708,9 +708,9 @@ struct address_space_operations { from the address space. This generally corresponds to either a truncation, punch hole or a complete invalidation of the address space (in the latter case 'offset' will always be 0 and 'length' - will be PAGE_CACHE_SIZE). Any private data associated with the page + will be PAGE_SIZE). Any private data associated with the page should be updated to reflect this truncation. If offset is 0 and - length is PAGE_CACHE_SIZE, then the private data should be released, + length is PAGE_SIZE, then the private data should be released, because the page must be able to be completely discarded. This may be done by calling the ->releasepage function, but in this case the release MUST succeed. diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 7328cf85236c..1fd1fbe9ce95 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -586,6 +586,10 @@ drivers to make their ->remove() callbacks avoid races with runtime PM directly, but also it allows of more flexibility in the handling of devices during the removal of their drivers. +Drivers in ->remove() callback should undo the runtime PM changes done +in ->probe(). Usually this means calling pm_runtime_disable(), +pm_runtime_dont_use_autosuspend() etc. + The user space can effectively disallow the driver of the device to power manage it at run time by changing the value of its /sys/devices/.../power/control attribute to "on", which causes pm_runtime_forbid() to be called. In principle, diff --git a/Documentation/sound/alsa/timestamping.txt b/Documentation/sound/alsa/timestamping.txt index 0b191a23f534..1b6473f393a8 100644 --- a/Documentation/sound/alsa/timestamping.txt +++ b/Documentation/sound/alsa/timestamping.txt @@ -129,7 +129,7 @@ will be required to issue multiple queries and perform an interpolation of the results In some hardware-specific configuration, the system timestamp is -latched by a low-level audio subsytem, and the information provided +latched by a low-level audio subsystem, and the information provided back to the driver. Due to potential delays in the communication with the hardware, there is a risk of misalignment with the avail and delay information. 
To make sure applications are not confused, a diff --git a/MAINTAINERS b/MAINTAINERS index 1c32f8a3d6c4..61a323a6b2cf 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4302,7 +4302,7 @@ F: drivers/net/ethernet/agere/ ETHERNET BRIDGE M: Stephen Hemminger <stephen@networkplumber.org> -L: bridge@lists.linux-foundation.org +L: bridge@lists.linux-foundation.org (moderated for non-subscribers) L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net:Bridge S: Maintained @@ -5751,7 +5751,7 @@ R: Don Skidmore <donald.c.skidmore@intel.com> R: Bruce Allan <bruce.w.allan@intel.com> R: John Ronciak <john.ronciak@intel.com> R: Mitch Williams <mitch.a.williams@intel.com> -L: intel-wired-lan@lists.osuosl.org +L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers) W: http://www.intel.com/support/feedback.htm W: http://e1000.sourceforge.net/ Q: http://patchwork.ozlabs.org/project/intel-wired-lan/list/ @@ -7576,7 +7576,7 @@ F: drivers/infiniband/hw/nes/ NETEM NETWORK EMULATOR M: Stephen Hemminger <stephen@networkplumber.org> -L: netem@lists.linux-foundation.org +L: netem@lists.linux-foundation.org (moderated for non-subscribers) S: Maintained F: net/sched/sch_netem.c @@ -8712,6 +8712,8 @@ F: drivers/pinctrl/sh-pfc/ PIN CONTROLLER - SAMSUNG M: Tomasz Figa <tomasz.figa@gmail.com> +M: Krzysztof Kozlowski <k.kozlowski@samsung.com> +M: Sylwester Nawrocki <s.nawrocki@samsung.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) S: Maintained @@ -9140,6 +9142,13 @@ T: git git://github.com/KrasnikovEugene/wcn36xx.git S: Supported F: drivers/net/wireless/ath/wcn36xx/ +QEMU MACHINE EMULATOR AND VIRTUALIZER SUPPORT +M: Gabriel Somlo <somlo@cmu.edu> +M: "Michael S. Tsirkin" <mst@redhat.com> +L: qemu-devel@nongnu.org +S: Maintained +F: drivers/firmware/qemu_fw_cfg.c + RADOS BLOCK DEVICE (RBD) M: Ilya Dryomov <idryomov@gmail.com> M: Sage Weil <sage@redhat.com> @@ -10586,6 +10595,14 @@ L: linux-tegra@vger.kernel.org S: Maintained F: drivers/staging/nvec/ +STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON) +M: Jens Frederich <jfrederich@gmail.com> +M: Daniel Drake <dsd@laptop.org> +M: Jon Nettleton <jon.nettleton@gmail.com> +W: http://wiki.laptop.org/go/DCON +S: Maintained +F: drivers/staging/olpc_dcon/ + STAGING - REALTEK RTL8712U DRIVERS M: Larry Finger <Larry.Finger@lwfinger.net> M: Florian Schilhabel <florian.c.schilhabel@googlemail.com>. 
@@ -12205,9 +12222,9 @@ S: Maintained F: drivers/media/tuners/tuner-xc2028.* XEN HYPERVISOR INTERFACE -M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> M: Boris Ostrovsky <boris.ostrovsky@oracle.com> M: David Vrabel <david.vrabel@citrix.com> +M: Juergen Gross <jgross@suse.com> L: xen-devel@lists.xenproject.org (moderated for non-subscribers) T: git git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git S: Supported @@ -12219,16 +12236,16 @@ F: include/xen/ F: include/uapi/xen/ XEN HYPERVISOR ARM -M: Stefano Stabellini <stefano.stabellini@eu.citrix.com> +M: Stefano Stabellini <sstabellini@kernel.org> L: xen-devel@lists.xenproject.org (moderated for non-subscribers) -S: Supported +S: Maintained F: arch/arm/xen/ F: arch/arm/include/asm/xen/ XEN HYPERVISOR ARM64 -M: Stefano Stabellini <stefano.stabellini@eu.citrix.com> +M: Stefano Stabellini <sstabellini@kernel.org> L: xen-devel@lists.xenproject.org (moderated for non-subscribers) -S: Supported +S: Maintained F: arch/arm64/xen/ F: arch/arm64/include/asm/xen/ @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Blurry Fish Butt # *DOCUMENTATION* diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index d7709e3930a3..9e5eddbb856f 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c @@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page) /* kernel reading from page with U-mapping */ phys_addr_t paddr = (unsigned long)page_address(page); - unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; + unsigned long vaddr = page->index << PAGE_SHIFT; if (addr_not_cache_congruent(paddr, vaddr)) __flush_dcache_page(paddr, vaddr); diff --git a/arch/arm/boot/dts/exynos5250-arndale.dts b/arch/arm/boot/dts/exynos5250-arndale.dts index 8b2acc74aa76..85d819217d17 100644 --- a/arch/arm/boot/dts/exynos5250-arndale.dts +++ b/arch/arm/boot/dts/exynos5250-arndale.dts @@ -124,8 +124,6 @@ &dp { status = "okay"; samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; samsung,link-rate = <0x0a>; samsung,lane-count = <4>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index 0f5dcd418af8..f30c2dbba4f5 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -80,8 +80,6 @@ &dp { samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; samsung,link-rate = <0x0a>; samsung,lane-count = <4>; diff --git a/arch/arm/boot/dts/exynos5250-snow-common.dtsi b/arch/arm/boot/dts/exynos5250-snow-common.dtsi index 95210ef6a6b5..746808f401e5 100644 --- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi +++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi @@ -236,12 +236,10 @@ pinctrl-names = "default"; pinctrl-0 = <&dp_hpd>; samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; samsung,link-rate = <0x0a>; samsung,lane-count = <2>; - samsung,hpd-gpio = <&gpx0 7 GPIO_ACTIVE_HIGH>; + hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>; ports { port@0 { diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts index 0f500cb1eb2d..c607bed575d9 100644 --- a/arch/arm/boot/dts/exynos5250-spring.dts +++ b/arch/arm/boot/dts/exynos5250-spring.dts @@ -74,12 +74,10 @@ pinctrl-names = "default"; pinctrl-0 = <&dp_hpd_gpio>; samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; 
samsung,link-rate = <0x0a>; samsung,lane-count = <1>; - samsung,hpd-gpio = <&gpc3 0 GPIO_ACTIVE_HIGH>; + hpd-gpios = <&gpc3 0 GPIO_ACTIVE_HIGH>; }; &ehci { diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts index 3981ddb25036..7ddb6a066b28 100644 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts @@ -157,12 +157,10 @@ pinctrl-names = "default"; pinctrl-0 = <&dp_hpd_gpio>; samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; samsung,link-rate = <0x06>; samsung,lane-count = <2>; - samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>; + hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>; ports { port@0 { diff --git a/arch/arm/boot/dts/exynos5420-smdk5420.dts b/arch/arm/boot/dts/exynos5420-smdk5420.dts index 0785fedf441e..288817daa16c 100644 --- a/arch/arm/boot/dts/exynos5420-smdk5420.dts +++ b/arch/arm/boot/dts/exynos5420-smdk5420.dts @@ -102,8 +102,6 @@ pinctrl-names = "default"; pinctrl-0 = <&dp_hpd>; samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; samsung,link-rate = <0x0a>; samsung,lane-count = <4>; diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts index 6e9edc1610c4..6ba9aec15485 100644 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts @@ -157,8 +157,6 @@ pinctrl-names = "default"; pinctrl-0 = <&dp_hpd_gpio>; samsung,color-space = <0>; - samsung,dynamic-range = <0>; - samsung,ycbcr-coeff = <0>; samsung,color-depth = <1>; samsung,link-rate = <0x0a>; samsung,lane-count = <2>; diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 7b84657fba35..194b69923389 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h @@ -19,7 +19,7 @@ * This may need to be greater than __NR_last_syscall+1 in order to * account for the padding in the syscall table */ -#define __NR_syscalls (392) +#define __NR_syscalls (396) #define __ARCH_WANT_STAT64 #define __ARCH_WANT_SYS_GETHOSTNAME diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h index 5dd2528e9e45..2cb9dc770e1d 100644 --- a/arch/arm/include/uapi/asm/unistd.h +++ b/arch/arm/include/uapi/asm/unistd.h @@ -418,6 +418,8 @@ #define __NR_membarrier (__NR_SYSCALL_BASE+389) #define __NR_mlock2 (__NR_SYSCALL_BASE+390) #define __NR_copy_file_range (__NR_SYSCALL_BASE+391) +#define __NR_preadv2 (__NR_SYSCALL_BASE+392) +#define __NR_pwritev2 (__NR_SYSCALL_BASE+393) /* * The following SWIs are ARM private. 
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index dfc7cd6851ad..703fa0f3cd8f 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -399,8 +399,10 @@ CALL(sys_execveat) CALL(sys_userfaultfd) CALL(sys_membarrier) - CALL(sys_mlock2) +/* 390 */ CALL(sys_mlock2) CALL(sys_copy_file_range) + CALL(sys_preadv2) + CALL(sys_pwritev2) #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 139791ed473d..a28fce0bdbbe 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -430,11 +430,13 @@ static void __init patch_aeabi_idiv(void) pr_info("CPU: div instructions available: patching division code\n"); fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1; + asm ("" : "+g" (fn_addr)); ((u32 *)fn_addr)[0] = udiv_instruction(); ((u32 *)fn_addr)[1] = bx_lr_instruction(); flush_icache_range(fn_addr, fn_addr + 8); fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1; + asm ("" : "+g" (fn_addr)); ((u32 *)fn_addr)[0] = sdiv_instruction(); ((u32 *)fn_addr)[1] = bx_lr_instruction(); flush_icache_range(fn_addr, fn_addr + 8); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 6accd66d26f0..b5384311dec4 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1061,15 +1061,27 @@ static void cpu_init_hyp_mode(void *dummy) kvm_arm_init_debug(); } +static void cpu_hyp_reinit(void) +{ + if (is_kernel_in_hyp_mode()) { + /* + * cpu_init_stage2() is safe to call even if the PM + * event was cancelled before the CPU was reset. + */ + cpu_init_stage2(NULL); + } else { + if (__hyp_get_vectors() == hyp_default_vectors) + cpu_init_hyp_mode(NULL); + } +} + static int hyp_init_cpu_notify(struct notifier_block *self, unsigned long action, void *cpu) { switch (action) { case CPU_STARTING: case CPU_STARTING_FROZEN: - if (__hyp_get_vectors() == hyp_default_vectors) - cpu_init_hyp_mode(NULL); - break; + cpu_hyp_reinit(); } return NOTIFY_OK; @@ -1084,9 +1096,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v) { - if (cmd == CPU_PM_EXIT && - __hyp_get_vectors() == hyp_default_vectors) { - cpu_init_hyp_mode(NULL); + if (cmd == CPU_PM_EXIT) { + cpu_hyp_reinit(); return NOTIFY_OK; } @@ -1128,6 +1139,22 @@ static int init_subsystems(void) int err; /* + * Register CPU Hotplug notifier + */ + cpu_notifier_register_begin(); + err = __register_cpu_notifier(&hyp_init_cpu_nb); + cpu_notifier_register_done(); + if (err) { + kvm_err("Cannot register KVM init CPU notifier (%d)\n", err); + return err; + } + + /* + * Register CPU lower-power notifier + */ + hyp_cpu_pm_init(); + + /* * Init HYP view of VGIC */ err = kvm_vgic_hyp_init(); @@ -1270,19 +1297,6 @@ static int init_hyp_mode(void) free_boot_hyp_pgd(); #endif - cpu_notifier_register_begin(); - - err = __register_cpu_notifier(&hyp_init_cpu_nb); - - cpu_notifier_register_done(); - - if (err) { - kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); - goto out_err; - } - - hyp_cpu_pm_init(); - /* set size of VMID supported by CPU */ kvm_vmid_bits = kvm_get_vmid_bits(); kvm_info("%d-bit VMID\n", kvm_vmid_bits); diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index d0ba3551d49a..3cced8455727 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c @@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) */ if (mapping && cache_is_vipt_aliasing()) flush_pfn_alias(page_to_pfn(page), - page->index << PAGE_CACHE_SHIFT); + page->index << 
PAGE_SHIFT); } static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) @@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p * data in the current VM view associated with this page. * - aliasing VIPT: we only need to find one mapping of this page. */ - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff = page->index; flush_dcache_mmap_lock(mapping); vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0f8963a7e7d9..6fcaac8e200f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -281,12 +281,12 @@ __v7_ca17mp_setup: bl v7_invalidate_l1 ldmia r12, {r1-r6, lr} #ifdef CONFIG_SMP + orr r10, r10, #(1 << 6) @ Enable SMP/nAMP mode ALT_SMP(mrc p15, 0, r0, c1, c0, 1) - ALT_UP(mov r0, #(1 << 6)) @ fake it for UP - tst r0, #(1 << 6) @ SMP/nAMP mode enabled? - orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode - orreq r0, r0, r10 @ Enable CPU-specific SMP bits - mcreq p15, 0, r0, c1, c0, 1 + ALT_UP(mov r0, r10) @ fake it for UP + orr r10, r10, r0 @ Set required bits + teq r10, r0 @ Were they already set? + mcrne p15, 0, r10, c1, c0, 1 @ No, update register #endif b __v7_setup_cont diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 0e391dbfc420..4150fd8bae01 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -124,7 +124,9 @@ #define VTCR_EL2_SL0_LVL1 (1 << 6) #define VTCR_EL2_T0SZ_MASK 0x3f #define VTCR_EL2_T0SZ_40B 24 -#define VTCR_EL2_VS 19 +#define VTCR_EL2_VS_SHIFT 19 +#define VTCR_EL2_VS_8BIT (0 << VTCR_EL2_VS_SHIFT) +#define VTCR_EL2_VS_16BIT (1 << VTCR_EL2_VS_SHIFT) /* * We configure the Stage-2 page tables to always restrict the IPA space to be diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 1a78d6e2a78b..12874164b0ae 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -141,6 +141,9 @@ #define ID_AA64MMFR1_VMIDBITS_SHIFT 4 #define ID_AA64MMFR1_HADBS_SHIFT 0 +#define ID_AA64MMFR1_VMIDBITS_8 0 +#define ID_AA64MMFR1_VMIDBITS_16 2 + /* id_aa64mmfr2 */ #define ID_AA64MMFR2_UAO_SHIFT 4 diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c index bfc54fd82797..5a9f3bf542b0 100644 --- a/arch/arm64/kvm/hyp/s2-setup.c +++ b/arch/arm64/kvm/hyp/s2-setup.c @@ -36,8 +36,10 @@ void __hyp_text __init_stage2_translation(void) * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS * bit in VTCR_EL2. */ - tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf; - val |= (tmp == 2) ? VTCR_EL2_VS : 0; + tmp = (read_sysreg(id_aa64mmfr1_el1) >> ID_AA64MMFR1_VMIDBITS_SHIFT) & 0xf; + val |= (tmp == ID_AA64MMFR1_VMIDBITS_16) ? + VTCR_EL2_VS_16BIT : + VTCR_EL2_VS_8BIT; write_sysreg(val, vtcr_el2); } diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c index 745695db5ba0..f2f264b5aafe 100644 --- a/arch/mips/alchemy/common/dbdma.c +++ b/arch/mips/alchemy/common/dbdma.c @@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid, au1x_dma_chan_t *cp; /* - * We do the intialization on the first channel allocation. + * We do the initialization on the first channel allocation. * We have to wait because of the interrupt handler initialization * which can't be done successfully during board set up. 
*/ @@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr) dp->dscr_source1 = dscr->dscr_source1; dp->dscr_cmd1 = dscr->dscr_cmd1; nbytes = dscr->dscr_cmd1; - /* Allow the caller to specifiy if an interrupt is generated */ + /* Allow the caller to specify if an interrupt is generated */ dp->dscr_cmd0 &= ~DSCR_CMD0_IE; dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V; ctp->chan_ptr->ddma_dbell = 0; diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c index bdeed9d13c6f..433c4b9a9f0a 100644 --- a/arch/mips/alchemy/devboards/db1000.c +++ b/arch/mips/alchemy/devboards/db1000.c @@ -503,15 +503,15 @@ int __init db1000_dev_setup(void) if (board == BCSR_WHOAMI_DB1500) { c0 = AU1500_GPIO2_INT; c1 = AU1500_GPIO5_INT; - d0 = AU1500_GPIO0_INT; - d1 = AU1500_GPIO3_INT; + d0 = 0; /* GPIO number, NOT irq! */ + d1 = 3; /* GPIO number, NOT irq! */ s0 = AU1500_GPIO1_INT; s1 = AU1500_GPIO4_INT; } else if (board == BCSR_WHOAMI_DB1100) { c0 = AU1100_GPIO2_INT; c1 = AU1100_GPIO5_INT; - d0 = AU1100_GPIO0_INT; - d1 = AU1100_GPIO3_INT; + d0 = 0; /* GPIO number, NOT irq! */ + d1 = 3; /* GPIO number, NOT irq! */ s0 = AU1100_GPIO1_INT; s1 = AU1100_GPIO4_INT; @@ -545,15 +545,15 @@ int __init db1000_dev_setup(void) } else if (board == BCSR_WHOAMI_DB1000) { c0 = AU1000_GPIO2_INT; c1 = AU1000_GPIO5_INT; - d0 = AU1000_GPIO0_INT; - d1 = AU1000_GPIO3_INT; + d0 = 0; /* GPIO number, NOT irq! */ + d1 = 3; /* GPIO number, NOT irq! */ s0 = AU1000_GPIO1_INT; s1 = AU1000_GPIO4_INT; platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs)); } else if ((board == BCSR_WHOAMI_PB1500) || (board == BCSR_WHOAMI_PB1500R2)) { c0 = AU1500_GPIO203_INT; - d0 = AU1500_GPIO201_INT; + d0 = 1; /* GPIO number, NOT irq! */ s0 = AU1500_GPIO202_INT; twosocks = 0; flashsize = 64; @@ -566,7 +566,7 @@ int __init db1000_dev_setup(void) */ } else if (board == BCSR_WHOAMI_PB1100) { c0 = AU1100_GPIO11_INT; - d0 = AU1100_GPIO9_INT; + d0 = 9; /* GPIO number, NOT irq! 
*/ s0 = AU1100_GPIO10_INT; twosocks = 0; flashsize = 64; @@ -583,7 +583,6 @@ int __init db1000_dev_setup(void) } else return 0; /* unknown board, no further dev setup to do */ - irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW); @@ -597,7 +596,6 @@ int __init db1000_dev_setup(void) c0, d0, /*s0*/0, 0, 0); if (twosocks) { - irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH); irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW); diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c index b518f029f5e7..1c01d6eadb08 100644 --- a/arch/mips/alchemy/devboards/db1550.c +++ b/arch/mips/alchemy/devboards/db1550.c @@ -514,7 +514,7 @@ static void __init db1550_devices(void) AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, AU1000_PCMCIA_IO_PHYS_ADDR, AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, - AU1550_GPIO3_INT, AU1550_GPIO0_INT, + AU1550_GPIO3_INT, 0, /*AU1550_GPIO21_INT*/0, 0, 0); db1x_register_pcmcia_socket( @@ -524,7 +524,7 @@ static void __init db1550_devices(void) AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000, AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, - AU1550_GPIO5_INT, AU1550_GPIO1_INT, + AU1550_GPIO5_INT, 1, /*AU1550_GPIO22_INT*/0, 0, 1); platform_device_register(&db1550_nand_dev); diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c index eb5117ced95a..618dfd735eed 100644 --- a/arch/mips/ath79/clock.c +++ b/arch/mips/ath79/clock.c @@ -26,8 +26,7 @@ #include "common.h" #define AR71XX_BASE_FREQ 40000000 -#define AR724X_BASE_FREQ 5000000 -#define AR913X_BASE_FREQ 5000000 +#define AR724X_BASE_FREQ 40000000 static struct clk *clks[3]; static struct clk_onecell_data clk_data = { @@ -103,8 +102,8 @@ static void __init ar724x_clocks_init(void) div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK); freq = div * ref_rate; - div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK); - freq *= div; + div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2; + freq /= div; cpu_rate = freq; @@ -123,39 +122,6 @@ static void __init ar724x_clocks_init(void) clk_add_alias("uart", NULL, "ahb", NULL); } -static void __init ar913x_clocks_init(void) -{ - unsigned long ref_rate; - unsigned long cpu_rate; - unsigned long ddr_rate; - unsigned long ahb_rate; - u32 pll; - u32 freq; - u32 div; - - ref_rate = AR913X_BASE_FREQ; - pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG); - - div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK); - freq = div * ref_rate; - - cpu_rate = freq; - - div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1; - ddr_rate = freq / div; - - div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2; - ahb_rate = cpu_rate / div; - - ath79_add_sys_clkdev("ref", ref_rate); - clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate); - clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate); - clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate); - - clk_add_alias("wdt", NULL, "ahb", NULL); - clk_add_alias("uart", NULL, "ahb", NULL); -} - static void __init ar933x_clocks_init(void) { unsigned long ref_rate; @@ -443,10 +409,8 @@ void __init ath79_clocks_init(void) { if (soc_is_ar71xx()) ar71xx_clocks_init(); - else if (soc_is_ar724x()) + else if (soc_is_ar724x() || soc_is_ar913x()) ar724x_clocks_init(); - else if (soc_is_ar913x()) - ar913x_clocks_init(); else if (soc_is_ar933x()) ar933x_clocks_init(); else if (soc_is_ar934x()) diff --git a/arch/mips/bcm47xx/sprom.c 
b/arch/mips/bcm47xx/sprom.c index 959c145a0a2c..ca7ad131d057 100644 --- a/arch/mips/bcm47xx/sprom.c +++ b/arch/mips/bcm47xx/sprom.c @@ -714,11 +714,11 @@ void bcm47xx_sprom_register_fallbacks(void) { #if defined(CONFIG_BCM47XX_SSB) if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb)) - pr_warn("Failed to registered ssb SPROM handler\n"); + pr_warn("Failed to register ssb SPROM handler\n"); #endif #if defined(CONFIG_BCM47XX_BCMA) if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma)) - pr_warn("Failed to registered bcma SPROM handler\n"); + pr_warn("Failed to register bcma SPROM handler\n"); #endif } diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile index 4eff1ef02eff..309d2ad67e4d 100644 --- a/arch/mips/boot/compressed/Makefile +++ b/arch/mips/boot/compressed/Makefile @@ -39,10 +39,11 @@ vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o endif -vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o +vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o -$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib -$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c +extra-y += ashldi3.c bswapsi.c +$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib +$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c $(call cmd,shipped) targets := $(notdir $(vmlinuzobjs-y)) diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi index adb33e355043..56035e5b7008 100644 --- a/arch/mips/boot/dts/brcm/bcm7435.dtsi +++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi @@ -82,7 +82,7 @@ }; gisb-arb@400000 { - compatible = "brcm,bcm7400-gisb-arb"; + compatible = "brcm,bcm7435-gisb-arb"; reg = <0x400000 0xdc>; native-endian; interrupt-parent = <&sun_l2_intc>; diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi index 3ad4ba9b12fd..3c2ed9ee5b2f 100644 --- a/arch/mips/boot/dts/qca/ar9132.dtsi +++ b/arch/mips/boot/dts/qca/ar9132.dtsi @@ -83,7 +83,7 @@ }; pll: pll-controller@18050000 { - compatible = "qca,ar9132-ppl", + compatible = "qca,ar9132-pll", "qca,ar9130-pll"; reg = <0x18050000 0x20>; diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts index e535ee3c26a4..4f1540e5f963 100644 --- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts +++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts @@ -18,7 +18,7 @@ reg = <0x0 0x2000000>; }; - extosc: oscillator { + extosc: ref { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <40000000>; diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c index e59d1b79f24c..2f415d9d0f3c 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c +++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c @@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) gmx_rx_int_en.s.pause_drp = 1; /* Skipping gmx_rx_int_en.s.reserved_16_18 */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ @@ -89,7 +89,7 @@ void 
__cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ @@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ @@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ @@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ @@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) /*gmx_rx_int_en.s.phy_spd = 1; */ /*gmx_rx_int_en.s.phy_link = 1; */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ @@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block) gmx_rx_int_en.s.pause_drp = 1; /* Skipping gmx_rx_int_en.s.reserved_16_18 */ /*gmx_rx_int_en.s.ifgerr = 1; */ - /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */ + /*gmx_rx_int_en.s.coldet = 1; // Collision detect */ /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */ /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */ /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */ diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c index 87be167a7a6a..676fab50dd2b 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-pko.c +++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c @@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void) /* * Set the size of the PKO command buffers to an odd number of * 64bit words. This allows the normal two word send to stay - * aligned and never span a comamnd word buffer. + * aligned and never span a command word buffer. 
*/ config.u64 = 0; config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL; diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index b7fa9ae28c36..42412ba0f3bf 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -331,7 +331,7 @@ static int octeon_update_boot_vector(unsigned int cpu) } if (!(avail_coremask & (1 << coreid))) { - /* core not available, assume, that catched by simple-executive */ + /* core not available, assume, that caught by simple-executive */ cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid); cvmx_write_csr(CVMX_CIU_PP_RST, 0); } diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig index 4e36b6e1869c..43e0ba24470c 100644 --- a/arch/mips/configs/ci20_defconfig +++ b/arch/mips/configs/ci20_defconfig @@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_CGROUPS=y +CONFIG_MEMCG=y +CONFIG_CGROUP_SCHED=y CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y -CONFIG_MEMCG=y -CONFIG_MEMCG_KMEM=y -CONFIG_CGROUP_SCHED=y CONFIG_NAMESPACES=y CONFIG_USER_NS=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y @@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y # CONFIG_ALLOW_DEV_COREDUMP is not set CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=32 +CONFIG_MTD=y +CONFIG_MTD_NAND=y +CONFIG_MTD_NAND_JZ4780=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_FASTMAP=y CONFIG_NETDEVICES=y # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set @@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y # CONFIG_PROC_PAGE_MONITOR is not set CONFIG_TMPFS=y CONFIG_CONFIGFS_FS=y -# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_UBIFS_FS=y # CONFIG_NETWORK_FILESYSTEMS is not set CONFIG_NLS=y CONFIG_NLS_CODEPAGE_437=y diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S index 8c6f508e59de..d7b99180c6e1 100644 --- a/arch/mips/dec/int-handler.S +++ b/arch/mips/dec/int-handler.S @@ -5,7 +5,7 @@ * Written by Ralf Baechle and Andreas Busse, modified for DECstation * support by Paul Antoine and Harald Koerfgen. * - * completly rewritten: + * completely rewritten: * Copyright (C) 1998 Harald Koerfgen * * Rewritten extensively for controller-driven IRQ support diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c index 5537b94572b2..0d75b5a0bad4 100644 --- a/arch/mips/fw/arc/memory.c +++ b/arch/mips/fw/arc/memory.c @@ -9,7 +9,7 @@ * PROM library functions for acquiring/using memory descriptors given to us * from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set * because on some machines like SGI IP27 the ARC memory configuration data - * completly bogus and alternate easier to use mechanisms are available. + * completely bogus and alternate easier to use mechanisms are available. 
*/ #include <linux/init.h> #include <linux/kernel.h> diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index e7dc785a91ca..af12c1f9f1a8 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -102,7 +102,7 @@ extern void cpu_probe(void); extern void cpu_report(void); extern const char *__cpu_name[]; -#define cpu_name_string() __cpu_name[smp_processor_id()] +#define cpu_name_string() __cpu_name[raw_smp_processor_id()] struct seq_file; struct notifier_block; diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index cf92fe733995..c4873e8594ef 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -141,7 +141,7 @@ octeon_main_processor: .endm /* - * Do SMP slave processor setup necessary before we can savely execute C code. + * Do SMP slave processor setup necessary before we can safely execute C code. */ .macro smp_slave_setup .endm diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h index 13b0751b010a..a229297c880b 100644 --- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h @@ -16,7 +16,7 @@ .endm /* - * Do SMP slave processor setup necessary before we can savely execute C code. + * Do SMP slave processor setup necessary before we can safely execute C code. */ .macro smp_slave_setup .endm diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h index cf4384bfa846..b0b7261ff3ad 100644 --- a/arch/mips/include/asm/mach-ip27/irq.h +++ b/arch/mips/include/asm/mach-ip27/irq.h @@ -11,7 +11,7 @@ #define __ASM_MACH_IP27_IRQ_H /* - * A hardwired interrupt number is completly stupid for this system - a + * A hardwired interrupt number is completely stupid for this system - a * large configuration might have thousands if not tenthousands of * interrupts. */ diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h index b087cb83da3a..f992c1db876b 100644 --- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h @@ -81,7 +81,7 @@ .endm /* - * Do SMP slave processor setup necessary before we can savely execute C code. + * Do SMP slave processor setup necessary before we can safely execute C code. */ .macro smp_slave_setup GET_NASID_ASM t1 diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h index bf8c3e1860e7..7c7708a23baa 100644 --- a/arch/mips/include/asm/mach-jz4740/gpio.h +++ b/arch/mips/include/asm/mach-jz4740/gpio.h @@ -27,7 +27,7 @@ enum jz_gpio_function { /* Usually a driver for a SoC component has to request several gpio pins and - configure them as funcion pins. + configure them as function pins. jz_gpio_bulk_request can be used to ease this process. Usually one would do something like: diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h index b196825a1de9..d4635391c36a 100644 --- a/arch/mips/include/asm/mips-cm.h +++ b/arch/mips/include/asm/mips-cm.h @@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base; * This function returns the physical base address of the Coherence Manager * global control block, or 0 if no Coherence Manager is present. 
It provides * a default implementation which reads the CMGCRBase register where available, - * and may be overriden by platforms which determine this address in a + * and may be overridden by platforms which determine this address in a * different way by defining a function with the same prototype except for the * name mips_cm_phys_base (without underscores). */ diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h index 1f6ea8352ca9..20621e1ca238 100644 --- a/arch/mips/include/asm/mips-r2-to-r6-emul.h +++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h @@ -79,7 +79,7 @@ struct r2_decoder_table { }; -extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, const char *str); #ifndef CONFIG_MIPSR2_TO_R6_EMULATOR diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h index f7dd17d0dc22..f4f1996e0fac 100644 --- a/arch/mips/include/asm/octeon/cvmx-config.h +++ b/arch/mips/include/asm/octeon/cvmx-config.h @@ -33,7 +33,7 @@ /* Packet buffers */ #define CVMX_FPA_PACKET_POOL (0) #define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE -/* Work queue entrys */ +/* Work queue entries */ #define CVMX_FPA_WQE_POOL (1) #define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE /* PKO queue command buffers */ diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h index 19e139c9f337..3e982e0c397e 100644 --- a/arch/mips/include/asm/octeon/cvmx.h +++ b/arch/mips/include/asm/octeon/cvmx.h @@ -189,7 +189,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr) static inline void *cvmx_phys_to_ptr(uint64_t physical_address) { if (sizeof(void *) == 8) { - /* Just set the top bit, avoiding any TLB uglyness */ + /* Just set the top bit, avoiding any TLB ugliness */ return CASTPTR(void, CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS, physical_address)); diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h index 8d7a63b52ac7..3206245d1ed6 100644 --- a/arch/mips/include/asm/pci/bridge.h +++ b/arch/mips/include/asm/pci/bridge.h @@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s { union { u32 cmd_word; struct { - u32 didn:4, /* Destination ID */ - sidn:4, /* Source ID */ - pactyp:4, /* Packet type */ - tnum:5, /* Trans Number */ - coh:1, /* Coh Transacti */ - ds:2, /* Data size */ - gbr:1, /* GBR enable */ - vbpm:1, /* VBPM message */ + u32 didn:4, /* Destination ID */ + sidn:4, /* Source ID */ + pactyp:4, /* Packet type */ + tnum:5, /* Trans Number */ + coh:1, /* Coh Transaction */ + ds:2, /* Data size */ + gbr:1, /* GBR enable */ + vbpm:1, /* VBPM message */ error:1, /* Error occurred */ - barr:1, /* Barrier op */ + barr:1, /* Barrier op */ rsvd:8; } berr_st; } berr_un; diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h index 59920b345942..4a9c99050c13 100644 --- a/arch/mips/include/asm/sgi/hpc3.h +++ b/arch/mips/include/asm/sgi/hpc3.h @@ -147,7 +147,7 @@ struct hpc3_ethregs { #define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */ #define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */ #define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */ -#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */ +#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */ u32 _unused2[0x1000/4 - 8]; /* padding */ diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h index 
26ddfff28c8e..105a9479ac5f 100644 --- a/arch/mips/include/asm/sgiarcs.h +++ b/arch/mips/include/asm/sgiarcs.h @@ -144,7 +144,7 @@ struct linux_tinfo { struct linux_vdirent { ULONG namelen; unsigned char attr; - char fname[32]; /* XXX imperical, should be a define */ + char fname[32]; /* XXX empirical, should be a define */ }; /* Other stuff for files. */ @@ -179,7 +179,7 @@ struct linux_finfo { enum linux_devtypes dtype; unsigned long namelen; unsigned char attr; - char name[32]; /* XXX imperical, should be define */ + char name[32]; /* XXX empirical, should be define */ }; /* This describes the vector containing function pointers to the ARC diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h index e33f0363235b..feb385180f87 100644 --- a/arch/mips/include/asm/sn/ioc3.h +++ b/arch/mips/include/asm/sn/ioc3.h @@ -355,7 +355,7 @@ struct ioc3_etxd { #define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */ #define SSCR_RESET 0x80000000 /* reset DMA channels */ -/* all producer/comsumer pointers are the same bitfield */ +/* all producer/consumer pointers are the same bitfield */ #define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */ #define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */ #define PROD_CONS_PTR_OFF 3 diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h index 5998b13e9764..57ece90f8cf1 100644 --- a/arch/mips/include/asm/sn/sn0/hubio.h +++ b/arch/mips/include/asm/sn/sn0/hubio.h @@ -628,7 +628,7 @@ typedef union h1_icrbb_u { /* * Values for field imsgtype */ -#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */ +#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */ #define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */ #define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */ #define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 095ecafe6bd3..7f109d4f64a4 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -95,7 +95,7 @@ static inline bool eva_kernel_access(void) } /* - * Is a address valid? This does a straighforward calculation rather + * Is a address valid? This does a straightforward calculation rather * than tests. 
* * Address valid if: diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 3129795de940..24ad815c7f38 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -381,16 +381,18 @@ #define __NR_membarrier (__NR_Linux + 358) #define __NR_mlock2 (__NR_Linux + 359) #define __NR_copy_file_range (__NR_Linux + 360) +#define __NR_preadv2 (__NR_Linux + 361) +#define __NR_pwritev2 (__NR_Linux + 362) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 360 +#define __NR_Linux_syscalls 362 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 360 +#define __NR_O32_Linux_syscalls 362 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -719,16 +721,18 @@ #define __NR_membarrier (__NR_Linux + 318) #define __NR_mlock2 (__NR_Linux + 319) #define __NR_copy_file_range (__NR_Linux + 320) +#define __NR_preadv2 (__NR_Linux + 321) +#define __NR_pwritev2 (__NR_Linux + 322) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 320 +#define __NR_Linux_syscalls 322 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 320 +#define __NR_64_Linux_syscalls 322 #if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1061,15 +1065,17 @@ #define __NR_membarrier (__NR_Linux + 322) #define __NR_mlock2 (__NR_Linux + 323) #define __NR_copy_file_range (__NR_Linux + 324) +#define __NR_preadv2 (__NR_Linux + 325) +#define __NR_pwritev2 (__NR_Linux + 326) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 324 +#define __NR_Linux_syscalls 326 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 324 +#define __NR_N32_Linux_syscalls 326 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 1448c1f43d4e..760217bbb2fa 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c @@ -24,7 +24,7 @@ static char *cm2_tr[8] = { "0x04", "cpc", "0x06", "0x07" }; -/* CM3 Tag ECC transation type */ +/* CM3 Tag ECC transaction type */ static char *cm3_tr[16] = { [0x0] = "ReqNoData", [0x1] = "0x1", diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index 1f5aac7f9ec3..3fff89ae760b 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -940,42 +940,42 @@ repeat: switch (rt) { case tgei_op: if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) - do_trap_or_bp(regs, 0, "TGEI"); + do_trap_or_bp(regs, 0, 0, "TGEI"); MIPS_R2_STATS(traps); break; case tgeiu_op: if (regs->regs[rs] >= MIPSInst_UIMM(inst)) - do_trap_or_bp(regs, 0, "TGEIU"); + do_trap_or_bp(regs, 0, 0, "TGEIU"); MIPS_R2_STATS(traps); break; case tlti_op: if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) - do_trap_or_bp(regs, 0, "TLTI"); + do_trap_or_bp(regs, 0, 0, "TLTI"); MIPS_R2_STATS(traps); break; case tltiu_op: if (regs->regs[rs] < MIPSInst_UIMM(inst)) - do_trap_or_bp(regs, 0, "TLTIU"); + do_trap_or_bp(regs, 0, 0, "TLTIU"); MIPS_R2_STATS(traps); break; case teqi_op: if (regs->regs[rs] == MIPSInst_SIMM(inst)) - do_trap_or_bp(regs, 0, "TEQI"); + do_trap_or_bp(regs, 0, 0, "TEQI"); MIPS_R2_STATS(traps); break; case tnei_op: if (regs->regs[rs] != MIPSInst_SIMM(inst)) - do_trap_or_bp(regs, 0, "TNEI"); + do_trap_or_bp(regs, 0, 0, "TNEI"); MIPS_R2_STATS(traps); diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c index 2b70723071c3..9083d63b765c 100644 --- 
a/arch/mips/kernel/module-rela.c +++ b/arch/mips/kernel/module-rela.c @@ -109,9 +109,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, struct module *me) { Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr; + int (*handler)(struct module *me, u32 *location, Elf_Addr v); Elf_Sym *sym; u32 *location; - unsigned int i; + unsigned int i, type; Elf_Addr v; int res; @@ -134,9 +135,21 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, return -ENOENT; } - v = sym->st_value + rel[i].r_addend; + type = ELF_MIPS_R_TYPE(rel[i]); + + if (type < ARRAY_SIZE(reloc_handlers_rela)) + handler = reloc_handlers_rela[type]; + else + handler = NULL; - res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v); + if (!handler) { + pr_err("%s: Unknown relocation type %u\n", + me->name, type); + return -EINVAL; + } + + v = sym->st_value + rel[i].r_addend; + res = handler(me, location, v); if (res) return res; } diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c index 1833f5171ccd..f9b2936d598d 100644 --- a/arch/mips/kernel/module.c +++ b/arch/mips/kernel/module.c @@ -197,9 +197,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, struct module *me) { Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; + int (*handler)(struct module *me, u32 *location, Elf_Addr v); Elf_Sym *sym; u32 *location; - unsigned int i; + unsigned int i, type; Elf_Addr v; int res; @@ -223,9 +224,21 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, return -ENOENT; } - v = sym->st_value; + type = ELF_MIPS_R_TYPE(rel[i]); + + if (type < ARRAY_SIZE(reloc_handlers_rel)) + handler = reloc_handlers_rel[type]; + else + handler = NULL; - res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); + if (!handler) { + pr_err("%s: Unknown relocation type %u\n", + me->name, type); + return -EINVAL; + } + + v = sym->st_value; + res = handler(me, location, v); if (res) return res; } diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index d7b8dd43147a..9bc1191b1ab0 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -530,7 +530,7 @@ static void mipspmu_enable(struct pmu *pmu) /* * MIPS performance counters can be per-TC. The control registers can - * not be directly accessed accross CPUs. Hence if we want to do global + * not be directly accessed across CPUs. Hence if we want to do global * control, we need cross CPU calls. on_each_cpu() can help us, but we * can not make sure this function is called with interrupts enabled. So * here we pause local counters and then grab a rwlock and leave the diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c index f63a289977cc..fa3f9ebad8f4 100644 --- a/arch/mips/kernel/pm-cps.c +++ b/arch/mips/kernel/pm-cps.c @@ -472,7 +472,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) /* * Disable all but self interventions. The load from COHCTL is defined * by the interAptiv & proAptiv SUMs as ensuring that the operation - * resulting from the preceeding store is complete. + * resulting from the preceding store is complete. 
*/ uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); uasm_i_sw(&p, t0, 0, r_pcohctl); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index eddd5fd6fdfa..92880cee449e 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -615,7 +615,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) * allows us to only worry about whether an FP mode switch is in * progress when FP is first used in a tasks time slice. Pretty much all * of the mode switch overhead can thus be confined to cases where mode - * switches are actually occuring. That is, to here. However for the + * switches are actually occurring. That is, to here. However for the * thread performing the mode switch it may take a while... */ if (num_online_cpus() > 1) { diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index a56317444bda..d01fe53a6638 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -596,3 +596,5 @@ EXPORT(sys_call_table) PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range /* 4360 */ + PTR sys_preadv2 + PTR sys_pwritev2 diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 2b2dc14610d0..6b73ecc02597 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -434,4 +434,6 @@ EXPORT(sys_call_table) PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range /* 5320 */ + PTR sys_preadv2 + PTR sys_pwritev2 .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 2bf5c8593d91..71f99d5f7a06 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -424,4 +424,6 @@ EXPORT(sysn32_call_table) PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range + PTR compat_sys_preadv2 /* 6325 */ + PTR compat_sys_pwritev2 .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index c5b759e584c7..91b43eea2d5a 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -579,4 +579,6 @@ EXPORT(sys32_call_table) PTR sys_membarrier PTR sys_mlock2 PTR sys_copy_file_range /* 4360 */ + PTR compat_sys_preadv2 + PTR compat_sys_pwritev2 .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 37708d9af638..27cb638f0824 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -243,6 +243,18 @@ static int __init mips_smp_ipi_init(void) struct irq_domain *ipidomain; struct device_node *node; + /* + * In some cases like qemu-malta, it is desired to try SMP with + * a single core. Qemu-malta has no GIC, so an attempt to set any IPIs + * would cause a BUG_ON() to be triggered since there's no ipidomain. + * + * Since for a single core system IPIs aren't really required, skip the + * initialisation which should generally keep any such configurations + * happy and only fail hard when trying to truly run SMP.
+ */ + if (cpumask_weight(cpu_possible_mask) == 1) + return 0; + node = of_irq_find_parent(of_root); ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index bf14da9f3e33..ae0c89d23ad7 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -56,6 +56,7 @@ #include <asm/pgtable.h> #include <asm/ptrace.h> #include <asm/sections.h> +#include <asm/siginfo.h> #include <asm/tlbdebug.h> #include <asm/traps.h> #include <asm/uaccess.h> @@ -871,7 +872,7 @@ out: exception_exit(prev_state); } -void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, const char *str) { siginfo_t info = { 0 }; @@ -928,7 +929,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code, default: scnprintf(b, sizeof(b), "%s instruction in kernel code", str); die_if_kernel(b, regs); - force_sig(SIGTRAP, current); + if (si_code) { + info.si_signo = SIGTRAP; + info.si_code = si_code; + force_sig_info(SIGTRAP, &info, current); + } else { + force_sig(SIGTRAP, current); + } } } @@ -1012,7 +1019,7 @@ asmlinkage void do_bp(struct pt_regs *regs) break; } - do_trap_or_bp(regs, bcode, "Break"); + do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break"); out: set_fs(seg); @@ -1054,7 +1061,7 @@ asmlinkage void do_tr(struct pt_regs *regs) tcode = (opcode >> 6) & ((1 << 10) - 1); } - do_trap_or_bp(regs, tcode, "Trap"); + do_trap_or_bp(regs, tcode, 0, "Trap"); out: set_fs(seg); @@ -1115,19 +1122,7 @@ no_r2_instr: if (unlikely(compute_return_epc(regs) < 0)) goto out; - if (get_isa16_mode(regs->cp0_epc)) { - unsigned short mmop[2] = { 0 }; - - if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0)) - status = SIGSEGV; - if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0)) - status = SIGSEGV; - opcode = mmop[0]; - opcode = (opcode << 16) | mmop[1]; - - if (status < 0) - status = simulate_rdhwr_mm(regs, opcode); - } else { + if (!get_isa16_mode(regs->cp0_epc)) { if (unlikely(get_user(opcode, epc) < 0)) status = SIGSEGV; @@ -1142,6 +1137,18 @@ no_r2_instr: if (status < 0) status = simulate_fp(regs, opcode, old_epc, old31); + } else if (cpu_has_mmips) { + unsigned short mmop[2] = { 0 }; + + if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0)) + status = SIGSEGV; + if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0)) + status = SIGSEGV; + opcode = mmop[0]; + opcode = (opcode << 16) | mmop[1]; + + if (status < 0) + status = simulate_rdhwr_mm(regs, opcode); } if (status < 0) @@ -1492,6 +1499,7 @@ asmlinkage void do_mdmx(struct pt_regs *regs) */ asmlinkage void do_watch(struct pt_regs *regs) { + siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT }; enum ctx_state prev_state; u32 cause; @@ -1512,7 +1520,7 @@ asmlinkage void do_watch(struct pt_regs *regs) if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { mips_read_watch_registers(); local_irq_enable(); - force_sig(SIGTRAP, current); + force_sig_info(SIGTRAP, &info, current); } else { mips_clear_watch_registers(); local_irq_enable(); @@ -2214,7 +2222,7 @@ void __init trap_init(void) /* * Copy the generic exception handlers to their final destination. - * This will be overriden later as suitable for a particular + * This will be overridden later as suitable for a particular * configuration. 
*/ set_handler(0x180, &except_vec3_generic, 0x80); diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 490cea569d57..5c62065cbf22 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, { union mips_instruction insn; unsigned long value; - unsigned int res; + unsigned int res, preempted; unsigned long origpc; unsigned long orig31; void __user *fault_addr = NULL; @@ -1226,27 +1226,36 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, sizeof(*fpr))) goto sigbus; - /* - * Disable preemption to avoid a race between copying - * state from userland, migrating to another CPU and - * updating the hardware vector register below. - */ - preempt_disable(); - - res = __copy_from_user_inatomic(fpr, addr, - sizeof(*fpr)); - if (res) - goto fault; - - /* - * Update the hardware register if it is in use by the - * task in this quantum, in order to avoid having to - * save & restore the whole vector context. - */ - if (test_thread_flag(TIF_USEDMSA)) - write_msa_wr(wd, fpr, df); + do { + /* + * If we have live MSA context keep track of + * whether we get preempted in order to avoid + * the register context we load being clobbered + * by the live context as it's saved during + * preemption. If we don't have live context + * then it can't be saved to clobber the value + * we load. + */ + preempted = test_thread_flag(TIF_USEDMSA); + + res = __copy_from_user_inatomic(fpr, addr, + sizeof(*fpr)); + if (res) + goto fault; - preempt_enable(); + /* + * Update the hardware register if it is in use + * by the task in this quantum, in order to + * avoid having to save & restore the whole + * vector context. + */ + preempt_disable(); + if (test_thread_flag(TIF_USEDMSA)) { + write_msa_wr(wd, fpr, df); + preempted = 0; + } + preempt_enable(); + } while (preempted); break; case msa_st_op: diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c index a08c43946247..e0e1d0a611fc 100644 --- a/arch/mips/kvm/tlb.c +++ b/arch/mips/kvm/tlb.c @@ -632,7 +632,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); - /* Alocate new kernel and user ASIDs if needed */ + /* Allocate new kernel and user ASIDs if needed */ local_irq_save(flags); diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c index ad988000563f..c4038d2a724c 100644 --- a/arch/mips/kvm/trap_emul.c +++ b/arch/mips/kvm/trap_emul.c @@ -500,7 +500,7 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); /* - * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5) + * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ kvm_write_c0_guest_intctl(cop0, 0xFC000000); diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c index ad3c73436777..47d26c805eac 100644 --- a/arch/mips/math-emu/ieee754dp.c +++ b/arch/mips/math-emu/ieee754dp.c @@ -97,7 +97,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm) { assert(xm); /* we don't gen exact zeros (probably should) */ - assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */ + assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */ assert(xm & (DP_HIDDEN_BIT << 3)); if (xe < DP_EMIN) { @@ -165,7 +165,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm) /* strip grs bits */ xm >>= 3; - assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ + assert((xm 
>> (DP_FBITS + 1)) == 0); /* no excess */ assert(xe >= DP_EMIN); if (xe > DP_EMAX) { @@ -198,7 +198,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm) ieee754_setcx(IEEE754_UNDERFLOW); return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm); } else { - assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */ + assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */ assert(xm & DP_HIDDEN_BIT); return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT); diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c index def00ffc50fc..e0b2c450b963 100644 --- a/arch/mips/math-emu/ieee754sp.c +++ b/arch/mips/math-emu/ieee754sp.c @@ -97,7 +97,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) { assert(xm); /* we don't gen exact zeros (probably should) */ - assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */ + assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */ assert(xm & (SP_HIDDEN_BIT << 3)); if (xe < SP_EMIN) { @@ -163,7 +163,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) /* strip grs bits */ xm >>= 3; - assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ + assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */ assert(xe >= SP_EMIN); if (xe > SP_EMAX) { @@ -196,7 +196,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm) ieee754_setcx(IEEE754_UNDERFLOW); return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm); } else { - assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */ + assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */ assert(xm & SP_HIDDEN_BIT); return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT); diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c index dc7c5a5214a9..026cb59a914d 100644 --- a/arch/mips/mm/sc-ip22.c +++ b/arch/mips/mm/sc-ip22.c @@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void) return 1; } -/* XXX Check with wje if the Indy caches can differenciate between +/* XXX Check with wje if the Indy caches can differentiate between writeback + invalidate and just invalidate. */ static struct bcache_ops indy_sc_ops = { .bc_enable = indy_sc_enable, diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 5037d5868cef..c17d7627f872 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -19,6 +19,7 @@ #include <asm/cpu.h> #include <asm/cpu-type.h> #include <asm/bootinfo.h> +#include <asm/hazards.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/tlb.h> @@ -486,6 +487,10 @@ static void r4k_tlb_configure(void) * be set to fixed-size pages. */ write_c0_pagemask(PM_DEFAULT_MASK); + back_to_back_c0_hazard(); + if (read_c0_pagemask() != PM_DEFAULT_MASK) + panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE); + write_c0_wired(0); if (current_cpu_type() == CPU_R10000 || current_cpu_type() == CPU_R12000 || diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 5a04b6f5c6fb..84c6e3fda84a 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -12,7 +12,7 @@ * Copyright (C) 2011 MIPS Technologies, Inc. * * ... and the days got worse and worse and now you see - * I've gone completly out of my mind. + * I've gone completely out of my mind. * * They're coming to take me a away haha * they're coming to take me a away hoho hihi haha diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 8d0eb2643248..f1f88291451e 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -7,7 +7,7 @@ * Copyright (C) 2000 by Silicon Graphics, Inc. 
* Copyright (C) 2004 by Christoph Hellwig * - * On SGI IP27 the ARC memory configuration data is completly bogus but + * On SGI IP27 the ARC memory configuration data is completely bogus but * alternate easier to use mechanisms are available. */ #include <linux/init.h> diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index d4dd6e58682c..7955e43f3f3f 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -44,20 +44,18 @@ static inline long access_ok(int type, const void __user * addr, #define LDD_USER(ptr) BUILD_BUG() #define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr) #define STD_USER(x, ptr) __put_user_asm64(x, ptr) -#define ASM_WORD_INSN ".word\t" #else #define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr) #define LDD_USER(ptr) __get_user_asm("ldd", ptr) #define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr) #define STD_USER(x, ptr) __put_user_asm("std", x, ptr) -#define ASM_WORD_INSN ".dword\t" #endif /* - * The exception table contains two values: the first is an address - * for an instruction that is allowed to fault, and the second is - * the address to the fixup routine. Even on a 64bit kernel we could - * use a 32bit (unsigned int) address here. + * The exception table contains two values: the first is the relative offset to + * the address of the instruction that is allowed to fault, and the second is + * the relative offset to the address of the fixup routine. Since relative + * addresses are used, 32bit values are sufficient even on 64bit kernel. */ #define ARCH_HAS_RELATIVE_EXTABLE @@ -77,6 +75,7 @@ struct exception_table_entry { */ struct exception_data { unsigned long fault_ip; + unsigned long fault_gp; unsigned long fault_space; unsigned long fault_addr; }; diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index d2f62570a7b1..78d30d2ea2d8 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c @@ -299,6 +299,7 @@ int main(void) #endif BLANK(); DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip)); + DEFINE(EXCDATA_GP, offsetof(struct exception_data, fault_gp)); DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space)); DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr)); BLANK(); diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 91c2a39cd5aa..67001277256c 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page) if (!mapping) return; - pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff = page->index; /* We have carefully arranged in arch_get_unmapped_area() that * *any* mappings of a file are always congruently mapped (whether diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index b9d75d9fa9ac..a0ecdb4abcc8 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -660,6 +660,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, } *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val); break; + case R_PARISC_PCREL32: + /* 32-bit PC relative address */ + *loc = val - dot - 8 + addend; + break; default: printk(KERN_ERR "module %s: Unknown relocation: %u\n", @@ -788,6 +792,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, CHECK_RELOC(val, 22); *loc = (*loc & ~0x3ff1ffd) | reassemble_22(val); break; + case R_PARISC_PCREL32: + /* 32-bit PC relative address */ + *loc = val - dot - 8 + addend; + break; case R_PARISC_DIR64: /* 64-bit effective address */ *loc64 = val + addend; 
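[Editorial note] The two parisc hunks above add an R_PARISC_PCREL32 case to apply_relocate_add(): the value stored at the relocation site is the target address relative to the site itself (dot), minus 8 to account for the PA-RISC instruction pipeline offset, plus the addend. A minimal standalone C sketch of that arithmetic, with hypothetical names chosen for illustration (this is not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	/* Compute the value the new R_PARISC_PCREL32 case stores at *loc. */
	static uint32_t pcrel32(uint32_t val, uint32_t dot, uint32_t addend)
	{
		/* Mirrors "*loc = val - dot - 8 + addend" from the hunk above. */
		return val - dot - 8 + addend;
	}

	int main(void)
	{
		/* Hypothetical example: symbol at 0x10040, relocation site at 0x10000. */
		printf("stored pc-relative value: 0x%08x\n", pcrel32(0x10040, 0x10000, 0));
		return 0;
	}

With a symbol 0x40 bytes past the site and a zero addend, the stored value is 0x38, i.e. the displacement already adjusted for the 8-byte pipeline skew.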
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 568b2c61ea02..3cad8aadc69e 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -47,11 +47,11 @@ EXPORT_SYMBOL(__cmpxchg_u64); EXPORT_SYMBOL(lclear_user); EXPORT_SYMBOL(lstrnlen_user); -/* Global fixups */ -extern void fixup_get_user_skip_1(void); -extern void fixup_get_user_skip_2(void); -extern void fixup_put_user_skip_1(void); -extern void fixup_put_user_skip_2(void); +/* Global fixups - defined as int to avoid creation of function pointers */ +extern int fixup_get_user_skip_1; +extern int fixup_get_user_skip_2; +extern int fixup_put_user_skip_1; +extern int fixup_put_user_skip_2; EXPORT_SYMBOL(fixup_get_user_skip_1); EXPORT_SYMBOL(fixup_get_user_skip_2); EXPORT_SYMBOL(fixup_put_user_skip_1); diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 16e0735e2f46..97d6b208e129 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -795,6 +795,9 @@ void notrace handle_interruption(int code, struct pt_regs *regs) if (fault_space == 0 && !faulthandler_disabled()) { + /* Clean up and return if in exception table. */ + if (fixup_exception(regs)) + return; pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); parisc_terminate("Kernel Fault", regs, code, fault_address); } diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S index 536ef66bb94b..1052b747e011 100644 --- a/arch/parisc/lib/fixup.S +++ b/arch/parisc/lib/fixup.S @@ -26,6 +26,7 @@ #ifdef CONFIG_SMP .macro get_fault_ip t1 t2 + loadgp addil LT%__per_cpu_offset,%r27 LDREG RT%__per_cpu_offset(%r1),\t1 /* t2 = smp_processor_id() */ @@ -40,14 +41,19 @@ LDREG RT%exception_data(%r1),\t1 /* t1 = this_cpu_ptr(&exception_data) */ add,l \t1,\t2,\t1 + /* %r27 = t1->fault_gp - restore gp */ + LDREG EXCDATA_GP(\t1), %r27 /* t1 = t1->fault_ip */ LDREG EXCDATA_IP(\t1), \t1 .endm #else .macro get_fault_ip t1 t2 + loadgp /* t1 = this_cpu_ptr(&exception_data) */ addil LT%exception_data,%r27 LDREG RT%exception_data(%r1),\t2 + /* %r27 = t2->fault_gp - restore gp */ + LDREG EXCDATA_GP(\t2), %r27 /* t1 = t2->fault_ip */ LDREG EXCDATA_IP(\t2), \t1 .endm diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 26fac9c671c9..16dbe81c97c9 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -145,6 +145,7 @@ int fixup_exception(struct pt_regs *regs) struct exception_data *d; d = this_cpu_ptr(&exception_data); d->fault_ip = regs->iaoq[0]; + d->fault_gp = regs->gr[27]; d->fault_space = regs->isr; d->fault_addr = regs->ior; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 3c07d6b96877..6b3e7c6ee096 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -22,7 +22,7 @@ #include <linux/swap.h> #include <linux/unistd.h> #include <linux/nodemask.h> /* for node_online_map */ -#include <linux/pagemap.h> /* for release_pages and page_cache_release */ +#include <linux/pagemap.h> /* for release_pages */ #include <linux/compat.h> #include <asm/pgalloc.h> diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index dfa863876778..6ca5f0525e57 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = 
PAGE_SHIFT; sb->s_magic = SPUFS_MAGIC; sb->s_op = &s_ops; sb->s_fs_info = info; diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 0f3da2cb2bd6..255c7eec4481 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent) sbi->uid = current_uid(); sbi->gid = current_gid(); sb->s_fs_info = sbi; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = HYPFS_MAGIC; sb->s_op = &hypfs_s_ops; if (hypfs_parse_options(data, sb)) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 69247b4dcc43..cace818d86eb 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -23,7 +23,7 @@ /** * gmap_alloc - allocate a guest address space * @mm: pointer to the parent mm_struct - * @limit: maximum size of the gmap address space + * @limit: maximum address of the gmap address space * * Returns a guest address space structure. */ @@ -292,7 +292,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, if ((from | to | len) & (PMD_SIZE - 1)) return -EINVAL; if (len == 0 || from + len < from || to + len < to || - from + len > TASK_MAX_SIZE || to + len > gmap->asce_end) + from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end) return -EINVAL; flush = 0; diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index f62a9f37f79f..b7e394485a5f 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -43,7 +43,7 @@ #define KVM_PIO_PAGE_OFFSET 1 #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 -#define KVM_HALT_POLL_NS_DEFAULT 500000 +#define KVM_HALT_POLL_NS_DEFAULT 400000 #define KVM_IRQCHIP_NUM_PINS KVM_IOAPIC_NUM_PINS diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 426e946ed0c0..5b3c9a55f51c 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -167,6 +167,14 @@ #define MSR_PKG_C9_RESIDENCY 0x00000631 #define MSR_PKG_C10_RESIDENCY 0x00000632 +/* Interrupt Response Limit */ +#define MSR_PKGC3_IRTL 0x0000060a +#define MSR_PKGC6_IRTL 0x0000060b +#define MSR_PKGC7_IRTL 0x0000060c +#define MSR_PKGC8_IRTL 0x00000633 +#define MSR_PKGC9_IRTL 0x00000634 +#define MSR_PKGC10_IRTL 0x00000635 + /* Run Time Average Power Limiting (RAPL) Interface */ #define MSR_RAPL_POWER_UNIT 0x00000606 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 2367ae07eb76..319b08a5b6ed 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -146,31 +146,6 @@ int default_check_phys_apicid_present(int phys_apicid) struct boot_params boot_params; -/* - * Machine setup.. 
- */ -static struct resource data_resource = { - .name = "Kernel data", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM -}; - -static struct resource code_resource = { - .name = "Kernel code", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM -}; - -static struct resource bss_resource = { - .name = "Kernel bss", - .start = 0, - .end = 0, - .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM -}; - - #ifdef CONFIG_X86_32 /* cpu data as detected by the assembly code in head.S */ struct cpuinfo_x86 new_cpu_data = { @@ -949,13 +924,6 @@ void __init setup_arch(char **cmdline_p) mpx_mm_init(&init_mm); - code_resource.start = __pa_symbol(_text); - code_resource.end = __pa_symbol(_etext)-1; - data_resource.start = __pa_symbol(_etext); - data_resource.end = __pa_symbol(_edata)-1; - bss_resource.start = __pa_symbol(__bss_start); - bss_resource.end = __pa_symbol(__bss_stop)-1; - #ifdef CONFIG_CMDLINE_BOOL #ifdef CONFIG_CMDLINE_OVERRIDE strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); @@ -1019,11 +987,6 @@ void __init setup_arch(char **cmdline_p) x86_init.resources.probe_roms(); - /* after parse_early_param, so could debug it */ - insert_resource(&iomem_resource, &code_resource); - insert_resource(&iomem_resource, &data_resource); - insert_resource(&iomem_resource, &bss_resource); - e820_add_kernel_range(); trim_bios_range(); #ifdef CONFIG_X86_32 diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c index 5ff3485acb60..01bd7b7a6866 100644 --- a/arch/x86/kvm/hyperv.c +++ b/arch/x86/kvm/hyperv.c @@ -1116,6 +1116,11 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) break; case HVCALL_POST_MESSAGE: case HVCALL_SIGNAL_EVENT: + /* don't bother userspace if it has no way to handle it */ + if (!vcpu_to_synic(vcpu)->active) { + res = HV_STATUS_INVALID_HYPERCALL_CODE; + break; + } vcpu->run->exit_reason = KVM_EXIT_HYPERV; vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; vcpu->run->hyperv.u.hcall.input = param; diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 443d2a57ad3d..1a2da0e5a373 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1369,7 +1369,7 @@ static void start_apic_timer(struct kvm_lapic *apic) hrtimer_start(&apic->lapic_timer.timer, ktime_add_ns(now, apic->lapic_timer.period), - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_PINNED); apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016" PRIx64 ", " @@ -1402,7 +1402,7 @@ static void start_apic_timer(struct kvm_lapic *apic) expire = ktime_add_ns(now, ns); expire = ktime_sub_ns(expire, lapic_timer_advance_ns); hrtimer_start(&apic->lapic_timer.timer, - expire, HRTIMER_MODE_ABS); + expire, HRTIMER_MODE_ABS_PINNED); } else apic_timer_expired(apic); @@ -1868,7 +1868,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); + HRTIMER_MODE_ABS_PINNED); apic->lapic_timer.timer.function = apic_timer_fn; /* @@ -2003,7 +2003,7 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) timer = &vcpu->arch.apic->lapic_timer.timer; if (hrtimer_cancel(timer)) - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED); } /* diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 70e95d097ef1..1ff4dbb73fb7 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -557,8 +557,15 @@ static bool mmu_spte_update(u64 *sptep, u64 new_spte) !is_writable_pte(new_spte)) ret = true; - if (!shadow_accessed_mask) + if (!shadow_accessed_mask) { + /* + * We 
don't set page dirty when dropping non-writable spte. + * So do it now if the new spte is becoming non-writable. + */ + if (ret) + kvm_set_pfn_dirty(spte_to_pfn(old_spte)); return ret; + } /* * Flush TLB when accessed/dirty bits are changed in the page tables, @@ -605,7 +612,8 @@ static int mmu_spte_clear_track_bits(u64 *sptep) if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); - if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask)) + if (old_spte & (shadow_dirty_mask ? shadow_dirty_mask : + PT_WRITABLE_MASK)) kvm_set_pfn_dirty(pfn); return 1; } diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 742d0f7d3556..0a2c70e43bc8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6095,12 +6095,10 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) } /* try to inject new event if pending */ - if (vcpu->arch.nmi_pending) { - if (kvm_x86_ops->nmi_allowed(vcpu)) { - --vcpu->arch.nmi_pending; - vcpu->arch.nmi_injected = true; - kvm_x86_ops->set_nmi(vcpu); - } + if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { + --vcpu->arch.nmi_pending; + vcpu->arch.nmi_injected = true; + kvm_x86_ops->set_nmi(vcpu); } else if (kvm_cpu_has_injectable_intr(vcpu)) { /* * Because interrupts can be injected asynchronously, we are @@ -6569,10 +6567,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) if (inject_pending_event(vcpu, req_int_win) != 0) req_immediate_exit = true; /* enable NMI/IRQ window open exits if needed */ - else if (vcpu->arch.nmi_pending) - kvm_x86_ops->enable_nmi_window(vcpu); - else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) - kvm_x86_ops->enable_irq_window(vcpu); + else { + if (vcpu->arch.nmi_pending) + kvm_x86_ops->enable_nmi_window(vcpu); + if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) + kvm_x86_ops->enable_irq_window(vcpu); + } if (kvm_lapic_enabled(vcpu)) { update_cr8_intercept(vcpu); diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index abf4901c917b..db52a7fafcc2 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c @@ -66,7 +66,7 @@ static u32 xen_apic_read(u32 reg) ret = HYPERVISOR_platform_op(&op); if (ret) - return 0; + op.u.pcpu_info.apic_id = BAD_APICID; return op.u.pcpu_info.apic_id << 24; } @@ -142,6 +142,14 @@ static void xen_silent_inquire(int apicid) { } +static int xen_cpu_present_to_apicid(int cpu) +{ + if (cpu_present(cpu)) + return xen_get_apic_id(xen_apic_read(APIC_ID)); + else + return BAD_APICID; +} + static struct apic xen_pv_apic = { .name = "Xen PV", .probe = xen_apic_probe_pv, @@ -162,7 +170,7 @@ static struct apic xen_pv_apic = { .ioapic_phys_id_map = default_ioapic_phys_id_map, /* Used on 32-bit */ .setup_apic_routing = NULL, - .cpu_present_to_apicid = default_cpu_present_to_apicid, + .cpu_present_to_apicid = xen_cpu_present_to_apicid, .apicid_to_cpu_present = physid_set_mask_of_physid, /* Used on 32-bit */ .check_phys_apicid_present = default_check_phys_apicid_present, /* smp_sanity_check needs it */ .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */ diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 3c6d17fd423a..719cf291dcdf 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -545,6 +545,8 @@ static void xen_play_dead(void) /* used only with HOTPLUG_CPU */ * data back is to call: */ tick_nohz_idle_enter(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); } #else /* !CONFIG_HOTPLUG_CPU */ diff --git a/block/bio.c b/block/bio.c index f124a0a624fc..807d25e466ec 100644 --- a/block/bio.c +++ b/block/bio.c @@ -1339,7 +1339,7 @@ struct 
bio *bio_map_user_iov(struct request_queue *q, * release the pages we didn't map into the bio, if any */ while (j < page_limit) - page_cache_release(pages[j++]); + put_page(pages[j++]); } kfree(pages); @@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, for (j = 0; j < nr_pages; j++) { if (!pages[j]) break; - page_cache_release(pages[j]); + put_page(pages[j]); } out: kfree(pages); @@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio) if (bio_data_dir(bio) == READ) set_page_dirty_lock(bvec->bv_page); - page_cache_release(bvec->bv_page); + put_page(bvec->bv_page); } bio_put(bio); @@ -1615,8 +1615,8 @@ static void bio_release_pages(struct bio *bio) * the BIO and the offending pages and re-dirty the pages in process context. * * It is expected that bio_check_pages_dirty() will wholly own the BIO from - * here on. It will run one page_cache_release() against each page and will - * run one bio_put() against the BIO. + * here on. It will run one put_page() against each page and will run one + * bio_put() against the BIO. */ static void bio_dirty_fn(struct work_struct *work); @@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio) struct page *page = bvec->bv_page; if (PageDirty(page) || PageCompound(page)) { - page_cache_release(page); + put_page(page); bvec->bv_page = NULL; } else { nr_clean_pages++; diff --git a/block/blk-core.c b/block/blk-core.c index 827f8badd143..b60537b2c35b 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) goto fail_id; q->backing_dev_info.ra_pages = - (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK; q->backing_dev_info.name = "block"; q->node = node_id; diff --git a/block/blk-settings.c b/block/blk-settings.c index c7bb666aafd1..331e4eee0dda 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto struct queue_limits *limits = &q->limits; unsigned int max_sectors; - if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) { - max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9); + if ((max_hw_sectors << 9) < PAGE_SIZE) { + max_hw_sectors = 1 << (PAGE_SHIFT - 9); printk(KERN_INFO "%s: set to minimum %d\n", __func__, max_hw_sectors); } @@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments); **/ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size) { - if (max_size < PAGE_CACHE_SIZE) { - max_size = PAGE_CACHE_SIZE; + if (max_size < PAGE_SIZE) { + max_size = PAGE_SIZE; printk(KERN_INFO "%s: set to minimum %d\n", __func__, max_size); } @@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain); **/ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask) { - if (mask < PAGE_CACHE_SIZE - 1) { - mask = PAGE_CACHE_SIZE - 1; + if (mask < PAGE_SIZE - 1) { + mask = PAGE_SIZE - 1; printk(KERN_INFO "%s: set to minimum %lx\n", __func__, mask); } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index dd93763057ce..995b58d46ed1 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) static ssize_t queue_ra_show(struct request_queue *q, char *page) { unsigned long ra_kb = q->backing_dev_info.ra_pages << - (PAGE_CACHE_SHIFT - 10); + (PAGE_SHIFT - 10); return queue_var_show(ra_kb, (page)); } @@ -90,7 +90,7 @@ 
queue_ra_store(struct request_queue *q, const char *page, size_t count) if (ret < 0) return ret; - q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10); + q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10); return ret; } @@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page) if (blk_queue_cluster(q)) return queue_var_show(queue_max_segment_size(q), (page)); - return queue_var_show(PAGE_CACHE_SIZE, (page)); + return queue_var_show(PAGE_SIZE, (page)); } static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) @@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) { unsigned long max_sectors_kb, max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, - page_kb = 1 << (PAGE_CACHE_SHIFT - 10); + page_kb = 1 << (PAGE_SHIFT - 10); ssize_t ret = queue_var_store(&max_sectors_kb, page, count); if (ret < 0) diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index e3c591dd8f19..4a349787bc62 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c @@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, * idle timer unplug to continue working. */ if (cfq_cfqq_wait_request(cfqq)) { - if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || + if (blk_rq_bytes(rq) > PAGE_SIZE || cfqd->busy_queues > 1) { cfq_del_timer(cfqd, cfqq); cfq_clear_cfqq_wait_request(cfqq); diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c index f678c733df40..556826ac7cb4 100644 --- a/block/compat_ioctl.c +++ b/block/compat_ioctl.c @@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) return -EINVAL; bdi = blk_get_backing_dev_info(bdev); return compat_put_long(arg, - (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); + (bdi->ra_pages * PAGE_SIZE) / 512); case BLKROGET: /* compatible */ return compat_put_int(arg, bdev_read_only(bdev) != 0); case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ @@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) if (!capable(CAP_SYS_ADMIN)) return -EACCES; bdi = blk_get_backing_dev_info(bdev); - bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; + bdi->ra_pages = (arg * 512) / PAGE_SIZE; return 0; case BLKGETSIZE: size = i_size_read(bdev->bd_inode); diff --git a/block/ioctl.c b/block/ioctl.c index d8996bbd7f12..4ff1f92f89ca 100644 --- a/block/ioctl.c +++ b/block/ioctl.c @@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if (!arg) return -EINVAL; bdi = blk_get_backing_dev_info(bdev); - return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); + return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512); case BLKROGET: return put_int(arg, bdev_read_only(bdev) != 0); case BLKBSZGET: /* get block device soft block size (cf. 
BLKSSZGET) */ @@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, if(!capable(CAP_SYS_ADMIN)) return -EACCES; bdi = blk_get_backing_dev_info(bdev); - bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE; + bdi->ra_pages = (arg * 512) / PAGE_SIZE; return 0; case BLKBSZSET: return blkdev_bszset(bdev, mode, argp); diff --git a/block/partition-generic.c b/block/partition-generic.c index 5d8701941054..2c6ae2aed2c4 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c @@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n) { struct address_space *mapping = bdev->bd_inode->i_mapping; - return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)), - NULL); + return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)), + NULL); } unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) @@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p) if (PageError(page)) goto fail; p->v = page; - return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9); + return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9); fail: - page_cache_release(page); + put_page(page); } p->v = NULL; return NULL; diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index a1e0b9ab847a..5fb7718f256c 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -246,6 +246,8 @@ static int device_wakeup_attach(struct device *dev, struct wakeup_source *ws) return -EEXIST; } dev->power.wakeup = ws; + if (dev->power.wakeirq) + device_wakeup_attach_irq(dev, dev->power.wakeirq); spin_unlock_irq(&dev->power.lock); return 0; } diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index dd73e1ff1759..ec9d8610b25f 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c @@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp) WARN_ON(d->flags & DEVFL_UP); blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); q->backing_dev_info.name = "aoe"; - q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE; + q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE; d->bufpool = mp; d->blkq = gd->queue = q; q->queuedata = d; diff --git a/drivers/block/brd.c b/drivers/block/brd.c index f7ecc287d733..51a071e32221 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector, struct page *page, int rw) { struct brd_device *brd = bdev->bd_disk->private_data; - int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector); + int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector); page_endio(page, rw & WRITE, err); return err; } diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index c227fd4cad75..7a1cf7eaa71d 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1327,8 +1327,8 @@ struct bm_extent { #endif #endif -/* BIO_MAX_SIZE is 256 * PAGE_CACHE_SIZE, - * so for typical PAGE_CACHE_SIZE of 4k, that is (1<<20) Byte. +/* BIO_MAX_SIZE is 256 * PAGE_SIZE, + * so for typical PAGE_SIZE of 4k, that is (1<<20) Byte. * Since we may live in a mixed-platform cluster, * we limit us to a platform agnostic constant here for now. 
* A followup commit may allow even bigger BIO sizes, diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 226eb0c9f0fb..1fd1dccebb6b 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi blk_queue_max_hw_sectors(q, max_hw_sectors); /* This is the workaround for "bio would need to, but cannot, be split" */ blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); - blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1); + blk_queue_segment_boundary(q, PAGE_SIZE-1); if (b) { struct drbd_connection *connection = first_peer_device(device)->connection; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 9c6234428607..94a1843b0426 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1953,7 +1953,7 @@ static struct ceph_osd_request *rbd_osd_req_create( osdc = &rbd_dev->rbd_client->client->osdc; osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, - GFP_ATOMIC); + GFP_NOIO); if (!osd_req) return NULL; /* ENOMEM */ @@ -2002,7 +2002,7 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) rbd_dev = img_request->rbd_dev; osdc = &rbd_dev->rbd_client->client->osdc; osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops, - false, GFP_ATOMIC); + false, GFP_NOIO); if (!osd_req) return NULL; /* ENOMEM */ @@ -2504,7 +2504,7 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, bio_chain_clone_range(&bio_list, &bio_offset, clone_size, - GFP_ATOMIC); + GFP_NOIO); if (!obj_request->bio_list) goto out_unwind; } else if (type == OBJ_REQUEST_PAGES) { diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index f951f911786e..5f8dbe640a20 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -4,9 +4,6 @@ * Copyright (C) 2014 Linaro. * Viresh Kumar <viresh.kumar@linaro.org> * - * The OPP code in function set_target() is reused from - * drivers/cpufreq/omap-cpufreq.c - * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 4b644526fd59..8b5a415ee14a 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -64,6 +64,25 @@ static inline int ceiling_fp(int32_t x) return ret; } +/** + * struct sample - Store performance sample + * @core_pct_busy: Ratio of APERF/MPERF in percent, which is actual + * performance during last sample period + * @busy_scaled: Scaled busy value which is used to calculate next + * P state. This can be different than core_pct_busy + * to account for cpu idle period + * @aperf: Difference of actual performance frequency clock count + * read from APERF MSR between last and current sample + * @mperf: Difference of maximum performance frequency clock count + * read from MPERF MSR between last and current sample + * @tsc: Difference of time stamp counter between last and + * current sample + * @freq: Effective frequency calculated from APERF/MPERF + * @time: Current time from scheduler + * + * This structure is used in the cpudata structure to store performance sample + * data for choosing next P State. 
+ */ struct sample { int32_t core_pct_busy; int32_t busy_scaled; @@ -74,6 +93,20 @@ struct sample { u64 time; }; +/** + * struct pstate_data - Store P state data + * @current_pstate: Current requested P state + * @min_pstate: Min P state possible for this platform + * @max_pstate: Max P state possible for this platform + * @max_pstate_physical: This is the physical Max P state for a processor + * This can be higher than the max_pstate which can + * be limited by platform thermal design power limits + * @scaling: Scaling factor to convert frequency to cpufreq + * frequency units + * @turbo_pstate: Max Turbo P state possible for this platform + * + * Stores the per cpu model P state limits and current P state. + */ struct pstate_data { int current_pstate; int min_pstate; @@ -83,6 +116,19 @@ struct pstate_data { int turbo_pstate; }; +/** + * struct vid_data - Stores voltage information data + * @min: VID data for this platform corresponding to + * the lowest P state + * @max: VID data corresponding to the highest P State. + * @turbo: VID data for turbo P state + * @ratio: Ratio of (vid max - vid min) / + * (max P state - Min P State) + * + * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling). + * This data is used in Atom platforms, where in addition to target P state, + * the voltage data needs to be specified to select next P State. + */ struct vid_data { int min; int max; @@ -90,6 +136,18 @@ struct vid_data { int32_t ratio; }; +/** + * struct _pid - Stores PID data + * @setpoint: Target set point for busyness or performance + * @integral: Storage for accumulated error values + * @p_gain: PID proportional gain + * @i_gain: PID integral gain + * @d_gain: PID derivative gain + * @deadband: PID deadband + * @last_err: Last error storage for integral part of PID calculation + * + * Stores PID coefficients and last error for PID controller. + */ struct _pid { int setpoint; int32_t integral; @@ -100,6 +158,23 @@ struct _pid { int32_t last_err; }; +/** + * struct cpudata - Per CPU instance data storage + * @cpu: CPU number for this instance data + * @update_util: CPUFreq utility callback information + * @pstate: Stores P state limits for this CPU + * @vid: Stores VID limits for this CPU + * @pid: Stores PID parameters for this CPU + * @last_sample_time: Last Sample time + * @prev_aperf: Last APERF value read from APERF MSR + * @prev_mperf: Last MPERF value read from MPERF MSR + * @prev_tsc: Last timestamp counter (TSC) value + * @prev_cummulative_iowait: IO Wait time difference between last and + * current sample + * @sample: Storage for the last Sample data + * + * This structure stores per CPU instance data for all CPUs. + */ struct cpudata { int cpu; @@ -118,6 +193,19 @@ struct cpudata { }; static struct cpudata **all_cpu_data; + +/** + * struct pstate_adjust_policy - Stores static PID configuration data + * @sample_rate_ms: PID calculation sample rate in ms + * @sample_rate_ns: Sample rate calculation in ns + * @deadband: PID deadband + * @setpoint: PID Setpoint + * @p_gain_pct: PID proportional gain + * @i_gain_pct: PID integral gain + * @d_gain_pct: PID derivative gain + * + * Stores per CPU model static PID configuration data.
+ */ struct pstate_adjust_policy { int sample_rate_ms; s64 sample_rate_ns; @@ -128,6 +216,20 @@ struct pstate_adjust_policy { int i_gain_pct; }; +/** + * struct pstate_funcs - Per CPU model specific callbacks + * @get_max: Callback to get maximum non turbo effective P state + * @get_max_physical: Callback to get maximum non turbo physical P state + * @get_min: Callback to get minimum P state + * @get_turbo: Callback to get turbo P state + * @get_scaling: Callback to get frequency scaling factor + * @get_val: Callback to convert P state to actual MSR write value + * @get_vid: Callback to get VID data for Atom platforms + * @get_target_pstate: Callback to a function to calculate next P state to use + * + * Core and Atom CPU models have different ways to get P State limits. This + * structure is used to store those callbacks. + */ struct pstate_funcs { int (*get_max)(void); int (*get_max_physical)(void); @@ -139,6 +241,11 @@ struct pstate_funcs { int32_t (*get_target_pstate)(struct cpudata *); }; +/** + * struct cpu_defaults - Per CPU model default config data + * @pid_policy: PID config data + * @funcs: Callback function data + */ struct cpu_defaults { struct pstate_adjust_policy pid_policy; struct pstate_funcs funcs; @@ -151,6 +258,34 @@ static struct pstate_adjust_policy pid_params; static struct pstate_funcs pstate_funcs; static int hwp_active; + +/** + * struct perf_limits - Store user and policy limits + * @no_turbo: User requested turbo state from intel_pstate sysfs + * @turbo_disabled: Platform turbo status either from the + * MSR_IA32_MISC_ENABLE MSR or when maximum available pstate + * matches the maximum turbo pstate + * @max_perf_pct: Effective maximum performance limit in percentage, this + * is the minimum of either limits enforced by cpufreq policy + * or user-set limits via intel_pstate sysfs + * @min_perf_pct: Effective minimum performance limit in percentage, this + * is the maximum of either limits enforced by cpufreq policy + * or user-set limits via intel_pstate sysfs + * @max_perf: This is a scaled value between 0 and 255 for max_perf_pct + * This value is used to limit max pstate + * @min_perf: This is a scaled value between 0 and 255 for min_perf_pct + * This value is used to limit min pstate + * @max_policy_pct: The maximum performance in percentage enforced by + * cpufreq setpolicy interface + * @max_sysfs_pct: The maximum performance in percentage enforced by + * intel pstate sysfs interface + * @min_policy_pct: The minimum performance in percentage enforced by + * cpufreq setpolicy interface + * @min_sysfs_pct: The minimum performance in percentage enforced by + * intel pstate sysfs interface + * + * Storage for user and policy defined limits. + */ struct perf_limits { int no_turbo; int turbo_disabled; @@ -910,7 +1045,14 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time) cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; cpu->prev_tsc = tsc; - return true; + /* + * First time this function is invoked in a given cycle, all of the + * previous sample data fields are equal to zero or stale and they must + * be populated with meaningful numbers for things to work, so assume + * that sample.time will always be reset before setting the utilization + * update hook and make the caller skip the sample then.
+ */ + return !!cpu->last_sample_time; } static inline int32_t get_avg_frequency(struct cpudata *cpu) @@ -984,8 +1126,7 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) * enough period of time to adjust our busyness. */ duration_ns = cpu->sample.time - cpu->last_sample_time; - if ((s64)duration_ns > pid_params.sample_rate_ns * 3 - && cpu->last_sample_time > 0) { + if ((s64)duration_ns > pid_params.sample_rate_ns * 3) { sample_ratio = div_fp(int_tofp(pid_params.sample_rate_ns), int_tofp(duration_ns)); core_busy = mul_fp(core_busy, sample_ratio); @@ -1100,10 +1241,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum) intel_pstate_get_cpu_pstates(cpu); intel_pstate_busy_pid_reset(cpu); - intel_pstate_sample(cpu, 0); cpu->update_util.func = intel_pstate_update_util; - cpufreq_set_update_util_data(cpunum, &cpu->update_util); pr_debug("intel_pstate: controlling: cpu %d\n", cpunum); @@ -1122,22 +1261,54 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) return get_avg_frequency(cpu); } +static void intel_pstate_set_update_util_hook(unsigned int cpu_num) +{ + struct cpudata *cpu = all_cpu_data[cpu_num]; + + /* Prevent intel_pstate_update_util() from using stale data. */ + cpu->sample.time = 0; + cpufreq_set_update_util_data(cpu_num, &cpu->update_util); +} + +static void intel_pstate_clear_update_util_hook(unsigned int cpu) +{ + cpufreq_set_update_util_data(cpu, NULL); + synchronize_sched(); +} + +static void intel_pstate_set_performance_limits(struct perf_limits *limits) +{ + limits->no_turbo = 0; + limits->turbo_disabled = 0; + limits->max_perf_pct = 100; + limits->max_perf = int_tofp(1); + limits->min_perf_pct = 100; + limits->min_perf = int_tofp(1); + limits->max_policy_pct = 100; + limits->max_sysfs_pct = 100; + limits->min_policy_pct = 0; + limits->min_sysfs_pct = 0; +} + static int intel_pstate_set_policy(struct cpufreq_policy *policy) { if (!policy->cpuinfo.max_freq) return -ENODEV; - if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && - policy->max >= policy->cpuinfo.max_freq) { - pr_debug("intel_pstate: set performance\n"); + intel_pstate_clear_update_util_hook(policy->cpu); + + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { limits = &performance_limits; - if (hwp_active) - intel_pstate_hwp_set(policy->cpus); - return 0; + if (policy->max >= policy->cpuinfo.max_freq) { + pr_debug("intel_pstate: set performance\n"); + intel_pstate_set_performance_limits(limits); + goto out; + } + } else { + pr_debug("intel_pstate: set powersave\n"); + limits = &powersave_limits; } - pr_debug("intel_pstate: set powersave\n"); - limits = &powersave_limits; limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100); limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, @@ -1163,6 +1334,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits->max_perf = div_fp(int_tofp(limits->max_perf_pct), int_tofp(100)); + out: + intel_pstate_set_update_util_hook(policy->cpu); + if (hwp_active) intel_pstate_hwp_set(policy->cpus); @@ -1187,8 +1361,7 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) pr_debug("intel_pstate: CPU %d exiting\n", cpu_num); - cpufreq_set_update_util_data(cpu_num, NULL); - synchronize_sched(); + intel_pstate_clear_update_util_hook(cpu_num); if (hwp_active) return; @@ -1455,8 +1628,7 @@ out: get_online_cpus(); for_each_online_cpu(cpu) { if (all_cpu_data[cpu]) { - cpufreq_set_update_util_data(cpu, NULL); - 
synchronize_sched(); + intel_pstate_clear_update_util_hook(cpu); kfree(all_cpu_data[cpu]); } } diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index fedbff55a7f3..815c4a5cae54 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -77,12 +77,28 @@ static inline u16 fw_cfg_sel_endianness(u16 key) static inline void fw_cfg_read_blob(u16 key, void *buf, loff_t pos, size_t count) { + u32 glk; + acpi_status status; + + /* If we have ACPI, ensure mutual exclusion against any potential + * device access by the firmware, e.g. via AML methods: + */ + status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk); + if (ACPI_FAILURE(status) && status != AE_NOT_CONFIGURED) { + /* Should never get here */ + WARN(1, "fw_cfg_read_blob: Failed to lock ACPI!\n"); + memset(buf, 0, count); + return; + } + mutex_lock(&fw_cfg_dev_lock); iowrite16(fw_cfg_sel_endianness(key), fw_cfg_reg_ctrl); while (pos-- > 0) ioread8(fw_cfg_reg_data); ioread8_rep(fw_cfg_reg_data, buf, count); mutex_unlock(&fw_cfg_dev_lock); + + acpi_release_global_lock(glk); } /* clean up fw_cfg device i/o */ @@ -727,12 +743,18 @@ device_param_cb(mmio, &fw_cfg_cmdline_param_ops, NULL, S_IRUSR); static int __init fw_cfg_sysfs_init(void) { + int ret; + /* create /sys/firmware/qemu_fw_cfg/ top level directory */ fw_cfg_top_ko = kobject_create_and_add("qemu_fw_cfg", firmware_kobj); if (!fw_cfg_top_ko) return -ENOMEM; - return platform_driver_register(&fw_cfg_sysfs_driver); + ret = platform_driver_register(&fw_cfg_sysfs_driver); + if (ret) + fw_cfg_kobj_cleanup(fw_cfg_top_ko); + + return ret; } static void __exit fw_cfg_sysfs_exit(void) diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index d0d3065a7557..e66084c295fb 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -18,6 +18,7 @@ #include <linux/i2c.h> #include <linux/platform_data/pca953x.h> #include <linux/slab.h> +#include <asm/unaligned.h> #include <linux/of_platform.h> #include <linux/acpi.h> @@ -159,7 +160,7 @@ static int pca953x_write_regs(struct pca953x_chip *chip, int reg, u8 *val) switch (chip->chip_type) { case PCA953X_TYPE: ret = i2c_smbus_write_word_data(chip->client, - reg << 1, (u16) *val); + reg << 1, cpu_to_le16(get_unaligned((u16 *)val))); break; case PCA957X_TYPE: ret = i2c_smbus_write_byte_data(chip->client, reg << 1, diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index b2b7b78664b8..76ac906b4d78 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -283,8 +283,8 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip, writel_relaxed(mask, base + (value ? 
GPSR_OFFSET : GPCR_OFFSET)); ret = pinctrl_gpio_direction_output(chip->base + offset); - if (!ret) - return 0; + if (ret) + return ret; spin_lock_irqsave(&gpio_lock, flags); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 72065532c1c7..b747c76fd2b1 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -68,6 +68,7 @@ LIST_HEAD(gpio_devices); static void gpiochip_free_hogs(struct gpio_chip *chip); static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); +static bool gpiolib_initialized; static inline void desc_set_label(struct gpio_desc *d, const char *label) { @@ -440,9 +441,63 @@ static void gpiodevice_release(struct device *dev) cdev_del(&gdev->chrdev); list_del(&gdev->list); ida_simple_remove(&gpio_ida, gdev->id); + kfree(gdev->label); + kfree(gdev->descs); kfree(gdev); } +static int gpiochip_setup_dev(struct gpio_device *gdev) +{ + int status; + + cdev_init(&gdev->chrdev, &gpio_fileops); + gdev->chrdev.owner = THIS_MODULE; + gdev->chrdev.kobj.parent = &gdev->dev.kobj; + gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id); + status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1); + if (status < 0) + chip_warn(gdev->chip, "failed to add char device %d:%d\n", + MAJOR(gpio_devt), gdev->id); + else + chip_dbg(gdev->chip, "added GPIO chardev (%d:%d)\n", + MAJOR(gpio_devt), gdev->id); + status = device_add(&gdev->dev); + if (status) + goto err_remove_chardev; + + status = gpiochip_sysfs_register(gdev); + if (status) + goto err_remove_device; + + /* From this point, the .release() function cleans up gpio_device */ + gdev->dev.release = gpiodevice_release; + get_device(&gdev->dev); + pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", + __func__, gdev->base, gdev->base + gdev->ngpio - 1, + dev_name(&gdev->dev), gdev->chip->label ? : "generic"); + + return 0; + +err_remove_device: + device_del(&gdev->dev); +err_remove_chardev: + cdev_del(&gdev->chrdev); + return status; +} + +static void gpiochip_setup_devs(void) +{ + struct gpio_device *gdev; + int err; + + list_for_each_entry(gdev, &gpio_devices, list) { + err = gpiochip_setup_dev(gdev); + if (err) + pr_err("%s: Failed to initialize gpio device (%d)\n", + dev_name(&gdev->dev), err); + } +} + /** * gpiochip_add_data() - register a gpio_chip * @chip: the chip to register, with chip->base initialized @@ -457,6 +512,9 @@ static void gpiodevice_release(struct device *dev) * the gpio framework's arch_initcall(). Otherwise sysfs initialization * for GPIOs will fail rudely. * + * gpiochip_add_data() must only be called after gpiolib initialization, + * ie after core_initcall(). + * * If chip->base is negative, this requests dynamic assignment of * a range of valid GPIOs. 
*/ @@ -504,8 +562,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) else gdev->owner = THIS_MODULE; - gdev->descs = devm_kcalloc(&gdev->dev, chip->ngpio, - sizeof(gdev->descs[0]), GFP_KERNEL); + gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); if (!gdev->descs) { status = -ENOMEM; goto err_free_gdev; @@ -514,16 +571,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) if (chip->ngpio == 0) { chip_err(chip, "tried to insert a GPIO chip with zero lines\n"); status = -EINVAL; - goto err_free_gdev; + goto err_free_descs; } if (chip->label) - gdev->label = devm_kstrdup(&gdev->dev, chip->label, GFP_KERNEL); + gdev->label = kstrdup(chip->label, GFP_KERNEL); else - gdev->label = devm_kstrdup(&gdev->dev, "unknown", GFP_KERNEL); + gdev->label = kstrdup("unknown", GFP_KERNEL); if (!gdev->label) { status = -ENOMEM; - goto err_free_gdev; + goto err_free_descs; } gdev->ngpio = chip->ngpio; @@ -543,7 +600,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) if (base < 0) { status = base; spin_unlock_irqrestore(&gpio_lock, flags); - goto err_free_gdev; + goto err_free_label; } /* * TODO: it should not be necessary to reflect the assigned @@ -558,7 +615,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) status = gpiodev_add_to_list(gdev); if (status) { spin_unlock_irqrestore(&gpio_lock, flags); - goto err_free_gdev; + goto err_free_label; } for (i = 0; i < chip->ngpio; i++) { @@ -596,39 +653,16 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) * we get a device node entry in sysfs under * /sys/bus/gpio/devices/gpiochipN/dev that can be used for * coldplug of device nodes and other udev business. + * We can do this only if gpiolib has been initialized. + * Otherwise, defer until later. */ - cdev_init(&gdev->chrdev, &gpio_fileops); - gdev->chrdev.owner = THIS_MODULE; - gdev->chrdev.kobj.parent = &gdev->dev.kobj; - gdev->dev.devt = MKDEV(MAJOR(gpio_devt), gdev->id); - status = cdev_add(&gdev->chrdev, gdev->dev.devt, 1); - if (status < 0) - chip_warn(chip, "failed to add char device %d:%d\n", - MAJOR(gpio_devt), gdev->id); - else - chip_dbg(chip, "added GPIO chardev (%d:%d)\n", - MAJOR(gpio_devt), gdev->id); - status = device_add(&gdev->dev); - if (status) - goto err_remove_chardev; - - status = gpiochip_sysfs_register(gdev); - if (status) - goto err_remove_device; - - /* From this point, the .release() function cleans up gpio_device */ - gdev->dev.release = gpiodevice_release; - get_device(&gdev->dev); - pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", - __func__, gdev->base, gdev->base + gdev->ngpio - 1, - dev_name(&gdev->dev), chip->label ? : "generic"); - + if (gpiolib_initialized) { + status = gpiochip_setup_dev(gdev); + if (status) + goto err_remove_chip; + } return 0; -err_remove_device: - device_del(&gdev->dev); -err_remove_chardev: - cdev_del(&gdev->chrdev); err_remove_chip: acpi_gpiochip_remove(chip); gpiochip_free_hogs(chip); @@ -637,6 +671,10 @@ err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); spin_unlock_irqrestore(&gpio_lock, flags); +err_free_label: + kfree(gdev->label); +err_free_descs: + kfree(gdev->descs); err_free_gdev: ida_simple_remove(&gpio_ida, gdev->id); /* failures here can mean systems won't boot... 
*/ @@ -2231,9 +2269,11 @@ static struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id, return desc; } -static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, +static struct gpio_desc *acpi_find_gpio(struct device *dev, + const char *con_id, unsigned int idx, - enum gpio_lookup_flags *flags) + enum gpiod_flags flags, + enum gpio_lookup_flags *lookupflags) { struct acpi_device *adev = ACPI_COMPANION(dev); struct acpi_gpio_info info; @@ -2264,10 +2304,16 @@ static struct gpio_desc *acpi_find_gpio(struct device *dev, const char *con_id, desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); if (IS_ERR(desc)) return desc; + + if ((flags == GPIOD_OUT_LOW || flags == GPIOD_OUT_HIGH) && + info.gpioint) { + dev_dbg(dev, "refusing GpioInt() entry when doing GPIOD_OUT_* lookup\n"); + return ERR_PTR(-ENOENT); + } } if (info.polarity == GPIO_ACTIVE_LOW) - *flags |= GPIO_ACTIVE_LOW; + *lookupflags |= GPIO_ACTIVE_LOW; return desc; } @@ -2530,7 +2576,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev, desc = of_find_gpio(dev, con_id, idx, &lookupflags); } else if (ACPI_COMPANION(dev)) { dev_dbg(dev, "using ACPI for GPIO lookup\n"); - desc = acpi_find_gpio(dev, con_id, idx, &lookupflags); + desc = acpi_find_gpio(dev, con_id, idx, flags, &lookupflags); } } @@ -2829,6 +2875,9 @@ static int __init gpiolib_dev_init(void) if (ret < 0) { pr_err("gpiolib: failed to allocate char dev region\n"); bus_unregister(&gpio_bus_type); + } else { + gpiolib_initialized = true; + gpiochip_setup_devs(); } return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c4a21c6428f5..62a778012fe0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1591,6 +1591,7 @@ struct amdgpu_uvd { struct amdgpu_bo *vcpu_bo; void *cpu_addr; uint64_t gpu_addr; + void *saved_bo; atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; struct delayed_work idle_work; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 7a4b101e10c6..6043dc7c3a94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c @@ -816,10 +816,13 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, struct drm_device *ddev = adev->ddev; struct drm_crtc *crtc; uint32_t line_time_us, vblank_lines; + struct cgs_mode_info *mode_info; if (info == NULL) return -EINVAL; + mode_info = info->mode_info; + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { @@ -828,7 +831,7 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, info->active_display_mask |= (1 << amdgpu_crtc->crtc_id); info->display_count++; } - if (info->mode_info != NULL && + if (mode_info != NULL && crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) / @@ -836,10 +839,10 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end - amdgpu_crtc->hw_mode.crtc_vdisplay + (amdgpu_crtc->v_border * 2); - info->mode_info->vblank_time_us = vblank_lines * line_time_us; - info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - info->mode_info->ref_clock = adev->clock.spll.reference_freq; - info->mode_info++; + mode_info->vblank_time_us = vblank_lines * line_time_us; + mode_info->refresh_rate = 
drm_mode_vrefresh(&amdgpu_crtc->hw_mode); + mode_info->ref_clock = adev->clock.spll.reference_freq; + mode_info = NULL; } } } @@ -847,6 +850,16 @@ static int amdgpu_cgs_get_active_displays_info(void *cgs_device, return 0; } + +static int amdgpu_cgs_notify_dpm_enabled(void *cgs_device, bool enabled) +{ + CGS_FUNC_ADEV; + + adev->pm.dpm_enabled = enabled; + + return 0; +} + /** \brief evaluate acpi namespace object, handle or pathname must be valid * \param cgs_device * \param info input/output arguments for the control method @@ -1097,6 +1110,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { amdgpu_cgs_set_powergating_state, amdgpu_cgs_set_clockgating_state, amdgpu_cgs_get_active_displays_info, + amdgpu_cgs_notify_dpm_enabled, amdgpu_cgs_call_acpi_method, amdgpu_cgs_query_system_info, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index f0ed974bd4e0..3fb405b3a614 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -57,7 +57,7 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work, if (!fence_add_callback(fence, &work->cb, amdgpu_flip_callback)) return true; - fence_put(*f); + fence_put(fence); return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f1e17d60055a..93462aea9faa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -556,12 +556,10 @@ static struct pci_driver amdgpu_kms_pci_driver = { static int __init amdgpu_init(void) { amdgpu_sync_init(); -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force()) { DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n"); return -EINVAL; } -#endif DRM_INFO("amdgpu kernel modesetting enabled.\n"); driver = &kms_driver; pdriver = &amdgpu_kms_pci_driver; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 4303b447efe8..d81f1f4883a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -121,7 +121,7 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_fence *fence; - struct fence **ptr; + struct fence *old, **ptr; uint32_t seq; fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL); @@ -141,7 +141,11 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct fence **f) /* This function can't be called concurrently anyway, otherwise * emitting the fence would mess up the hardware ring buffer. 
*/ - BUG_ON(rcu_dereference_protected(*ptr, 1)); + old = rcu_dereference_protected(*ptr, 1); + if (old && !fence_is_signaled(old)) { + DRM_INFO("rcu slot is busy\n"); + fence_wait(old, false); + } rcu_assign_pointer(*ptr, fence_get(&fence->base)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index f594cfaa97e5..762cfdb85147 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -219,6 +219,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev) if (r) { return r; } + adev->ddev->vblank_disable_allowed = true; + /* enable msi */ adev->irq.msi_enabled = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 7805a8706af7..598eb0cd5aab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -382,6 +382,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_vram_gtt vram_gtt; vram_gtt.vram_size = adev->mc.real_vram_size; + vram_gtt.vram_size -= adev->vram_pin_size; vram_gtt.vram_cpu_accessible_size = adev->mc.visible_vram_size; vram_gtt.vram_cpu_accessible_size -= adev->vram_pin_size; vram_gtt.gtt_size = adev->mc.gtt_size; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 56d1458393cc..5b6639faa731 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -476,6 +476,17 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); } +static const char *amdgpu_vram_names[] = { + "UNKNOWN", + "GDDR1", + "DDR2", + "GDDR3", + "GDDR4", + "GDDR5", + "HBM", + "DDR3" +}; + int amdgpu_bo_init(struct amdgpu_device *adev) { /* Add an MTRR for the VRAM */ @@ -484,8 +495,8 @@ int amdgpu_bo_init(struct amdgpu_device *adev) DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", adev->mc.mc_vram_size >> 20, (unsigned long long)adev->mc.aper_size >> 20); - DRM_INFO("RAM width %dbits DDR\n", - adev->mc.vram_width); + DRM_INFO("RAM width %dbits %s\n", + adev->mc.vram_width, amdgpu_vram_names[adev->mc.vram_type]); return amdgpu_ttm_init(adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 3cb6d6c413c7..e9c6ae6ed2f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -143,7 +143,7 @@ static int amdgpu_pp_late_init(void *handle) adev->powerplay.pp_handle); #ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { + if (adev->pp_enabled && adev->pm.dpm_enabled) { amdgpu_pm_sysfs_init(adev); amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL); } @@ -161,12 +161,8 @@ static int amdgpu_pp_sw_init(void *handle) adev->powerplay.pp_handle); #ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { - if (amdgpu_dpm == 0) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - } + if (adev->pp_enabled) + adev->pm.dpm_enabled = true; #endif return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f1a55d1888cb..6f3369de232f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -622,7 +622,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm) set_page_dirty(page); mark_page_accessed(page); - page_cache_release(page); + put_page(page); } sg_free_table(ttm->sg); 
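The amdgpu_fence_emit() hunk above replaces a hard BUG_ON() with a graceful path: if the RCU-protected ring slot for the new fence's sequence number is still occupied by an unsignaled fence, the driver now waits for it instead of asserting. Below is a minimal userspace sketch of that slot-reuse pattern, under stated assumptions: struct demo_fence, emit_fence() and RING_SLOTS are illustrative names, not the kernel API, and a plain mutex/condvar stands in for kernel RCU and struct fence.

/*
 * Hedged sketch of the fence-slot reuse pattern from amdgpu_fence_emit():
 * a full slot is treated as a wait condition, not an invariant violation.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SLOTS 2	/* power of two, like num_fences_mask + 1 */

struct demo_fence {	/* illustrative stand-in for struct fence */
	pthread_mutex_t lock;
	pthread_cond_t cv;
	bool signaled;
	unsigned int seq;
};

static struct demo_fence *ring[RING_SLOTS];

static struct demo_fence *fence_create(unsigned int seq)
{
	struct demo_fence *f = calloc(1, sizeof(*f));

	pthread_mutex_init(&f->lock, NULL);
	pthread_cond_init(&f->cv, NULL);
	f->seq = seq;
	return f;
}

static void fence_signal(struct demo_fence *f)
{
	pthread_mutex_lock(&f->lock);
	f->signaled = true;
	pthread_cond_broadcast(&f->cv);
	pthread_mutex_unlock(&f->lock);
}

static void fence_wait(struct demo_fence *f)
{
	pthread_mutex_lock(&f->lock);
	while (!f->signaled)
		pthread_cond_wait(&f->cv, &f->lock);
	pthread_mutex_unlock(&f->lock);
}

/* Like the patched amdgpu_fence_emit(): wait out a busy slot, don't assert. */
static void emit_fence(struct demo_fence *f)
{
	struct demo_fence **slot = &ring[f->seq & (RING_SLOTS - 1)];
	struct demo_fence *old = *slot;

	if (old && !old->signaled) {	/* kernel uses fence_is_signaled() */
		printf("slot busy, waiting for seq %u\n", old->seq);
		fence_wait(old);
	}
	free(old);	/* kernel drops its fence reference instead */
	*slot = f;	/* kernel: rcu_assign_pointer(*ptr, fence_get(...)) */
}

int main(void)
{
	emit_fence(fence_create(0));
	emit_fence(fence_create(1));
	fence_signal(ring[0]);		/* seq 0 completes */
	emit_fence(fence_create(2));	/* reuses slot 0 without blocking */
	printf("slot 0 now holds seq %u\n", ring[0]->seq);
	return 0;
}

The point carried over from the hunk is the mapping: with a bounded ring, sequence number seq always lands in slot seq & (RING_SLOTS - 1), so an emitter that finds the slot occupied must block until the previous occupant signals before overwriting it.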
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index c1a581044417..338da80006b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -241,32 +241,28 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) int amdgpu_uvd_suspend(struct amdgpu_device *adev) { - struct amdgpu_ring *ring = &adev->uvd.ring; - int i, r; + unsigned size; + void *ptr; + int i; if (adev->uvd.vcpu_bo == NULL) return 0; - for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) { - uint32_t handle = atomic_read(&adev->uvd.handles[i]); - if (handle != 0) { - struct fence *fence; + for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) + if (atomic_read(&adev->uvd.handles[i])) + break; - amdgpu_uvd_note_usage(adev); + if (i == AMDGPU_MAX_UVD_HANDLES) + return 0; - r = amdgpu_uvd_get_destroy_msg(ring, handle, false, &fence); - if (r) { - DRM_ERROR("Error destroying UVD (%d)!\n", r); - continue; - } + size = amdgpu_bo_size(adev->uvd.vcpu_bo); + ptr = adev->uvd.cpu_addr; - fence_wait(fence, false); - fence_put(fence); + adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); + if (!adev->uvd.saved_bo) + return -ENOMEM; - adev->uvd.filp[i] = NULL; - atomic_set(&adev->uvd.handles[i], 0); - } - } + memcpy(adev->uvd.saved_bo, ptr, size); return 0; } @@ -275,23 +271,29 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev) { unsigned size; void *ptr; - const struct common_firmware_header *hdr; - unsigned offset; if (adev->uvd.vcpu_bo == NULL) return -EINVAL; - hdr = (const struct common_firmware_header *)adev->uvd.fw->data; - offset = le32_to_cpu(hdr->ucode_array_offset_bytes); - memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, - (adev->uvd.fw->size) - offset); - size = amdgpu_bo_size(adev->uvd.vcpu_bo); - size -= le32_to_cpu(hdr->ucode_size_bytes); ptr = adev->uvd.cpu_addr; - ptr += le32_to_cpu(hdr->ucode_size_bytes); - memset(ptr, 0, size); + if (adev->uvd.saved_bo != NULL) { + memcpy(ptr, adev->uvd.saved_bo, size); + kfree(adev->uvd.saved_bo); + adev->uvd.saved_bo = NULL; + } else { + const struct common_firmware_header *hdr; + unsigned offset; + + hdr = (const struct common_firmware_header *)adev->uvd.fw->data; + offset = le32_to_cpu(hdr->ucode_array_offset_bytes); + memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset, + (adev->uvd.fw->size) - offset); + size -= le32_to_cpu(hdr->ucode_size_bytes); + ptr += le32_to_cpu(hdr->ucode_size_bytes); + memset(ptr, 0, size); + } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 82ce7d943884..05b0353d3880 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -903,14 +903,6 @@ static int gmc_v7_0_early_init(void *handle) gmc_v7_0_set_gart_funcs(adev); gmc_v7_0_set_irq_funcs(adev); - if (adev->flags & AMD_IS_APU) { - adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - } else { - u32 tmp = RREG32(mmMC_SEQ_MISC0); - tmp &= MC_SEQ_MISC0__MT__MASK; - adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); - } - return 0; } @@ -927,6 +919,14 @@ static int gmc_v7_0_sw_init(void *handle) int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->flags & AMD_IS_APU) { + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + } else { + u32 tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp); + } + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c 
b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 29bd7b57dc91..02deb3229405 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -863,14 +863,6 @@ static int gmc_v8_0_early_init(void *handle) gmc_v8_0_set_gart_funcs(adev); gmc_v8_0_set_irq_funcs(adev); - if (adev->flags & AMD_IS_APU) { - adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; - } else { - u32 tmp = RREG32(mmMC_SEQ_MISC0); - tmp &= MC_SEQ_MISC0__MT__MASK; - adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); - } - return 0; } @@ -881,12 +873,27 @@ static int gmc_v8_0_late_init(void *handle) return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0); } +#define mmMC_SEQ_MISC0_FIJI 0xA71 + static int gmc_v8_0_sw_init(void *handle) { int r; int dma_bits; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (adev->flags & AMD_IS_APU) { + adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; + } else { + u32 tmp; + + if (adev->asic_type == CHIP_FIJI) + tmp = RREG32(mmMC_SEQ_MISC0_FIJI); + else + tmp = RREG32(mmMC_SEQ_MISC0); + tmp &= MC_SEQ_MISC0__MT__MASK; + adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp); + } + r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index c606ccb38d8b..cb463753115b 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_suspend(adev); + r = uvd_v4_2_hw_fini(adev); if (r) return r; - r = uvd_v4_2_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index e3c852d9d79a..16476d80f475 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - r = amdgpu_uvd_suspend(adev); + r = uvd_v5_0_hw_fini(adev); if (r) return r; - r = uvd_v5_0_hw_fini(adev); + r = amdgpu_uvd_suspend(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 3375e614ac67..d49379145ef2 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -214,15 +214,16 @@ static int uvd_v6_0_suspend(void *handle) int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + r = uvd_v6_0_hw_fini(adev); + if (r) + return r; + /* Skip this for APU for now */ if (!(adev->flags & AMD_IS_APU)) { r = amdgpu_uvd_suspend(adev); if (r) return r; } - r = uvd_v6_0_hw_fini(adev); - if (r) - return r; return r; } diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index aec38fc3834f..ab84d4947247 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -589,6 +589,8 @@ typedef int(*cgs_get_active_displays_info)( void *cgs_device, struct cgs_display_info *info); +typedef int (*cgs_notify_dpm_enabled)(void *cgs_device, bool enabled); + typedef int (*cgs_call_acpi_method)(void *cgs_device, uint32_t acpi_method, uint32_t acpi_function, @@ -644,6 +646,8 @@ struct cgs_ops { cgs_set_clockgating_state set_clockgating_state; /* display manager */ cgs_get_active_displays_info get_active_displays_info; + /* notify dpm enabled */ + cgs_notify_dpm_enabled notify_dpm_enabled; 
/* ACPI */ cgs_call_acpi_method call_acpi_method; /* get system info */ @@ -734,8 +738,12 @@ struct cgs_device CGS_CALL(set_powergating_state, dev, block_type, state) #define cgs_set_clockgating_state(dev, block_type, state) \ CGS_CALL(set_clockgating_state, dev, block_type, state) +#define cgs_notify_dpm_enabled(dev, enabled) \ + CGS_CALL(notify_dpm_enabled, dev, enabled) + #define cgs_get_active_displays_info(dev, info) \ CGS_CALL(get_active_displays_info, dev, info) + #define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \ CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) #define cgs_query_system_info(dev, sys_info) \ diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c index 6b52c78cb404..56856a2864d1 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c @@ -137,14 +137,14 @@ static const pem_event_action *resume_event[] = { reset_display_configCounter_tasks, update_dal_configuration_tasks, vari_bright_resume_tasks, - block_adjust_power_state_tasks, setup_asic_tasks, enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ enable_dynamic_state_management_tasks, enable_clock_power_gatings_tasks, enable_disable_bapm_tasks, initialize_thermal_controller_tasks, - reset_boot_state_tasks, + get_2d_performance_state_tasks, + set_performance_state_tasks, adjust_power_state_tasks, enable_disable_fps_tasks, notify_hw_power_source_tasks, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 51dedf84623c..89f31bc5b68b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -2389,6 +2389,7 @@ static int fiji_populate_smc_vce_level(struct pp_hwmgr *hwmgr, for(count = 0; count < table->VceLevelCount; count++) { table->VceLevel[count].Frequency = mm_table->entries[count].eclk; + table->VceLevel[count].MinVoltage = 0; table->VceLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; table->VceLevel[count].MinVoltage |= @@ -2465,6 +2466,7 @@ static int fiji_populate_smc_samu_level(struct pp_hwmgr *hwmgr, for (count = 0; count < table->SamuLevelCount; count++) { /* not sure whether we need evclk or not */ + table->SamuLevel[count].MinVoltage = 0; table->SamuLevel[count].Frequency = mm_table->entries[count].samclock; table->SamuLevel[count].MinVoltage |= (mm_table->entries[count].vddc * VOLTAGE_SCALE) << VDDC_SHIFT; @@ -2562,6 +2564,7 @@ static int fiji_populate_smc_uvd_level(struct pp_hwmgr *hwmgr, table->UvdBootLevel = 0; for (count = 0; count < table->UvdLevelCount; count++) { + table->UvdLevel[count].MinVoltage = 0; table->UvdLevel[count].VclkFrequency = mm_table->entries[count].vclk; table->UvdLevel[count].DclkFrequency = mm_table->entries[count].dclk; table->UvdLevel[count].MinVoltage |= (mm_table->entries[count].vddc * @@ -2900,6 +2903,8 @@ static int fiji_init_smc_table(struct pp_hwmgr *hwmgr) if(FIJI_VOLTAGE_CONTROL_NONE != data->voltage_control) fiji_populate_smc_voltage_tables(hwmgr, table); + table->SystemFlags = 0; + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition)) table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; @@ -2997,6 +3002,7 @@ static int 
fiji_init_smc_table(struct pp_hwmgr *hwmgr) table->MemoryThermThrottleEnable = 1; table->PCIeBootLinkLevel = 0; /* 0:Gen1 1:Gen2 2:Gen3*/ table->PCIeGenInterval = 1; + table->VRConfig = 0; result = fiji_populate_vr_config(hwmgr, table); PP_ASSERT_WITH_CODE(0 == result, @@ -5195,6 +5201,67 @@ static int fiji_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static inline bool fiji_are_power_levels_equal(const struct fiji_performance_level *pl1, + const struct fiji_performance_level *pl2) +{ + return ((pl1->memory_clock == pl2->memory_clock) && + (pl1->engine_clock == pl2->engine_clock) && + (pl1->pcie_gen == pl2->pcie_gen) && + (pl1->pcie_lane == pl2->pcie_lane)); +} + +int fiji_check_states_equal(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *pstate1, const struct pp_hw_power_state *pstate2, bool *equal) +{ + const struct fiji_power_state *psa = cast_const_phw_fiji_power_state(pstate1); + const struct fiji_power_state *psb = cast_const_phw_fiji_power_state(pstate2); + int i; + + if (equal == NULL || psa == NULL || psb == NULL) + return -EINVAL; + + /* If the two states don't even have the same number of performance levels they cannot be the same state. */ + if (psa->performance_level_count != psb->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < psa->performance_level_count; i++) { + if (!fiji_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) { + /* If we have found even one performance level pair that is different the states are different. */ + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk)); + *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk)); + *equal &= (psa->sclk_threshold == psb->sclk_threshold); + *equal &= (psa->acp_clk == psb->acp_clk); + + return 0; +} + +bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + bool is_update_required = false; + struct cgs_display_info info = {0,0,NULL}; + + cgs_get_active_displays_info(hwmgr->device, &info); + + if (data->display_timing.num_existing_displays != info.display_count) + is_update_required = true; +/* TO DO NEED TO GET DEEP SLEEP CLOCK FROM DAL + if (phm_cap_enabled(hwmgr->hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) { + cgs_get_min_clock_settings(hwmgr->device, &min_clocks); + if(min_clocks.engineClockInSR != data->display_timing.minClockInSR) + is_update_required = true; +*/ + return is_update_required; +} + + static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .backend_init = &fiji_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, @@ -5230,6 +5297,8 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .register_internal_thermal_interrupt = fiji_register_internal_thermal_interrupt, .set_fan_control_mode = fiji_set_fan_control_mode, .get_fan_control_mode = fiji_get_fan_control_mode, + .check_states_equal = fiji_check_states_equal, + .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration, .get_pp_table = fiji_get_pp_table, .set_pp_table = fiji_set_pp_table, .force_clock_level = fiji_force_clock_level, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c 
b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index be31bed2538a..fa208ada6892 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -58,6 +58,9 @@ void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VpuRecoveryInProgress); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDDPM); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEDPM); + if (acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST) && acpi_atcs_functions_supported(hwmgr->device, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION)) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PCIEPerformanceRequest); @@ -130,18 +133,25 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr, int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) { + int ret = 1; + bool enabled; PHM_FUNC_CHECK(hwmgr); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TablelessHardwareInterface)) { if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable) - return hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); + ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr); } else { - return phm_dispatch_table(hwmgr, + ret = phm_dispatch_table(hwmgr, &(hwmgr->enable_dynamic_state_management), NULL, NULL); } - return 0; + + enabled = ret == 0 ? true : false; + + cgs_notify_dpm_enabled(hwmgr->device, enabled); + + return ret; } int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index 56b829f97699..3ac1ae4d8caf 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -57,14 +57,13 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) DRM_ERROR("failed to map control registers area\n"); ret = PTR_ERR(hdlcd->mmio); hdlcd->mmio = NULL; - goto fail; + return ret; } version = hdlcd_read(hdlcd, HDLCD_REG_VERSION); if ((version & HDLCD_PRODUCT_MASK) != HDLCD_PRODUCT_ID) { DRM_ERROR("unknown product id: 0x%x\n", version); - ret = -EINVAL; - goto fail; + return -EINVAL; } DRM_INFO("found ARM HDLCD version r%dp%d\n", (version & HDLCD_VERSION_MAJOR_MASK) >> 8, @@ -73,7 +72,7 @@ static int hdlcd_load(struct drm_device *drm, unsigned long flags) /* Get the optional framebuffer memory resource */ ret = of_reserved_mem_device_init(drm->dev); if (ret && ret != -ENODEV) - goto fail; + return ret; ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)); if (ret) @@ -101,8 +100,6 @@ irq_fail: drm_crtc_cleanup(&hdlcd->crtc); setup_fail: of_reserved_mem_device_release(drm->dev); -fail: - devm_clk_put(drm->dev, hdlcd->clk); return ret; } @@ -412,7 +409,6 @@ err_unload: pm_runtime_put_sync(drm->dev); pm_runtime_disable(drm->dev); of_reserved_mem_device_release(drm->dev); - devm_clk_put(dev, hdlcd->clk); err_free: drm_dev_unref(drm); @@ -436,10 +432,6 @@ static void hdlcd_drm_unbind(struct device *dev) pm_runtime_put_sync(drm->dev); pm_runtime_disable(drm->dev); of_reserved_mem_device_release(drm->dev); - if (!IS_ERR(hdlcd->clk)) { - devm_clk_put(drm->dev, hdlcd->clk); - hdlcd->clk = NULL; - } drm_mode_config_cleanup(drm); drm_dev_unregister(drm); drm_dev_unref(drm); diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c index 6e731db31aa4..aca7f9cc6109 100644 --- a/drivers/gpu/drm/armada/armada_gem.c 
+++ b/drivers/gpu/drm/armada/armada_gem.c @@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, release: for_each_sg(sgt->sgl, sg, num, i) - page_cache_release(sg_page(sg)); + put_page(sg_page(sg)); free_table: sg_free_table(sgt); free_sgt: @@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, if (dobj->obj.filp) { struct scatterlist *sg; for_each_sg(sgt->sgl, sg, sgt->nents, i) - page_cache_release(sg_page(sg)); + put_page(sg_page(sg)); } sg_free_table(sgt); diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 9a32d9dfdd26..fcd9c0714836 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -218,10 +218,8 @@ static struct drm_driver driver = { static int __init ast_init(void) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && ast_modeset == -1) return -EINVAL; -#endif if (ast_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 3d8d16402d07..8ab4318e57a1 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -615,7 +615,7 @@ err: static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev) { mutex_lock(&dev->mode_config.mutex); - drm_connector_unplug_all(dev); + drm_connector_unregister_all(dev); mutex_unlock(&dev->mode_config.mutex); } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 27e2022de89d..efd94e00c3e5 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -40,4 +40,6 @@ config DRM_PARADE_PS8622 ---help--- Parade eDP-LVDS bridge chip driver. +source "drivers/gpu/drm/bridge/analogix/Kconfig" + endmenu diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index f13c33d67c03..ff821f4b5833 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o +obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig new file mode 100644 index 000000000000..80f286fa3a69 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/Kconfig @@ -0,0 +1,3 @@ +config DRM_ANALOGIX_DP + tristate + depends on DRM diff --git a/drivers/gpu/drm/bridge/analogix/Makefile b/drivers/gpu/drm/bridge/analogix/Makefile new file mode 100644 index 000000000000..cd4010ba6890 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/Makefile @@ -0,0 +1,2 @@ +analogix_dp-objs := analogix_dp_core.o analogix_dp_reg.o +obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix_dp.o diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c new file mode 100644 index 000000000000..7699597070a1 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -0,0 +1,1430 @@ +/* +* Analogix DP (Display Port) core interface driver. +* +* Copyright (C) 2012 Samsung Electronics Co., Ltd. +* Author: Jingoo Han <jg1.han@samsung.com> +* +* This program is free software; you can redistribute it and/or modify it +* under the terms of the GNU General Public License as published by the +* Free Software Foundation; either version 2 of the License, or (at your +* option) any later version. 
+*/ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/interrupt.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/component.h> +#include <linux/phy/phy.h> + +#include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> + +#include <drm/bridge/analogix_dp.h> + +#include "analogix_dp_core.h" + +#define to_dp(nm) container_of(nm, struct analogix_dp_device, nm) + +struct bridge_init { + struct i2c_client *client; + struct device_node *node; +}; + +static void analogix_dp_init_dp(struct analogix_dp_device *dp) +{ + analogix_dp_reset(dp); + + analogix_dp_swreset(dp); + + analogix_dp_init_analog_param(dp); + analogix_dp_init_interrupt(dp); + + /* SW defined function: normal operation */ + analogix_dp_enable_sw_function(dp); + + analogix_dp_config_interrupt(dp); + analogix_dp_init_analog_func(dp); + + analogix_dp_init_hpd(dp); + analogix_dp_init_aux(dp); +} + +static int analogix_dp_detect_hpd(struct analogix_dp_device *dp) +{ + int timeout_loop = 0; + + while (timeout_loop < DP_TIMEOUT_LOOP_COUNT) { + if (analogix_dp_get_plug_in_status(dp) == 0) + return 0; + + timeout_loop++; + usleep_range(10, 11); + } + + /* + * Some eDP screens do not have an hpd signal, so we can't just + * return failure when hpd plug-in detection fails; the DT property + * "force-hpd" indicates whether the driver needs this. + */ + if (!dp->force_hpd) + return -ETIMEDOUT; + + /* + * The eDP TRM indicates that if HPD_STATUS(RO) is 0, the AUX CH + * will not work, so we need to issue a force-hpd action to + * set HPD_STATUS manually. + */ + dev_dbg(dp->dev, "failed to get hpd plug status, try to force hpd\n"); + + analogix_dp_force_hpd(dp); + + if (analogix_dp_get_plug_in_status(dp) != 0) { + dev_err(dp->dev, "failed to get hpd plug in status\n"); + return -EINVAL; + } + + dev_dbg(dp->dev, "got plug-in status after forcing hpd\n"); + + return 0; +} + +static unsigned char analogix_dp_calc_edid_check_sum(unsigned char *edid_data) +{ + int i; + unsigned char sum = 0; + + for (i = 0; i < EDID_BLOCK_LENGTH; i++) + sum = sum + edid_data[i]; + + return sum; +} + +static int analogix_dp_read_edid(struct analogix_dp_device *dp) +{ + unsigned char *edid = dp->edid; + unsigned int extend_block = 0; + unsigned char sum; + unsigned char test_vector; + int retval; + + /* + * EDID device address is 0x50. + * However, if necessary, the upper (segment) address for E-EDID + * must first be set in the I2C device at 0x30.
+ */ + + /* Read Extension Flag, Number of 128-byte EDID extension blocks */ + retval = analogix_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR, + EDID_EXTENSION_FLAG, + &extend_block); + if (retval) + return retval; + + if (extend_block > 0) { + dev_dbg(dp->dev, "EDID data includes a single extension!\n"); + + /* Read EDID data */ + retval = analogix_dp_read_bytes_from_i2c(dp, + I2C_EDID_DEVICE_ADDR, + EDID_HEADER_PATTERN, + EDID_BLOCK_LENGTH, + &edid[EDID_HEADER_PATTERN]); + if (retval != 0) { + dev_err(dp->dev, "EDID Read failed!\n"); + return -EIO; + } + sum = analogix_dp_calc_edid_check_sum(edid); + if (sum != 0) { + dev_err(dp->dev, "EDID bad checksum!\n"); + return -EIO; + } + + /* Read additional EDID data */ + retval = analogix_dp_read_bytes_from_i2c(dp, + I2C_EDID_DEVICE_ADDR, + EDID_BLOCK_LENGTH, + EDID_BLOCK_LENGTH, + &edid[EDID_BLOCK_LENGTH]); + if (retval != 0) { + dev_err(dp->dev, "EDID Read failed!\n"); + return -EIO; + } + sum = analogix_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]); + if (sum != 0) { + dev_err(dp->dev, "EDID bad checksum!\n"); + return -EIO; + } + + analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST, + &test_vector); + if (test_vector & DP_TEST_LINK_EDID_READ) { + analogix_dp_write_byte_to_dpcd(dp, + DP_TEST_EDID_CHECKSUM, + edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]); + analogix_dp_write_byte_to_dpcd(dp, + DP_TEST_RESPONSE, + DP_TEST_EDID_CHECKSUM_WRITE); + } + } else { + dev_info(dp->dev, "EDID data does not include any extensions.\n"); + + /* Read EDID data */ + retval = analogix_dp_read_bytes_from_i2c(dp, + I2C_EDID_DEVICE_ADDR, EDID_HEADER_PATTERN, + EDID_BLOCK_LENGTH, &edid[EDID_HEADER_PATTERN]); + if (retval != 0) { + dev_err(dp->dev, "EDID Read failed!\n"); + return -EIO; + } + sum = analogix_dp_calc_edid_check_sum(edid); + if (sum != 0) { + dev_err(dp->dev, "EDID bad checksum!\n"); + return -EIO; + } + + analogix_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST, + &test_vector); + if (test_vector & DP_TEST_LINK_EDID_READ) { + analogix_dp_write_byte_to_dpcd(dp, + DP_TEST_EDID_CHECKSUM, edid[EDID_CHECKSUM]); + analogix_dp_write_byte_to_dpcd(dp, + DP_TEST_RESPONSE, DP_TEST_EDID_CHECKSUM_WRITE); + } + } + + dev_dbg(dp->dev, "EDID Read success!\n"); + return 0; +} + +static int analogix_dp_handle_edid(struct analogix_dp_device *dp) +{ + u8 buf[12]; + int i; + int retval; + + /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */ + retval = analogix_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, 12, buf); + if (retval) + return retval; + + /* Read EDID */ + for (i = 0; i < 3; i++) { + retval = analogix_dp_read_edid(dp); + if (!retval) + break; + } + + return retval; +} + +static void +analogix_dp_enable_rx_to_enhanced_mode(struct analogix_dp_device *dp, + bool enable) +{ + u8 data; + + analogix_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data); + + if (enable) + analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET, + DP_LANE_COUNT_ENHANCED_FRAME_EN | + DPCD_LANE_COUNT_SET(data)); + else + analogix_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET, + DPCD_LANE_COUNT_SET(data)); +} + +static int analogix_dp_is_enhanced_mode_available(struct analogix_dp_device *dp) +{ + u8 data; + int retval; + + analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data); + retval = DPCD_ENHANCED_FRAME_CAP(data); + + return retval; +} + +static void analogix_dp_set_enhanced_mode(struct analogix_dp_device *dp) +{ + u8 data; + + data = analogix_dp_is_enhanced_mode_available(dp); + analogix_dp_enable_rx_to_enhanced_mode(dp, data); + analogix_dp_enable_enhanced_mode(dp, data); +} + +static 
void analogix_dp_training_pattern_dis(struct analogix_dp_device *dp) +{ + analogix_dp_set_training_pattern(dp, DP_NONE); + + analogix_dp_write_byte_to_dpcd(dp, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); +} + +static void +analogix_dp_set_lane_lane_pre_emphasis(struct analogix_dp_device *dp, + int pre_emphasis, int lane) +{ + switch (lane) { + case 0: + analogix_dp_set_lane0_pre_emphasis(dp, pre_emphasis); + break; + case 1: + analogix_dp_set_lane1_pre_emphasis(dp, pre_emphasis); + break; + + case 2: + analogix_dp_set_lane2_pre_emphasis(dp, pre_emphasis); + break; + + case 3: + analogix_dp_set_lane3_pre_emphasis(dp, pre_emphasis); + break; + } +} + +static int analogix_dp_link_start(struct analogix_dp_device *dp) +{ + u8 buf[4]; + int lane, lane_count, pll_tries, retval; + + lane_count = dp->link_train.lane_count; + + dp->link_train.lt_state = CLOCK_RECOVERY; + dp->link_train.eq_loop = 0; + + for (lane = 0; lane < lane_count; lane++) + dp->link_train.cr_loop[lane] = 0; + + /* Set link rate and count as you want to establish*/ + analogix_dp_set_link_bandwidth(dp, dp->link_train.link_rate); + analogix_dp_set_lane_count(dp, dp->link_train.lane_count); + + /* Setup RX configuration */ + buf[0] = dp->link_train.link_rate; + buf[1] = dp->link_train.lane_count; + retval = analogix_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, 2, buf); + if (retval) + return retval; + + /* Set TX pre-emphasis to minimum */ + for (lane = 0; lane < lane_count; lane++) + analogix_dp_set_lane_lane_pre_emphasis(dp, + PRE_EMPHASIS_LEVEL_0, lane); + + /* Wait for PLL lock */ + pll_tries = 0; + while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { + if (pll_tries == DP_TIMEOUT_LOOP_COUNT) { + dev_err(dp->dev, "Wait for PLL lock timed out\n"); + return -ETIMEDOUT; + } + + pll_tries++; + usleep_range(90, 120); + } + + /* Set training pattern 1 */ + analogix_dp_set_training_pattern(dp, TRAINING_PTN1); + + /* Set RX training pattern */ + retval = analogix_dp_write_byte_to_dpcd(dp, + DP_TRAINING_PATTERN_SET, + DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1); + if (retval) + return retval; + + for (lane = 0; lane < lane_count; lane++) + buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 | + DP_TRAIN_VOLTAGE_SWING_LEVEL_0; + + retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET, + lane_count, buf); + + return retval; +} + +static unsigned char analogix_dp_get_lane_status(u8 link_status[2], int lane) +{ + int shift = (lane & 1) * 4; + u8 link_value = link_status[lane >> 1]; + + return (link_value >> shift) & 0xf; +} + +static int analogix_dp_clock_recovery_ok(u8 link_status[2], int lane_count) +{ + int lane; + u8 lane_status; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = analogix_dp_get_lane_status(link_status, lane); + if ((lane_status & DP_LANE_CR_DONE) == 0) + return -EINVAL; + } + return 0; +} + +static int analogix_dp_channel_eq_ok(u8 link_status[2], u8 link_align, + int lane_count) +{ + int lane; + u8 lane_status; + + if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0) + return -EINVAL; + + for (lane = 0; lane < lane_count; lane++) { + lane_status = analogix_dp_get_lane_status(link_status, lane); + lane_status &= DP_CHANNEL_EQ_BITS; + if (lane_status != DP_CHANNEL_EQ_BITS) + return -EINVAL; + } + + return 0; +} + +static unsigned char +analogix_dp_get_adjust_request_voltage(u8 adjust_request[2], int lane) +{ + int shift = (lane & 1) * 4; + u8 link_value = adjust_request[lane >> 1]; + + return (link_value >> shift) & 0x3; +} + +static unsigned char 
analogix_dp_get_adjust_request_pre_emphasis( + u8 adjust_request[2], + int lane) +{ + int shift = (lane & 1) * 4; + u8 link_value = adjust_request[lane >> 1]; + + return ((link_value >> shift) & 0xc) >> 2; +} + +static void analogix_dp_set_lane_link_training(struct analogix_dp_device *dp, + u8 training_lane_set, int lane) +{ + switch (lane) { + case 0: + analogix_dp_set_lane0_link_training(dp, training_lane_set); + break; + case 1: + analogix_dp_set_lane1_link_training(dp, training_lane_set); + break; + + case 2: + analogix_dp_set_lane2_link_training(dp, training_lane_set); + break; + + case 3: + analogix_dp_set_lane3_link_training(dp, training_lane_set); + break; + } +} + +static unsigned int +analogix_dp_get_lane_link_training(struct analogix_dp_device *dp, + int lane) +{ + u32 reg; + + switch (lane) { + case 0: + reg = analogix_dp_get_lane0_link_training(dp); + break; + case 1: + reg = analogix_dp_get_lane1_link_training(dp); + break; + case 2: + reg = analogix_dp_get_lane2_link_training(dp); + break; + case 3: + reg = analogix_dp_get_lane3_link_training(dp); + break; + default: + WARN_ON(1); + return 0; + } + + return reg; +} + +static void analogix_dp_reduce_link_rate(struct analogix_dp_device *dp) +{ + analogix_dp_training_pattern_dis(dp); + analogix_dp_set_enhanced_mode(dp); + + dp->link_train.lt_state = FAILED; +} + +static void analogix_dp_get_adjust_training_lane(struct analogix_dp_device *dp, + u8 adjust_request[2]) +{ + int lane, lane_count; + u8 voltage_swing, pre_emphasis, training_lane; + + lane_count = dp->link_train.lane_count; + for (lane = 0; lane < lane_count; lane++) { + voltage_swing = analogix_dp_get_adjust_request_voltage( + adjust_request, lane); + pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis( + adjust_request, lane); + training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) | + DPCD_PRE_EMPHASIS_SET(pre_emphasis); + + if (voltage_swing == VOLTAGE_LEVEL_3) + training_lane |= DP_TRAIN_MAX_SWING_REACHED; + if (pre_emphasis == PRE_EMPHASIS_LEVEL_3) + training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + dp->link_train.training_lane[lane] = training_lane; + } +} + +static int analogix_dp_process_clock_recovery(struct analogix_dp_device *dp) +{ + int lane, lane_count, retval; + u8 voltage_swing, pre_emphasis, training_lane; + u8 link_status[2], adjust_request[2]; + + usleep_range(100, 101); + + lane_count = dp->link_train.lane_count; + + retval = analogix_dp_read_bytes_from_dpcd(dp, + DP_LANE0_1_STATUS, 2, link_status); + if (retval) + return retval; + + retval = analogix_dp_read_bytes_from_dpcd(dp, + DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request); + if (retval) + return retval; + + if (analogix_dp_clock_recovery_ok(link_status, lane_count) == 0) { + /* set training pattern 2 for EQ */ + analogix_dp_set_training_pattern(dp, TRAINING_PTN2); + + retval = analogix_dp_write_byte_to_dpcd(dp, + DP_TRAINING_PATTERN_SET, + DP_LINK_SCRAMBLING_DISABLE | + DP_TRAINING_PATTERN_2); + if (retval) + return retval; + + dev_info(dp->dev, "Link Training Clock Recovery success\n"); + dp->link_train.lt_state = EQUALIZER_TRAINING; + } else { + for (lane = 0; lane < lane_count; lane++) { + training_lane = analogix_dp_get_lane_link_training( + dp, lane); + voltage_swing = analogix_dp_get_adjust_request_voltage( + adjust_request, lane); + pre_emphasis = analogix_dp_get_adjust_request_pre_emphasis( + adjust_request, lane); + + if (DPCD_VOLTAGE_SWING_GET(training_lane) == + voltage_swing && + DPCD_PRE_EMPHASIS_GET(training_lane) == + pre_emphasis) + dp->link_train.cr_loop[lane]++; + 
+ if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP || + voltage_swing == VOLTAGE_LEVEL_3 || + pre_emphasis == PRE_EMPHASIS_LEVEL_3) { + dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n", + dp->link_train.cr_loop[lane], + voltage_swing, pre_emphasis); + analogix_dp_reduce_link_rate(dp); + return -EIO; + } + } + } + + analogix_dp_get_adjust_training_lane(dp, adjust_request); + + for (lane = 0; lane < lane_count; lane++) + analogix_dp_set_lane_link_training(dp, + dp->link_train.training_lane[lane], lane); + + retval = analogix_dp_write_bytes_to_dpcd(dp, + DP_TRAINING_LANE0_SET, lane_count, + dp->link_train.training_lane); + if (retval) + return retval; + + return retval; +} + +static int analogix_dp_process_equalizer_training(struct analogix_dp_device *dp) +{ + int lane, lane_count, retval; + u32 reg; + u8 link_align, link_status[2], adjust_request[2]; + + usleep_range(400, 401); + + lane_count = dp->link_train.lane_count; + + retval = analogix_dp_read_bytes_from_dpcd(dp, + DP_LANE0_1_STATUS, 2, link_status); + if (retval) + return retval; + + if (analogix_dp_clock_recovery_ok(link_status, lane_count)) { + analogix_dp_reduce_link_rate(dp); + return -EIO; + } + + retval = analogix_dp_read_bytes_from_dpcd(dp, + DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request); + if (retval) + return retval; + + retval = analogix_dp_read_byte_from_dpcd(dp, + DP_LANE_ALIGN_STATUS_UPDATED, &link_align); + if (retval) + return retval; + + analogix_dp_get_adjust_training_lane(dp, adjust_request); + + if (!analogix_dp_channel_eq_ok(link_status, link_align, lane_count)) { + /* training pattern set to Normal */ + analogix_dp_training_pattern_dis(dp); + + dev_info(dp->dev, "Link Training success!\n"); + + analogix_dp_get_link_bandwidth(dp, &reg); + dp->link_train.link_rate = reg; + dev_dbg(dp->dev, "final bandwidth = %.2x\n", + dp->link_train.link_rate); + + analogix_dp_get_lane_count(dp, &reg); + dp->link_train.lane_count = reg; + dev_dbg(dp->dev, "final lane count = %.2x\n", + dp->link_train.lane_count); + + /* set enhanced mode if available */ + analogix_dp_set_enhanced_mode(dp); + dp->link_train.lt_state = FINISHED; + + return 0; + } + + /* not all locked */ + dp->link_train.eq_loop++; + + if (dp->link_train.eq_loop > MAX_EQ_LOOP) { + dev_err(dp->dev, "EQ Max loop\n"); + analogix_dp_reduce_link_rate(dp); + return -EIO; + } + + for (lane = 0; lane < lane_count; lane++) + analogix_dp_set_lane_link_training(dp, + dp->link_train.training_lane[lane], lane); + + retval = analogix_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET, + lane_count, dp->link_train.training_lane); + + return retval; +} + +static void analogix_dp_get_max_rx_bandwidth(struct analogix_dp_device *dp, + u8 *bandwidth) +{ + u8 data; + + /* + * For DP rev.1.1, Maximum link rate of Main Link lanes + * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps + * For DP rev.1.2, Maximum link rate of Main Link lanes + * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps, 0x14 = 5.4 Gbps + */ + analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data); + *bandwidth = data; +} + +static void analogix_dp_get_max_rx_lane_count(struct analogix_dp_device *dp, + u8 *lane_count) +{ + u8 data; + + /* + * For DP rev.1.1, Maximum number of Main Link lanes + * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes + */ + analogix_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data); + *lane_count = DPCD_MAX_LANE_COUNT(data); +} + +static void analogix_dp_init_training(struct analogix_dp_device *dp, + enum link_lane_count_type max_lane, + int max_rate) +{ + /* + * MACRO_RST must be applied after the PLL_LOCK to avoid + * the DP
inter-pair skew issue for at least 10 us + */ + analogix_dp_reset_macro(dp); + + /* Initialize by reading RX's DPCD */ + analogix_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate); + analogix_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count); + + if ((dp->link_train.link_rate != DP_LINK_BW_1_62) && + (dp->link_train.link_rate != DP_LINK_BW_2_7) && + (dp->link_train.link_rate != DP_LINK_BW_5_4)) { + dev_err(dp->dev, "Rx Max Link Rate is abnormal: %x!\n", + dp->link_train.link_rate); + dp->link_train.link_rate = DP_LINK_BW_1_62; + } + + if (dp->link_train.lane_count == 0) { + dev_err(dp->dev, "Rx Max Lane count is abnormal: %x!\n", + dp->link_train.lane_count); + dp->link_train.lane_count = (u8)LANE_COUNT1; + } + + /* Setup TX lane count & rate */ + if (dp->link_train.lane_count > max_lane) + dp->link_train.lane_count = max_lane; + if (dp->link_train.link_rate > max_rate) + dp->link_train.link_rate = max_rate; + + /* All DP analog module power up */ + analogix_dp_set_analog_power_down(dp, POWER_ALL, 0); +} + +static int analogix_dp_sw_link_training(struct analogix_dp_device *dp) +{ + int retval = 0, training_finished = 0; + + dp->link_train.lt_state = START; + + /* Process here */ + while (!retval && !training_finished) { + switch (dp->link_train.lt_state) { + case START: + retval = analogix_dp_link_start(dp); + if (retval) + dev_err(dp->dev, "LT link start failed!\n"); + break; + case CLOCK_RECOVERY: + retval = analogix_dp_process_clock_recovery(dp); + if (retval) + dev_err(dp->dev, "LT CR failed!\n"); + break; + case EQUALIZER_TRAINING: + retval = analogix_dp_process_equalizer_training(dp); + if (retval) + dev_err(dp->dev, "LT EQ failed!\n"); + break; + case FINISHED: + training_finished = 1; + break; + case FAILED: + return -EREMOTEIO; + } + } + if (retval) + dev_err(dp->dev, "eDP link training failed (%d)\n", retval); + + return retval; +} + +static int analogix_dp_set_link_train(struct analogix_dp_device *dp, + u32 count, u32 bwtype) +{ + int i; + int retval; + + for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) { + analogix_dp_init_training(dp, count, bwtype); + retval = analogix_dp_sw_link_training(dp); + if (retval == 0) + break; + + usleep_range(100, 110); + } + + return retval; +} + +static int analogix_dp_config_video(struct analogix_dp_device *dp) +{ + int retval = 0; + int timeout_loop = 0; + int done_count = 0; + + analogix_dp_config_video_slave_mode(dp); + + analogix_dp_set_video_color_format(dp); + + if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { + dev_err(dp->dev, "PLL is not locked yet.\n"); + return -EINVAL; + } + + for (;;) { + timeout_loop++; + if (analogix_dp_is_slave_video_stream_clock_on(dp) == 0) + break; + if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) { + dev_err(dp->dev, "Timeout waiting for video stream clock\n"); + return -ETIMEDOUT; + } + + usleep_range(1, 2); + } + + /* Set to use the register-calculated M/N video */ + analogix_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0); + + /* For video BIST, video timing must be generated by register */ + analogix_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE); + + /* Disable video mute */ + analogix_dp_enable_video_mute(dp, 0); + + /* Configure video slave mode */ + analogix_dp_enable_video_master(dp, 0); + + timeout_loop = 0; + + for (;;) { + timeout_loop++; + if (analogix_dp_is_video_stream_on(dp) == 0) { + done_count++; + if (done_count > 10) + break; + } else if (done_count) { + done_count = 0; + } + if (timeout_loop > DP_TIMEOUT_LOOP_COUNT) { + dev_err(dp->dev, "Timeout waiting for video stream clock\n"); +
return -ETIMEDOUT; + } + + usleep_range(1000, 1001); + } + + if (retval != 0) + dev_err(dp->dev, "Video stream is not detected!\n"); + + return retval; +} + +static void analogix_dp_enable_scramble(struct analogix_dp_device *dp, + bool enable) +{ + u8 data; + + if (enable) { + analogix_dp_enable_scrambling(dp); + + analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET, + &data); + analogix_dp_write_byte_to_dpcd(dp, + DP_TRAINING_PATTERN_SET, + (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE)); + } else { + analogix_dp_disable_scrambling(dp); + + analogix_dp_read_byte_from_dpcd(dp, DP_TRAINING_PATTERN_SET, + &data); + analogix_dp_write_byte_to_dpcd(dp, + DP_TRAINING_PATTERN_SET, + (u8)(data | DP_LINK_SCRAMBLING_DISABLE)); + } +} + +static irqreturn_t analogix_dp_hardirq(int irq, void *arg) +{ + struct analogix_dp_device *dp = arg; + irqreturn_t ret = IRQ_NONE; + enum dp_irq_type irq_type; + + irq_type = analogix_dp_get_irq_type(dp); + if (irq_type != DP_IRQ_TYPE_UNKNOWN) { + analogix_dp_mute_hpd_interrupt(dp); + ret = IRQ_WAKE_THREAD; + } + + return ret; +} + +static irqreturn_t analogix_dp_irq_thread(int irq, void *arg) +{ + struct analogix_dp_device *dp = arg; + enum dp_irq_type irq_type; + + irq_type = analogix_dp_get_irq_type(dp); + if (irq_type & DP_IRQ_TYPE_HP_CABLE_IN || + irq_type & DP_IRQ_TYPE_HP_CABLE_OUT) { + dev_dbg(dp->dev, "Detected cable status changed!\n"); + if (dp->drm_dev) + drm_helper_hpd_irq_event(dp->drm_dev); + } + + if (irq_type != DP_IRQ_TYPE_UNKNOWN) { + analogix_dp_clear_hotplug_interrupts(dp); + analogix_dp_unmute_hpd_interrupt(dp); + } + + return IRQ_HANDLED; +} + +static void analogix_dp_commit(struct analogix_dp_device *dp) +{ + int ret; + + /* Keep the panel disabled while we configure video */ + if (dp->plat_data->panel) { + if (drm_panel_disable(dp->plat_data->panel)) + DRM_ERROR("failed to disable the panel\n"); + } + + ret = analogix_dp_set_link_train(dp, dp->video_info.max_lane_count, + dp->video_info.max_link_rate); + if (ret) { + dev_err(dp->dev, "unable to do link train\n"); + return; + } + + analogix_dp_enable_scramble(dp, 1); + analogix_dp_enable_rx_to_enhanced_mode(dp, 1); + analogix_dp_enable_enhanced_mode(dp, 1); + + analogix_dp_init_video(dp); + ret = analogix_dp_config_video(dp); + if (ret) + dev_err(dp->dev, "unable to config video\n"); + + /* Safe to enable the panel now */ + if (dp->plat_data->panel) { + if (drm_panel_enable(dp->plat_data->panel)) + DRM_ERROR("failed to enable the panel\n"); + } + + /* Enable video */ + analogix_dp_start_video(dp); +} + +int analogix_dp_get_modes(struct drm_connector *connector) +{ + struct analogix_dp_device *dp = to_dp(connector); + struct edid *edid = (struct edid *)dp->edid; + int num_modes = 0; + + if (analogix_dp_handle_edid(dp) == 0) { + drm_mode_connector_update_edid_property(&dp->connector, edid); + num_modes += drm_add_edid_modes(&dp->connector, edid); + } + + if (dp->plat_data->panel) + num_modes += drm_panel_get_modes(dp->plat_data->panel); + + if (dp->plat_data->get_modes) + num_modes += dp->plat_data->get_modes(dp->plat_data); + + return num_modes; +} + +static struct drm_encoder * +analogix_dp_best_encoder(struct drm_connector *connector) +{ + struct analogix_dp_device *dp = to_dp(connector); + + return dp->encoder; +} + +static const struct drm_connector_helper_funcs analogix_dp_connector_helper_funcs = { + .get_modes = analogix_dp_get_modes, + .best_encoder = analogix_dp_best_encoder, +}; + +enum drm_connector_status +analogix_dp_detect(struct drm_connector *connector, bool force) +{ + struct 
analogix_dp_device *dp = to_dp(connector);
+
+	if (analogix_dp_detect_hpd(dp))
+		return connector_status_disconnected;
+
+	return connector_status_connected;
+}
+
+static void analogix_dp_connector_destroy(struct drm_connector *connector)
+{
+	drm_connector_unregister(connector);
+	drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs analogix_dp_connector_funcs = {
+	.dpms = drm_atomic_helper_connector_dpms,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.detect = analogix_dp_detect,
+	.destroy = analogix_dp_connector_destroy,
+	.reset = drm_atomic_helper_connector_reset,
+	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	struct drm_encoder *encoder = dp->encoder;
+	struct drm_connector *connector = &dp->connector;
+	int ret;
+
+	if (!bridge->encoder) {
+		DRM_ERROR("Parent encoder object not found");
+		return -ENODEV;
+	}
+
+	connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+	ret = drm_connector_init(dp->drm_dev, connector,
+				 &analogix_dp_connector_funcs,
+				 DRM_MODE_CONNECTOR_eDP);
+	if (ret) {
+		DRM_ERROR("Failed to initialize connector with drm\n");
+		return ret;
+	}
+
+	drm_connector_helper_add(connector,
+				 &analogix_dp_connector_helper_funcs);
+	drm_mode_connector_attach_encoder(connector, encoder);
+
+	/*
+	 * NOTE: the connector registration is implemented in the analogix
+	 * platform driver, which is to say the connector only exists after
+	 * plat_data->attach returns; that is why we record the connector
+	 * pointer after the platform attach.
+	 */
+	if (dp->plat_data->attach) {
+		ret = dp->plat_data->attach(dp->plat_data, bridge, connector);
+		if (ret) {
+			DRM_ERROR("Failed at platform attach func\n");
+			return ret;
+		}
+	}
+
+	if (dp->plat_data->panel) {
+		ret = drm_panel_attach(dp->plat_data->panel, &dp->connector);
+		if (ret) {
+			DRM_ERROR("Failed to attach panel\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void analogix_dp_bridge_enable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+
+	if (dp->dpms_mode == DRM_MODE_DPMS_ON)
+		return;
+
+	pm_runtime_get_sync(dp->dev);
+
+	if (dp->plat_data->power_on)
+		dp->plat_data->power_on(dp->plat_data);
+
+	phy_power_on(dp->phy);
+	analogix_dp_init_dp(dp);
+	enable_irq(dp->irq);
+	analogix_dp_commit(dp);
+
+	dp->dpms_mode = DRM_MODE_DPMS_ON;
+}
+
+static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+
+	if (dp->dpms_mode != DRM_MODE_DPMS_ON)
+		return;
+
+	if (dp->plat_data->panel) {
+		if (drm_panel_disable(dp->plat_data->panel)) {
+			DRM_ERROR("failed to disable the panel\n");
+			return;
+		}
+	}
+
+	disable_irq(dp->irq);
+	phy_power_off(dp->phy);
+
+	if (dp->plat_data->power_off)
+		dp->plat_data->power_off(dp->plat_data);
+
+	pm_runtime_put_sync(dp->dev);
+
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
+}
+
+static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
+					struct drm_display_mode *orig_mode,
+					struct drm_display_mode *mode)
+{
+	struct analogix_dp_device *dp = bridge->driver_private;
+	struct drm_display_info *display_info = &dp->connector.display_info;
+	struct video_info *video = &dp->video_info;
+	struct device_node *dp_node = dp->dev->of_node;
+	int vic;
+
+	/* Input video interlaces & hsync pol & vsync pol */
+	video->interlaced = !!(mode->flags & 
DRM_MODE_FLAG_INTERLACE);
+	video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+	video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+	/* Input video dynamic_range & colorimetry */
+	vic = drm_match_cea_mode(mode);
+	if ((vic == 6) || (vic == 7) || (vic == 21) || (vic == 22) ||
+	    (vic == 2) || (vic == 3) || (vic == 17) || (vic == 18)) {
+		video->dynamic_range = CEA;
+		video->ycbcr_coeff = COLOR_YCBCR601;
+	} else if (vic) {
+		video->dynamic_range = CEA;
+		video->ycbcr_coeff = COLOR_YCBCR709;
+	} else {
+		video->dynamic_range = VESA;
+		video->ycbcr_coeff = COLOR_YCBCR709;
+	}
+
+	/* Input video bpc and color_formats */
+	switch (display_info->bpc) {
+	case 12:
+		video->color_depth = COLOR_12;
+		break;
+	case 10:
+		video->color_depth = COLOR_10;
+		break;
+	case 8:
+		video->color_depth = COLOR_8;
+		break;
+	case 6:
+		video->color_depth = COLOR_6;
+		break;
+	default:
+		video->color_depth = COLOR_8;
+		break;
+	}
+	if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+		video->color_space = COLOR_YCBCR444;
+	else if (display_info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+		video->color_space = COLOR_YCBCR422;
+	else if (display_info->color_formats & DRM_COLOR_FORMAT_RGB444)
+		video->color_space = COLOR_RGB;
+	else
+		video->color_space = COLOR_RGB;
+
+	/*
+	 * NOTE: this property parsing code provides backward
+	 * compatibility for the samsung platform. Because the
+	 * "of_property_read_u32" interfaces are used, "video_info"
+	 * keeps its original values when a property is not present.
+	 */
+	of_property_read_u32(dp_node, "samsung,color-space",
+			     &video->color_space);
+	of_property_read_u32(dp_node, "samsung,dynamic-range",
+			     &video->dynamic_range);
+	of_property_read_u32(dp_node, "samsung,ycbcr-coeff",
+			     &video->ycbcr_coeff);
+	of_property_read_u32(dp_node, "samsung,color-depth",
+			     &video->color_depth);
+	if (of_property_read_bool(dp_node, "hsync-active-high"))
+		video->h_sync_polarity = true;
+	if (of_property_read_bool(dp_node, "vsync-active-high"))
+		video->v_sync_polarity = true;
+	if (of_property_read_bool(dp_node, "interlaced"))
+		video->interlaced = true;
+}
+
+static void analogix_dp_bridge_nop(struct drm_bridge *bridge)
+{
+	/* do nothing */
+}
+
+static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+	.enable = analogix_dp_bridge_enable,
+	.disable = analogix_dp_bridge_disable,
+	.pre_enable = analogix_dp_bridge_nop,
+	.post_disable = analogix_dp_bridge_nop,
+	.mode_set = analogix_dp_bridge_mode_set,
+	.attach = analogix_dp_bridge_attach,
+};
+
+static int analogix_dp_create_bridge(struct drm_device *drm_dev,
+				     struct analogix_dp_device *dp)
+{
+	struct drm_bridge *bridge;
+	int ret;
+
+	bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		DRM_ERROR("failed to allocate for drm bridge\n");
+		return -ENOMEM;
+	}
+
+	dp->bridge = bridge;
+
+	dp->encoder->bridge = bridge;
+	bridge->driver_private = dp;
+	bridge->encoder = dp->encoder;
+	bridge->funcs = &analogix_dp_bridge_funcs;
+
+	ret = drm_bridge_attach(drm_dev, bridge);
+	if (ret) {
+		DRM_ERROR("failed to attach drm bridge\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
+{
+	struct device_node *dp_node = dp->dev->of_node;
+	struct video_info *video_info = &dp->video_info;
+
+	switch (dp->plat_data->dev_type) {
+	case RK3288_DP:
+		/*
+		 * The RK3288 DisplayPort TRM states that the "Main link
+		 * contains 4 physical lanes of 2.7/1.62 Gbps/lane".
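+		 * The raw values below are the DPCD encodings: 0x0a is
+		 * DP_LINK_BW_2_7 and 0x04 requests a 4-lane main link.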
+		 */
+		video_info->max_link_rate = 0x0A;
+		video_info->max_lane_count = 0x04;
+		break;
+	case EXYNOS_DP:
+		/*
+		 * NOTE: this property parsing code provides backward
+		 * compatibility for the samsung platform.
+		 */
+		of_property_read_u32(dp_node, "samsung,link-rate",
+				     &video_info->max_link_rate);
+		of_property_read_u32(dp_node, "samsung,lane-count",
+				     &video_info->max_lane_count);
+		break;
+	}
+
+	return 0;
+}
+
+int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
+		     struct analogix_dp_plat_data *plat_data)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct analogix_dp_device *dp;
+	struct resource *res;
+	unsigned int irq_flags;
+	int ret;
+
+	if (!plat_data) {
+		dev_err(dev, "Invalid input plat_data\n");
+		return -EINVAL;
+	}
+
+	dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, dp);
+
+	dp->dev = &pdev->dev;
+	dp->dpms_mode = DRM_MODE_DPMS_OFF;
+
+	/*
+	 * The platform dp driver needs to use container_of() on plat_data
+	 * to reach its driver private data, so we must store the pointer
+	 * to plat_data itself, not a copy of its contents.
+	 */
+	dp->plat_data = plat_data;
+
+	ret = analogix_dp_dt_parse_pdata(dp);
+	if (ret)
+		return ret;
+
+	dp->phy = devm_phy_get(dp->dev, "dp");
+	if (IS_ERR(dp->phy)) {
+		dev_err(dp->dev, "no DP phy configured\n");
+		ret = PTR_ERR(dp->phy);
+		if (ret) {
+			/*
+			 * The phy itself is not enabled, so we can move
+			 * forward by assigning NULL to the phy pointer.
+			 */
+			if (ret == -ENOSYS || ret == -ENODEV)
+				dp->phy = NULL;
+			else
+				return ret;
+		}
+	}
+
+	dp->clock = devm_clk_get(&pdev->dev, "dp");
+	if (IS_ERR(dp->clock)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		return PTR_ERR(dp->clock);
+	}
+
+	clk_prepare_enable(dp->clock);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	dp->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(dp->reg_base))
+		return PTR_ERR(dp->reg_base);
+
+	dp->force_hpd = of_property_read_bool(dev->of_node, "force-hpd");
+
+	dp->hpd_gpio = of_get_named_gpio(dev->of_node, "hpd-gpios", 0);
+	if (!gpio_is_valid(dp->hpd_gpio))
+		dp->hpd_gpio = of_get_named_gpio(dev->of_node,
+						 "samsung,hpd-gpio", 0);
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		/*
+		 * Set up the hotplug GPIO from the device tree as an interrupt.
+		 * Simply specifying a different interrupt in the device tree
+		 * doesn't work since we handle hotplug rather differently when
+		 * using a GPIO. We also need the actual GPIO specifier so
+		 * that we can get the current state of the GPIO.
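+		 * Both signal edges are requested below so that plugging
+		 * and unplugging the cable each raise the interrupt.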
+ */ + ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN, + "hpd_gpio"); + if (ret) { + dev_err(&pdev->dev, "failed to get hpd gpio\n"); + return ret; + } + dp->irq = gpio_to_irq(dp->hpd_gpio); + irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; + } else { + dp->hpd_gpio = -ENODEV; + dp->irq = platform_get_irq(pdev, 0); + irq_flags = 0; + } + + if (dp->irq == -ENXIO) { + dev_err(&pdev->dev, "failed to get irq\n"); + return -ENODEV; + } + + pm_runtime_enable(dev); + + phy_power_on(dp->phy); + + if (dp->plat_data->panel) { + if (drm_panel_prepare(dp->plat_data->panel)) { + DRM_ERROR("failed to setup the panel\n"); + return -EBUSY; + } + } + + analogix_dp_init_dp(dp); + + ret = devm_request_threaded_irq(&pdev->dev, dp->irq, + analogix_dp_hardirq, + analogix_dp_irq_thread, + irq_flags, "analogix-dp", dp); + if (ret) { + dev_err(&pdev->dev, "failed to request irq\n"); + goto err_disable_pm_runtime; + } + disable_irq(dp->irq); + + dp->drm_dev = drm_dev; + dp->encoder = dp->plat_data->encoder; + + ret = analogix_dp_create_bridge(drm_dev, dp); + if (ret) { + DRM_ERROR("failed to create bridge (%d)\n", ret); + drm_encoder_cleanup(dp->encoder); + goto err_disable_pm_runtime; + } + + return 0; + +err_disable_pm_runtime: + pm_runtime_disable(dev); + + return ret; +} +EXPORT_SYMBOL_GPL(analogix_dp_bind); + +void analogix_dp_unbind(struct device *dev, struct device *master, + void *data) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + + analogix_dp_bridge_disable(dp->bridge); + + if (dp->plat_data->panel) { + if (drm_panel_unprepare(dp->plat_data->panel)) + DRM_ERROR("failed to turnoff the panel\n"); + } + + pm_runtime_disable(dev); +} +EXPORT_SYMBOL_GPL(analogix_dp_unbind); + +#ifdef CONFIG_PM +int analogix_dp_suspend(struct device *dev) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + + clk_disable_unprepare(dp->clock); + + if (dp->plat_data->panel) { + if (drm_panel_unprepare(dp->plat_data->panel)) + DRM_ERROR("failed to turnoff the panel\n"); + } + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_suspend); + +int analogix_dp_resume(struct device *dev) +{ + struct analogix_dp_device *dp = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(dp->clock); + if (ret < 0) { + DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); + return ret; + } + + if (dp->plat_data->panel) { + if (drm_panel_prepare(dp->plat_data->panel)) { + DRM_ERROR("failed to setup the panel\n"); + return -EBUSY; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(analogix_dp_resume); +#endif + +MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); +MODULE_DESCRIPTION("Analogix DP Core Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h new file mode 100644 index 000000000000..f09275d40f70 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -0,0 +1,281 @@ +/* + * Header file for Analogix DP (Display Port) core interface driver. + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef _ANALOGIX_DP_CORE_H +#define _ANALOGIX_DP_CORE_H + +#include <drm/drm_crtc.h> +#include <drm/drm_dp_helper.h> + +#define DP_TIMEOUT_LOOP_COUNT 100 +#define MAX_CR_LOOP 5 +#define MAX_EQ_LOOP 5 + +/* I2C EDID Chip ID, Slave Address */ +#define I2C_EDID_DEVICE_ADDR 0x50 +#define I2C_E_EDID_DEVICE_ADDR 0x30 + +#define EDID_BLOCK_LENGTH 0x80 +#define EDID_HEADER_PATTERN 0x00 +#define EDID_EXTENSION_FLAG 0x7e +#define EDID_CHECKSUM 0x7f + +/* DP_MAX_LANE_COUNT */ +#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1) +#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f) + +/* DP_LANE_COUNT_SET */ +#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f) + +/* DP_TRAINING_LANE0_SET */ +#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3) +#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3) +#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0) +#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3) + +enum link_lane_count_type { + LANE_COUNT1 = 1, + LANE_COUNT2 = 2, + LANE_COUNT4 = 4 +}; + +enum link_training_state { + START, + CLOCK_RECOVERY, + EQUALIZER_TRAINING, + FINISHED, + FAILED +}; + +enum voltage_swing_level { + VOLTAGE_LEVEL_0, + VOLTAGE_LEVEL_1, + VOLTAGE_LEVEL_2, + VOLTAGE_LEVEL_3, +}; + +enum pre_emphasis_level { + PRE_EMPHASIS_LEVEL_0, + PRE_EMPHASIS_LEVEL_1, + PRE_EMPHASIS_LEVEL_2, + PRE_EMPHASIS_LEVEL_3, +}; + +enum pattern_set { + PRBS7, + D10_2, + TRAINING_PTN1, + TRAINING_PTN2, + DP_NONE +}; + +enum color_space { + COLOR_RGB, + COLOR_YCBCR422, + COLOR_YCBCR444 +}; + +enum color_depth { + COLOR_6, + COLOR_8, + COLOR_10, + COLOR_12 +}; + +enum color_coefficient { + COLOR_YCBCR601, + COLOR_YCBCR709 +}; + +enum dynamic_range { + VESA, + CEA +}; + +enum pll_status { + PLL_UNLOCKED, + PLL_LOCKED +}; + +enum clock_recovery_m_value_type { + CALCULATED_M, + REGISTER_M +}; + +enum video_timing_recognition_type { + VIDEO_TIMING_FROM_CAPTURE, + VIDEO_TIMING_FROM_REGISTER +}; + +enum analog_power_block { + AUX_BLOCK, + CH0_BLOCK, + CH1_BLOCK, + CH2_BLOCK, + CH3_BLOCK, + ANALOG_TOTAL, + POWER_ALL +}; + +enum dp_irq_type { + DP_IRQ_TYPE_HP_CABLE_IN, + DP_IRQ_TYPE_HP_CABLE_OUT, + DP_IRQ_TYPE_HP_CHANGE, + DP_IRQ_TYPE_UNKNOWN, +}; + +struct video_info { + char *name; + + bool h_sync_polarity; + bool v_sync_polarity; + bool interlaced; + + enum color_space color_space; + enum dynamic_range dynamic_range; + enum color_coefficient ycbcr_coeff; + enum color_depth color_depth; + + int max_link_rate; + enum link_lane_count_type max_lane_count; +}; + +struct link_train { + int eq_loop; + int cr_loop[4]; + + u8 link_rate; + u8 lane_count; + u8 training_lane[4]; + + enum link_training_state lt_state; +}; + +struct analogix_dp_device { + struct drm_encoder *encoder; + struct device *dev; + struct drm_device *drm_dev; + struct drm_connector connector; + struct drm_bridge *bridge; + struct clk *clock; + unsigned int irq; + void __iomem *reg_base; + + struct video_info video_info; + struct link_train link_train; + struct phy *phy; + int dpms_mode; + int hpd_gpio; + bool force_hpd; + unsigned char edid[EDID_BLOCK_LENGTH * 2]; + + struct analogix_dp_plat_data *plat_data; +}; + +/* analogix_dp_reg.c */ +void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable); +void analogix_dp_stop_video(struct analogix_dp_device *dp); +void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable); +void analogix_dp_init_analog_param(struct analogix_dp_device *dp); +void analogix_dp_init_interrupt(struct analogix_dp_device *dp); +void analogix_dp_reset(struct analogix_dp_device *dp); +void 
analogix_dp_swreset(struct analogix_dp_device *dp); +void analogix_dp_config_interrupt(struct analogix_dp_device *dp); +void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp); +void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp); +enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp); +void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable); +void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, + enum analog_power_block block, + bool enable); +void analogix_dp_init_analog_func(struct analogix_dp_device *dp); +void analogix_dp_init_hpd(struct analogix_dp_device *dp); +void analogix_dp_force_hpd(struct analogix_dp_device *dp); +enum dp_irq_type analogix_dp_get_irq_type(struct analogix_dp_device *dp); +void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp); +void analogix_dp_reset_aux(struct analogix_dp_device *dp); +void analogix_dp_init_aux(struct analogix_dp_device *dp); +int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp); +void analogix_dp_enable_sw_function(struct analogix_dp_device *dp); +int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp); +int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned char data); +int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned char *data); +int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned int count, + unsigned char data[]); +int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned int count, + unsigned char data[]); +int analogix_dp_select_i2c_device(struct analogix_dp_device *dp, + unsigned int device_addr, + unsigned int reg_addr); +int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp, + unsigned int device_addr, + unsigned int reg_addr, + unsigned int *data); +int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp, + unsigned int device_addr, + unsigned int reg_addr, + unsigned int count, + unsigned char edid[]); +void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype); +void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype); +void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count); +void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count); +void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, + bool enable); +void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, + enum pattern_set pattern); +void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, + u32 level); +void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, + u32 level); +void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, + u32 level); +void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, + u32 level); +void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, + u32 training_lane); +void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, + u32 training_lane); +void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, + u32 training_lane); +void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, + u32 training_lane); +u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp); +u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp); +u32 
analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp); +u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp); +void analogix_dp_reset_macro(struct analogix_dp_device *dp); +void analogix_dp_init_video(struct analogix_dp_device *dp); + +void analogix_dp_set_video_color_format(struct analogix_dp_device *dp); +int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp); +void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp, + enum clock_recovery_m_value_type type, + u32 m_value, + u32 n_value); +void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type); +void analogix_dp_enable_video_master(struct analogix_dp_device *dp, + bool enable); +void analogix_dp_start_video(struct analogix_dp_device *dp); +int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp); +void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp); +void analogix_dp_enable_scrambling(struct analogix_dp_device *dp); +void analogix_dp_disable_scrambling(struct analogix_dp_device *dp); +#endif /* _ANALOGIX_DP_CORE_H */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c new file mode 100644 index 000000000000..49205ef02be3 --- /dev/null +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c @@ -0,0 +1,1320 @@ +/* + * Analogix DP (Display port) core register interface driver. + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. + * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#include <linux/device.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/gpio.h> + +#include <drm/bridge/analogix_dp.h> + +#include "analogix_dp_core.h" +#include "analogix_dp_reg.h" + +#define COMMON_INT_MASK_1 0 +#define COMMON_INT_MASK_2 0 +#define COMMON_INT_MASK_3 0 +#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) +#define INT_STA_MASK INT_HPD + +void analogix_dp_enable_video_mute(struct analogix_dp_device *dp, bool enable) +{ + u32 reg; + + if (enable) { + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg |= HDCP_VIDEO_MUTE; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + } else { + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg &= ~HDCP_VIDEO_MUTE; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + } +} + +void analogix_dp_stop_video(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg &= ~VIDEO_EN; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); +} + +void analogix_dp_lane_swap(struct analogix_dp_device *dp, bool enable) +{ + u32 reg; + + if (enable) + reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | + LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; + else + reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | + LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; + + writel(reg, dp->reg_base + ANALOGIX_DP_LANE_MAP); +} + +void analogix_dp_init_analog_param(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = TX_TERMINAL_CTRL_50_OHM; + writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_1); + + reg = SEL_24M | TX_DVDD_BIT_1_0625V; + writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2); + + if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) { + writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1); + writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2); + writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3); + writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4); + writel(0x22, dp->reg_base + ANALOGIX_DP_PLL_REG_5); + } + + reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; + writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_3); + + reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | + TX_CUR1_2X | TX_CUR_16_MA; + writel(reg, dp->reg_base + ANALOGIX_DP_PLL_FILTER_CTL_1); + + reg = CH3_AMP_400_MV | CH2_AMP_400_MV | + CH1_AMP_400_MV | CH0_AMP_400_MV; + writel(reg, dp->reg_base + ANALOGIX_DP_TX_AMP_TUNING_CTL); +} + +void analogix_dp_init_interrupt(struct analogix_dp_device *dp) +{ + /* Set interrupt pin assertion polarity as high */ + writel(INT_POL1 | INT_POL0, dp->reg_base + ANALOGIX_DP_INT_CTL); + + /* Clear pending regisers */ + writel(0xff, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + writel(0x4f, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_2); + writel(0xe0, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_3); + writel(0xe7, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); + writel(0x63, dp->reg_base + ANALOGIX_DP_INT_STA); + + /* 0:mask,1: unmask */ + writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); + writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); + writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); + writel(0x00, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + writel(0x00, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); +} + +void analogix_dp_reset(struct analogix_dp_device *dp) +{ + u32 reg; + + analogix_dp_stop_video(dp); + analogix_dp_enable_video_mute(dp, 0); + + reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | + AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | + HDCP_FUNC_EN_N | SW_FUNC_EN_N; + writel(reg, dp->reg_base + 
ANALOGIX_DP_FUNC_EN_1); + + reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | + SERDES_FIFO_FUNC_EN_N | + LS_CLK_DOMAIN_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + + usleep_range(20, 30); + + analogix_dp_lane_swap(dp, 0); + + writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + writel(0x40, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + writel(0x0, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + + writel(0x0, dp->reg_base + ANALOGIX_DP_PKT_SEND_CTL); + writel(0x0, dp->reg_base + ANALOGIX_DP_HDCP_CTL); + + writel(0x5e, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_L); + writel(0x1a, dp->reg_base + ANALOGIX_DP_HPD_DEGLITCH_H); + + writel(0x10, dp->reg_base + ANALOGIX_DP_LINK_DEBUG_CTL); + + writel(0x0, dp->reg_base + ANALOGIX_DP_PHY_TEST); + + writel(0x0, dp->reg_base + ANALOGIX_DP_VIDEO_FIFO_THRD); + writel(0x20, dp->reg_base + ANALOGIX_DP_AUDIO_MARGIN); + + writel(0x4, dp->reg_base + ANALOGIX_DP_M_VID_GEN_FILTER_TH); + writel(0x2, dp->reg_base + ANALOGIX_DP_M_AUD_GEN_FILTER_TH); + + writel(0x00000101, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); +} + +void analogix_dp_swreset(struct analogix_dp_device *dp) +{ + writel(RESET_DP_TX, dp->reg_base + ANALOGIX_DP_TX_SW_RESET); +} + +void analogix_dp_config_interrupt(struct analogix_dp_device *dp) +{ + u32 reg; + + /* 0: mask, 1: unmask */ + reg = COMMON_INT_MASK_1; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_1); + + reg = COMMON_INT_MASK_2; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_2); + + reg = COMMON_INT_MASK_3; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_3); + + reg = COMMON_INT_MASK_4; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + + reg = INT_STA_MASK; + writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); +} + +void analogix_dp_mute_hpd_interrupt(struct analogix_dp_device *dp) +{ + u32 reg; + + /* 0: mask, 1: unmask */ + reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + reg &= ~COMMON_INT_MASK_4; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + + reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA_MASK); + reg &= ~INT_STA_MASK; + writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); +} + +void analogix_dp_unmute_hpd_interrupt(struct analogix_dp_device *dp) +{ + u32 reg; + + /* 0: mask, 1: unmask */ + reg = COMMON_INT_MASK_4; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_MASK_4); + + reg = INT_STA_MASK; + writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA_MASK); +} + +enum pll_status analogix_dp_get_pll_lock_status(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + if (reg & PLL_LOCK) + return PLL_LOCKED; + else + return PLL_UNLOCKED; +} + +void analogix_dp_set_pll_power_down(struct analogix_dp_device *dp, bool enable) +{ + u32 reg; + + if (enable) { + reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL); + reg |= DP_PLL_PD; + writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL); + } else { + reg = readl(dp->reg_base + ANALOGIX_DP_PLL_CTL); + reg &= ~DP_PLL_PD; + writel(reg, dp->reg_base + ANALOGIX_DP_PLL_CTL); + } +} + +void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp, + enum analog_power_block block, + bool enable) +{ + u32 reg; + u32 phy_pd_addr = ANALOGIX_DP_PHY_PD; + + if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) + phy_pd_addr = ANALOGIX_DP_PD; + + switch (block) { + case AUX_BLOCK: + if (enable) { + reg = readl(dp->reg_base + phy_pd_addr); + reg |= AUX_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + reg = readl(dp->reg_base + 
phy_pd_addr); + reg &= ~AUX_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } + break; + case CH0_BLOCK: + if (enable) { + reg = readl(dp->reg_base + phy_pd_addr); + reg |= CH0_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + reg = readl(dp->reg_base + phy_pd_addr); + reg &= ~CH0_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } + break; + case CH1_BLOCK: + if (enable) { + reg = readl(dp->reg_base + phy_pd_addr); + reg |= CH1_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + reg = readl(dp->reg_base + phy_pd_addr); + reg &= ~CH1_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } + break; + case CH2_BLOCK: + if (enable) { + reg = readl(dp->reg_base + phy_pd_addr); + reg |= CH2_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + reg = readl(dp->reg_base + phy_pd_addr); + reg &= ~CH2_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } + break; + case CH3_BLOCK: + if (enable) { + reg = readl(dp->reg_base + phy_pd_addr); + reg |= CH3_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + reg = readl(dp->reg_base + phy_pd_addr); + reg &= ~CH3_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } + break; + case ANALOG_TOTAL: + if (enable) { + reg = readl(dp->reg_base + phy_pd_addr); + reg |= DP_PHY_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + reg = readl(dp->reg_base + phy_pd_addr); + reg &= ~DP_PHY_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } + break; + case POWER_ALL: + if (enable) { + reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD | + CH1_PD | CH0_PD; + writel(reg, dp->reg_base + phy_pd_addr); + } else { + writel(0x00, dp->reg_base + phy_pd_addr); + } + break; + default: + break; + } +} + +void analogix_dp_init_analog_func(struct analogix_dp_device *dp) +{ + u32 reg; + int timeout_loop = 0; + + analogix_dp_set_analog_power_down(dp, POWER_ALL, 0); + + reg = PLL_LOCK_CHG; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + + reg = readl(dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); + writel(reg, dp->reg_base + ANALOGIX_DP_DEBUG_CTL); + + /* Power up PLL */ + if (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { + analogix_dp_set_pll_power_down(dp, 0); + + while (analogix_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { + timeout_loop++; + if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { + dev_err(dp->dev, "failed to get pll lock status\n"); + return; + } + usleep_range(10, 20); + } + } + + /* Enable Serdes FIFO function and Link symbol clock domain module */ + reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2); + reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N + | AUX_FUNC_EN_N); + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2); +} + +void analogix_dp_clear_hotplug_interrupts(struct analogix_dp_device *dp) +{ + u32 reg; + + if (gpio_is_valid(dp->hpd_gpio)) + return; + + reg = HOTPLUG_CHG | HPD_LOST | PLUG; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4); + + reg = INT_HPD; + writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA); +} + +void analogix_dp_init_hpd(struct analogix_dp_device *dp) +{ + u32 reg; + + if (gpio_is_valid(dp->hpd_gpio)) + return; + + analogix_dp_clear_hotplug_interrupts(dp); + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg &= ~(F_HPD | HPD_CTRL); + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); +} + +void analogix_dp_force_hpd(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + reg = (F_HPD | HPD_CTRL); + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); +} + +enum dp_irq_type 
analogix_dp_get_irq_type(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		reg = gpio_get_value(dp->hpd_gpio);
+		if (reg)
+			return DP_IRQ_TYPE_HP_CABLE_IN;
+		else
+			return DP_IRQ_TYPE_HP_CABLE_OUT;
+	} else {
+		/* Parse hotplug interrupt status register */
+		reg = readl(dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_4);
+
+		if (reg & PLUG)
+			return DP_IRQ_TYPE_HP_CABLE_IN;
+
+		if (reg & HPD_LOST)
+			return DP_IRQ_TYPE_HP_CABLE_OUT;
+
+		if (reg & HOTPLUG_CHG)
+			return DP_IRQ_TYPE_HP_CHANGE;
+
+		return DP_IRQ_TYPE_UNKNOWN;
+	}
+}
+
+void analogix_dp_reset_aux(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* Disable AUX channel module */
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+	reg |= AUX_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+void analogix_dp_init_aux(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	/* Clear interrupts related to AUX channel */
+	reg = RPLY_RECEIV | AUX_ERR;
+	writel(reg, dp->reg_base + ANALOGIX_DP_INT_STA);
+
+	analogix_dp_reset_aux(dp);
+
+	/* Disable AUX transaction H/W retry */
+	if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP))
+		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
+		      AUX_HW_RETRY_COUNT_SEL(3) |
+		      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+	else
+		reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) |
+		      AUX_HW_RETRY_COUNT_SEL(0) |
+		      AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_HW_RETRY_CTL);
+
+	/* Receive AUX Channel DEFER commands equal to DEFER_COUNT*64 */
+	reg = DEFER_CTRL_EN | DEFER_COUNT(1);
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_DEFER_CTL);
+
+	/* Enable AUX channel module */
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+	reg &= ~AUX_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_2);
+}
+
+int analogix_dp_get_plug_in_status(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	if (gpio_is_valid(dp->hpd_gpio)) {
+		if (gpio_get_value(dp->hpd_gpio))
+			return 0;
+	} else {
+		reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3);
+		if (reg & HPD_STATUS)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+void analogix_dp_enable_sw_function(struct analogix_dp_device *dp)
+{
+	u32 reg;
+
+	reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+	reg &= ~SW_FUNC_EN_N;
+	writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1);
+}
+
+int analogix_dp_start_aux_transaction(struct analogix_dp_device *dp)
+{
+	int reg;
+	int retval = 0;
+	int timeout_loop = 0;
+
+	/* Enable AUX CH operation */
+	reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+	reg |= AUX_EN;
+	writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2);
+
+	/* Is AUX CH command reply received? 
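+	 * The loop below polls RPLY_RECEIV in INT_STA, sleeping about
+	 * 10 us per iteration and giving up after DP_TIMEOUT_LOOP_COUNT
+	 * tries.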
*/ + reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); + while (!(reg & RPLY_RECEIV)) { + timeout_loop++; + if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { + dev_err(dp->dev, "AUX CH command reply failed!\n"); + return -ETIMEDOUT; + } + reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); + usleep_range(10, 11); + } + + /* Clear interrupt source for AUX CH command reply */ + writel(RPLY_RECEIV, dp->reg_base + ANALOGIX_DP_INT_STA); + + /* Clear interrupt source for AUX CH access error */ + reg = readl(dp->reg_base + ANALOGIX_DP_INT_STA); + if (reg & AUX_ERR) { + writel(AUX_ERR, dp->reg_base + ANALOGIX_DP_INT_STA); + return -EREMOTEIO; + } + + /* Check AUX CH error access status */ + reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_STA); + if ((reg & AUX_STATUS_MASK) != 0) { + dev_err(dp->dev, "AUX CH error happens: %d\n\n", + reg & AUX_STATUS_MASK); + return -EREMOTEIO; + } + + return retval; +} + +int analogix_dp_write_byte_to_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned char data) +{ + u32 reg; + int i; + int retval; + + for (i = 0; i < 3; i++) { + /* Clear AUX CH data buffer */ + reg = BUF_CLR; + writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + + /* Select DPCD device address */ + reg = AUX_ADDR_7_0(reg_addr); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + reg = AUX_ADDR_15_8(reg_addr); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + reg = AUX_ADDR_19_16(reg_addr); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + + /* Write data buffer */ + reg = (unsigned int)data; + writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0); + + /* + * Set DisplayPort transaction and write 1 byte + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. + */ + reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval == 0) + break; + + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); + } + + return retval; +} + +int analogix_dp_read_byte_from_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned char *data) +{ + u32 reg; + int i; + int retval; + + for (i = 0; i < 3; i++) { + /* Clear AUX CH data buffer */ + reg = BUF_CLR; + writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + + /* Select DPCD device address */ + reg = AUX_ADDR_7_0(reg_addr); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + reg = AUX_ADDR_15_8(reg_addr); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + reg = AUX_ADDR_19_16(reg_addr); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + + /* + * Set DisplayPort transaction and read 1 byte + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. 
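+		 * AUX_TX_COMM_DP_TRANSACTION below sets that bit, so this
+		 * is a native AUX read rather than an I2C-over-AUX one.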
+ */ + reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval == 0) + break; + + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); + } + + /* Read data buffer */ + reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0); + *data = (unsigned char)(reg & 0xff); + + return retval; +} + +int analogix_dp_write_bytes_to_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned int count, + unsigned char data[]) +{ + u32 reg; + unsigned int start_offset; + unsigned int cur_data_count; + unsigned int cur_data_idx; + int i; + int retval = 0; + + /* Clear AUX CH data buffer */ + reg = BUF_CLR; + writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + + start_offset = 0; + while (start_offset < count) { + /* Buffer size of AUX CH is 16 * 4bytes */ + if ((count - start_offset) > 16) + cur_data_count = 16; + else + cur_data_count = count - start_offset; + + for (i = 0; i < 3; i++) { + /* Select DPCD device address */ + reg = AUX_ADDR_7_0(reg_addr + start_offset); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + reg = AUX_ADDR_15_8(reg_addr + start_offset); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + reg = AUX_ADDR_19_16(reg_addr + start_offset); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + + for (cur_data_idx = 0; cur_data_idx < cur_data_count; + cur_data_idx++) { + reg = data[start_offset + cur_data_idx]; + writel(reg, dp->reg_base + + ANALOGIX_DP_BUF_DATA_0 + + 4 * cur_data_idx); + } + + /* + * Set DisplayPort transaction and write + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. + */ + reg = AUX_LENGTH(cur_data_count) | + AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval == 0) + break; + + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", + __func__); + } + + start_offset += cur_data_count; + } + + return retval; +} + +int analogix_dp_read_bytes_from_dpcd(struct analogix_dp_device *dp, + unsigned int reg_addr, + unsigned int count, + unsigned char data[]) +{ + u32 reg; + unsigned int start_offset; + unsigned int cur_data_count; + unsigned int cur_data_idx; + int i; + int retval = 0; + + /* Clear AUX CH data buffer */ + reg = BUF_CLR; + writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + + start_offset = 0; + while (start_offset < count) { + /* Buffer size of AUX CH is 16 * 4bytes */ + if ((count - start_offset) > 16) + cur_data_count = 16; + else + cur_data_count = count - start_offset; + + /* AUX CH Request Transaction process */ + for (i = 0; i < 3; i++) { + /* Select DPCD device address */ + reg = AUX_ADDR_7_0(reg_addr + start_offset); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + reg = AUX_ADDR_15_8(reg_addr + start_offset); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + reg = AUX_ADDR_19_16(reg_addr + start_offset); + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + + /* + * Set DisplayPort transaction and read + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. 
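+			 * AUX_LENGTH(cur_data_count) encodes the burst size;
+			 * the loop above caps it at 16 bytes to match the
+			 * AUX channel buffer.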
+ */ + reg = AUX_LENGTH(cur_data_count) | + AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval == 0) + break; + + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", + __func__); + } + + for (cur_data_idx = 0; cur_data_idx < cur_data_count; + cur_data_idx++) { + reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + + 4 * cur_data_idx); + data[start_offset + cur_data_idx] = + (unsigned char)reg; + } + + start_offset += cur_data_count; + } + + return retval; +} + +int analogix_dp_select_i2c_device(struct analogix_dp_device *dp, + unsigned int device_addr, + unsigned int reg_addr) +{ + u32 reg; + int retval; + + /* Set EDID device address */ + reg = device_addr; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_ADDR_7_0); + writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_15_8); + writel(0x0, dp->reg_base + ANALOGIX_DP_AUX_ADDR_19_16); + + /* Set offset from base address of EDID device */ + writel(reg_addr, dp->reg_base + ANALOGIX_DP_BUF_DATA_0); + + /* + * Set I2C transaction and write address + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. + */ + reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT | + AUX_TX_COMM_WRITE; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval != 0) + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); + + return retval; +} + +int analogix_dp_read_byte_from_i2c(struct analogix_dp_device *dp, + unsigned int device_addr, + unsigned int reg_addr, + unsigned int *data) +{ + u32 reg; + int i; + int retval; + + for (i = 0; i < 3; i++) { + /* Clear AUX CH data buffer */ + reg = BUF_CLR; + writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + + /* Select EDID device */ + retval = analogix_dp_select_i2c_device(dp, device_addr, + reg_addr); + if (retval != 0) + continue; + + /* + * Set I2C transaction and read data + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. + */ + reg = AUX_TX_COMM_I2C_TRANSACTION | + AUX_TX_COMM_READ; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval == 0) + break; + + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); + } + + /* Read data */ + if (retval == 0) + *data = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0); + + return retval; +} + +int analogix_dp_read_bytes_from_i2c(struct analogix_dp_device *dp, + unsigned int device_addr, + unsigned int reg_addr, + unsigned int count, + unsigned char edid[]) +{ + u32 reg; + unsigned int i, j; + unsigned int cur_data_idx; + unsigned int defer = 0; + int retval = 0; + + for (i = 0; i < count; i += 16) { + for (j = 0; j < 3; j++) { + /* Clear AUX CH data buffer */ + reg = BUF_CLR; + writel(reg, dp->reg_base + ANALOGIX_DP_BUFFER_DATA_CTL); + + /* Set normal AUX CH command */ + reg = readl(dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); + reg &= ~ADDR_ONLY; + writel(reg, dp->reg_base + ANALOGIX_DP_AUX_CH_CTL_2); + + /* + * If Rx sends defer, Tx sends only reads + * request without sending address + */ + if (!defer) + retval = analogix_dp_select_i2c_device(dp, + device_addr, reg_addr + i); + else + defer = 0; + + if (retval == 0) { + /* + * Set I2C transaction and write data + * If bit 3 is 1, DisplayPort transaction. + * If Bit 3 is 0, I2C transaction. 
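+				 * Each burst below reads AUX_LENGTH(16)
+				 * bytes, so e.g. a 128-byte EDID block
+				 * takes eight transactions.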
+ */ + reg = AUX_LENGTH(16) | + AUX_TX_COMM_I2C_TRANSACTION | + AUX_TX_COMM_READ; + writel(reg, dp->reg_base + + ANALOGIX_DP_AUX_CH_CTL_1); + + /* Start AUX transaction */ + retval = analogix_dp_start_aux_transaction(dp); + if (retval == 0) + break; + + dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", + __func__); + } + /* Check if Rx sends defer */ + reg = readl(dp->reg_base + ANALOGIX_DP_AUX_RX_COMM); + if (reg == AUX_RX_COMM_AUX_DEFER || + reg == AUX_RX_COMM_I2C_DEFER) { + dev_err(dp->dev, "Defer: %d\n\n", reg); + defer = 1; + } + } + + for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) { + reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 + + 4 * cur_data_idx); + edid[i + cur_data_idx] = (unsigned char)reg; + } + } + + return retval; +} + +void analogix_dp_set_link_bandwidth(struct analogix_dp_device *dp, u32 bwtype) +{ + u32 reg; + + reg = bwtype; + if ((bwtype == DP_LINK_BW_2_7) || (bwtype == DP_LINK_BW_1_62)) + writel(reg, dp->reg_base + ANALOGIX_DP_LINK_BW_SET); +} + +void analogix_dp_get_link_bandwidth(struct analogix_dp_device *dp, u32 *bwtype) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LINK_BW_SET); + *bwtype = reg; +} + +void analogix_dp_set_lane_count(struct analogix_dp_device *dp, u32 count) +{ + u32 reg; + + reg = count; + writel(reg, dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); +} + +void analogix_dp_get_lane_count(struct analogix_dp_device *dp, u32 *count) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LANE_COUNT_SET); + *count = reg; +} + +void analogix_dp_enable_enhanced_mode(struct analogix_dp_device *dp, + bool enable) +{ + u32 reg; + + if (enable) { + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg |= ENHANCED; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + } else { + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg &= ~ENHANCED; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + } +} + +void analogix_dp_set_training_pattern(struct analogix_dp_device *dp, + enum pattern_set pattern) +{ + u32 reg; + + switch (pattern) { + case PRBS7: + reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + break; + case D10_2: + reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + break; + case TRAINING_PTN1: + reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + break; + case TRAINING_PTN2: + reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + break; + case DP_NONE: + reg = SCRAMBLING_ENABLE | + LINK_QUAL_PATTERN_SET_DISABLE | + SW_TRAINING_PATTERN_SET_NORMAL; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + break; + default: + break; + } +} + +void analogix_dp_set_lane0_pre_emphasis(struct analogix_dp_device *dp, + u32 level) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); + reg &= ~PRE_EMPHASIS_SET_MASK; + reg |= level << PRE_EMPHASIS_SET_SHIFT; + writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane1_pre_emphasis(struct analogix_dp_device *dp, + u32 level) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); + reg &= ~PRE_EMPHASIS_SET_MASK; + reg |= level << PRE_EMPHASIS_SET_SHIFT; + writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane2_pre_emphasis(struct analogix_dp_device *dp, + u32 level) +{ + u32 reg; + + reg = 
readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); + reg &= ~PRE_EMPHASIS_SET_MASK; + reg |= level << PRE_EMPHASIS_SET_SHIFT; + writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane3_pre_emphasis(struct analogix_dp_device *dp, + u32 level) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); + reg &= ~PRE_EMPHASIS_SET_MASK; + reg |= level << PRE_EMPHASIS_SET_SHIFT; + writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane0_link_training(struct analogix_dp_device *dp, + u32 training_lane) +{ + u32 reg; + + reg = training_lane; + writel(reg, dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane1_link_training(struct analogix_dp_device *dp, + u32 training_lane) +{ + u32 reg; + + reg = training_lane; + writel(reg, dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane2_link_training(struct analogix_dp_device *dp, + u32 training_lane) +{ + u32 reg; + + reg = training_lane; + writel(reg, dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); +} + +void analogix_dp_set_lane3_link_training(struct analogix_dp_device *dp, + u32 training_lane) +{ + u32 reg; + + reg = training_lane; + writel(reg, dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); +} + +u32 analogix_dp_get_lane0_link_training(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN0_LINK_TRAINING_CTL); + return reg; +} + +u32 analogix_dp_get_lane1_link_training(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN1_LINK_TRAINING_CTL); + return reg; +} + +u32 analogix_dp_get_lane2_link_training(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN2_LINK_TRAINING_CTL); + return reg; +} + +u32 analogix_dp_get_lane3_link_training(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_LN3_LINK_TRAINING_CTL); + return reg; +} + +void analogix_dp_reset_macro(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_PHY_TEST); + reg |= MACRO_RST; + writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); + + /* 10 us is the minimum reset time. 
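+	 * usleep_range() below asks for 10 to 20 us, giving the timer
+	 * subsystem slack instead of busy-waiting on an exact delay.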
*/ + usleep_range(10, 20); + + reg &= ~MACRO_RST; + writel(reg, dp->reg_base + ANALOGIX_DP_PHY_TEST); +} + +void analogix_dp_init_video(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; + writel(reg, dp->reg_base + ANALOGIX_DP_COMMON_INT_STA_1); + + reg = 0x0; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + + reg = CHA_CRI(4) | CHA_CTRL; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + + reg = 0x0; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + + reg = VID_HRES_TH(2) | VID_VRES_TH(0); + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_8); +} + +void analogix_dp_set_video_color_format(struct analogix_dp_device *dp) +{ + u32 reg; + + /* Configure the input color depth, color space, dynamic range */ + reg = (dp->video_info.dynamic_range << IN_D_RANGE_SHIFT) | + (dp->video_info.color_depth << IN_BPC_SHIFT) | + (dp->video_info.color_space << IN_COLOR_F_SHIFT); + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_2); + + /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); + reg &= ~IN_YC_COEFFI_MASK; + if (dp->video_info.ycbcr_coeff) + reg |= IN_YC_COEFFI_ITU709; + else + reg |= IN_YC_COEFFI_ITU601; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_3); +} + +int analogix_dp_is_slave_video_stream_clock_on(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_1); + + if (!(reg & DET_STA)) { + dev_dbg(dp->dev, "Input stream clock not detected.\n"); + return -EINVAL; + } + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_2); + dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); + + if (reg & CHA_STA) { + dev_dbg(dp->dev, "Input stream clk is changing\n"); + return -EINVAL; + } + + return 0; +} + +void analogix_dp_set_video_cr_mn(struct analogix_dp_device *dp, + enum clock_recovery_m_value_type type, + u32 m_value, u32 n_value) +{ + u32 reg; + + if (type == REGISTER_M) { + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg |= FIX_M_VID; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg = m_value & 0xff; + writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_0); + reg = (m_value >> 8) & 0xff; + writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_1); + reg = (m_value >> 16) & 0xff; + writel(reg, dp->reg_base + ANALOGIX_DP_M_VID_2); + + reg = n_value & 0xff; + writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_0); + reg = (n_value >> 8) & 0xff; + writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_1); + reg = (n_value >> 16) & 0xff; + writel(reg, dp->reg_base + ANALOGIX_DP_N_VID_2); + } else { + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + reg &= ~FIX_M_VID; + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_4); + + writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_0); + writel(0x80, dp->reg_base + ANALOGIX_DP_N_VID_1); + writel(0x00, dp->reg_base + ANALOGIX_DP_N_VID_2); + } +} + +void analogix_dp_set_video_timing_mode(struct analogix_dp_device *dp, u32 type) +{ + u32 reg; + + if (type == VIDEO_TIMING_FROM_CAPTURE) { + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg &= ~FORMAT_SEL; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + } else { + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg |= FORMAT_SEL; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + } +} + +void analogix_dp_enable_video_master(struct 
analogix_dp_device *dp, bool enable) +{ + u32 reg; + + if (enable) { + reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + reg &= ~VIDEO_MODE_MASK; + reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; + writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + } else { + reg = readl(dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + reg &= ~VIDEO_MODE_MASK; + reg |= VIDEO_MODE_SLAVE_MODE; + writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); + } +} + +void analogix_dp_start_video(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); + reg |= VIDEO_EN; + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_1); +} + +int analogix_dp_is_video_stream_on(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + writel(reg, dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + + reg = readl(dp->reg_base + ANALOGIX_DP_SYS_CTL_3); + if (!(reg & STRM_VALID)) { + dev_dbg(dp->dev, "Input video stream is not detected.\n"); + return -EINVAL; + } + + return 0; +} + +void analogix_dp_config_video_slave_mode(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + reg &= ~(MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N); + reg |= MASTER_VID_FUNC_EN_N; + writel(reg, dp->reg_base + ANALOGIX_DP_FUNC_EN_1); + + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg &= ~INTERACE_SCAN_CFG; + reg |= (dp->video_info.interlaced << 2); + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg &= ~VSYNC_POLARITY_CFG; + reg |= (dp->video_info.v_sync_polarity << 1); + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + + reg = readl(dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + reg &= ~HSYNC_POLARITY_CFG; + reg |= (dp->video_info.h_sync_polarity << 0); + writel(reg, dp->reg_base + ANALOGIX_DP_VIDEO_CTL_10); + + reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; + writel(reg, dp->reg_base + ANALOGIX_DP_SOC_GENERAL_CTL); +} + +void analogix_dp_enable_scrambling(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + reg &= ~SCRAMBLING_DISABLE; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); +} + +void analogix_dp_disable_scrambling(struct analogix_dp_device *dp) +{ + u32 reg; + + reg = readl(dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); + reg |= SCRAMBLING_DISABLE; + writel(reg, dp->reg_base + ANALOGIX_DP_TRAINING_PTN_SET); +} diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h index 2e9bd0e0b9f2..337912b0aeab 100644 --- a/drivers/gpu/drm/exynos/exynos_dp_reg.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h @@ -1,5 +1,5 @@ /* - * Register definition file for Samsung DP driver + * Register definition file for Analogix DP core driver * * Copyright (C) 2012 Samsung Electronics Co., Ltd. * Author: Jingoo Han <jg1.han@samsung.com> @@ -9,96 +9,104 @@ * published by the Free Software Foundation. 
*/ -#ifndef _EXYNOS_DP_REG_H -#define _EXYNOS_DP_REG_H - -#define EXYNOS_DP_TX_SW_RESET 0x14 -#define EXYNOS_DP_FUNC_EN_1 0x18 -#define EXYNOS_DP_FUNC_EN_2 0x1C -#define EXYNOS_DP_VIDEO_CTL_1 0x20 -#define EXYNOS_DP_VIDEO_CTL_2 0x24 -#define EXYNOS_DP_VIDEO_CTL_3 0x28 - -#define EXYNOS_DP_VIDEO_CTL_8 0x3C -#define EXYNOS_DP_VIDEO_CTL_10 0x44 - -#define EXYNOS_DP_LANE_MAP 0x35C - -#define EXYNOS_DP_ANALOG_CTL_1 0x370 -#define EXYNOS_DP_ANALOG_CTL_2 0x374 -#define EXYNOS_DP_ANALOG_CTL_3 0x378 -#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C -#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380 - -#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390 - -#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4 -#define EXYNOS_DP_COMMON_INT_STA_2 0x3C8 -#define EXYNOS_DP_COMMON_INT_STA_3 0x3CC -#define EXYNOS_DP_COMMON_INT_STA_4 0x3D0 -#define EXYNOS_DP_INT_STA 0x3DC -#define EXYNOS_DP_COMMON_INT_MASK_1 0x3E0 -#define EXYNOS_DP_COMMON_INT_MASK_2 0x3E4 -#define EXYNOS_DP_COMMON_INT_MASK_3 0x3E8 -#define EXYNOS_DP_COMMON_INT_MASK_4 0x3EC -#define EXYNOS_DP_INT_STA_MASK 0x3F8 -#define EXYNOS_DP_INT_CTL 0x3FC - -#define EXYNOS_DP_SYS_CTL_1 0x600 -#define EXYNOS_DP_SYS_CTL_2 0x604 -#define EXYNOS_DP_SYS_CTL_3 0x608 -#define EXYNOS_DP_SYS_CTL_4 0x60C - -#define EXYNOS_DP_PKT_SEND_CTL 0x640 -#define EXYNOS_DP_HDCP_CTL 0x648 - -#define EXYNOS_DP_LINK_BW_SET 0x680 -#define EXYNOS_DP_LANE_COUNT_SET 0x684 -#define EXYNOS_DP_TRAINING_PTN_SET 0x688 -#define EXYNOS_DP_LN0_LINK_TRAINING_CTL 0x68C -#define EXYNOS_DP_LN1_LINK_TRAINING_CTL 0x690 -#define EXYNOS_DP_LN2_LINK_TRAINING_CTL 0x694 -#define EXYNOS_DP_LN3_LINK_TRAINING_CTL 0x698 - -#define EXYNOS_DP_DEBUG_CTL 0x6C0 -#define EXYNOS_DP_HPD_DEGLITCH_L 0x6C4 -#define EXYNOS_DP_HPD_DEGLITCH_H 0x6C8 -#define EXYNOS_DP_LINK_DEBUG_CTL 0x6E0 - -#define EXYNOS_DP_M_VID_0 0x700 -#define EXYNOS_DP_M_VID_1 0x704 -#define EXYNOS_DP_M_VID_2 0x708 -#define EXYNOS_DP_N_VID_0 0x70C -#define EXYNOS_DP_N_VID_1 0x710 -#define EXYNOS_DP_N_VID_2 0x714 - -#define EXYNOS_DP_PLL_CTL 0x71C -#define EXYNOS_DP_PHY_PD 0x720 -#define EXYNOS_DP_PHY_TEST 0x724 - -#define EXYNOS_DP_VIDEO_FIFO_THRD 0x730 -#define EXYNOS_DP_AUDIO_MARGIN 0x73C - -#define EXYNOS_DP_M_VID_GEN_FILTER_TH 0x764 -#define EXYNOS_DP_M_AUD_GEN_FILTER_TH 0x778 -#define EXYNOS_DP_AUX_CH_STA 0x780 -#define EXYNOS_DP_AUX_CH_DEFER_CTL 0x788 -#define EXYNOS_DP_AUX_RX_COMM 0x78C -#define EXYNOS_DP_BUFFER_DATA_CTL 0x790 -#define EXYNOS_DP_AUX_CH_CTL_1 0x794 -#define EXYNOS_DP_AUX_ADDR_7_0 0x798 -#define EXYNOS_DP_AUX_ADDR_15_8 0x79C -#define EXYNOS_DP_AUX_ADDR_19_16 0x7A0 -#define EXYNOS_DP_AUX_CH_CTL_2 0x7A4 - -#define EXYNOS_DP_BUF_DATA_0 0x7C0 - -#define EXYNOS_DP_SOC_GENERAL_CTL 0x800 - -/* EXYNOS_DP_TX_SW_RESET */ +#ifndef _ANALOGIX_DP_REG_H +#define _ANALOGIX_DP_REG_H + +#define ANALOGIX_DP_TX_SW_RESET 0x14 +#define ANALOGIX_DP_FUNC_EN_1 0x18 +#define ANALOGIX_DP_FUNC_EN_2 0x1C +#define ANALOGIX_DP_VIDEO_CTL_1 0x20 +#define ANALOGIX_DP_VIDEO_CTL_2 0x24 +#define ANALOGIX_DP_VIDEO_CTL_3 0x28 + +#define ANALOGIX_DP_VIDEO_CTL_8 0x3C +#define ANALOGIX_DP_VIDEO_CTL_10 0x44 + +#define ANALOGIX_DP_PLL_REG_1 0xfc +#define ANALOGIX_DP_PLL_REG_2 0x9e4 +#define ANALOGIX_DP_PLL_REG_3 0x9e8 +#define ANALOGIX_DP_PLL_REG_4 0x9ec +#define ANALOGIX_DP_PLL_REG_5 0xa00 + +#define ANALOGIX_DP_PD 0x12c + +#define ANALOGIX_DP_LANE_MAP 0x35C + +#define ANALOGIX_DP_ANALOG_CTL_1 0x370 +#define ANALOGIX_DP_ANALOG_CTL_2 0x374 +#define ANALOGIX_DP_ANALOG_CTL_3 0x378 +#define ANALOGIX_DP_PLL_FILTER_CTL_1 0x37C +#define ANALOGIX_DP_TX_AMP_TUNING_CTL 0x380 + +#define 
ANALOGIX_DP_AUX_HW_RETRY_CTL 0x390 + +#define ANALOGIX_DP_COMMON_INT_STA_1 0x3C4 +#define ANALOGIX_DP_COMMON_INT_STA_2 0x3C8 +#define ANALOGIX_DP_COMMON_INT_STA_3 0x3CC +#define ANALOGIX_DP_COMMON_INT_STA_4 0x3D0 +#define ANALOGIX_DP_INT_STA 0x3DC +#define ANALOGIX_DP_COMMON_INT_MASK_1 0x3E0 +#define ANALOGIX_DP_COMMON_INT_MASK_2 0x3E4 +#define ANALOGIX_DP_COMMON_INT_MASK_3 0x3E8 +#define ANALOGIX_DP_COMMON_INT_MASK_4 0x3EC +#define ANALOGIX_DP_INT_STA_MASK 0x3F8 +#define ANALOGIX_DP_INT_CTL 0x3FC + +#define ANALOGIX_DP_SYS_CTL_1 0x600 +#define ANALOGIX_DP_SYS_CTL_2 0x604 +#define ANALOGIX_DP_SYS_CTL_3 0x608 +#define ANALOGIX_DP_SYS_CTL_4 0x60C + +#define ANALOGIX_DP_PKT_SEND_CTL 0x640 +#define ANALOGIX_DP_HDCP_CTL 0x648 + +#define ANALOGIX_DP_LINK_BW_SET 0x680 +#define ANALOGIX_DP_LANE_COUNT_SET 0x684 +#define ANALOGIX_DP_TRAINING_PTN_SET 0x688 +#define ANALOGIX_DP_LN0_LINK_TRAINING_CTL 0x68C +#define ANALOGIX_DP_LN1_LINK_TRAINING_CTL 0x690 +#define ANALOGIX_DP_LN2_LINK_TRAINING_CTL 0x694 +#define ANALOGIX_DP_LN3_LINK_TRAINING_CTL 0x698 + +#define ANALOGIX_DP_DEBUG_CTL 0x6C0 +#define ANALOGIX_DP_HPD_DEGLITCH_L 0x6C4 +#define ANALOGIX_DP_HPD_DEGLITCH_H 0x6C8 +#define ANALOGIX_DP_LINK_DEBUG_CTL 0x6E0 + +#define ANALOGIX_DP_M_VID_0 0x700 +#define ANALOGIX_DP_M_VID_1 0x704 +#define ANALOGIX_DP_M_VID_2 0x708 +#define ANALOGIX_DP_N_VID_0 0x70C +#define ANALOGIX_DP_N_VID_1 0x710 +#define ANALOGIX_DP_N_VID_2 0x714 + +#define ANALOGIX_DP_PLL_CTL 0x71C +#define ANALOGIX_DP_PHY_PD 0x720 +#define ANALOGIX_DP_PHY_TEST 0x724 + +#define ANALOGIX_DP_VIDEO_FIFO_THRD 0x730 +#define ANALOGIX_DP_AUDIO_MARGIN 0x73C + +#define ANALOGIX_DP_M_VID_GEN_FILTER_TH 0x764 +#define ANALOGIX_DP_M_AUD_GEN_FILTER_TH 0x778 +#define ANALOGIX_DP_AUX_CH_STA 0x780 +#define ANALOGIX_DP_AUX_CH_DEFER_CTL 0x788 +#define ANALOGIX_DP_AUX_RX_COMM 0x78C +#define ANALOGIX_DP_BUFFER_DATA_CTL 0x790 +#define ANALOGIX_DP_AUX_CH_CTL_1 0x794 +#define ANALOGIX_DP_AUX_ADDR_7_0 0x798 +#define ANALOGIX_DP_AUX_ADDR_15_8 0x79C +#define ANALOGIX_DP_AUX_ADDR_19_16 0x7A0 +#define ANALOGIX_DP_AUX_CH_CTL_2 0x7A4 + +#define ANALOGIX_DP_BUF_DATA_0 0x7C0 + +#define ANALOGIX_DP_SOC_GENERAL_CTL 0x800 + +/* ANALOGIX_DP_TX_SW_RESET */ #define RESET_DP_TX (0x1 << 0) -/* EXYNOS_DP_FUNC_EN_1 */ +/* ANALOGIX_DP_FUNC_EN_1 */ #define MASTER_VID_FUNC_EN_N (0x1 << 7) #define SLAVE_VID_FUNC_EN_N (0x1 << 5) #define AUD_FIFO_FUNC_EN_N (0x1 << 4) @@ -107,17 +115,17 @@ #define CRC_FUNC_EN_N (0x1 << 1) #define SW_FUNC_EN_N (0x1 << 0) -/* EXYNOS_DP_FUNC_EN_2 */ +/* ANALOGIX_DP_FUNC_EN_2 */ #define SSC_FUNC_EN_N (0x1 << 7) #define AUX_FUNC_EN_N (0x1 << 2) #define SERDES_FIFO_FUNC_EN_N (0x1 << 1) #define LS_CLK_DOMAIN_FUNC_EN_N (0x1 << 0) -/* EXYNOS_DP_VIDEO_CTL_1 */ +/* ANALOGIX_DP_VIDEO_CTL_1 */ #define VIDEO_EN (0x1 << 7) #define HDCP_VIDEO_MUTE (0x1 << 6) -/* EXYNOS_DP_VIDEO_CTL_1 */ +/* ANALOGIX_DP_VIDEO_CTL_1 */ #define IN_D_RANGE_MASK (0x1 << 7) #define IN_D_RANGE_SHIFT (7) #define IN_D_RANGE_CEA (0x1 << 7) @@ -134,7 +142,7 @@ #define IN_COLOR_F_YCBCR422 (0x1 << 0) #define IN_COLOR_F_RGB (0x0 << 0) -/* EXYNOS_DP_VIDEO_CTL_3 */ +/* ANALOGIX_DP_VIDEO_CTL_3 */ #define IN_YC_COEFFI_MASK (0x1 << 7) #define IN_YC_COEFFI_SHIFT (7) #define IN_YC_COEFFI_ITU709 (0x1 << 7) @@ -144,17 +152,21 @@ #define VID_CHK_UPDATE_TYPE_1 (0x1 << 4) #define VID_CHK_UPDATE_TYPE_0 (0x0 << 4) -/* EXYNOS_DP_VIDEO_CTL_8 */ +/* ANALOGIX_DP_VIDEO_CTL_8 */ #define VID_HRES_TH(x) (((x) & 0xf) << 4) #define VID_VRES_TH(x) (((x) & 0xf) << 0) -/* EXYNOS_DP_VIDEO_CTL_10 */ +/* ANALOGIX_DP_VIDEO_CTL_10 */ 
#define FORMAT_SEL (0x1 << 4) #define INTERACE_SCAN_CFG (0x1 << 2) #define VSYNC_POLARITY_CFG (0x1 << 1) #define HSYNC_POLARITY_CFG (0x1 << 0) -/* EXYNOS_DP_LANE_MAP */ +/* ANALOGIX_DP_PLL_REG_1 */ +#define REF_CLK_24M (0x1 << 1) +#define REF_CLK_27M (0x0 << 1) + +/* ANALOGIX_DP_LANE_MAP */ #define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6) #define LANE3_MAP_LOGIC_LANE_1 (0x1 << 6) #define LANE3_MAP_LOGIC_LANE_2 (0x2 << 6) @@ -172,30 +184,30 @@ #define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0) #define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0) -/* EXYNOS_DP_ANALOG_CTL_1 */ +/* ANALOGIX_DP_ANALOG_CTL_1 */ #define TX_TERMINAL_CTRL_50_OHM (0x1 << 4) -/* EXYNOS_DP_ANALOG_CTL_2 */ +/* ANALOGIX_DP_ANALOG_CTL_2 */ #define SEL_24M (0x1 << 3) #define TX_DVDD_BIT_1_0625V (0x4 << 0) -/* EXYNOS_DP_ANALOG_CTL_3 */ +/* ANALOGIX_DP_ANALOG_CTL_3 */ #define DRIVE_DVDD_BIT_1_0625V (0x4 << 5) #define VCO_BIT_600_MICRO (0x5 << 0) -/* EXYNOS_DP_PLL_FILTER_CTL_1 */ +/* ANALOGIX_DP_PLL_FILTER_CTL_1 */ #define PD_RING_OSC (0x1 << 6) #define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4) #define TX_CUR1_2X (0x1 << 2) #define TX_CUR_16_MA (0x3 << 0) -/* EXYNOS_DP_TX_AMP_TUNING_CTL */ +/* ANALOGIX_DP_TX_AMP_TUNING_CTL */ #define CH3_AMP_400_MV (0x0 << 24) #define CH2_AMP_400_MV (0x0 << 16) #define CH1_AMP_400_MV (0x0 << 8) #define CH0_AMP_400_MV (0x0 << 0) -/* EXYNOS_DP_AUX_HW_RETRY_CTL */ +/* ANALOGIX_DP_AUX_HW_RETRY_CTL */ #define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8) #define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3) #define AUX_HW_RETRY_INTERVAL_600_MICROSECONDS (0x0 << 3) @@ -204,7 +216,7 @@ #define AUX_HW_RETRY_INTERVAL_1800_MICROSECONDS (0x3 << 3) #define AUX_HW_RETRY_COUNT_SEL(x) (((x) & 0x7) << 0) -/* EXYNOS_DP_COMMON_INT_STA_1 */ +/* ANALOGIX_DP_COMMON_INT_STA_1 */ #define VSYNC_DET (0x1 << 7) #define PLL_LOCK_CHG (0x1 << 6) #define SPDIF_ERR (0x1 << 5) @@ -214,19 +226,19 @@ #define VID_CLK_CHG (0x1 << 1) #define SW_INT (0x1 << 0) -/* EXYNOS_DP_COMMON_INT_STA_2 */ +/* ANALOGIX_DP_COMMON_INT_STA_2 */ #define ENC_EN_CHG (0x1 << 6) #define HW_BKSV_RDY (0x1 << 3) #define HW_SHA_DONE (0x1 << 2) #define HW_AUTH_STATE_CHG (0x1 << 1) #define HW_AUTH_DONE (0x1 << 0) -/* EXYNOS_DP_COMMON_INT_STA_3 */ +/* ANALOGIX_DP_COMMON_INT_STA_3 */ #define AFIFO_UNDER (0x1 << 7) #define AFIFO_OVER (0x1 << 6) #define R0_CHK_FLAG (0x1 << 5) -/* EXYNOS_DP_COMMON_INT_STA_4 */ +/* ANALOGIX_DP_COMMON_INT_STA_4 */ #define PSR_ACTIVE (0x1 << 7) #define PSR_INACTIVE (0x1 << 6) #define SPDIF_BI_PHASE_ERR (0x1 << 5) @@ -234,29 +246,29 @@ #define HPD_LOST (0x1 << 1) #define PLUG (0x1 << 0) -/* EXYNOS_DP_INT_STA */ +/* ANALOGIX_DP_INT_STA */ #define INT_HPD (0x1 << 6) #define HW_TRAINING_FINISH (0x1 << 5) #define RPLY_RECEIV (0x1 << 1) #define AUX_ERR (0x1 << 0) -/* EXYNOS_DP_INT_CTL */ +/* ANALOGIX_DP_INT_CTL */ #define SOFT_INT_CTRL (0x1 << 2) #define INT_POL1 (0x1 << 1) #define INT_POL0 (0x1 << 0) -/* EXYNOS_DP_SYS_CTL_1 */ +/* ANALOGIX_DP_SYS_CTL_1 */ #define DET_STA (0x1 << 2) #define FORCE_DET (0x1 << 1) #define DET_CTRL (0x1 << 0) -/* EXYNOS_DP_SYS_CTL_2 */ +/* ANALOGIX_DP_SYS_CTL_2 */ #define CHA_CRI(x) (((x) & 0xf) << 4) #define CHA_STA (0x1 << 2) #define FORCE_CHA (0x1 << 1) #define CHA_CTRL (0x1 << 0) -/* EXYNOS_DP_SYS_CTL_3 */ +/* ANALOGIX_DP_SYS_CTL_3 */ #define HPD_STATUS (0x1 << 6) #define F_HPD (0x1 << 5) #define HPD_CTRL (0x1 << 4) @@ -265,13 +277,13 @@ #define F_VALID (0x1 << 1) #define VALID_CTRL (0x1 << 0) -/* EXYNOS_DP_SYS_CTL_4 */ +/* ANALOGIX_DP_SYS_CTL_4 */ #define FIX_M_AUD (0x1 << 4) #define ENHANCED (0x1 << 3) #define FIX_M_VID (0x1 << 2) 
#define M_VID_UPDATE_CTRL (0x3 << 0) -/* EXYNOS_DP_TRAINING_PTN_SET */ +/* ANALOGIX_DP_TRAINING_PTN_SET */ #define SCRAMBLER_TYPE (0x1 << 9) #define HW_LINK_TRAINING_PATTERN (0x1 << 8) #define SCRAMBLING_DISABLE (0x1 << 5) @@ -285,24 +297,24 @@ #define SW_TRAINING_PATTERN_SET_PTN1 (0x1 << 0) #define SW_TRAINING_PATTERN_SET_NORMAL (0x0 << 0) -/* EXYNOS_DP_LN0_LINK_TRAINING_CTL */ +/* ANALOGIX_DP_LN0_LINK_TRAINING_CTL */ #define PRE_EMPHASIS_SET_MASK (0x3 << 3) #define PRE_EMPHASIS_SET_SHIFT (3) -/* EXYNOS_DP_DEBUG_CTL */ +/* ANALOGIX_DP_DEBUG_CTL */ #define PLL_LOCK (0x1 << 4) #define F_PLL_LOCK (0x1 << 3) #define PLL_LOCK_CTRL (0x1 << 2) #define PN_INV (0x1 << 0) -/* EXYNOS_DP_PLL_CTL */ +/* ANALOGIX_DP_PLL_CTL */ #define DP_PLL_PD (0x1 << 7) #define DP_PLL_RESET (0x1 << 6) #define DP_PLL_LOOP_BIT_DEFAULT (0x1 << 4) #define DP_PLL_REF_BIT_1_1250V (0x5 << 0) #define DP_PLL_REF_BIT_1_2500V (0x7 << 0) -/* EXYNOS_DP_PHY_PD */ +/* ANALOGIX_DP_PHY_PD */ #define DP_PHY_PD (0x1 << 5) #define AUX_PD (0x1 << 4) #define CH3_PD (0x1 << 3) @@ -310,28 +322,28 @@ #define CH1_PD (0x1 << 1) #define CH0_PD (0x1 << 0) -/* EXYNOS_DP_PHY_TEST */ +/* ANALOGIX_DP_PHY_TEST */ #define MACRO_RST (0x1 << 5) #define CH1_TEST (0x1 << 1) #define CH0_TEST (0x1 << 0) -/* EXYNOS_DP_AUX_CH_STA */ +/* ANALOGIX_DP_AUX_CH_STA */ #define AUX_BUSY (0x1 << 4) #define AUX_STATUS_MASK (0xf << 0) -/* EXYNOS_DP_AUX_CH_DEFER_CTL */ +/* ANALOGIX_DP_AUX_CH_DEFER_CTL */ #define DEFER_CTRL_EN (0x1 << 7) #define DEFER_COUNT(x) (((x) & 0x7f) << 0) -/* EXYNOS_DP_AUX_RX_COMM */ +/* ANALOGIX_DP_AUX_RX_COMM */ #define AUX_RX_COMM_I2C_DEFER (0x2 << 2) #define AUX_RX_COMM_AUX_DEFER (0x2 << 0) -/* EXYNOS_DP_BUFFER_DATA_CTL */ +/* ANALOGIX_DP_BUFFER_DATA_CTL */ #define BUF_CLR (0x1 << 7) #define BUF_DATA_COUNT(x) (((x) & 0x1f) << 0) -/* EXYNOS_DP_AUX_CH_CTL_1 */ +/* ANALOGIX_DP_AUX_CH_CTL_1 */ #define AUX_LENGTH(x) (((x - 1) & 0xf) << 4) #define AUX_TX_COMM_MASK (0xf << 0) #define AUX_TX_COMM_DP_TRANSACTION (0x1 << 3) @@ -340,20 +352,20 @@ #define AUX_TX_COMM_WRITE (0x0 << 0) #define AUX_TX_COMM_READ (0x1 << 0) -/* EXYNOS_DP_AUX_ADDR_7_0 */ +/* ANALOGIX_DP_AUX_ADDR_7_0 */ #define AUX_ADDR_7_0(x) (((x) >> 0) & 0xff) -/* EXYNOS_DP_AUX_ADDR_15_8 */ +/* ANALOGIX_DP_AUX_ADDR_15_8 */ #define AUX_ADDR_15_8(x) (((x) >> 8) & 0xff) -/* EXYNOS_DP_AUX_ADDR_19_16 */ +/* ANALOGIX_DP_AUX_ADDR_19_16 */ #define AUX_ADDR_19_16(x) (((x) >> 16) & 0x0f) -/* EXYNOS_DP_AUX_CH_CTL_2 */ +/* ANALOGIX_DP_AUX_CH_CTL_2 */ #define ADDR_ONLY (0x1 << 1) #define AUX_EN (0x1 << 0) -/* EXYNOS_DP_SOC_GENERAL_CTL */ +/* ANALOGIX_DP_SOC_GENERAL_CTL */ #define AUDIO_MODE_SPDIF_MODE (0x1 << 8) #define AUDIO_MODE_MASTER_MODE (0x0 << 8) #define MASTER_VIDEO_INTERLACE_EN (0x1 << 4) @@ -363,4 +375,4 @@ #define VIDEO_MODE_SLAVE_MODE (0x1 << 0) #define VIDEO_MODE_MASTER_MODE (0x0 << 0) -#endif /* _EXYNOS_DP_REG_H */ +#endif /* _ANALOGIX_DP_REG_H */ diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index 9795b72472ba..c9d941283d30 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1413,11 +1413,6 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge) mutex_unlock(&hdmi->mutex); } -static void dw_hdmi_bridge_nop(struct drm_bridge *bridge) -{ - /* do nothing */ -} - static enum drm_connector_status dw_hdmi_connector_detect(struct drm_connector *connector, bool force) { @@ -1536,8 +1531,6 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = static const struct drm_bridge_funcs 
dw_hdmi_bridge_funcs = { .enable = dw_hdmi_bridge_enable, .disable = dw_hdmi_bridge_disable, - .pre_enable = dw_hdmi_bridge_nop, - .post_disable = dw_hdmi_bridge_nop, .mode_set = dw_hdmi_bridge_mode_set, }; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c index 7bc394ec9fb3..dc83f69da6f1 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -163,10 +163,8 @@ static struct pci_driver cirrus_pci_driver = { static int __init cirrus_init(void) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && cirrus_modeset == -1) return -EINVAL; -#endif if (cirrus_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4befe25c81c7..7bf678ee7f81 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -984,7 +984,17 @@ void drm_atomic_helper_commit_modeset_enables(struct drm_device *dev, } EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables); -static void wait_for_fences(struct drm_device *dev, +/** + * drm_atomic_helper_wait_for_fences - wait for fences stashed in plane state + * @dev: DRM device + * @state: atomic state object with old state structures + * + * For implicit sync, drivers should fish the exclusive fence out from the + * incoming fbs and stash it in the drm_plane_state. This is called after + * drm_atomic_helper_swap_state(), so it uses the current plane state (and + * just uses the atomic state to find the changed planes). + */ +void drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state) { struct drm_plane *plane; @@ -1002,6 +1012,7 @@ static void wait_for_fences(struct drm_device *dev, plane->state->fence = NULL; } } +EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences); /** * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed @@ -1163,7 +1174,7 @@ int drm_atomic_helper_commit(struct drm_device *dev, * current layout. */ - wait_for_fences(dev, state); + drm_atomic_helper_wait_for_fences(dev, state); drm_atomic_helper_commit_modeset_disables(dev, state); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e08f962288d9..55ffde5a3a4a 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -1067,25 +1067,25 @@ void drm_connector_unregister(struct drm_connector *connector) } EXPORT_SYMBOL(drm_connector_unregister); - /** - * drm_connector_unplug_all - unregister connector userspace interfaces + * drm_connector_unregister_all - unregister connector userspace interfaces * @dev: drm device * - * This function unregisters all connector userspace interfaces in sysfs. Should - * be call when the device is disconnected, e.g. from an usb driver's - * ->disconnect callback. + * This function unregisters all connectors from sysfs and other places so + * that userspace can no longer access them. Drivers should call this as the + * first step of tearing down the device instance, or when the underlying + * physical device has disappeared (e.g. USB unplug), right before calling + * drm_dev_unregister(). 
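+ * + * A minimal teardown sketch (assuming a driver that holds a single + * struct drm_device pointer, dev, and needs no other unwind steps) + * would be: + * + *	drm_connector_unregister_all(dev); + *	drm_dev_unregister(dev); + *	drm_dev_unref(dev);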
*/ -void drm_connector_unplug_all(struct drm_device *dev) +void drm_connector_unregister_all(struct drm_device *dev) { struct drm_connector *connector; /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_for_each_connector(connector, dev) drm_connector_unregister(connector); - } -EXPORT_SYMBOL(drm_connector_unplug_all); +EXPORT_SYMBOL(drm_connector_unregister_all); /** * drm_encoder_init - Init a preallocated encoder diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 414d7f61aa05..558ef9fc39e6 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -205,7 +205,7 @@ static const struct drm_display_mode drm_dmt_modes[] = { DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 0x0f - 1024x768@43Hz, interlace */ { DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, - 1208, 1264, 0, 768, 768, 772, 817, 0, + 1208, 1264, 0, 768, 768, 776, 817, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 0x10 - 1024x768@60Hz */ @@ -522,12 +522,12 @@ static const struct drm_display_mode edid_est_modes[] = { 720, 840, 0, 480, 481, 484, 500, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, - 704, 832, 0, 480, 489, 491, 520, 0, + 704, 832, 0, 480, 489, 492, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, 768, 864, 0, 480, 483, 486, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ - { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, + { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, 752, 800, 0, 480, 490, 492, 525, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, @@ -539,7 +539,7 @@ static const struct drm_display_mode edid_est_modes[] = { { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, 1440, 1688, 0, 1024, 1025, 1028, 1066, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ - { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, + { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, 1136, 1312, 0, 768, 769, 772, 800, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, @@ -2241,7 +2241,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing) { int i, j, m, modes = 0; struct drm_display_mode *mode; - u8 *est = ((u8 *)timing) + 5; + u8 *est = ((u8 *)timing) + 6; for (i = 0; i < 6; i++) { for (j = 7; j >= 0; j--) { diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 2e8c77e71e1f..da0c5320789f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj) fail: while (i--) - page_cache_release(pages[i]); + put_page(pages[i]); drm_free_large(pages); return ERR_CAST(p); @@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, mark_page_accessed(pages[i]); /* Undo the reference we took when populating the table */ - page_cache_release(pages[i]); + put_page(pages[i]); } drm_free_large(pages); diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 881c5a6c180c..3c1a6f18e71c 100644 --- a/drivers/gpu/drm/drm_irq.c +++ 
b/drivers/gpu/drm/drm_irq.c @@ -863,10 +863,7 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, /* Subtract time delta from raw timestamp to get final * vblank_time timestamp for end of vblank. */ - if (delta_ns < 0) - etime = ktime_add_ns(etime, -delta_ns); - else - etime = ktime_sub_ns(etime, delta_ns); + etime = ktime_sub_ns(etime, delta_ns); *vblank_time = ktime_to_timeval(etime); DRM_DEBUG_VBL("crtc %u : v 0x%x p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index d503f8e8c2d1..d7d8cecfb0e6 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -287,102 +287,6 @@ static ssize_t modes_show(struct device *device, return written; } -static ssize_t tv_subconnector_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - struct drm_property *prop; - uint64_t subconnector; - int ret; - - prop = dev->mode_config.tv_subconnector_property; - if (!prop) { - DRM_ERROR("Unable to find subconnector property\n"); - return 0; - } - - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); - if (ret) - return 0; - - return snprintf(buf, PAGE_SIZE, "%s", - drm_get_tv_subconnector_name((int)subconnector)); -} - -static ssize_t tv_select_subconnector_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - struct drm_property *prop; - uint64_t subconnector; - int ret; - - prop = dev->mode_config.tv_select_subconnector_property; - if (!prop) { - DRM_ERROR("Unable to find select subconnector property\n"); - return 0; - } - - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); - if (ret) - return 0; - - return snprintf(buf, PAGE_SIZE, "%s", - drm_get_tv_select_name((int)subconnector)); -} - -static ssize_t dvii_subconnector_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - struct drm_property *prop; - uint64_t subconnector; - int ret; - - prop = dev->mode_config.dvi_i_subconnector_property; - if (!prop) { - DRM_ERROR("Unable to find subconnector property\n"); - return 0; - } - - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); - if (ret) - return 0; - - return snprintf(buf, PAGE_SIZE, "%s", - drm_get_dvi_i_subconnector_name((int)subconnector)); -} - -static ssize_t dvii_select_subconnector_show(struct device *device, - struct device_attribute *attr, - char *buf) -{ - struct drm_connector *connector = to_drm_connector(device); - struct drm_device *dev = connector->dev; - struct drm_property *prop; - uint64_t subconnector; - int ret; - - prop = dev->mode_config.dvi_i_select_subconnector_property; - if (!prop) { - DRM_ERROR("Unable to find select subconnector property\n"); - return 0; - } - - ret = drm_object_property_get_value(&connector->base, prop, &subconnector); - if (ret) - return 0; - - return snprintf(buf, PAGE_SIZE, "%s", - drm_get_dvi_i_select_name((int)subconnector)); -} - static DEVICE_ATTR_RW(status); static DEVICE_ATTR_RO(enabled); static DEVICE_ATTR_RO(dpms); @@ -396,54 +300,6 @@ static struct attribute *connector_dev_attrs[] = { NULL }; -static DEVICE_ATTR_RO(tv_subconnector); -static DEVICE_ATTR_RO(tv_select_subconnector); - 
-static struct attribute *connector_tv_dev_attrs[] = { - &dev_attr_tv_subconnector.attr, - &dev_attr_tv_select_subconnector.attr, - NULL -}; - -static DEVICE_ATTR_RO(dvii_subconnector); -static DEVICE_ATTR_RO(dvii_select_subconnector); - -static struct attribute *connector_dvii_dev_attrs[] = { - &dev_attr_dvii_subconnector.attr, - &dev_attr_dvii_select_subconnector.attr, - NULL -}; - -/* Connector type related helpers */ -static int kobj_connector_type(struct kobject *kobj) -{ - struct device *dev = kobj_to_dev(kobj); - struct drm_connector *connector = to_drm_connector(dev); - - return connector->connector_type; -} - -static umode_t connector_is_dvii(struct kobject *kobj, - struct attribute *attr, int idx) -{ - return kobj_connector_type(kobj) == DRM_MODE_CONNECTOR_DVII ? - attr->mode : 0; -} - -static umode_t connector_is_tv(struct kobject *kobj, - struct attribute *attr, int idx) -{ - switch (kobj_connector_type(kobj)) { - case DRM_MODE_CONNECTOR_Composite: - case DRM_MODE_CONNECTOR_SVIDEO: - case DRM_MODE_CONNECTOR_Component: - case DRM_MODE_CONNECTOR_TV: - return attr->mode; - } - - return 0; -} - static struct bin_attribute edid_attr = { .attr.name = "edid", .attr.mode = 0444, @@ -461,20 +317,8 @@ static const struct attribute_group connector_dev_group = { .bin_attrs = connector_bin_attrs, }; -static const struct attribute_group connector_tv_dev_group = { - .attrs = connector_tv_dev_attrs, - .is_visible = connector_is_tv, -}; - -static const struct attribute_group connector_dvii_dev_group = { - .attrs = connector_dvii_dev_attrs, - .is_visible = connector_is_dvii, -}; - static const struct attribute_group *connector_dev_groups[] = { &connector_dev_group, - &connector_tv_dev_group, - &connector_dvii_dev_group, NULL }; diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index f17d39279596..2fadd8275fa5 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -71,8 +71,9 @@ config DRM_EXYNOS_DSI This enables support for Exynos MIPI-DSI device. config DRM_EXYNOS_DP - bool "Display Port" + bool "EXYNOS specific extensions for Analogix DP driver" depends on DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON + select DRM_ANALOGIX_DP default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile index 968b31c522b2..126b0a1915db 100644 --- a/drivers/gpu/drm/exynos/Makefile +++ b/drivers/gpu/drm/exynos/Makefile @@ -12,7 +12,7 @@ exynosdrm-$(CONFIG_DRM_EXYNOS5433_DECON) += exynos5433_drm_decon.o exynosdrm-$(CONFIG_DRM_EXYNOS7_DECON) += exynos7_drm_decon.o exynosdrm-$(CONFIG_DRM_EXYNOS_DPI) += exynos_drm_dpi.o exynosdrm-$(CONFIG_DRM_EXYNOS_DSI) += exynos_drm_dsi.o -exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp_core.o exynos_dp_reg.o +exynosdrm-$(CONFIG_DRM_EXYNOS_DP) += exynos_dp.o exynosdrm-$(CONFIG_DRM_EXYNOS_MIXER) += exynos_mixer.o exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynosdrm-$(CONFIG_DRM_EXYNOS_VIDI) += exynos_drm_vidi.o diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c new file mode 100644 index 000000000000..8ae3d51b5b33 --- /dev/null +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -0,0 +1,314 @@ +/* + * Samsung SoC DP (Display Port) interface driver. + * + * Copyright (C) 2012 Samsung Electronics Co., Ltd. 
+ * Author: Jingoo Han <jg1.han@samsung.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/clk.h> +#include <linux/of_graph.h> +#include <linux/component.h> +#include <video/of_display_timing.h> +#include <video/of_videomode.h> +#include <video/videomode.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_panel.h> + +#include <drm/bridge/analogix_dp.h> +#include <drm/exynos_drm.h> + +#include "exynos_drm_crtc.h" + +#define to_dp(nm) container_of(nm, struct exynos_dp_device, nm) + +struct exynos_dp_device { + struct drm_encoder encoder; + struct drm_connector connector; + struct drm_bridge *ptn_bridge; + struct drm_device *drm_dev; + struct device *dev; + + struct videomode vm; + struct analogix_dp_plat_data plat_data; +}; + +int exynos_dp_crtc_clock_enable(struct analogix_dp_plat_data *plat_data, + bool enable) +{ + struct exynos_dp_device *dp = to_dp(plat_data); + struct drm_encoder *encoder = &dp->encoder; + struct exynos_drm_crtc *crtc; + + if (!encoder) + return -1; + + crtc = to_exynos_crtc(encoder->crtc); + if (crtc && crtc->ops && crtc->ops->clock_enable) + crtc->ops->clock_enable(crtc, enable); + + return 0; +} + +static int exynos_dp_poweron(struct analogix_dp_plat_data *plat_data) +{ + return exynos_dp_crtc_clock_enable(plat_data, true); +} + +static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data) +{ + return exynos_dp_crtc_clock_enable(plat_data, false); +} + +static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) +{ + struct exynos_dp_device *dp = to_dp(plat_data); + struct drm_connector *connector = &dp->connector; + struct drm_display_mode *mode; + int num_modes = 0; + + if (dp->plat_data.panel) + return num_modes; + + mode = drm_mode_create(connector->dev); + if (!mode) { + DRM_ERROR("failed to create a new display mode.\n"); + return num_modes; + } + + drm_display_mode_from_videomode(&dp->vm, mode); + connector->display_info.width_mm = mode->width_mm; + connector->display_info.height_mm = mode->height_mm; + + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + + return num_modes + 1; +} + +static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, + struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct exynos_dp_device *dp = to_dp(plat_data); + struct drm_encoder *encoder = &dp->encoder; + int ret; + + drm_connector_register(connector); + + /* Pre-empt DP connector creation if there's a bridge */ + if (dp->ptn_bridge) { + bridge->next = dp->ptn_bridge; + dp->ptn_bridge->encoder = encoder; + ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge); + if (ret) { + DRM_ERROR("Failed to attach bridge to drm\n"); + bridge->next = NULL; + return ret; + } + } + + return 0; +} + +static void exynos_dp_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void exynos_dp_nop(struct drm_encoder *encoder) +{ + /* do nothing */ +} + +static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { + .mode_set = exynos_dp_mode_set, + .enable = exynos_dp_nop, + .disable = 
exynos_dp_nop, +}; + +static const struct drm_encoder_funcs exynos_dp_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) +{ + int ret; + + ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE); + if (ret) { + DRM_ERROR("failed: of_get_videomode() : %d\n", ret); + return ret; + } + return 0; +} + +static int exynos_dp_bind(struct device *dev, struct device *master, void *data) +{ + struct exynos_dp_device *dp = dev_get_drvdata(dev); + struct drm_encoder *encoder = &dp->encoder; + struct drm_device *drm_dev = data; + int pipe, ret; + + /* + * As noted in the probe function, we no longer need the device + * drvdata here; set it to NULL so that the analogix dp driver can + * take charge of it. + */ + dev_set_drvdata(dev, NULL); + + dp->dev = dev; + dp->drm_dev = drm_dev; + + dp->plat_data.dev_type = EXYNOS_DP; + dp->plat_data.power_on = exynos_dp_poweron; + dp->plat_data.power_off = exynos_dp_poweroff; + dp->plat_data.attach = exynos_dp_bridge_attach; + dp->plat_data.get_modes = exynos_dp_get_modes; + + if (!dp->plat_data.panel && !dp->ptn_bridge) { + ret = exynos_dp_dt_parse_panel(dp); + if (ret) + return ret; + } + + pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + if (pipe < 0) + return pipe; + + encoder->possible_crtcs = 1 << pipe; + + DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); + + drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, + DRM_MODE_ENCODER_TMDS, NULL); + + drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); + + dp->plat_data.encoder = encoder; + + return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); +} + +static void exynos_dp_unbind(struct device *dev, struct device *master, + void *data) +{ + return analogix_dp_unbind(dev, master, data); +} + +static const struct component_ops exynos_dp_ops = { + .bind = exynos_dp_bind, + .unbind = exynos_dp_unbind, +}; + +static int exynos_dp_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = NULL, *endpoint = NULL; + struct exynos_dp_device *dp; + + dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), + GFP_KERNEL); + if (!dp) + return -ENOMEM; + + /* + * We only use the drvdata until the component framework calls our + * bind function; exynos_dp_bind() then sets it to NULL so that the + * analogix dp driver can take charge of it. + */ + platform_set_drvdata(pdev, dp); + + /* This is for backward compatibility. 
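+ * Old device trees describe the panel with a bare "panel" phandle + * rather than an of_graph port/endpoint, so try that lookup first and + * fall back to of_graph_get_next_endpoint() only when it is absent.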
*/ + np = of_parse_phandle(dev->of_node, "panel", 0); + if (np) { + dp->plat_data.panel = of_drm_find_panel(np); + of_node_put(np); + if (!dp->plat_data.panel) + return -EPROBE_DEFER; + goto out; + } + + endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); + if (endpoint) { + np = of_graph_get_remote_port_parent(endpoint); + if (np) { + /* The remote port can be either a panel or a bridge */ + dp->plat_data.panel = of_drm_find_panel(np); + if (!dp->plat_data.panel) { + dp->ptn_bridge = of_drm_find_bridge(np); + if (!dp->ptn_bridge) { + of_node_put(np); + return -EPROBE_DEFER; + } + } + of_node_put(np); + } else { + DRM_ERROR("no remote endpoint device node found.\n"); + return -EINVAL; + } + } else { + DRM_ERROR("no port endpoint subnode found.\n"); + return -EINVAL; + } + +out: + return component_add(&pdev->dev, &exynos_dp_ops); +} + +static int exynos_dp_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, &exynos_dp_ops); + + return 0; +} + +#ifdef CONFIG_PM +static int exynos_dp_suspend(struct device *dev) +{ + return analogix_dp_suspend(dev); +} + +static int exynos_dp_resume(struct device *dev) +{ + return analogix_dp_resume(dev); +} +#endif + +static const struct dev_pm_ops exynos_dp_pm_ops = { + SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL) +}; + +static const struct of_device_id exynos_dp_match[] = { + { .compatible = "samsung,exynos5-dp" }, + {}, +}; +MODULE_DEVICE_TABLE(of, exynos_dp_match); + +struct platform_driver dp_driver = { + .probe = exynos_dp_probe, + .remove = exynos_dp_remove, + .driver = { + .name = "exynos-dp", + .owner = THIS_MODULE, + .pm = &exynos_dp_pm_ops, + .of_match_table = exynos_dp_match, + }, +}; + +MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); +MODULE_DESCRIPTION("Samsung Specific Analogix-DP Driver Extension"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c deleted file mode 100644 index cff8dc788820..000000000000 --- a/drivers/gpu/drm/exynos/exynos_dp_core.c +++ /dev/null @@ -1,1499 +0,0 @@ -/* - * Samsung SoC DP (Display Port) interface driver. - * - * Copyright (C) 2012 Samsung Electronics Co., Ltd. - * Author: Jingoo Han <jg1.han@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/err.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/of.h> -#include <linux/of_gpio.h> -#include <linux/of_graph.h> -#include <linux/gpio.h> -#include <linux/component.h> -#include <linux/phy/phy.h> -#include <video/of_display_timing.h> -#include <video/of_videomode.h> - -#include <drm/drmP.h> -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_atomic_helper.h> -#include <drm/drm_panel.h> - -#include "exynos_dp_core.h" -#include "exynos_drm_crtc.h" - -#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ - connector) - -static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp) -{ - return to_exynos_crtc(dp->encoder.crtc); -} - -static inline struct exynos_dp_device *encoder_to_dp( - struct drm_encoder *e) -{ - return container_of(e, struct exynos_dp_device, encoder); -} - -struct bridge_init { - struct i2c_client *client; - struct device_node *node; -}; - -static void exynos_dp_init_dp(struct exynos_dp_device *dp) -{ - exynos_dp_reset(dp); - - exynos_dp_swreset(dp); - - exynos_dp_init_analog_param(dp); - exynos_dp_init_interrupt(dp); - - /* SW defined function Normal operation */ - exynos_dp_enable_sw_function(dp); - - exynos_dp_config_interrupt(dp); - exynos_dp_init_analog_func(dp); - - exynos_dp_init_hpd(dp); - exynos_dp_init_aux(dp); -} - -static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) -{ - int timeout_loop = 0; - - while (exynos_dp_get_plug_in_status(dp) != 0) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "failed to get hpd plug status\n"); - return -ETIMEDOUT; - } - usleep_range(10, 11); - } - - return 0; -} - -static unsigned char exynos_dp_calc_edid_check_sum(unsigned char *edid_data) -{ - int i; - unsigned char sum = 0; - - for (i = 0; i < EDID_BLOCK_LENGTH; i++) - sum = sum + edid_data[i]; - - return sum; -} - -static int exynos_dp_read_edid(struct exynos_dp_device *dp) -{ - unsigned char edid[EDID_BLOCK_LENGTH * 2]; - unsigned int extend_block = 0; - unsigned char sum; - unsigned char test_vector; - int retval; - - /* - * EDID device address is 0x50. - * However, if necessary, you must have set upper address - * into E-EDID in I2C device, 0x30. 
- */ - - /* Read Extension Flag, Number of 128-byte EDID extension blocks */ - retval = exynos_dp_read_byte_from_i2c(dp, I2C_EDID_DEVICE_ADDR, - EDID_EXTENSION_FLAG, - &extend_block); - if (retval) - return retval; - - if (extend_block > 0) { - dev_dbg(dp->dev, "EDID data includes a single extension!\n"); - - /* Read EDID data */ - retval = exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR, - EDID_HEADER_PATTERN, - EDID_BLOCK_LENGTH, - &edid[EDID_HEADER_PATTERN]); - if (retval != 0) { - dev_err(dp->dev, "EDID Read failed!\n"); - return -EIO; - } - sum = exynos_dp_calc_edid_check_sum(edid); - if (sum != 0) { - dev_err(dp->dev, "EDID bad checksum!\n"); - return -EIO; - } - - /* Read additional EDID data */ - retval = exynos_dp_read_bytes_from_i2c(dp, - I2C_EDID_DEVICE_ADDR, - EDID_BLOCK_LENGTH, - EDID_BLOCK_LENGTH, - &edid[EDID_BLOCK_LENGTH]); - if (retval != 0) { - dev_err(dp->dev, "EDID Read failed!\n"); - return -EIO; - } - sum = exynos_dp_calc_edid_check_sum(&edid[EDID_BLOCK_LENGTH]); - if (sum != 0) { - dev_err(dp->dev, "EDID bad checksum!\n"); - return -EIO; - } - - exynos_dp_read_byte_from_dpcd(dp, DP_TEST_REQUEST, - &test_vector); - if (test_vector & DP_TEST_LINK_EDID_READ) { - exynos_dp_write_byte_to_dpcd(dp, - DP_TEST_EDID_CHECKSUM, - edid[EDID_BLOCK_LENGTH + EDID_CHECKSUM]); - exynos_dp_write_byte_to_dpcd(dp, - DP_TEST_RESPONSE, - DP_TEST_EDID_CHECKSUM_WRITE); - } - } else { - dev_info(dp->dev, "EDID data does not include any extensions.\n"); - - /* Read EDID data */ - retval = exynos_dp_read_bytes_from_i2c(dp, - I2C_EDID_DEVICE_ADDR, - EDID_HEADER_PATTERN, - EDID_BLOCK_LENGTH, - &edid[EDID_HEADER_PATTERN]); - if (retval != 0) { - dev_err(dp->dev, "EDID Read failed!\n"); - return -EIO; - } - sum = exynos_dp_calc_edid_check_sum(edid); - if (sum != 0) { - dev_err(dp->dev, "EDID bad checksum!\n"); - return -EIO; - } - - exynos_dp_read_byte_from_dpcd(dp, - DP_TEST_REQUEST, - &test_vector); - if (test_vector & DP_TEST_LINK_EDID_READ) { - exynos_dp_write_byte_to_dpcd(dp, - DP_TEST_EDID_CHECKSUM, - edid[EDID_CHECKSUM]); - exynos_dp_write_byte_to_dpcd(dp, - DP_TEST_RESPONSE, - DP_TEST_EDID_CHECKSUM_WRITE); - } - } - - dev_dbg(dp->dev, "EDID Read success!\n"); - return 0; -} - -static int exynos_dp_handle_edid(struct exynos_dp_device *dp) -{ - u8 buf[12]; - int i; - int retval; - - /* Read DPCD DP_DPCD_REV~RECEIVE_PORT1_CAP_1 */ - retval = exynos_dp_read_bytes_from_dpcd(dp, DP_DPCD_REV, - 12, buf); - if (retval) - return retval; - - /* Read EDID */ - for (i = 0; i < 3; i++) { - retval = exynos_dp_read_edid(dp); - if (!retval) - break; - } - - return retval; -} - -static void exynos_dp_enable_rx_to_enhanced_mode(struct exynos_dp_device *dp, - bool enable) -{ - u8 data; - - exynos_dp_read_byte_from_dpcd(dp, DP_LANE_COUNT_SET, &data); - - if (enable) - exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET, - DP_LANE_COUNT_ENHANCED_FRAME_EN | - DPCD_LANE_COUNT_SET(data)); - else - exynos_dp_write_byte_to_dpcd(dp, DP_LANE_COUNT_SET, - DPCD_LANE_COUNT_SET(data)); -} - -static int exynos_dp_is_enhanced_mode_available(struct exynos_dp_device *dp) -{ - u8 data; - int retval; - - exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data); - retval = DPCD_ENHANCED_FRAME_CAP(data); - - return retval; -} - -static void exynos_dp_set_enhanced_mode(struct exynos_dp_device *dp) -{ - u8 data; - - data = exynos_dp_is_enhanced_mode_available(dp); - exynos_dp_enable_rx_to_enhanced_mode(dp, data); - exynos_dp_enable_enhanced_mode(dp, data); -} - -static void exynos_dp_training_pattern_dis(struct 
exynos_dp_device *dp) -{ - exynos_dp_set_training_pattern(dp, DP_NONE); - - exynos_dp_write_byte_to_dpcd(dp, - DP_TRAINING_PATTERN_SET, - DP_TRAINING_PATTERN_DISABLE); -} - -static void exynos_dp_set_lane_lane_pre_emphasis(struct exynos_dp_device *dp, - int pre_emphasis, int lane) -{ - switch (lane) { - case 0: - exynos_dp_set_lane0_pre_emphasis(dp, pre_emphasis); - break; - case 1: - exynos_dp_set_lane1_pre_emphasis(dp, pre_emphasis); - break; - - case 2: - exynos_dp_set_lane2_pre_emphasis(dp, pre_emphasis); - break; - - case 3: - exynos_dp_set_lane3_pre_emphasis(dp, pre_emphasis); - break; - } -} - -static int exynos_dp_link_start(struct exynos_dp_device *dp) -{ - u8 buf[4]; - int lane, lane_count, pll_tries, retval; - - lane_count = dp->link_train.lane_count; - - dp->link_train.lt_state = CLOCK_RECOVERY; - dp->link_train.eq_loop = 0; - - for (lane = 0; lane < lane_count; lane++) - dp->link_train.cr_loop[lane] = 0; - - /* Set link rate and count as you want to establish*/ - exynos_dp_set_link_bandwidth(dp, dp->link_train.link_rate); - exynos_dp_set_lane_count(dp, dp->link_train.lane_count); - - /* Setup RX configuration */ - buf[0] = dp->link_train.link_rate; - buf[1] = dp->link_train.lane_count; - retval = exynos_dp_write_bytes_to_dpcd(dp, DP_LINK_BW_SET, - 2, buf); - if (retval) - return retval; - - /* Set TX pre-emphasis to minimum */ - for (lane = 0; lane < lane_count; lane++) - exynos_dp_set_lane_lane_pre_emphasis(dp, - PRE_EMPHASIS_LEVEL_0, lane); - - /* Wait for PLL lock */ - pll_tries = 0; - while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - if (pll_tries == DP_TIMEOUT_LOOP_COUNT) { - dev_err(dp->dev, "Wait for PLL lock timed out\n"); - return -ETIMEDOUT; - } - - pll_tries++; - usleep_range(90, 120); - } - - /* Set training pattern 1 */ - exynos_dp_set_training_pattern(dp, TRAINING_PTN1); - - /* Set RX training pattern */ - retval = exynos_dp_write_byte_to_dpcd(dp, - DP_TRAINING_PATTERN_SET, - DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_1); - if (retval) - return retval; - - for (lane = 0; lane < lane_count; lane++) - buf[lane] = DP_TRAIN_PRE_EMPH_LEVEL_0 | - DP_TRAIN_VOLTAGE_SWING_LEVEL_0; - - retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET, - lane_count, buf); - - return retval; -} - -static unsigned char exynos_dp_get_lane_status(u8 link_status[2], int lane) -{ - int shift = (lane & 1) * 4; - u8 link_value = link_status[lane>>1]; - - return (link_value >> shift) & 0xf; -} - -static int exynos_dp_clock_recovery_ok(u8 link_status[2], int lane_count) -{ - int lane; - u8 lane_status; - - for (lane = 0; lane < lane_count; lane++) { - lane_status = exynos_dp_get_lane_status(link_status, lane); - if ((lane_status & DP_LANE_CR_DONE) == 0) - return -EINVAL; - } - return 0; -} - -static int exynos_dp_channel_eq_ok(u8 link_status[2], u8 link_align, - int lane_count) -{ - int lane; - u8 lane_status; - - if ((link_align & DP_INTERLANE_ALIGN_DONE) == 0) - return -EINVAL; - - for (lane = 0; lane < lane_count; lane++) { - lane_status = exynos_dp_get_lane_status(link_status, lane); - lane_status &= DP_CHANNEL_EQ_BITS; - if (lane_status != DP_CHANNEL_EQ_BITS) - return -EINVAL; - } - - return 0; -} - -static unsigned char exynos_dp_get_adjust_request_voltage(u8 adjust_request[2], - int lane) -{ - int shift = (lane & 1) * 4; - u8 link_value = adjust_request[lane>>1]; - - return (link_value >> shift) & 0x3; -} - -static unsigned char exynos_dp_get_adjust_request_pre_emphasis( - u8 adjust_request[2], - int lane) -{ - int shift = (lane & 1) * 4; - u8 link_value = 
adjust_request[lane>>1]; - - return ((link_value >> shift) & 0xc) >> 2; -} - -static void exynos_dp_set_lane_link_training(struct exynos_dp_device *dp, - u8 training_lane_set, int lane) -{ - switch (lane) { - case 0: - exynos_dp_set_lane0_link_training(dp, training_lane_set); - break; - case 1: - exynos_dp_set_lane1_link_training(dp, training_lane_set); - break; - - case 2: - exynos_dp_set_lane2_link_training(dp, training_lane_set); - break; - - case 3: - exynos_dp_set_lane3_link_training(dp, training_lane_set); - break; - } -} - -static unsigned int exynos_dp_get_lane_link_training( - struct exynos_dp_device *dp, - int lane) -{ - u32 reg; - - switch (lane) { - case 0: - reg = exynos_dp_get_lane0_link_training(dp); - break; - case 1: - reg = exynos_dp_get_lane1_link_training(dp); - break; - case 2: - reg = exynos_dp_get_lane2_link_training(dp); - break; - case 3: - reg = exynos_dp_get_lane3_link_training(dp); - break; - default: - WARN_ON(1); - return 0; - } - - return reg; -} - -static void exynos_dp_reduce_link_rate(struct exynos_dp_device *dp) -{ - exynos_dp_training_pattern_dis(dp); - exynos_dp_set_enhanced_mode(dp); - - dp->link_train.lt_state = FAILED; -} - -static void exynos_dp_get_adjust_training_lane(struct exynos_dp_device *dp, - u8 adjust_request[2]) -{ - int lane, lane_count; - u8 voltage_swing, pre_emphasis, training_lane; - - lane_count = dp->link_train.lane_count; - for (lane = 0; lane < lane_count; lane++) { - voltage_swing = exynos_dp_get_adjust_request_voltage( - adjust_request, lane); - pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis( - adjust_request, lane); - training_lane = DPCD_VOLTAGE_SWING_SET(voltage_swing) | - DPCD_PRE_EMPHASIS_SET(pre_emphasis); - - if (voltage_swing == VOLTAGE_LEVEL_3) - training_lane |= DP_TRAIN_MAX_SWING_REACHED; - if (pre_emphasis == PRE_EMPHASIS_LEVEL_3) - training_lane |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; - - dp->link_train.training_lane[lane] = training_lane; - } -} - -static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) -{ - int lane, lane_count, retval; - u8 voltage_swing, pre_emphasis, training_lane; - u8 link_status[2], adjust_request[2]; - - usleep_range(100, 101); - - lane_count = dp->link_train.lane_count; - - retval = exynos_dp_read_bytes_from_dpcd(dp, - DP_LANE0_1_STATUS, 2, link_status); - if (retval) - return retval; - - retval = exynos_dp_read_bytes_from_dpcd(dp, - DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request); - if (retval) - return retval; - - if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { - /* set training pattern 2 for EQ */ - exynos_dp_set_training_pattern(dp, TRAINING_PTN2); - - retval = exynos_dp_write_byte_to_dpcd(dp, - DP_TRAINING_PATTERN_SET, - DP_LINK_SCRAMBLING_DISABLE | - DP_TRAINING_PATTERN_2); - if (retval) - return retval; - - dev_info(dp->dev, "Link Training Clock Recovery success\n"); - dp->link_train.lt_state = EQUALIZER_TRAINING; - } else { - for (lane = 0; lane < lane_count; lane++) { - training_lane = exynos_dp_get_lane_link_training( - dp, lane); - voltage_swing = exynos_dp_get_adjust_request_voltage( - adjust_request, lane); - pre_emphasis = exynos_dp_get_adjust_request_pre_emphasis( - adjust_request, lane); - - if (DPCD_VOLTAGE_SWING_GET(training_lane) == - voltage_swing && - DPCD_PRE_EMPHASIS_GET(training_lane) == - pre_emphasis) - dp->link_train.cr_loop[lane]++; - - if (dp->link_train.cr_loop[lane] == MAX_CR_LOOP || - voltage_swing == VOLTAGE_LEVEL_3 || - pre_emphasis == PRE_EMPHASIS_LEVEL_3) { - dev_err(dp->dev, "CR Max reached (%d,%d,%d)\n", - 
dp->link_train.cr_loop[lane], - voltage_swing, pre_emphasis); - exynos_dp_reduce_link_rate(dp); - return -EIO; - } - } - } - - exynos_dp_get_adjust_training_lane(dp, adjust_request); - - for (lane = 0; lane < lane_count; lane++) - exynos_dp_set_lane_link_training(dp, - dp->link_train.training_lane[lane], lane); - - retval = exynos_dp_write_bytes_to_dpcd(dp, - DP_TRAINING_LANE0_SET, lane_count, - dp->link_train.training_lane); - if (retval) - return retval; - - return retval; -} - -static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) -{ - int lane, lane_count, retval; - u32 reg; - u8 link_align, link_status[2], adjust_request[2]; - - usleep_range(400, 401); - - lane_count = dp->link_train.lane_count; - - retval = exynos_dp_read_bytes_from_dpcd(dp, - DP_LANE0_1_STATUS, 2, link_status); - if (retval) - return retval; - - if (exynos_dp_clock_recovery_ok(link_status, lane_count)) { - exynos_dp_reduce_link_rate(dp); - return -EIO; - } - - retval = exynos_dp_read_bytes_from_dpcd(dp, - DP_ADJUST_REQUEST_LANE0_1, 2, adjust_request); - if (retval) - return retval; - - retval = exynos_dp_read_byte_from_dpcd(dp, - DP_LANE_ALIGN_STATUS_UPDATED, &link_align); - if (retval) - return retval; - - exynos_dp_get_adjust_training_lane(dp, adjust_request); - - if (!exynos_dp_channel_eq_ok(link_status, link_align, lane_count)) { - /* traing pattern Set to Normal */ - exynos_dp_training_pattern_dis(dp); - - dev_info(dp->dev, "Link Training success!\n"); - - exynos_dp_get_link_bandwidth(dp, ®); - dp->link_train.link_rate = reg; - dev_dbg(dp->dev, "final bandwidth = %.2x\n", - dp->link_train.link_rate); - - exynos_dp_get_lane_count(dp, ®); - dp->link_train.lane_count = reg; - dev_dbg(dp->dev, "final lane count = %.2x\n", - dp->link_train.lane_count); - - /* set enhanced mode if available */ - exynos_dp_set_enhanced_mode(dp); - dp->link_train.lt_state = FINISHED; - - return 0; - } - - /* not all locked */ - dp->link_train.eq_loop++; - - if (dp->link_train.eq_loop > MAX_EQ_LOOP) { - dev_err(dp->dev, "EQ Max loop\n"); - exynos_dp_reduce_link_rate(dp); - return -EIO; - } - - for (lane = 0; lane < lane_count; lane++) - exynos_dp_set_lane_link_training(dp, - dp->link_train.training_lane[lane], lane); - - retval = exynos_dp_write_bytes_to_dpcd(dp, DP_TRAINING_LANE0_SET, - lane_count, dp->link_train.training_lane); - - return retval; -} - -static void exynos_dp_get_max_rx_bandwidth(struct exynos_dp_device *dp, - u8 *bandwidth) -{ - u8 data; - - /* - * For DP rev.1.1, Maximum link rate of Main Link lanes - * 0x06 = 1.62 Gbps, 0x0a = 2.7 Gbps - */ - exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LINK_RATE, &data); - *bandwidth = data; -} - -static void exynos_dp_get_max_rx_lane_count(struct exynos_dp_device *dp, - u8 *lane_count) -{ - u8 data; - - /* - * For DP rev.1.1, Maximum number of Main Link lanes - * 0x01 = 1 lane, 0x02 = 2 lanes, 0x04 = 4 lanes - */ - exynos_dp_read_byte_from_dpcd(dp, DP_MAX_LANE_COUNT, &data); - *lane_count = DPCD_MAX_LANE_COUNT(data); -} - -static void exynos_dp_init_training(struct exynos_dp_device *dp, - enum link_lane_count_type max_lane, - enum link_rate_type max_rate) -{ - /* - * MACRO_RST must be applied after the PLL_LOCK to avoid - * the DP inter pair skew issue for at least 10 us - */ - exynos_dp_reset_macro(dp); - - /* Initialize by reading RX's DPCD */ - exynos_dp_get_max_rx_bandwidth(dp, &dp->link_train.link_rate); - exynos_dp_get_max_rx_lane_count(dp, &dp->link_train.lane_count); - - if ((dp->link_train.link_rate != LINK_RATE_1_62GBPS) && - (dp->link_train.link_rate 
!= LINK_RATE_2_70GBPS)) { - dev_err(dp->dev, "Rx Max Link Rate is abnormal :%x !\n", - dp->link_train.link_rate); - dp->link_train.link_rate = LINK_RATE_1_62GBPS; - } - - if (dp->link_train.lane_count == 0) { - dev_err(dp->dev, "Rx Max Lane count is abnormal :%x !\n", - dp->link_train.lane_count); - dp->link_train.lane_count = (u8)LANE_COUNT1; - } - - /* Setup TX lane count & rate */ - if (dp->link_train.lane_count > max_lane) - dp->link_train.lane_count = max_lane; - if (dp->link_train.link_rate > max_rate) - dp->link_train.link_rate = max_rate; - - /* All DP analog module power up */ - exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); -} - -static int exynos_dp_sw_link_training(struct exynos_dp_device *dp) -{ - int retval = 0, training_finished = 0; - - dp->link_train.lt_state = START; - - /* Process here */ - while (!retval && !training_finished) { - switch (dp->link_train.lt_state) { - case START: - retval = exynos_dp_link_start(dp); - if (retval) - dev_err(dp->dev, "LT link start failed!\n"); - break; - case CLOCK_RECOVERY: - retval = exynos_dp_process_clock_recovery(dp); - if (retval) - dev_err(dp->dev, "LT CR failed!\n"); - break; - case EQUALIZER_TRAINING: - retval = exynos_dp_process_equalizer_training(dp); - if (retval) - dev_err(dp->dev, "LT EQ failed!\n"); - break; - case FINISHED: - training_finished = 1; - break; - case FAILED: - return -EREMOTEIO; - } - } - if (retval) - dev_err(dp->dev, "eDP link training failed (%d)\n", retval); - - return retval; -} - -static int exynos_dp_set_link_train(struct exynos_dp_device *dp, - u32 count, - u32 bwtype) -{ - int i; - int retval; - - for (i = 0; i < DP_TIMEOUT_LOOP_COUNT; i++) { - exynos_dp_init_training(dp, count, bwtype); - retval = exynos_dp_sw_link_training(dp); - if (retval == 0) - break; - - usleep_range(100, 110); - } - - return retval; -} - -static int exynos_dp_config_video(struct exynos_dp_device *dp) -{ - int retval = 0; - int timeout_loop = 0; - int done_count = 0; - - exynos_dp_config_video_slave_mode(dp); - - exynos_dp_set_video_color_format(dp); - - if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - dev_err(dp->dev, "PLL is not locked yet.\n"); - return -EINVAL; - } - - for (;;) { - timeout_loop++; - if (exynos_dp_is_slave_video_stream_clock_on(dp) == 0) - break; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "Timeout of video streamclk ok\n"); - return -ETIMEDOUT; - } - - usleep_range(1, 2); - } - - /* Set to use the register calculated M/N video */ - exynos_dp_set_video_cr_mn(dp, CALCULATED_M, 0, 0); - - /* For video bist, Video timing must be generated by register */ - exynos_dp_set_video_timing_mode(dp, VIDEO_TIMING_FROM_CAPTURE); - - /* Disable video mute */ - exynos_dp_enable_video_mute(dp, 0); - - /* Configure video slave mode */ - exynos_dp_enable_video_master(dp, 0); - - timeout_loop = 0; - - for (;;) { - timeout_loop++; - if (exynos_dp_is_video_stream_on(dp) == 0) { - done_count++; - if (done_count > 10) - break; - } else if (done_count) { - done_count = 0; - } - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "Timeout of video streamclk ok\n"); - return -ETIMEDOUT; - } - - usleep_range(1000, 1001); - } - - if (retval != 0) - dev_err(dp->dev, "Video stream is not detected!\n"); - - return retval; -} - -static void exynos_dp_enable_scramble(struct exynos_dp_device *dp, bool enable) -{ - u8 data; - - if (enable) { - exynos_dp_enable_scrambling(dp); - - exynos_dp_read_byte_from_dpcd(dp, - DP_TRAINING_PATTERN_SET, - &data); - exynos_dp_write_byte_to_dpcd(dp, - 
DP_TRAINING_PATTERN_SET, - (u8)(data & ~DP_LINK_SCRAMBLING_DISABLE)); - } else { - exynos_dp_disable_scrambling(dp); - - exynos_dp_read_byte_from_dpcd(dp, - DP_TRAINING_PATTERN_SET, - &data); - exynos_dp_write_byte_to_dpcd(dp, - DP_TRAINING_PATTERN_SET, - (u8)(data | DP_LINK_SCRAMBLING_DISABLE)); - } -} - -static irqreturn_t exynos_dp_irq_handler(int irq, void *arg) -{ - struct exynos_dp_device *dp = arg; - - enum dp_irq_type irq_type; - - irq_type = exynos_dp_get_irq_type(dp); - switch (irq_type) { - case DP_IRQ_TYPE_HP_CABLE_IN: - dev_dbg(dp->dev, "Received irq - cable in\n"); - schedule_work(&dp->hotplug_work); - exynos_dp_clear_hotplug_interrupts(dp); - break; - case DP_IRQ_TYPE_HP_CABLE_OUT: - dev_dbg(dp->dev, "Received irq - cable out\n"); - exynos_dp_clear_hotplug_interrupts(dp); - break; - case DP_IRQ_TYPE_HP_CHANGE: - /* - * We get these change notifications once in a while, but there - * is nothing we can do with them. Just ignore it for now and - * only handle cable changes. - */ - dev_dbg(dp->dev, "Received irq - hotplug change; ignoring.\n"); - exynos_dp_clear_hotplug_interrupts(dp); - break; - default: - dev_err(dp->dev, "Received irq - unknown type!\n"); - break; - } - return IRQ_HANDLED; -} - -static void exynos_dp_hotplug(struct work_struct *work) -{ - struct exynos_dp_device *dp; - - dp = container_of(work, struct exynos_dp_device, hotplug_work); - - if (dp->drm_dev) - drm_helper_hpd_irq_event(dp->drm_dev); -} - -static void exynos_dp_commit(struct drm_encoder *encoder) -{ - struct exynos_dp_device *dp = encoder_to_dp(encoder); - int ret; - - /* Keep the panel disabled while we configure video */ - if (dp->panel) { - if (drm_panel_disable(dp->panel)) - DRM_ERROR("failed to disable the panel\n"); - } - - ret = exynos_dp_detect_hpd(dp); - if (ret) { - /* Cable has been disconnected, we're done */ - return; - } - - ret = exynos_dp_handle_edid(dp); - if (ret) { - dev_err(dp->dev, "unable to handle edid\n"); - return; - } - - ret = exynos_dp_set_link_train(dp, dp->video_info->lane_count, - dp->video_info->link_rate); - if (ret) { - dev_err(dp->dev, "unable to do link train\n"); - return; - } - - exynos_dp_enable_scramble(dp, 1); - exynos_dp_enable_rx_to_enhanced_mode(dp, 1); - exynos_dp_enable_enhanced_mode(dp, 1); - - exynos_dp_set_lane_count(dp, dp->video_info->lane_count); - exynos_dp_set_link_bandwidth(dp, dp->video_info->link_rate); - - exynos_dp_init_video(dp); - ret = exynos_dp_config_video(dp); - if (ret) - dev_err(dp->dev, "unable to config video\n"); - - /* Safe to enable the panel now */ - if (dp->panel) { - if (drm_panel_enable(dp->panel)) - DRM_ERROR("failed to enable the panel\n"); - } - - /* Enable video */ - exynos_dp_start_video(dp); -} - -static enum drm_connector_status exynos_dp_detect( - struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - -static void exynos_dp_connector_destroy(struct drm_connector *connector) -{ - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - -static const struct drm_connector_funcs exynos_dp_connector_funcs = { - .dpms = drm_atomic_helper_connector_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = exynos_dp_detect, - .destroy = exynos_dp_connector_destroy, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int exynos_dp_get_modes(struct drm_connector *connector) -{ - struct exynos_dp_device *dp 
= ctx_from_connector(connector); - struct drm_display_mode *mode; - - if (dp->panel) - return drm_panel_get_modes(dp->panel); - - mode = drm_mode_create(connector->dev); - if (!mode) { - DRM_ERROR("failed to create a new display mode.\n"); - return 0; - } - - drm_display_mode_from_videomode(&dp->vm, mode); - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - - return 1; -} - -static struct drm_encoder *exynos_dp_best_encoder( - struct drm_connector *connector) -{ - struct exynos_dp_device *dp = ctx_from_connector(connector); - - return &dp->encoder; -} - -static const struct drm_connector_helper_funcs exynos_dp_connector_helper_funcs = { - .get_modes = exynos_dp_get_modes, - .best_encoder = exynos_dp_best_encoder, -}; - -/* returns the number of bridges attached */ -static int exynos_drm_attach_lcd_bridge(struct exynos_dp_device *dp, - struct drm_encoder *encoder) -{ - int ret; - - encoder->bridge->next = dp->ptn_bridge; - dp->ptn_bridge->encoder = encoder; - ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge); - if (ret) { - DRM_ERROR("Failed to attach bridge to drm\n"); - return ret; - } - - return 0; -} - -static int exynos_dp_bridge_attach(struct drm_bridge *bridge) -{ - struct exynos_dp_device *dp = bridge->driver_private; - struct drm_encoder *encoder = &dp->encoder; - struct drm_connector *connector = &dp->connector; - int ret; - - /* Pre-empt DP connector creation if there's a bridge */ - if (dp->ptn_bridge) { - ret = exynos_drm_attach_lcd_bridge(dp, encoder); - if (!ret) - return 0; - } - - connector->polled = DRM_CONNECTOR_POLL_HPD; - - ret = drm_connector_init(dp->drm_dev, connector, - &exynos_dp_connector_funcs, DRM_MODE_CONNECTOR_eDP); - if (ret) { - DRM_ERROR("Failed to initialize connector with drm\n"); - return ret; - } - - drm_connector_helper_add(connector, &exynos_dp_connector_helper_funcs); - drm_connector_register(connector); - drm_mode_connector_attach_encoder(connector, encoder); - - if (dp->panel) - ret = drm_panel_attach(dp->panel, &dp->connector); - - return ret; -} - -static void exynos_dp_bridge_enable(struct drm_bridge *bridge) -{ - struct exynos_dp_device *dp = bridge->driver_private; - struct exynos_drm_crtc *crtc = dp_to_crtc(dp); - - if (dp->dpms_mode == DRM_MODE_DPMS_ON) - return; - - pm_runtime_get_sync(dp->dev); - - if (dp->panel) { - if (drm_panel_prepare(dp->panel)) { - DRM_ERROR("failed to setup the panel\n"); - return; - } - } - - if (crtc->ops->clock_enable) - crtc->ops->clock_enable(dp_to_crtc(dp), true); - - phy_power_on(dp->phy); - exynos_dp_init_dp(dp); - enable_irq(dp->irq); - exynos_dp_commit(&dp->encoder); - - dp->dpms_mode = DRM_MODE_DPMS_ON; -} - -static void exynos_dp_bridge_disable(struct drm_bridge *bridge) -{ - struct exynos_dp_device *dp = bridge->driver_private; - struct exynos_drm_crtc *crtc = dp_to_crtc(dp); - - if (dp->dpms_mode != DRM_MODE_DPMS_ON) - return; - - if (dp->panel) { - if (drm_panel_disable(dp->panel)) { - DRM_ERROR("failed to disable the panel\n"); - return; - } - } - - disable_irq(dp->irq); - flush_work(&dp->hotplug_work); - phy_power_off(dp->phy); - - if (crtc->ops->clock_enable) - crtc->ops->clock_enable(dp_to_crtc(dp), false); - - if (dp->panel) { - if (drm_panel_unprepare(dp->panel)) - DRM_ERROR("failed to turnoff the panel\n"); - } - - pm_runtime_put_sync(dp->dev); - - dp->dpms_mode = DRM_MODE_DPMS_OFF; -} - -static void 
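/*
 * The drm_bridge core of this era invoked every funcs hook without a
 * NULL check, so even callbacks with nothing to do needed a stub; the
 * empty handler below fills the pre_enable/post_disable slots.
 */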
exynos_dp_bridge_nop(struct drm_bridge *bridge) -{ - /* do nothing */ -} - -static const struct drm_bridge_funcs exynos_dp_bridge_funcs = { - .enable = exynos_dp_bridge_enable, - .disable = exynos_dp_bridge_disable, - .pre_enable = exynos_dp_bridge_nop, - .post_disable = exynos_dp_bridge_nop, - .attach = exynos_dp_bridge_attach, -}; - -static int exynos_dp_create_connector(struct drm_encoder *encoder) -{ - struct exynos_dp_device *dp = encoder_to_dp(encoder); - struct drm_device *drm_dev = dp->drm_dev; - struct drm_bridge *bridge; - int ret; - - bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("failed to allocate for drm bridge\n"); - return -ENOMEM; - } - - dp->bridge = bridge; - - encoder->bridge = bridge; - bridge->driver_private = dp; - bridge->encoder = encoder; - bridge->funcs = &exynos_dp_bridge_funcs; - - ret = drm_bridge_attach(drm_dev, bridge); - if (ret) { - DRM_ERROR("failed to attach drm bridge\n"); - return -EINVAL; - } - - return 0; -} - -static void exynos_dp_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ -} - -static void exynos_dp_enable(struct drm_encoder *encoder) -{ -} - -static void exynos_dp_disable(struct drm_encoder *encoder) -{ -} - -static const struct drm_encoder_helper_funcs exynos_dp_encoder_helper_funcs = { - .mode_set = exynos_dp_mode_set, - .enable = exynos_dp_enable, - .disable = exynos_dp_disable, -}; - -static const struct drm_encoder_funcs exynos_dp_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - -static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) -{ - struct device_node *dp_node = dev->of_node; - struct video_info *dp_video_config; - - dp_video_config = devm_kzalloc(dev, - sizeof(*dp_video_config), GFP_KERNEL); - if (!dp_video_config) - return ERR_PTR(-ENOMEM); - - dp_video_config->h_sync_polarity = - of_property_read_bool(dp_node, "hsync-active-high"); - - dp_video_config->v_sync_polarity = - of_property_read_bool(dp_node, "vsync-active-high"); - - dp_video_config->interlaced = - of_property_read_bool(dp_node, "interlaced"); - - if (of_property_read_u32(dp_node, "samsung,color-space", - &dp_video_config->color_space)) { - dev_err(dev, "failed to get color-space\n"); - return ERR_PTR(-EINVAL); - } - - if (of_property_read_u32(dp_node, "samsung,dynamic-range", - &dp_video_config->dynamic_range)) { - dev_err(dev, "failed to get dynamic-range\n"); - return ERR_PTR(-EINVAL); - } - - if (of_property_read_u32(dp_node, "samsung,ycbcr-coeff", - &dp_video_config->ycbcr_coeff)) { - dev_err(dev, "failed to get ycbcr-coeff\n"); - return ERR_PTR(-EINVAL); - } - - if (of_property_read_u32(dp_node, "samsung,color-depth", - &dp_video_config->color_depth)) { - dev_err(dev, "failed to get color-depth\n"); - return ERR_PTR(-EINVAL); - } - - if (of_property_read_u32(dp_node, "samsung,link-rate", - &dp_video_config->link_rate)) { - dev_err(dev, "failed to get link-rate\n"); - return ERR_PTR(-EINVAL); - } - - if (of_property_read_u32(dp_node, "samsung,lane-count", - &dp_video_config->lane_count)) { - dev_err(dev, "failed to get lane-count\n"); - return ERR_PTR(-EINVAL); - } - - return dp_video_config; -} - -static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) -{ - int ret; - - ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE); - if (ret) { - DRM_ERROR("failed: of_get_videomode() : %d\n", ret); - return ret; - } - return 0; -} - -static int exynos_dp_bind(struct device *dev, struct device *master, 
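		/*
		 * Invoked through the component framework once all
		 * components have probed; @data is the drm_device the
		 * encoder and connector register with.
		 */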
void *data) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - struct platform_device *pdev = to_platform_device(dev); - struct drm_device *drm_dev = data; - struct drm_encoder *encoder = &dp->encoder; - struct resource *res; - unsigned int irq_flags; - int pipe, ret = 0; - - dp->dev = &pdev->dev; - dp->dpms_mode = DRM_MODE_DPMS_OFF; - - dp->video_info = exynos_dp_dt_parse_pdata(&pdev->dev); - if (IS_ERR(dp->video_info)) - return PTR_ERR(dp->video_info); - - dp->phy = devm_phy_get(dp->dev, "dp"); - if (IS_ERR(dp->phy)) { - dev_err(dp->dev, "no DP phy configured\n"); - ret = PTR_ERR(dp->phy); - if (ret) { - /* - * phy itself is not enabled, so we can move forward - * assigning NULL to phy pointer. - */ - if (ret == -ENOSYS || ret == -ENODEV) - dp->phy = NULL; - else - return ret; - } - } - - if (!dp->panel && !dp->ptn_bridge) { - ret = exynos_dp_dt_parse_panel(dp); - if (ret) - return ret; - } - - dp->clock = devm_clk_get(&pdev->dev, "dp"); - if (IS_ERR(dp->clock)) { - dev_err(&pdev->dev, "failed to get clock\n"); - return PTR_ERR(dp->clock); - } - - clk_prepare_enable(dp->clock); - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - - dp->reg_base = devm_ioremap_resource(&pdev->dev, res); - if (IS_ERR(dp->reg_base)) - return PTR_ERR(dp->reg_base); - - dp->hpd_gpio = of_get_named_gpio(dev->of_node, "samsung,hpd-gpio", 0); - - if (gpio_is_valid(dp->hpd_gpio)) { - /* - * Set up the hotplug GPIO from the device tree as an interrupt. - * Simply specifying a different interrupt in the device tree - * doesn't work since we handle hotplug rather differently when - * using a GPIO. We also need the actual GPIO specifier so - * that we can get the current state of the GPIO. - */ - ret = devm_gpio_request_one(&pdev->dev, dp->hpd_gpio, GPIOF_IN, - "hpd_gpio"); - if (ret) { - dev_err(&pdev->dev, "failed to get hpd gpio\n"); - return ret; - } - dp->irq = gpio_to_irq(dp->hpd_gpio); - irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING; - } else { - dp->hpd_gpio = -ENODEV; - dp->irq = platform_get_irq(pdev, 0); - irq_flags = 0; - } - - if (dp->irq == -ENXIO) { - dev_err(&pdev->dev, "failed to get irq\n"); - return -ENODEV; - } - - INIT_WORK(&dp->hotplug_work, exynos_dp_hotplug); - - ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, - irq_flags, "exynos-dp", dp); - if (ret) { - dev_err(&pdev->dev, "failed to request irq\n"); - return ret; - } - disable_irq(dp->irq); - - dp->drm_dev = drm_dev; - - pipe = exynos_drm_crtc_get_pipe_from_type(drm_dev, - EXYNOS_DISPLAY_TYPE_LCD); - if (pipe < 0) - return pipe; - - encoder->possible_crtcs = 1 << pipe; - - DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs); - - drm_encoder_init(drm_dev, encoder, &exynos_dp_encoder_funcs, - DRM_MODE_ENCODER_TMDS, NULL); - - drm_encoder_helper_add(encoder, &exynos_dp_encoder_helper_funcs); - - ret = exynos_dp_create_connector(encoder); - if (ret) { - DRM_ERROR("failed to create connector ret = %d\n", ret); - drm_encoder_cleanup(encoder); - return ret; - } - - return 0; -} - -static void exynos_dp_unbind(struct device *dev, struct device *master, - void *data) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - exynos_dp_disable(&dp->encoder); -} - -static const struct component_ops exynos_dp_ops = { - .bind = exynos_dp_bind, - .unbind = exynos_dp_unbind, -}; - -static int exynos_dp_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct device_node *np = NULL, *endpoint = NULL; - struct exynos_dp_device *dp; - int ret; - - dp = 
devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), - GFP_KERNEL); - if (!dp) - return -ENOMEM; - - platform_set_drvdata(pdev, dp); - - /* This is for the backward compatibility. */ - np = of_parse_phandle(dev->of_node, "panel", 0); - if (np) { - dp->panel = of_drm_find_panel(np); - of_node_put(np); - if (!dp->panel) - return -EPROBE_DEFER; - goto out; - } - - endpoint = of_graph_get_next_endpoint(dev->of_node, NULL); - if (endpoint) { - np = of_graph_get_remote_port_parent(endpoint); - if (np) { - /* The remote port can be either a panel or a bridge */ - dp->panel = of_drm_find_panel(np); - if (!dp->panel) { - dp->ptn_bridge = of_drm_find_bridge(np); - if (!dp->ptn_bridge) { - of_node_put(np); - return -EPROBE_DEFER; - } - } - of_node_put(np); - } else { - DRM_ERROR("no remote endpoint device node found.\n"); - return -EINVAL; - } - } else { - DRM_ERROR("no port endpoint subnode found.\n"); - return -EINVAL; - } - -out: - pm_runtime_enable(dev); - - ret = component_add(&pdev->dev, &exynos_dp_ops); - if (ret) - goto err_disable_pm_runtime; - - return ret; - -err_disable_pm_runtime: - pm_runtime_disable(dev); - - return ret; -} - -static int exynos_dp_remove(struct platform_device *pdev) -{ - pm_runtime_disable(&pdev->dev); - component_del(&pdev->dev, &exynos_dp_ops); - - return 0; -} - -#ifdef CONFIG_PM -static int exynos_dp_suspend(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - - clk_disable_unprepare(dp->clock); - - return 0; -} - -static int exynos_dp_resume(struct device *dev) -{ - struct exynos_dp_device *dp = dev_get_drvdata(dev); - int ret; - - ret = clk_prepare_enable(dp->clock); - if (ret < 0) { - DRM_ERROR("Failed to prepare_enable the clock clk [%d]\n", ret); - return ret; - } - - return 0; -} -#endif - -static const struct dev_pm_ops exynos_dp_pm_ops = { - SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL) -}; - -static const struct of_device_id exynos_dp_match[] = { - { .compatible = "samsung,exynos5-dp" }, - {}, -}; -MODULE_DEVICE_TABLE(of, exynos_dp_match); - -struct platform_driver dp_driver = { - .probe = exynos_dp_probe, - .remove = exynos_dp_remove, - .driver = { - .name = "exynos-dp", - .owner = THIS_MODULE, - .pm = &exynos_dp_pm_ops, - .of_match_table = exynos_dp_match, - }, -}; - -MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>"); -MODULE_DESCRIPTION("Samsung SoC DP Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h deleted file mode 100644 index b5c2d8f47f9c..000000000000 --- a/drivers/gpu/drm/exynos/exynos_dp_core.h +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Header file for Samsung DP (Display Port) interface driver. - * - * Copyright (C) 2012 Samsung Electronics Co., Ltd. - * Author: Jingoo Han <jg1.han@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef _EXYNOS_DP_CORE_H -#define _EXYNOS_DP_CORE_H - -#include <drm/drm_crtc.h> -#include <drm/drm_dp_helper.h> -#include <drm/exynos_drm.h> -#include <video/videomode.h> - -#include "exynos_drm_drv.h" - -#define DP_TIMEOUT_LOOP_COUNT 100 -#define MAX_CR_LOOP 5 -#define MAX_EQ_LOOP 5 - -enum link_rate_type { - LINK_RATE_1_62GBPS = 0x06, - LINK_RATE_2_70GBPS = 0x0a -}; - -enum link_lane_count_type { - LANE_COUNT1 = 1, - LANE_COUNT2 = 2, - LANE_COUNT4 = 4 -}; - -enum link_training_state { - START, - CLOCK_RECOVERY, - EQUALIZER_TRAINING, - FINISHED, - FAILED -}; - -enum voltage_swing_level { - VOLTAGE_LEVEL_0, - VOLTAGE_LEVEL_1, - VOLTAGE_LEVEL_2, - VOLTAGE_LEVEL_3, -}; - -enum pre_emphasis_level { - PRE_EMPHASIS_LEVEL_0, - PRE_EMPHASIS_LEVEL_1, - PRE_EMPHASIS_LEVEL_2, - PRE_EMPHASIS_LEVEL_3, -}; - -enum pattern_set { - PRBS7, - D10_2, - TRAINING_PTN1, - TRAINING_PTN2, - DP_NONE -}; - -enum color_space { - COLOR_RGB, - COLOR_YCBCR422, - COLOR_YCBCR444 -}; - -enum color_depth { - COLOR_6, - COLOR_8, - COLOR_10, - COLOR_12 -}; - -enum color_coefficient { - COLOR_YCBCR601, - COLOR_YCBCR709 -}; - -enum dynamic_range { - VESA, - CEA -}; - -enum pll_status { - PLL_UNLOCKED, - PLL_LOCKED -}; - -enum clock_recovery_m_value_type { - CALCULATED_M, - REGISTER_M -}; - -enum video_timing_recognition_type { - VIDEO_TIMING_FROM_CAPTURE, - VIDEO_TIMING_FROM_REGISTER -}; - -enum analog_power_block { - AUX_BLOCK, - CH0_BLOCK, - CH1_BLOCK, - CH2_BLOCK, - CH3_BLOCK, - ANALOG_TOTAL, - POWER_ALL -}; - -enum dp_irq_type { - DP_IRQ_TYPE_HP_CABLE_IN, - DP_IRQ_TYPE_HP_CABLE_OUT, - DP_IRQ_TYPE_HP_CHANGE, - DP_IRQ_TYPE_UNKNOWN, -}; - -struct video_info { - char *name; - - bool h_sync_polarity; - bool v_sync_polarity; - bool interlaced; - - enum color_space color_space; - enum dynamic_range dynamic_range; - enum color_coefficient ycbcr_coeff; - enum color_depth color_depth; - - enum link_rate_type link_rate; - enum link_lane_count_type lane_count; -}; - -struct link_train { - int eq_loop; - int cr_loop[4]; - - u8 link_rate; - u8 lane_count; - u8 training_lane[4]; - - enum link_training_state lt_state; -}; - -struct exynos_dp_device { - struct drm_encoder encoder; - struct device *dev; - struct drm_device *drm_dev; - struct drm_connector connector; - struct drm_panel *panel; - struct drm_bridge *bridge; - struct drm_bridge *ptn_bridge; - struct clk *clock; - unsigned int irq; - void __iomem *reg_base; - - struct video_info *video_info; - struct link_train link_train; - struct work_struct hotplug_work; - struct phy *phy; - int dpms_mode; - int hpd_gpio; - struct videomode vm; -}; - -/* exynos_dp_reg.c */ -void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable); -void exynos_dp_stop_video(struct exynos_dp_device *dp); -void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable); -void exynos_dp_init_analog_param(struct exynos_dp_device *dp); -void exynos_dp_init_interrupt(struct exynos_dp_device *dp); -void exynos_dp_reset(struct exynos_dp_device *dp); -void exynos_dp_swreset(struct exynos_dp_device *dp); -void exynos_dp_config_interrupt(struct exynos_dp_device *dp); -enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp); -void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable); -void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, - enum analog_power_block block, - bool enable); -void exynos_dp_init_analog_func(struct exynos_dp_device *dp); -void exynos_dp_init_hpd(struct exynos_dp_device *dp); -enum dp_irq_type 
exynos_dp_get_irq_type(struct exynos_dp_device *dp); -void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp); -void exynos_dp_reset_aux(struct exynos_dp_device *dp); -void exynos_dp_init_aux(struct exynos_dp_device *dp); -int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp); -void exynos_dp_enable_sw_function(struct exynos_dp_device *dp); -int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp); -int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned char data); -int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned char *data); -int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned int count, - unsigned char data[]); -int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned int count, - unsigned char data[]); -int exynos_dp_select_i2c_device(struct exynos_dp_device *dp, - unsigned int device_addr, - unsigned int reg_addr); -int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp, - unsigned int device_addr, - unsigned int reg_addr, - unsigned int *data); -int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp, - unsigned int device_addr, - unsigned int reg_addr, - unsigned int count, - unsigned char edid[]); -void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); -void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); -void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); -void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); -void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable); -void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, - enum pattern_set pattern); -void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level); -void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level); -void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level); -void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level); -void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp, - u32 training_lane); -void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp, - u32 training_lane); -void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp, - u32 training_lane); -void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp, - u32 training_lane); -u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp); -u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp); -u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp); -u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp); -void exynos_dp_reset_macro(struct exynos_dp_device *dp); -void exynos_dp_init_video(struct exynos_dp_device *dp); - -void exynos_dp_set_video_color_format(struct exynos_dp_device *dp); -int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp); -void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp, - enum clock_recovery_m_value_type type, - u32 m_value, - u32 n_value); -void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type); -void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable); -void exynos_dp_start_video(struct exynos_dp_device *dp); -int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp); -void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp); -void 
exynos_dp_enable_scrambling(struct exynos_dp_device *dp); -void exynos_dp_disable_scrambling(struct exynos_dp_device *dp); - -/* I2C EDID Chip ID, Slave Address */ -#define I2C_EDID_DEVICE_ADDR 0x50 -#define I2C_E_EDID_DEVICE_ADDR 0x30 - -#define EDID_BLOCK_LENGTH 0x80 -#define EDID_HEADER_PATTERN 0x00 -#define EDID_EXTENSION_FLAG 0x7e -#define EDID_CHECKSUM 0x7f - -/* DP_MAX_LANE_COUNT */ -#define DPCD_ENHANCED_FRAME_CAP(x) (((x) >> 7) & 0x1) -#define DPCD_MAX_LANE_COUNT(x) ((x) & 0x1f) - -/* DP_LANE_COUNT_SET */ -#define DPCD_LANE_COUNT_SET(x) ((x) & 0x1f) - -/* DP_TRAINING_LANE0_SET */ -#define DPCD_PRE_EMPHASIS_SET(x) (((x) & 0x3) << 3) -#define DPCD_PRE_EMPHASIS_GET(x) (((x) >> 3) & 0x3) -#define DPCD_VOLTAGE_SWING_SET(x) (((x) & 0x3) << 0) -#define DPCD_VOLTAGE_SWING_GET(x) (((x) >> 0) & 0x3) - -#endif /* _EXYNOS_DP_CORE_H */ diff --git a/drivers/gpu/drm/exynos/exynos_dp_reg.c b/drivers/gpu/drm/exynos/exynos_dp_reg.c deleted file mode 100644 index c1f87a2a9284..000000000000 --- a/drivers/gpu/drm/exynos/exynos_dp_reg.c +++ /dev/null @@ -1,1263 +0,0 @@ -/* - * Samsung DP (Display port) register interface driver. - * - * Copyright (C) 2012 Samsung Electronics Co., Ltd. - * Author: Jingoo Han <jg1.han@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <linux/device.h> -#include <linux/io.h> -#include <linux/delay.h> -#include <linux/gpio.h> - -#include "exynos_dp_core.h" -#include "exynos_dp_reg.h" - -#define COMMON_INT_MASK_1 0 -#define COMMON_INT_MASK_2 0 -#define COMMON_INT_MASK_3 0 -#define COMMON_INT_MASK_4 (HOTPLUG_CHG | HPD_LOST | PLUG) -#define INT_STA_MASK INT_HPD - -void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable) -{ - u32 reg; - - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); - reg |= HDCP_VIDEO_MUTE; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); - reg &= ~HDCP_VIDEO_MUTE; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); - } -} - -void exynos_dp_stop_video(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); - reg &= ~VIDEO_EN; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); -} - -void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable) -{ - u32 reg; - - if (enable) - reg = LANE3_MAP_LOGIC_LANE_0 | LANE2_MAP_LOGIC_LANE_1 | - LANE1_MAP_LOGIC_LANE_2 | LANE0_MAP_LOGIC_LANE_3; - else - reg = LANE3_MAP_LOGIC_LANE_3 | LANE2_MAP_LOGIC_LANE_2 | - LANE1_MAP_LOGIC_LANE_1 | LANE0_MAP_LOGIC_LANE_0; - - writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); -} - -void exynos_dp_init_analog_param(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = TX_TERMINAL_CTRL_50_OHM; - writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1); - - reg = SEL_24M | TX_DVDD_BIT_1_0625V; - writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2); - - reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO; - writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3); - - reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM | - TX_CUR1_2X | TX_CUR_16_MA; - writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1); - - reg = CH3_AMP_400_MV | CH2_AMP_400_MV | - CH1_AMP_400_MV | CH0_AMP_400_MV; - writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL); -} - -void exynos_dp_init_interrupt(struct exynos_dp_device *dp) -{ - /* Set interrupt pin 
assertion polarity as high */ - writel(INT_POL1 | INT_POL0, dp->reg_base + EXYNOS_DP_INT_CTL); - - /* Clear pending regisers */ - writel(0xff, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); - writel(0x4f, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_2); - writel(0xe0, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_3); - writel(0xe7, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); - writel(0x63, dp->reg_base + EXYNOS_DP_INT_STA); - - /* 0:mask,1: unmask */ - writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); - writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); - writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); - writel(0x00, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); - writel(0x00, dp->reg_base + EXYNOS_DP_INT_STA_MASK); -} - -void exynos_dp_reset(struct exynos_dp_device *dp) -{ - u32 reg; - - exynos_dp_stop_video(dp); - exynos_dp_enable_video_mute(dp, 0); - - reg = MASTER_VID_FUNC_EN_N | SLAVE_VID_FUNC_EN_N | - AUD_FIFO_FUNC_EN_N | AUD_FUNC_EN_N | - HDCP_FUNC_EN_N | SW_FUNC_EN_N; - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); - - reg = SSC_FUNC_EN_N | AUX_FUNC_EN_N | - SERDES_FIFO_FUNC_EN_N | - LS_CLK_DOMAIN_FUNC_EN_N; - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); - - usleep_range(20, 30); - - exynos_dp_lane_swap(dp, 0); - - writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_1); - writel(0x40, dp->reg_base + EXYNOS_DP_SYS_CTL_2); - writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_3); - writel(0x0, dp->reg_base + EXYNOS_DP_SYS_CTL_4); - - writel(0x0, dp->reg_base + EXYNOS_DP_PKT_SEND_CTL); - writel(0x0, dp->reg_base + EXYNOS_DP_HDCP_CTL); - - writel(0x5e, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_L); - writel(0x1a, dp->reg_base + EXYNOS_DP_HPD_DEGLITCH_H); - - writel(0x10, dp->reg_base + EXYNOS_DP_LINK_DEBUG_CTL); - - writel(0x0, dp->reg_base + EXYNOS_DP_PHY_TEST); - - writel(0x0, dp->reg_base + EXYNOS_DP_VIDEO_FIFO_THRD); - writel(0x20, dp->reg_base + EXYNOS_DP_AUDIO_MARGIN); - - writel(0x4, dp->reg_base + EXYNOS_DP_M_VID_GEN_FILTER_TH); - writel(0x2, dp->reg_base + EXYNOS_DP_M_AUD_GEN_FILTER_TH); - - writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); -} - -void exynos_dp_swreset(struct exynos_dp_device *dp) -{ - writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET); -} - -void exynos_dp_config_interrupt(struct exynos_dp_device *dp) -{ - u32 reg; - - /* 0: mask, 1: unmask */ - reg = COMMON_INT_MASK_1; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_1); - - reg = COMMON_INT_MASK_2; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_2); - - reg = COMMON_INT_MASK_3; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_3); - - reg = COMMON_INT_MASK_4; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_MASK_4); - - reg = INT_STA_MASK; - writel(reg, dp->reg_base + EXYNOS_DP_INT_STA_MASK); -} - -enum pll_status exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); - if (reg & PLL_LOCK) - return PLL_LOCKED; - else - return PLL_UNLOCKED; -} - -void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable) -{ - u32 reg; - - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); - reg |= DP_PLL_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PLL_CTL); - reg &= ~DP_PLL_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PLL_CTL); - } -} - -void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp, - enum analog_power_block block, - bool enable) -{ - u32 reg; - - switch (block) { - case AUX_BLOCK: - if (enable) { - reg = 
readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg |= AUX_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg &= ~AUX_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - case CH0_BLOCK: - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg |= CH0_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg &= ~CH0_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - case CH1_BLOCK: - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg |= CH1_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg &= ~CH1_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - case CH2_BLOCK: - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg |= CH2_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg &= ~CH2_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - case CH3_BLOCK: - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg |= CH3_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg &= ~CH3_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - case ANALOG_TOTAL: - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg |= DP_PHY_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_PHY_PD); - reg &= ~DP_PHY_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - case POWER_ALL: - if (enable) { - reg = DP_PHY_PD | AUX_PD | CH3_PD | CH2_PD | - CH1_PD | CH0_PD; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_PD); - } else { - writel(0x00, dp->reg_base + EXYNOS_DP_PHY_PD); - } - break; - default: - break; - } -} - -void exynos_dp_init_analog_func(struct exynos_dp_device *dp) -{ - u32 reg; - int timeout_loop = 0; - - exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); - - reg = PLL_LOCK_CHG; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); - - reg = readl(dp->reg_base + EXYNOS_DP_DEBUG_CTL); - reg &= ~(F_PLL_LOCK | PLL_LOCK_CTRL); - writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); - - /* Power up PLL */ - if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - exynos_dp_set_pll_power_down(dp, 0); - - while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "failed to get pll lock status\n"); - return; - } - usleep_range(10, 20); - } - } - - /* Enable Serdes FIFO function and Link symbol clock domain module */ - reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); - reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N - | AUX_FUNC_EN_N); - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); -} - -void exynos_dp_clear_hotplug_interrupts(struct exynos_dp_device *dp) -{ - u32 reg; - - if (gpio_is_valid(dp->hpd_gpio)) - return; - - reg = HOTPLUG_CHG | HPD_LOST | PLUG; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); - - reg = INT_HPD; - writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); -} - -void exynos_dp_init_hpd(struct exynos_dp_device *dp) -{ - u32 reg; - - if (gpio_is_valid(dp->hpd_gpio)) - return; - - exynos_dp_clear_hotplug_interrupts(dp); - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); - reg &= ~(F_HPD | HPD_CTRL); - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); -} - -enum dp_irq_type 
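/*
 * Two hotplug sources exist: boards with a dedicated HPD GPIO read the
 * pin level directly, all others decode the PLUG/HPD_LOST/HOTPLUG_CHG
 * bits in EXYNOS_DP_COMMON_INT_STA_4. The helper below maps either
 * source onto the dp_irq_type values consumed by exynos_dp_irq_handler().
 */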
exynos_dp_get_irq_type(struct exynos_dp_device *dp) -{ - u32 reg; - - if (gpio_is_valid(dp->hpd_gpio)) { - reg = gpio_get_value(dp->hpd_gpio); - if (reg) - return DP_IRQ_TYPE_HP_CABLE_IN; - else - return DP_IRQ_TYPE_HP_CABLE_OUT; - } else { - /* Parse hotplug interrupt status register */ - reg = readl(dp->reg_base + EXYNOS_DP_COMMON_INT_STA_4); - - if (reg & PLUG) - return DP_IRQ_TYPE_HP_CABLE_IN; - - if (reg & HPD_LOST) - return DP_IRQ_TYPE_HP_CABLE_OUT; - - if (reg & HOTPLUG_CHG) - return DP_IRQ_TYPE_HP_CHANGE; - - return DP_IRQ_TYPE_UNKNOWN; - } -} - -void exynos_dp_reset_aux(struct exynos_dp_device *dp) -{ - u32 reg; - - /* Disable AUX channel module */ - reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); - reg |= AUX_FUNC_EN_N; - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); -} - -void exynos_dp_init_aux(struct exynos_dp_device *dp) -{ - u32 reg; - - /* Clear inerrupts related to AUX channel */ - reg = RPLY_RECEIV | AUX_ERR; - writel(reg, dp->reg_base + EXYNOS_DP_INT_STA); - - exynos_dp_reset_aux(dp); - - /* Disable AUX transaction H/W retry */ - reg = AUX_BIT_PERIOD_EXPECTED_DELAY(3) | AUX_HW_RETRY_COUNT_SEL(0)| - AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_HW_RETRY_CTL); - - /* Receive AUX Channel DEFER commands equal to DEFFER_COUNT*64 */ - reg = DEFER_CTRL_EN | DEFER_COUNT(1); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_DEFER_CTL); - - /* Enable AUX channel module */ - reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); - reg &= ~AUX_FUNC_EN_N; - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); -} - -int exynos_dp_get_plug_in_status(struct exynos_dp_device *dp) -{ - u32 reg; - - if (gpio_is_valid(dp->hpd_gpio)) { - if (gpio_get_value(dp->hpd_gpio)) - return 0; - } else { - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); - if (reg & HPD_STATUS) - return 0; - } - - return -EINVAL; -} - -void exynos_dp_enable_sw_function(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); - reg &= ~SW_FUNC_EN_N; - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); -} - -int exynos_dp_start_aux_transaction(struct exynos_dp_device *dp) -{ - int reg; - int retval = 0; - int timeout_loop = 0; - - /* Enable AUX CH operation */ - reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); - reg |= AUX_EN; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); - - /* Is AUX CH command reply received? 
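	 *
	 * What follows is a bounded poll: EXYNOS_DP_INT_STA is re-read
	 * until RPLY_RECEIV is set, sleeping ~10 us per pass and giving
	 * up after DP_TIMEOUT_LOOP_COUNT (100) passes, i.e. on the order
	 * of a millisecond. Reduced to a sketch, the pattern is:
	 *
	 *	while (!(reg & RPLY_RECEIV)) {
	 *		if (++timeout_loop > DP_TIMEOUT_LOOP_COUNT)
	 *			return -ETIMEDOUT;
	 *		usleep_range(10, 11);
	 *		reg = readl(dp->reg_base + EXYNOS_DP_INT_STA);
	 *	}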
*/ - reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); - while (!(reg & RPLY_RECEIV)) { - timeout_loop++; - if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) { - dev_err(dp->dev, "AUX CH command reply failed!\n"); - return -ETIMEDOUT; - } - reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); - usleep_range(10, 11); - } - - /* Clear interrupt source for AUX CH command reply */ - writel(RPLY_RECEIV, dp->reg_base + EXYNOS_DP_INT_STA); - - /* Clear interrupt source for AUX CH access error */ - reg = readl(dp->reg_base + EXYNOS_DP_INT_STA); - if (reg & AUX_ERR) { - writel(AUX_ERR, dp->reg_base + EXYNOS_DP_INT_STA); - return -EREMOTEIO; - } - - /* Check AUX CH error access status */ - reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_STA); - if ((reg & AUX_STATUS_MASK) != 0) { - dev_err(dp->dev, "AUX CH error happens: %d\n\n", - reg & AUX_STATUS_MASK); - return -EREMOTEIO; - } - - return retval; -} - -int exynos_dp_write_byte_to_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned char data) -{ - u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); - - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); - - /* Write data buffer */ - reg = (unsigned int)data; - writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0); - - /* - * Set DisplayPort transaction and write 1 byte - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. - */ - reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval == 0) - break; - else - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", - __func__); - } - - return retval; -} - -int exynos_dp_read_byte_from_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned char *data) -{ - u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); - - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); - - /* - * Set DisplayPort transaction and read 1 byte - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. 
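	 *
	 * Native AUX ("DisplayPort transaction") reaches the DPCD
	 * register file; the I2C form is reserved for EDID traffic. The
	 * enclosing for-loop retries the whole transaction up to three
	 * times. A typical caller, as in the link-training code above:
	 *
	 *	u8 link_align;
	 *	exynos_dp_read_byte_from_dpcd(dp, DP_LANE_ALIGN_STATUS_UPDATED,
	 *				      &link_align);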
- */ - reg = AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval == 0) - break; - else - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", - __func__); - } - - /* Read data buffer */ - reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); - *data = (unsigned char)(reg & 0xff); - - return retval; -} - -int exynos_dp_write_bytes_to_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned int count, - unsigned char data[]) -{ - u32 reg; - unsigned int start_offset; - unsigned int cur_data_count; - unsigned int cur_data_idx; - int i; - int retval = 0; - - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); - - start_offset = 0; - while (start_offset < count) { - /* Buffer size of AUX CH is 16 * 4bytes */ - if ((count - start_offset) > 16) - cur_data_count = 16; - else - cur_data_count = count - start_offset; - - for (i = 0; i < 3; i++) { - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr + start_offset); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr + start_offset); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr + start_offset); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); - - for (cur_data_idx = 0; cur_data_idx < cur_data_count; - cur_data_idx++) { - reg = data[start_offset + cur_data_idx]; - writel(reg, dp->reg_base + EXYNOS_DP_BUF_DATA_0 - + 4 * cur_data_idx); - } - - /* - * Set DisplayPort transaction and write - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. - */ - reg = AUX_LENGTH(cur_data_count) | - AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval == 0) - break; - else - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", - __func__); - } - - start_offset += cur_data_count; - } - - return retval; -} - -int exynos_dp_read_bytes_from_dpcd(struct exynos_dp_device *dp, - unsigned int reg_addr, - unsigned int count, - unsigned char data[]) -{ - u32 reg; - unsigned int start_offset; - unsigned int cur_data_count; - unsigned int cur_data_idx; - int i; - int retval = 0; - - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); - - start_offset = 0; - while (start_offset < count) { - /* Buffer size of AUX CH is 16 * 4bytes */ - if ((count - start_offset) > 16) - cur_data_count = 16; - else - cur_data_count = count - start_offset; - - /* AUX CH Request Transaction process */ - for (i = 0; i < 3; i++) { - /* Select DPCD device address */ - reg = AUX_ADDR_7_0(reg_addr + start_offset); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); - reg = AUX_ADDR_15_8(reg_addr + start_offset); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); - reg = AUX_ADDR_19_16(reg_addr + start_offset); - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); - - /* - * Set DisplayPort transaction and read - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. 
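	 *
	 * The AUX data buffer (EXYNOS_DP_BUF_DATA_0 onwards) carries at
	 * most 16 bytes per transaction, which is why the enclosing
	 * while-loop walks the DPCD range in 16-byte chunks, advancing
	 * start_offset each round. For example, fetching lane status
	 * during link training:
	 *
	 *	u8 link_status[2];
	 *	exynos_dp_read_bytes_from_dpcd(dp, DP_LANE0_1_STATUS, 2,
	 *				       link_status);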
- */ - reg = AUX_LENGTH(cur_data_count) | - AUX_TX_COMM_DP_TRANSACTION | AUX_TX_COMM_READ; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval == 0) - break; - else - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", - __func__); - } - - for (cur_data_idx = 0; cur_data_idx < cur_data_count; - cur_data_idx++) { - reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 - + 4 * cur_data_idx); - data[start_offset + cur_data_idx] = - (unsigned char)reg; - } - - start_offset += cur_data_count; - } - - return retval; -} - -int exynos_dp_select_i2c_device(struct exynos_dp_device *dp, - unsigned int device_addr, - unsigned int reg_addr) -{ - u32 reg; - int retval; - - /* Set EDID device address */ - reg = device_addr; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_ADDR_7_0); - writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_15_8); - writel(0x0, dp->reg_base + EXYNOS_DP_AUX_ADDR_19_16); - - /* Set offset from base address of EDID device */ - writel(reg_addr, dp->reg_base + EXYNOS_DP_BUF_DATA_0); - - /* - * Set I2C transaction and write address - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. - */ - reg = AUX_TX_COMM_I2C_TRANSACTION | AUX_TX_COMM_MOT | - AUX_TX_COMM_WRITE; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval != 0) - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", __func__); - - return retval; -} - -int exynos_dp_read_byte_from_i2c(struct exynos_dp_device *dp, - unsigned int device_addr, - unsigned int reg_addr, - unsigned int *data) -{ - u32 reg; - int i; - int retval; - - for (i = 0; i < 3; i++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); - - /* Select EDID device */ - retval = exynos_dp_select_i2c_device(dp, device_addr, reg_addr); - if (retval != 0) - continue; - - /* - * Set I2C transaction and read data - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. - */ - reg = AUX_TX_COMM_I2C_TRANSACTION | - AUX_TX_COMM_READ; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval == 0) - break; - else - dev_dbg(dp->dev, "%s: Aux Transaction fail!\n", - __func__); - } - - /* Read data */ - if (retval == 0) - *data = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0); - - return retval; -} - -int exynos_dp_read_bytes_from_i2c(struct exynos_dp_device *dp, - unsigned int device_addr, - unsigned int reg_addr, - unsigned int count, - unsigned char edid[]) -{ - u32 reg; - unsigned int i, j; - unsigned int cur_data_idx; - unsigned int defer = 0; - int retval = 0; - - for (i = 0; i < count; i += 16) { - for (j = 0; j < 3; j++) { - /* Clear AUX CH data buffer */ - reg = BUF_CLR; - writel(reg, dp->reg_base + EXYNOS_DP_BUFFER_DATA_CTL); - - /* Set normal AUX CH command */ - reg = readl(dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); - reg &= ~ADDR_ONLY; - writel(reg, dp->reg_base + EXYNOS_DP_AUX_CH_CTL_2); - - /* - * If Rx sends defer, Tx sends only reads - * request without sending address - */ - if (!defer) - retval = exynos_dp_select_i2c_device(dp, - device_addr, reg_addr + i); - else - defer = 0; - - if (retval == 0) { - /* - * Set I2C transaction and write data - * If bit 3 is 1, DisplayPort transaction. - * If Bit 3 is 0, I2C transaction. 
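	 *
	 * This is the EDID path: each 16-byte chunk normally begins with
	 * an address write via exynos_dp_select_i2c_device(); when the
	 * sink answered with an AUX or I2C DEFER, the retry re-issues
	 * only the read, without re-sending the address. A base EDID
	 * block would be fetched along the lines of:
	 *
	 *	u8 edid[EDID_BLOCK_LENGTH];
	 *	exynos_dp_read_bytes_from_i2c(dp, I2C_EDID_DEVICE_ADDR, 0,
	 *				      EDID_BLOCK_LENGTH, edid);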
- */ - reg = AUX_LENGTH(16) | - AUX_TX_COMM_I2C_TRANSACTION | - AUX_TX_COMM_READ; - writel(reg, dp->reg_base + - EXYNOS_DP_AUX_CH_CTL_1); - - /* Start AUX transaction */ - retval = exynos_dp_start_aux_transaction(dp); - if (retval == 0) - break; - else - dev_dbg(dp->dev, - "%s: Aux Transaction fail!\n", - __func__); - } - /* Check if Rx sends defer */ - reg = readl(dp->reg_base + EXYNOS_DP_AUX_RX_COMM); - if (reg == AUX_RX_COMM_AUX_DEFER || - reg == AUX_RX_COMM_I2C_DEFER) { - dev_err(dp->dev, "Defer: %d\n\n", reg); - defer = 1; - } - } - - for (cur_data_idx = 0; cur_data_idx < 16; cur_data_idx++) { - reg = readl(dp->reg_base + EXYNOS_DP_BUF_DATA_0 - + 4 * cur_data_idx); - edid[i + cur_data_idx] = (unsigned char)reg; - } - } - - return retval; -} - -void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype) -{ - u32 reg; - - reg = bwtype; - if ((bwtype == LINK_RATE_2_70GBPS) || (bwtype == LINK_RATE_1_62GBPS)) - writel(reg, dp->reg_base + EXYNOS_DP_LINK_BW_SET); -} - -void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LINK_BW_SET); - *bwtype = reg; -} - -void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count) -{ - u32 reg; - - reg = count; - writel(reg, dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); -} - -void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LANE_COUNT_SET); - *count = reg; -} - -void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable) -{ - u32 reg; - - if (enable) { - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); - reg |= ENHANCED; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); - reg &= ~ENHANCED; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); - } -} - -void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, - enum pattern_set pattern) -{ - u32 reg; - - switch (pattern) { - case PRBS7: - reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_PRBS7; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - break; - case D10_2: - reg = SCRAMBLING_ENABLE | LINK_QUAL_PATTERN_SET_D10_2; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - break; - case TRAINING_PTN1: - reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN1; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - break; - case TRAINING_PTN2: - reg = SCRAMBLING_DISABLE | SW_TRAINING_PATTERN_SET_PTN2; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - break; - case DP_NONE: - reg = SCRAMBLING_ENABLE | - LINK_QUAL_PATTERN_SET_DISABLE | - SW_TRAINING_PATTERN_SET_NORMAL; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - break; - default: - break; - } -} - -void exynos_dp_set_lane0_pre_emphasis(struct exynos_dp_device *dp, u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane1_pre_emphasis(struct exynos_dp_device *dp, u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane2_pre_emphasis(struct exynos_dp_device *dp, u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); - reg &= 
~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane3_pre_emphasis(struct exynos_dp_device *dp, u32 level) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); - reg &= ~PRE_EMPHASIS_SET_MASK; - reg |= level << PRE_EMPHASIS_SET_SHIFT; - writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane0_link_training(struct exynos_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane1_link_training(struct exynos_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane2_link_training(struct exynos_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); -} - -void exynos_dp_set_lane3_link_training(struct exynos_dp_device *dp, - u32 training_lane) -{ - u32 reg; - - reg = training_lane; - writel(reg, dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); -} - -u32 exynos_dp_get_lane0_link_training(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN0_LINK_TRAINING_CTL); - return reg; -} - -u32 exynos_dp_get_lane1_link_training(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN1_LINK_TRAINING_CTL); - return reg; -} - -u32 exynos_dp_get_lane2_link_training(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN2_LINK_TRAINING_CTL); - return reg; -} - -u32 exynos_dp_get_lane3_link_training(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_LN3_LINK_TRAINING_CTL); - return reg; -} - -void exynos_dp_reset_macro(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_PHY_TEST); - reg |= MACRO_RST; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); - - /* 10 us is the minimum reset time. 
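	 *
	 * The usleep_range(10, 20) below keeps MACRO_RST asserted for at
	 * least that minimum while giving the timer framework up to 10 us
	 * of slack. Per the note in exynos_dp_init_training(), the macro
	 * reset is only applied after PLL lock to avoid DP inter-pair skew.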
*/ - usleep_range(10, 20); - - reg &= ~MACRO_RST; - writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); -} - -void exynos_dp_init_video(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = VSYNC_DET | VID_FORMAT_CHG | VID_CLK_CHG; - writel(reg, dp->reg_base + EXYNOS_DP_COMMON_INT_STA_1); - - reg = 0x0; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); - - reg = CHA_CRI(4) | CHA_CTRL; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); - - reg = 0x0; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); - - reg = VID_HRES_TH(2) | VID_VRES_TH(0); - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_8); -} - -void exynos_dp_set_video_color_format(struct exynos_dp_device *dp) -{ - u32 reg; - - /* Configure the input color depth, color space, dynamic range */ - reg = (dp->video_info->dynamic_range << IN_D_RANGE_SHIFT) | - (dp->video_info->color_depth << IN_BPC_SHIFT) | - (dp->video_info->color_space << IN_COLOR_F_SHIFT); - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_2); - - /* Set Input Color YCbCr Coefficients to ITU601 or ITU709 */ - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); - reg &= ~IN_YC_COEFFI_MASK; - if (dp->video_info->ycbcr_coeff) - reg |= IN_YC_COEFFI_ITU709; - else - reg |= IN_YC_COEFFI_ITU601; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_3); -} - -int exynos_dp_is_slave_video_stream_clock_on(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_1); - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_1); - - if (!(reg & DET_STA)) { - dev_dbg(dp->dev, "Input stream clock not detected.\n"); - return -EINVAL; - } - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_2); - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_2); - dev_dbg(dp->dev, "wait SYS_CTL_2.\n"); - - if (reg & CHA_STA) { - dev_dbg(dp->dev, "Input stream clk is changing\n"); - return -EINVAL; - } - - return 0; -} - -void exynos_dp_set_video_cr_mn(struct exynos_dp_device *dp, - enum clock_recovery_m_value_type type, - u32 m_value, - u32 n_value) -{ - u32 reg; - - if (type == REGISTER_M) { - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); - reg |= FIX_M_VID; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); - reg = m_value & 0xff; - writel(reg, dp->reg_base + EXYNOS_DP_M_VID_0); - reg = (m_value >> 8) & 0xff; - writel(reg, dp->reg_base + EXYNOS_DP_M_VID_1); - reg = (m_value >> 16) & 0xff; - writel(reg, dp->reg_base + EXYNOS_DP_M_VID_2); - - reg = n_value & 0xff; - writel(reg, dp->reg_base + EXYNOS_DP_N_VID_0); - reg = (n_value >> 8) & 0xff; - writel(reg, dp->reg_base + EXYNOS_DP_N_VID_1); - reg = (n_value >> 16) & 0xff; - writel(reg, dp->reg_base + EXYNOS_DP_N_VID_2); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_4); - reg &= ~FIX_M_VID; - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_4); - - writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_0); - writel(0x80, dp->reg_base + EXYNOS_DP_N_VID_1); - writel(0x00, dp->reg_base + EXYNOS_DP_N_VID_2); - } -} - -void exynos_dp_set_video_timing_mode(struct exynos_dp_device *dp, u32 type) -{ - u32 reg; - - if (type == VIDEO_TIMING_FROM_CAPTURE) { - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - reg &= ~FORMAT_SEL; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - reg |= FORMAT_SEL; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - } -} - -void exynos_dp_enable_video_master(struct exynos_dp_device *dp, bool enable) -{ - u32 reg; - - if (enable) { - reg = 
readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); - reg &= ~VIDEO_MODE_MASK; - reg |= VIDEO_MASTER_MODE_EN | VIDEO_MODE_MASTER_MODE; - writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); - } else { - reg = readl(dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); - reg &= ~VIDEO_MODE_MASK; - reg |= VIDEO_MODE_SLAVE_MODE; - writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); - } -} - -void exynos_dp_start_video(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); - reg |= VIDEO_EN; - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_1); -} - -int exynos_dp_is_video_stream_on(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); - writel(reg, dp->reg_base + EXYNOS_DP_SYS_CTL_3); - - reg = readl(dp->reg_base + EXYNOS_DP_SYS_CTL_3); - if (!(reg & STRM_VALID)) { - dev_dbg(dp->dev, "Input video stream is not detected.\n"); - return -EINVAL; - } - - return 0; -} - -void exynos_dp_config_video_slave_mode(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_1); - reg &= ~(MASTER_VID_FUNC_EN_N|SLAVE_VID_FUNC_EN_N); - reg |= MASTER_VID_FUNC_EN_N; - writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_1); - - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - reg &= ~INTERACE_SCAN_CFG; - reg |= (dp->video_info->interlaced << 2); - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - reg &= ~VSYNC_POLARITY_CFG; - reg |= (dp->video_info->v_sync_polarity << 1); - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - - reg = readl(dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - reg &= ~HSYNC_POLARITY_CFG; - reg |= (dp->video_info->h_sync_polarity << 0); - writel(reg, dp->reg_base + EXYNOS_DP_VIDEO_CTL_10); - - reg = AUDIO_MODE_SPDIF_MODE | VIDEO_MODE_SLAVE_MODE; - writel(reg, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); -} - -void exynos_dp_enable_scrambling(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - reg &= ~SCRAMBLING_DISABLE; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); -} - -void exynos_dp_disable_scrambling(struct exynos_dp_device *dp) -{ - u32 reg; - - reg = readl(dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); - reg |= SCRAMBLING_DISABLE; - writel(reg, dp->reg_base + EXYNOS_DP_TRAINING_PTN_SET); -} diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index 7bb1f1aff932..c52f9adf5e04 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -220,7 +220,7 @@ i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter) * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon. */ -static int __deprecated +static int i2c_dp_aux_add_bus(struct i2c_adapter *adapter) { int error; diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 20a5d0455e19..29a32b11953b 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -56,3 +56,9 @@ config DRM_I915_USERPTR selected to enabled full userptr support. If in doubt, say "Y". 
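The hunk continues below by adding a "drm/i915 Debugging" menu, gated on EXPERT, which sources a new Kconfig.debug carrying a DRM_I915_DEBUG option. As a rough illustration of how such a switch is typically consumed (the macro name here is invented for the example and is not part of this patch), a debug Kconfig option usually compiles extra sanity checks in or out:

#include <linux/bug.h>

/* Illustration only: gate extra, possibly slow, checks on the option. */
#ifdef CONFIG_DRM_I915_DEBUG
#define example_debug_assert(expr)	WARN_ON(!(expr))
#else
#define example_debug_assert(expr)	do { } while (0)
#endif

Built without the option the checks cost nothing; built with it, violations are reported with a backtrace, which matches the help text's "may affect performance but will catch some internal issues".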
+ +menu "drm/i915 Debugging" +depends on DRM_I915 +depends on EXPERT +source drivers/gpu/drm/i915/Kconfig.debug +endmenu diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug new file mode 100644 index 000000000000..649a562ddf17 --- /dev/null +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -0,0 +1,12 @@ +config DRM_I915_DEBUG + bool "Enable additional driver debugging" + depends on DRM_I915 + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will catch some internal issues. + + Recommended for driver developers only. + + If in doubt, say "N". + diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 0851de07bd13..7ffb51b0cbc2 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -55,7 +55,9 @@ i915-y += intel_audio.o \ intel_atomic.o \ intel_atomic_plane.o \ intel_bios.o \ + intel_color.o \ intel_display.o \ + intel_dpll_mgr.o \ intel_fbc.o \ intel_fifo_underrun.o \ intel_frontbuffer.o \ diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 814d894ed925..a337f33bec5b 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -444,6 +444,7 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(CL_PRIMITIVES_COUNT), REG64(PS_INVOCATION_COUNT), REG64(PS_DEPTH_COUNT), + REG64_IDX(RING_TIMESTAMP, RENDER_RING_BASE), REG32(OACONTROL), /* Only allowed for LRI and SRM. See below. */ REG64(MI_PREDICATE_SRC0), REG64(MI_PREDICATE_SRC1), @@ -471,6 +472,25 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG32(GEN7_L3SQCREG1), REG32(GEN7_L3CNTLREG2), REG32(GEN7_L3CNTLREG3), +}; + +static const struct drm_i915_reg_descriptor hsw_render_regs[] = { + REG64_IDX(HSW_CS_GPR, 0), + REG64_IDX(HSW_CS_GPR, 1), + REG64_IDX(HSW_CS_GPR, 2), + REG64_IDX(HSW_CS_GPR, 3), + REG64_IDX(HSW_CS_GPR, 4), + REG64_IDX(HSW_CS_GPR, 5), + REG64_IDX(HSW_CS_GPR, 6), + REG64_IDX(HSW_CS_GPR, 7), + REG64_IDX(HSW_CS_GPR, 8), + REG64_IDX(HSW_CS_GPR, 9), + REG64_IDX(HSW_CS_GPR, 10), + REG64_IDX(HSW_CS_GPR, 11), + REG64_IDX(HSW_CS_GPR, 12), + REG64_IDX(HSW_CS_GPR, 13), + REG64_IDX(HSW_CS_GPR, 14), + REG64_IDX(HSW_CS_GPR, 15), REG32(HSW_SCRATCH1, .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE, .value = 0), @@ -500,6 +520,33 @@ static const struct drm_i915_reg_descriptor hsw_master_regs[] = { #undef REG64 #undef REG32 +struct drm_i915_reg_table { + const struct drm_i915_reg_descriptor *regs; + int num_regs; + bool master; +}; + +static const struct drm_i915_reg_table ivb_render_reg_tables[] = { + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, + { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, +}; + +static const struct drm_i915_reg_table ivb_blt_reg_tables[] = { + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, + { ivb_master_regs, ARRAY_SIZE(ivb_master_regs), true }, +}; + +static const struct drm_i915_reg_table hsw_render_reg_tables[] = { + { gen7_render_regs, ARRAY_SIZE(gen7_render_regs), false }, + { hsw_render_regs, ARRAY_SIZE(hsw_render_regs), false }, + { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, +}; + +static const struct drm_i915_reg_table hsw_blt_reg_tables[] = { + { gen7_blt_regs, ARRAY_SIZE(gen7_blt_regs), false }, + { hsw_master_regs, ARRAY_SIZE(hsw_master_regs), true }, +}; + static u32 gen7_render_get_cmd_length_mask(u32 cmd_header) { u32 client = (cmd_header & INSTR_CLIENT_MASK) >> INSTR_CLIENT_SHIFT; @@ -555,7 +602,7 @@ static u32 
gen7_blt_get_cmd_length_mask(u32 cmd_header) return 0; } -static bool validate_cmds_sorted(struct intel_engine_cs *ring, +static bool validate_cmds_sorted(struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) { @@ -577,7 +624,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *ring, if (curr < previous) { DRM_ERROR("CMD: table not sorted ring=%d table=%d entry=%d cmd=0x%08X prev=0x%08X\n", - ring->id, i, j, curr, previous); + engine->id, i, j, curr, previous); ret = false; } @@ -611,11 +658,18 @@ static bool check_sorted(int ring_id, return ret; } -static bool validate_regs_sorted(struct intel_engine_cs *ring) +static bool validate_regs_sorted(struct intel_engine_cs *engine) { - return check_sorted(ring->id, ring->reg_table, ring->reg_count) && - check_sorted(ring->id, ring->master_reg_table, - ring->master_reg_count); + int i; + const struct drm_i915_reg_table *table; + + for (i = 0; i < engine->reg_table_count; i++) { + table = &engine->reg_tables[i]; + if (!check_sorted(engine->id, table->regs, table->num_regs)) + return false; + } + + return true; } struct cmd_node { @@ -639,13 +693,13 @@ struct cmd_node { */ #define CMD_HASH_MASK STD_MI_OPCODE_MASK -static int init_hash_table(struct intel_engine_cs *ring, +static int init_hash_table(struct intel_engine_cs *engine, const struct drm_i915_cmd_table *cmd_tables, int cmd_table_count) { int i, j; - hash_init(ring->cmd_hash); + hash_init(engine->cmd_hash); for (i = 0; i < cmd_table_count; i++) { const struct drm_i915_cmd_table *table = &cmd_tables[i]; @@ -660,7 +714,7 @@ static int init_hash_table(struct intel_engine_cs *ring, return -ENOMEM; desc_node->desc = desc; - hash_add(ring->cmd_hash, &desc_node->node, + hash_add(engine->cmd_hash, &desc_node->node, desc->cmd.value & CMD_HASH_MASK); } } @@ -668,13 +722,13 @@ static int init_hash_table(struct intel_engine_cs *ring, return 0; } -static void fini_hash_table(struct intel_engine_cs *ring) +static void fini_hash_table(struct intel_engine_cs *engine) { struct hlist_node *tmp; struct cmd_node *desc_node; int i; - hash_for_each_safe(ring->cmd_hash, i, tmp, desc_node, node) { + hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) { hash_del(&desc_node->node); kfree(desc_node); } @@ -690,18 +744,18 @@ static void fini_hash_table(struct intel_engine_cs *ring) * * Return: non-zero if initialization fails */ -int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) +int i915_cmd_parser_init_ring(struct intel_engine_cs *engine) { const struct drm_i915_cmd_table *cmd_tables; int cmd_table_count; int ret; - if (!IS_GEN7(ring->dev)) + if (!IS_GEN7(engine->dev)) return 0; - switch (ring->id) { + switch (engine->id) { case RCS: - if (IS_HASWELL(ring->dev)) { + if (IS_HASWELL(engine->dev)) { cmd_tables = hsw_render_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_render_ring_cmds); @@ -710,26 +764,23 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) cmd_table_count = ARRAY_SIZE(gen7_render_cmds); } - ring->reg_table = gen7_render_regs; - ring->reg_count = ARRAY_SIZE(gen7_render_regs); - - if (IS_HASWELL(ring->dev)) { - ring->master_reg_table = hsw_master_regs; - ring->master_reg_count = ARRAY_SIZE(hsw_master_regs); + if (IS_HASWELL(engine->dev)) { + engine->reg_tables = hsw_render_reg_tables; + engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); } else { - ring->master_reg_table = ivb_master_regs; - ring->master_reg_count = ARRAY_SIZE(ivb_master_regs); + engine->reg_tables = ivb_render_reg_tables; + engine->reg_table_count 
= ARRAY_SIZE(ivb_render_reg_tables); } - ring->get_cmd_length_mask = gen7_render_get_cmd_length_mask; + engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask; break; case VCS: cmd_tables = gen7_video_cmds; cmd_table_count = ARRAY_SIZE(gen7_video_cmds); - ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; + engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; case BCS: - if (IS_HASWELL(ring->dev)) { + if (IS_HASWELL(engine->dev)) { cmd_tables = hsw_blt_ring_cmds; cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); } else { @@ -737,44 +788,41 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); } - ring->reg_table = gen7_blt_regs; - ring->reg_count = ARRAY_SIZE(gen7_blt_regs); - - if (IS_HASWELL(ring->dev)) { - ring->master_reg_table = hsw_master_regs; - ring->master_reg_count = ARRAY_SIZE(hsw_master_regs); + if (IS_HASWELL(engine->dev)) { + engine->reg_tables = hsw_blt_reg_tables; + engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); } else { - ring->master_reg_table = ivb_master_regs; - ring->master_reg_count = ARRAY_SIZE(ivb_master_regs); + engine->reg_tables = ivb_blt_reg_tables; + engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables); } - ring->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; + engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask; break; case VECS: cmd_tables = hsw_vebox_cmds; cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds); /* VECS can use the same length_mask function as VCS */ - ring->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; + engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; default: DRM_ERROR("CMD: cmd_parser_init with unknown ring: %d\n", - ring->id); + engine->id); BUG(); } - BUG_ON(!validate_cmds_sorted(ring, cmd_tables, cmd_table_count)); - BUG_ON(!validate_regs_sorted(ring)); + BUG_ON(!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)); + BUG_ON(!validate_regs_sorted(engine)); - WARN_ON(!hash_empty(ring->cmd_hash)); + WARN_ON(!hash_empty(engine->cmd_hash)); - ret = init_hash_table(ring, cmd_tables, cmd_table_count); + ret = init_hash_table(engine, cmd_tables, cmd_table_count); if (ret) { DRM_ERROR("CMD: cmd_parser_init failed!\n"); - fini_hash_table(ring); + fini_hash_table(engine); return ret; } - ring->needs_cmd_parser = true; + engine->needs_cmd_parser = true; return 0; } @@ -786,21 +834,21 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *ring) * Releases any resources related to command parsing that may have been * initialized for the specified ring. */ -void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring) +void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine) { - if (!ring->needs_cmd_parser) + if (!engine->needs_cmd_parser) return; - fini_hash_table(ring); + fini_hash_table(engine); } static const struct drm_i915_cmd_descriptor* -find_cmd_in_table(struct intel_engine_cs *ring, +find_cmd_in_table(struct intel_engine_cs *engine, u32 cmd_header) { struct cmd_node *desc_node; - hash_for_each_possible(ring->cmd_hash, desc_node, node, + hash_for_each_possible(engine->cmd_hash, desc_node, node, cmd_header & CMD_HASH_MASK) { const struct drm_i915_cmd_descriptor *desc = desc_node->desc; u32 masked_cmd = desc->cmd.mask & cmd_header; @@ -822,18 +870,18 @@ find_cmd_in_table(struct intel_engine_cs *ring, * ring's default length encoding and returns default_desc. 
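 *
 * (Editorial aside, not part of the patch: in sketch form the lookup
 * order is hash table first, per-engine length-mask fallback second;
 * the default_desc field name below is inferred from context and should
 * be treated as an assumption:
 *
 *	desc = find_cmd_in_table(engine, cmd_header);
 *	if (!desc) {
 *		u32 mask = engine->get_cmd_length_mask(cmd_header);
 *		if (!mask)
 *			return NULL;
 *		default_desc->length.mask = mask;
 *		desc = default_desc;
 *	}
 *
 * An opcode that fails both lookups is therefore reported as
 * unrecognized by i915_parse_cmds() further down.)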
*/ static const struct drm_i915_cmd_descriptor* -find_cmd(struct intel_engine_cs *ring, +find_cmd(struct intel_engine_cs *engine, u32 cmd_header, struct drm_i915_cmd_descriptor *default_desc) { const struct drm_i915_cmd_descriptor *desc; u32 mask; - desc = find_cmd_in_table(ring, cmd_header); + desc = find_cmd_in_table(engine, cmd_header); if (desc) return desc; - mask = ring->get_cmd_length_mask(cmd_header); + mask = engine->get_cmd_length_mask(cmd_header); if (!mask) return NULL; @@ -848,12 +896,31 @@ static const struct drm_i915_reg_descriptor * find_reg(const struct drm_i915_reg_descriptor *table, int count, u32 addr) { - if (table) { - int i; + int i; + + for (i = 0; i < count; i++) { + if (i915_mmio_reg_offset(table[i].addr) == addr) + return &table[i]; + } - for (i = 0; i < count; i++) { - if (i915_mmio_reg_offset(table[i].addr) == addr) - return &table[i]; + return NULL; +} + +static const struct drm_i915_reg_descriptor * +find_reg_in_tables(const struct drm_i915_reg_table *tables, + int count, bool is_master, u32 addr) +{ + int i; + const struct drm_i915_reg_table *table; + const struct drm_i915_reg_descriptor *reg; + + for (i = 0; i < count; i++) { + table = &tables[i]; + if (!table->master || is_master) { + reg = find_reg(table->regs, table->num_regs, + addr); + if (reg != NULL) + return reg; } } @@ -963,18 +1030,18 @@ unpin_src: * * Return: true if the ring requires software command parsing */ -bool i915_needs_cmd_parser(struct intel_engine_cs *ring) +bool i915_needs_cmd_parser(struct intel_engine_cs *engine) { - if (!ring->needs_cmd_parser) + if (!engine->needs_cmd_parser) return false; - if (!USES_PPGTT(ring->dev)) + if (!USES_PPGTT(engine->dev)) return false; return (i915.enable_cmd_parser == 1); } -static bool check_cmd(const struct intel_engine_cs *ring, +static bool check_cmd(const struct intel_engine_cs *engine, const struct drm_i915_cmd_descriptor *desc, const u32 *cmd, u32 length, const bool is_master, @@ -1004,17 +1071,14 @@ static bool check_cmd(const struct intel_engine_cs *ring, offset += step) { const u32 reg_addr = cmd[offset] & desc->reg.mask; const struct drm_i915_reg_descriptor *reg = - find_reg(ring->reg_table, ring->reg_count, - reg_addr); - - if (!reg && is_master) - reg = find_reg(ring->master_reg_table, - ring->master_reg_count, - reg_addr); + find_reg_in_tables(engine->reg_tables, + engine->reg_table_count, + is_master, + reg_addr); if (!reg) { DRM_DEBUG_DRIVER("CMD: Rejected register 0x%08X in command: 0x%08X (ring=%d)\n", - reg_addr, *cmd, ring->id); + reg_addr, *cmd, engine->id); return false; } @@ -1087,7 +1151,7 @@ static bool check_cmd(const struct intel_engine_cs *ring, *cmd, desc->bits[i].mask, desc->bits[i].expected, - dword, ring->id); + dword, engine->id); return false; } } @@ -1113,7 +1177,7 @@ static bool check_cmd(const struct intel_engine_cs *ring, * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing */ -int i915_parse_cmds(struct intel_engine_cs *ring, +int i915_parse_cmds(struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *shadow_batch_obj, u32 batch_start_offset, @@ -1147,7 +1211,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring, if (*cmd == MI_BATCH_BUFFER_END) break; - desc = find_cmd(ring, *cmd, &default_desc); + desc = find_cmd(engine, *cmd, &default_desc); if (!desc) { DRM_DEBUG_DRIVER("CMD: Unrecognized command: 0x%08X\n", *cmd); @@ -1179,7 +1243,7 @@ int i915_parse_cmds(struct intel_engine_cs 
*ring, break; } - if (!check_cmd(ring, desc, cmd, length, is_master, + if (!check_cmd(engine, desc, cmd, length, is_master, &oacontrol_set)) { ret = -EINVAL; break; @@ -1223,6 +1287,7 @@ int i915_cmd_parser_get_version(void) * 3. Allow access to the GPGPU_THREADS_DISPATCHED register. * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. * 5. GPGPU dispatch compute indirect registers. + * 6. TIMESTAMP register and Haswell CS GPR registers */ - return 5; + return 6; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index a0f1bd711b53..2d11b4948a74 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -129,10 +129,12 @@ static void describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) { struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct i915_vma *vma; int pin_count = 0; - int i; + enum intel_engine_id id; + + lockdep_assert_held(&obj->base.dev->struct_mutex); seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ", &obj->base, @@ -143,9 +145,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) obj->base.size / 1024, obj->base.read_domains, obj->base.write_domain); - for_each_ring(ring, dev_priv, i) + for_each_engine_id(engine, dev_priv, id) seq_printf(m, "%x ", - i915_gem_request_get_seqno(obj->last_read_req[i])); + i915_gem_request_get_seqno(obj->last_read_req[id])); seq_printf(m, "] %x %x%s%s%s", i915_gem_request_get_seqno(obj->last_write_req), i915_gem_request_get_seqno(obj->last_fenced_req), @@ -184,7 +186,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) } if (obj->last_write_req != NULL) seq_printf(m, " (%s)", - i915_gem_request_get_ring(obj->last_write_req)->name); + i915_gem_request_get_engine(obj->last_write_req)->name); if (obj->frontbuffer_bits) seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); } @@ -202,8 +204,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) uintptr_t list = (uintptr_t) node->info_ent->data; struct list_head *head; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; u64 total_obj_size, total_gtt_size; int count, ret; @@ -216,11 +218,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) switch (list) { case ACTIVE_LIST: seq_puts(m, "Active:\n"); - head = &vm->active_list; + head = &ggtt->base.active_list; break; case INACTIVE_LIST: seq_puts(m, "Inactive:\n"); - head = &vm->inactive_list; + head = &ggtt->base.inactive_list; break; default: mutex_unlock(&dev->struct_mutex); @@ -397,15 +399,15 @@ static void print_batch_pool_stats(struct seq_file *m, { struct drm_i915_gem_object *obj; struct file_stats stats; - struct intel_engine_cs *ring; - int i, j; + struct intel_engine_cs *engine; + int j; memset(&stats, 0, sizeof(stats)); - for_each_ring(ring, dev_priv, i) { - for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { + for_each_engine(engine, dev_priv) { + for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { list_for_each_entry(obj, - &ring->batch_pool.cache_list[j], + &engine->batch_pool.cache_list[j], batch_pool_link) per_file_stats(0, obj, &stats); } @@ -429,11 +431,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data) { struct 
drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; u32 count, mappable_count, purgeable_count; u64 size, mappable_size, purgeable_size; struct drm_i915_gem_object *obj; - struct i915_address_space *vm = &dev_priv->gtt.base; struct drm_file *file; struct i915_vma *vma; int ret; @@ -452,12 +454,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_vmas(&vm->active_list, vm_link); + count_vmas(&ggtt->base.active_list, vm_link); seq_printf(m, " %u [%u] active objects, %llu [%llu] bytes\n", count, mappable_count, size, mappable_size); size = count = mappable_size = mappable_count = 0; - count_vmas(&vm->inactive_list, vm_link); + count_vmas(&ggtt->base.inactive_list, vm_link); seq_printf(m, " %u [%u] inactive objects, %llu [%llu] bytes\n", count, mappable_count, size, mappable_size); @@ -492,8 +494,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data) count, size); seq_printf(m, "%llu [%llu] gtt total\n", - dev_priv->gtt.base.total, - (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start); + ggtt->base.total, ggtt->mappable_end - ggtt->base.start); seq_putc(m, '\n'); print_batch_pool_stats(m, dev_priv); @@ -591,14 +592,13 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data) pipe, plane); } if (work->flip_queued_req) { - struct intel_engine_cs *ring = - i915_gem_request_get_ring(work->flip_queued_req); + struct intel_engine_cs *engine = i915_gem_request_get_engine(work->flip_queued_req); seq_printf(m, "Flip queued on %s at seqno %x, next seqno %x [current breadcrumb %x], completed? 
%d\n", - ring->name, + engine->name, i915_gem_request_get_seqno(work->flip_queued_req), dev_priv->next_seqno, - ring->get_seqno(ring, true), + engine->get_seqno(engine), i915_gem_request_completed(work->flip_queued_req, true)); } else seq_printf(m, "Flip not associated with any ring\n"); @@ -637,28 +637,28 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; int total = 0; - int ret, i, j; + int ret, j; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; - for_each_ring(ring, dev_priv, i) { - for (j = 0; j < ARRAY_SIZE(ring->batch_pool.cache_list); j++) { + for_each_engine(engine, dev_priv) { + for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) { int count; count = 0; list_for_each_entry(obj, - &ring->batch_pool.cache_list[j], + &engine->batch_pool.cache_list[j], batch_pool_link) count++; seq_printf(m, "%s cache[%d]: %d objects\n", - ring->name, j, count); + engine->name, j, count); list_for_each_entry(obj, - &ring->batch_pool.cache_list[j], + &engine->batch_pool.cache_list[j], batch_pool_link) { seq_puts(m, " "); describe_obj(m, obj); @@ -681,26 +681,26 @@ static int i915_gem_request_info(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct drm_i915_gem_request *req; - int ret, any, i; + int ret, any; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; any = 0; - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { int count; count = 0; - list_for_each_entry(req, &ring->request_list, list) + list_for_each_entry(req, &engine->request_list, list) count++; if (count == 0) continue; - seq_printf(m, "%s requests: %d\n", ring->name, count); - list_for_each_entry(req, &ring->request_list, list) { + seq_printf(m, "%s requests: %d\n", engine->name, count); + list_for_each_entry(req, &engine->request_list, list) { struct task_struct *task; rcu_read_lock(); @@ -726,12 +726,12 @@ static int i915_gem_request_info(struct seq_file *m, void *data) } static void i915_ring_seqno_info(struct seq_file *m, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { - if (ring->get_seqno) { - seq_printf(m, "Current sequence (%s): %x\n", - ring->name, ring->get_seqno(ring, false)); - } + seq_printf(m, "Current sequence (%s): %x\n", + engine->name, engine->get_seqno(engine)); + seq_printf(m, "Current user interrupts (%s): %x\n", + engine->name, READ_ONCE(engine->user_interrupts)); } static int i915_gem_seqno_info(struct seq_file *m, void *data) @@ -739,16 +739,16 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int ret, i; + struct intel_engine_cs *engine; + int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; intel_runtime_pm_get(dev_priv); - for_each_ring(ring, dev_priv, i) - i915_ring_seqno_info(m, ring); + for_each_engine(engine, dev_priv) + i915_ring_seqno_info(m, engine); intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -762,7 +762,7 @@ static int i915_interrupt_info(struct seq_file *m, void 
*data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; int ret, i, pipe; ret = mutex_lock_interruptible(&dev->struct_mutex); @@ -934,13 +934,13 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "Graphics Interrupt mask: %08x\n", I915_READ(GTIMR)); } - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { if (INTEL_INFO(dev)->gen >= 6) { seq_printf(m, "Graphics Interrupt mask (%s): %08x\n", - ring->name, I915_READ_IMR(ring)); + engine->name, I915_READ_IMR(engine)); } - i915_ring_seqno_info(m, ring); + i915_ring_seqno_info(m, engine); } intel_runtime_pm_put(dev_priv); mutex_unlock(&dev->struct_mutex); @@ -981,12 +981,12 @@ static int i915_hws_info(struct seq_file *m, void *data) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; const u32 *hws; int i; - ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; - hws = ring->status_page.page_addr; + engine = &dev_priv->engine[(uintptr_t)node->info_ent->data]; + hws = engine->status_page.page_addr; if (hws == NULL) return 0; @@ -1331,11 +1331,12 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - u64 acthd[I915_NUM_RINGS]; - u32 seqno[I915_NUM_RINGS]; + struct intel_engine_cs *engine; + u64 acthd[I915_NUM_ENGINES]; + u32 seqno[I915_NUM_ENGINES]; u32 instdone[I915_NUM_INSTDONE_REG]; - int i, j; + enum intel_engine_id id; + int j; if (!i915.enable_hangcheck) { seq_printf(m, "Hangcheck disabled\n"); @@ -1344,9 +1345,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); - for_each_ring(ring, dev_priv, i) { - seqno[i] = ring->get_seqno(ring, false); - acthd[i] = intel_ring_get_active_head(ring); + for_each_engine_id(engine, dev_priv, id) { + acthd[id] = intel_ring_get_active_head(engine); + seqno[id] = engine->get_seqno(engine); } i915_get_extra_instdone(dev, instdone); @@ -1360,19 +1361,22 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) } else seq_printf(m, "Hangcheck inactive\n"); - for_each_ring(ring, dev_priv, i) { - seq_printf(m, "%s:\n", ring->name); - seq_printf(m, "\tseqno = %x [current %x]\n", - ring->hangcheck.seqno, seqno[i]); + for_each_engine_id(engine, dev_priv, id) { + seq_printf(m, "%s:\n", engine->name); + seq_printf(m, "\tseqno = %x [current %x, last %x]\n", + engine->hangcheck.seqno, + seqno[id], + engine->last_submitted_seqno); + seq_printf(m, "\tuser interrupts = %x [current %x]\n", + engine->hangcheck.user_interrupts, + READ_ONCE(engine->user_interrupts)); seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", - (long long)ring->hangcheck.acthd, - (long long)acthd[i]); - seq_printf(m, "\tmax ACTHD = 0x%08llx\n", - (long long)ring->hangcheck.max_acthd); - seq_printf(m, "\tscore = %d\n", ring->hangcheck.score); - seq_printf(m, "\taction = %d\n", ring->hangcheck.action); - - if (ring->id == RCS) { + (long long)engine->hangcheck.acthd, + (long long)acthd[id]); + seq_printf(m, "\tscore = %d\n", engine->hangcheck.score); + seq_printf(m, "\taction = %d\n", engine->hangcheck.action); + + if (engine->id == RCS) { seq_puts(m, 
"\tinstdone read ="); for (j = 0; j < I915_NUM_INSTDONE_REG; j++) @@ -1382,7 +1386,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused) for (j = 0; j < I915_NUM_INSTDONE_REG; j++) seq_printf(m, " 0x%08x", - ring->hangcheck.instdone[j]); + engine->hangcheck.instdone[j]); seq_puts(m, "\n"); } @@ -1465,12 +1469,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_uncore_forcewake_domain *fw_domain; - int i; spin_lock_irq(&dev_priv->uncore.lock); - for_each_fw_domain(fw_domain, dev_priv, i) { + for_each_fw_domain(fw_domain, dev_priv) { seq_printf(m, "%s.wake_count = %u\n", - intel_uncore_forcewake_domain_to_str(i), + intel_uncore_forcewake_domain_to_str(fw_domain->id), fw_domain->wake_count); } spin_unlock_irq(&dev_priv->uncore.lock); @@ -1897,6 +1900,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct drm_device *dev = node->minor->dev; struct intel_framebuffer *fbdev_fb = NULL; struct drm_framebuffer *drm_fb; + int ret; + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret) + return ret; #ifdef CONFIG_DRM_FBDEV_EMULATION if (to_i915(dev)->fbdev) { @@ -1931,6 +1939,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) seq_putc(m, '\n'); } mutex_unlock(&dev->mode_config.fb_lock); + mutex_unlock(&dev->struct_mutex); return 0; } @@ -1948,9 +1957,10 @@ static int i915_context_status(struct seq_file *m, void *unused) struct drm_info_node *node = m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct intel_context *ctx; - int ret, i; + enum intel_engine_id id; + int ret; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -1968,13 +1978,13 @@ static int i915_context_status(struct seq_file *m, void *unused) if (i915.enable_execlists) { seq_putc(m, '\n'); - for_each_ring(ring, dev_priv, i) { + for_each_engine_id(engine, dev_priv, id) { struct drm_i915_gem_object *ctx_obj = - ctx->engine[i].state; + ctx->engine[id].state; struct intel_ringbuffer *ringbuf = - ctx->engine[i].ringbuf; + ctx->engine[id].ringbuf; - seq_printf(m, "%s: ", ring->name); + seq_printf(m, "%s: ", engine->name); if (ctx_obj) describe_obj(m, ctx_obj); if (ringbuf) @@ -1995,22 +2005,22 @@ static int i915_context_status(struct seq_file *m, void *unused) static void i915_dump_lrc_obj(struct seq_file *m, struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { struct page *page; uint32_t *reg_state; int j; - struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; unsigned long ggtt_offset = 0; if (ctx_obj == NULL) { seq_printf(m, "Context on %s with no gem object\n", - ring->name); + engine->name); return; } - seq_printf(m, "CONTEXT: %s %u\n", ring->name, - intel_execlists_ctx_id(ctx, ring)); + seq_printf(m, "CONTEXT: %s %u\n", engine->name, + intel_execlists_ctx_id(ctx, engine)); if (!i915_gem_obj_ggtt_bound(ctx_obj)) seq_puts(m, "\tNot bound in GGTT\n"); @@ -2043,9 +2053,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct 
intel_context *ctx; - int ret, i; + int ret; if (!i915.enable_execlists) { seq_printf(m, "Logical Ring Contexts are disabled\n"); @@ -2058,8 +2068,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused) list_for_each_entry(ctx, &dev_priv->context_list, link) if (ctx != dev_priv->kernel_context) - for_each_ring(ring, dev_priv, i) - i915_dump_lrc_obj(m, ctx, ring); + for_each_engine(engine, dev_priv) + i915_dump_lrc_obj(m, ctx, engine); mutex_unlock(&dev->struct_mutex); @@ -2071,15 +2081,14 @@ static int i915_execlists(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; u32 status_pointer; u8 read_pointer; u8 write_pointer; u32 status; u32 ctx_id; struct list_head *cursor; - int ring_id, i; - int ret; + int i, ret; if (!i915.enable_execlists) { seq_puts(m, "Logical Ring Contexts are disabled\n"); @@ -2092,22 +2101,21 @@ static int i915_execlists(struct seq_file *m, void *data) intel_runtime_pm_get(dev_priv); - for_each_ring(ring, dev_priv, ring_id) { + for_each_engine(engine, dev_priv) { struct drm_i915_gem_request *head_req = NULL; int count = 0; - unsigned long flags; - seq_printf(m, "%s\n", ring->name); + seq_printf(m, "%s\n", engine->name); - status = I915_READ(RING_EXECLIST_STATUS_LO(ring)); - ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(ring)); + status = I915_READ(RING_EXECLIST_STATUS_LO(engine)); + ctx_id = I915_READ(RING_EXECLIST_STATUS_HI(engine)); seq_printf(m, "\tExeclist status: 0x%08X, context: %u\n", status, ctx_id); - status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); + status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(engine)); seq_printf(m, "\tStatus pointer: 0x%08X\n", status_pointer); - read_pointer = ring->next_context_status_buffer; + read_pointer = engine->next_context_status_buffer; write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); if (read_pointer > write_pointer) write_pointer += GEN8_CSB_ENTRIES; @@ -2115,24 +2123,25 @@ static int i915_execlists(struct seq_file *m, void *data) read_pointer, write_pointer); for (i = 0; i < GEN8_CSB_ENTRIES; i++) { - status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, i)); - ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, i)); + status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, i)); + ctx_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, i)); seq_printf(m, "\tStatus buffer %d: 0x%08X, context: %u\n", i, status, ctx_id); } - spin_lock_irqsave(&ring->execlist_lock, flags); - list_for_each(cursor, &ring->execlist_queue) + spin_lock_bh(&engine->execlist_lock); + list_for_each(cursor, &engine->execlist_queue) count++; - head_req = list_first_entry_or_null(&ring->execlist_queue, - struct drm_i915_gem_request, execlist_link); - spin_unlock_irqrestore(&ring->execlist_lock, flags); + head_req = list_first_entry_or_null(&engine->execlist_queue, + struct drm_i915_gem_request, + execlist_link); + spin_unlock_bh(&engine->execlist_lock); seq_printf(m, "\t%d requests in queue\n", count); if (head_req) { seq_printf(m, "\tHead request id: %u\n", - intel_execlists_ctx_id(head_req->ctx, ring)); + intel_execlists_ctx_id(head_req->ctx, engine)); seq_printf(m, "\tHead request tail: %u\n", head_req->tail); } @@ -2248,19 +2257,19 @@ static int per_file_ctx(int id, void *ptr, void *data) static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct 
intel_engine_cs *ring; + struct intel_engine_cs *engine; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; - int unused, i; + int i; if (!ppgtt) return; - for_each_ring(ring, dev_priv, unused) { - seq_printf(m, "%s\n", ring->name); + for_each_engine(engine, dev_priv) { + seq_printf(m, "%s\n", engine->name); for (i = 0; i < 4; i++) { - u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i)); + u64 pdp = I915_READ(GEN8_RING_PDP_UDW(engine, i)); pdp <<= 32; - pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i)); + pdp |= I915_READ(GEN8_RING_PDP_LDW(engine, i)); seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); } } @@ -2269,19 +2278,22 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; if (INTEL_INFO(dev)->gen == 6) seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); - for_each_ring(ring, dev_priv, i) { - seq_printf(m, "%s\n", ring->name); + for_each_engine(engine, dev_priv) { + seq_printf(m, "%s\n", engine->name); if (INTEL_INFO(dev)->gen == 7) - seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(RING_MODE_GEN7(ring))); - seq_printf(m, "PP_DIR_BASE: 0x%08x\n", I915_READ(RING_PP_DIR_BASE(ring))); - seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", I915_READ(RING_PP_DIR_BASE_READ(ring))); - seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", I915_READ(RING_PP_DIR_DCLV(ring))); + seq_printf(m, "GFX_MODE: 0x%08x\n", + I915_READ(RING_MODE_GEN7(engine))); + seq_printf(m, "PP_DIR_BASE: 0x%08x\n", + I915_READ(RING_PP_DIR_BASE(engine))); + seq_printf(m, "PP_DIR_BASE_READ: 0x%08x\n", + I915_READ(RING_PP_DIR_BASE_READ(engine))); + seq_printf(m, "PP_DIR_DCLV: 0x%08x\n", + I915_READ(RING_PP_DIR_DCLV(engine))); } if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; @@ -2336,12 +2348,11 @@ out_put: static int count_irq_waiters(struct drm_i915_private *i915) { - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; int count = 0; - int i; - for_each_ring(ring, i915, i) - count += ring->irq_refcount; + for_each_engine(engine, i915) + count += engine->irq_refcount; return count; } @@ -2408,7 +2419,7 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data) struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; u32 tmp, i; - if (!HAS_GUC_UCODE(dev_priv->dev)) + if (!HAS_GUC_UCODE(dev_priv)) return 0; seq_printf(m, "GuC firmware status:\n"); @@ -2449,9 +2460,8 @@ static void i915_guc_client_info(struct seq_file *m, struct drm_i915_private *dev_priv, struct i915_guc_client *client) { - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; uint64_t tot = 0; - uint32_t i; seq_printf(m, "\tPriority %d, GuC ctx index: %u, PD offset 0x%x\n", client->priority, client->ctx_index, client->proc_desc_offset); @@ -2464,11 +2474,11 @@ static void i915_guc_client_info(struct seq_file *m, seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); seq_printf(m, "\tLast submission result: %d\n", client->retcode); - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { seq_printf(m, "\tSubmissions: %llu %s\n", - client->submissions[ring->guc_id], - ring->name); - tot += client->submissions[ring->guc_id]; + client->submissions[engine->guc_id], + engine->name); + tot += client->submissions[engine->guc_id]; } seq_printf(m, "\tTotal: %llu\n", tot); } @@ -2480,11 +2490,10 @@ static int i915_guc_info(struct seq_file *m, void *data) struct 
drm_i915_private *dev_priv = dev->dev_private; struct intel_guc guc; struct i915_guc_client client = {}; - struct intel_engine_cs *ring; - enum intel_ring_id i; + struct intel_engine_cs *engine; u64 total = 0; - if (!HAS_GUC_SCHED(dev_priv->dev)) + if (!HAS_GUC_SCHED(dev_priv)) return 0; if (mutex_lock_interruptible(&dev->struct_mutex)) @@ -2504,11 +2513,11 @@ static int i915_guc_info(struct seq_file *m, void *data) seq_printf(m, "GuC last action error code: %d\n", guc.action_err); seq_printf(m, "\nGuC submissions:\n"); - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", - ring->name, guc.submissions[ring->guc_id], - guc.last_seqno[ring->guc_id]); - total += guc.submissions[ring->guc_id]; + engine->name, guc.submissions[engine->guc_id], + guc.last_seqno[engine->guc_id]); + total += guc.submissions[engine->guc_id]; } seq_printf(m, "\t%s: %llu\n", "Total", total); @@ -2688,10 +2697,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (!HAS_RUNTIME_PM(dev)) { - seq_puts(m, "not supported\n"); - return 0; - } + if (!HAS_RUNTIME_PM(dev_priv)) + seq_puts(m, "Runtime power management not supported\n"); seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); seq_printf(m, "IRQs disabled: %s\n", @@ -2702,6 +2709,9 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) #else seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); #endif + seq_printf(m, "PCI device power state: %s [%d]\n", + pci_power_name(dev_priv->dev->pdev->current_state), + dev_priv->dev->pdev->current_state); return 0; } @@ -3130,9 +3140,10 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); - int i, j, ret; + enum intel_engine_id id; + int j, ret; if (!i915_semaphore_is_enabled(dev)) { seq_puts(m, "Semaphores are disabled\n"); @@ -3151,14 +3162,14 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) page = i915_gem_object_get_page(dev_priv->semaphore_obj, 0); seqno = (uint64_t *)kmap_atomic(page); - for_each_ring(ring, dev_priv, i) { + for_each_engine_id(engine, dev_priv, id) { uint64_t offset; - seq_printf(m, "%s\n", ring->name); + seq_printf(m, "%s\n", engine->name); seq_puts(m, " Last signal:"); for (j = 0; j < num_rings; j++) { - offset = i * I915_NUM_RINGS + j; + offset = id * I915_NUM_ENGINES + j; seq_printf(m, "0x%08llx (0x%02llx) ", seqno[offset], offset * 8); } @@ -3166,7 +3177,7 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) seq_puts(m, " Last wait: "); for (j = 0; j < num_rings; j++) { - offset = i + (j * I915_NUM_RINGS); + offset = id + (j * I915_NUM_ENGINES); seq_printf(m, "0x%08llx (0x%02llx) ", seqno[offset], offset * 8); } @@ -3176,18 +3187,18 @@ static int i915_semaphore_status(struct seq_file *m, void *unused) kunmap_atomic(seqno); } else { seq_puts(m, " Last signal:"); - for_each_ring(ring, dev_priv, i) + for_each_engine(engine, dev_priv) for (j = 0; j < num_rings; j++) seq_printf(m, "0x%08x\n", - I915_READ(ring->semaphore.mbox.signal[j])); + I915_READ(engine->semaphore.mbox.signal[j])); seq_putc(m, '\n'); } seq_puts(m, "\nSync seqno:\n"); - 
for_each_ring(ring, dev_priv, i) { - for (j = 0; j < num_rings; j++) { - seq_printf(m, " 0x%08x ", ring->semaphore.sync_seqno[j]); - } + for_each_engine(engine, dev_priv) { + for (j = 0; j < num_rings; j++) + seq_printf(m, " 0x%08x ", + engine->semaphore.sync_seqno[j]); seq_putc(m, '\n'); } seq_putc(m, '\n'); @@ -3209,8 +3220,8 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused) struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); - seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n", - pll->config.crtc_mask, pll->active, yesno(pll->on)); + seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n", + pll->config.crtc_mask, pll->active_mask, yesno(pll->on)); seq_printf(m, " tracked hardware state:\n"); seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll); seq_printf(m, " dpll_md: 0x%08x\n", @@ -3228,11 +3239,12 @@ static int i915_wa_registers(struct seq_file *m, void *unused) { int i; int ret; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct drm_info_node *node = (struct drm_info_node *) m->private; struct drm_device *dev = node->minor->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct i915_workarounds *workarounds = &dev_priv->workarounds; + enum intel_engine_id id; ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) @@ -3241,9 +3253,9 @@ static int i915_wa_registers(struct seq_file *m, void *unused) intel_runtime_pm_get(dev_priv); seq_printf(m, "Workarounds applied: %d\n", workarounds->count); - for_each_ring(ring, dev_priv, i) + for_each_engine_id(engine, dev_priv, id) seq_printf(m, "HW whitelist count for %s: %d\n", - ring->name, workarounds->hw_whitelist_count[i]); + engine->name, workarounds->hw_whitelist_count[id]); for (i = 0; i < workarounds->count; ++i) { i915_reg_t addr; u32 mask, value, read; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 1c6d227aae7c..b377753717d1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -50,6 +50,66 @@ #include <linux/pm_runtime.h> #include <linux/oom.h> +static unsigned int i915_load_fail_count; + +bool __i915_inject_load_failure(const char *func, int line) +{ + if (i915_load_fail_count >= i915.inject_load_failure) + return false; + + if (++i915_load_fail_count == i915.inject_load_failure) { + DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n", + i915.inject_load_failure, func, line); + return true; + } + + return false; +} + +#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI" +#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \ + "providing the dmesg log by booting with drm.debug=0xf" + +void +__i915_printk(struct drm_i915_private *dev_priv, const char *level, + const char *fmt, ...) 
+{ + static bool shown_bug_once; + struct device *dev = dev_priv->dev->dev; + bool is_error = level[1] <= KERN_ERR[1]; + bool is_debug = level[1] == KERN_DEBUG[1]; + struct va_format vaf; + va_list args; + + if (is_debug && !(drm_debug & DRM_UT_DRIVER)) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV", + __builtin_return_address(0), &vaf); + + if (is_error && !shown_bug_once) { + dev_notice(dev, "%s", FDO_BUG_MSG); + shown_bug_once = true; + } + + va_end(args); +} + +static bool i915_error_injected(struct drm_i915_private *dev_priv) +{ + return i915.inject_load_failure && + i915_load_fail_count == i915.inject_load_failure; +} + +#define i915_load_error(dev_priv, fmt, ...) \ + __i915_printk(dev_priv, \ + i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ + fmt, ##__VA_ARGS__) static int i915_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -87,16 +147,16 @@ static int i915_getparam(struct drm_device *dev, void *data, value = 1; break; case I915_PARAM_HAS_BSD: - value = intel_ring_initialized(&dev_priv->ring[VCS]); + value = intel_engine_initialized(&dev_priv->engine[VCS]); break; case I915_PARAM_HAS_BLT: - value = intel_ring_initialized(&dev_priv->ring[BCS]); + value = intel_engine_initialized(&dev_priv->engine[BCS]); break; case I915_PARAM_HAS_VEBOX: - value = intel_ring_initialized(&dev_priv->ring[VECS]); + value = intel_engine_initialized(&dev_priv->engine[VECS]); break; case I915_PARAM_HAS_BSD2: - value = intel_ring_initialized(&dev_priv->ring[VCS2]); + value = intel_engine_initialized(&dev_priv->engine[VCS2]); break; case I915_PARAM_HAS_RELAXED_FENCING: value = 1; @@ -370,6 +430,9 @@ static int i915_load_modeset_init(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int ret; + if (i915_inject_load_failure()) + return -ENODEV; + ret = intel_bios_init(dev_priv); if (ret) DRM_INFO("failed to find VBIOS tables\n"); @@ -444,7 +507,7 @@ static int i915_load_modeset_init(struct drm_device *dev) cleanup_gem: mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); cleanup_irq: @@ -453,6 +516,7 @@ cleanup_irq: intel_teardown_gmbus(dev); cleanup_csr: intel_csr_ucode_fini(dev_priv); + intel_power_domains_fini(dev_priv); vga_switcheroo_unregister_client(dev->pdev); cleanup_vga_client: vga_client_register(dev->pdev, NULL, NULL, NULL); @@ -465,6 +529,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) { struct apertures_struct *ap; struct pci_dev *pdev = dev_priv->dev->pdev; + struct i915_ggtt *ggtt = &dev_priv->ggtt; bool primary; int ret; @@ -472,8 +537,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) if (!ap) return -ENOMEM; - ap->ranges[0].base = dev_priv->gtt.mappable_base; - ap->ranges[0].size = dev_priv->gtt.mappable_end; + ap->ranges[0].base = ggtt->mappable_base; + ap->ranges[0].size = ggtt->mappable_end; primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; @@ -853,6 +918,10 @@ static void intel_device_info_runtime_init(struct drm_device *dev) else if (INTEL_INFO(dev)->gen >= 9) gen9_sseu_info_init(dev); + /* Snooping is broken on BXT A stepping. 
*/ + info->has_snoop = !info->has_llc; + info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1); + DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total); DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total); DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice); @@ -929,6 +998,84 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv) destroy_workqueue(dev_priv->wq); } +/** + * i915_driver_init_early - setup state not requiring device access + * @dev_priv: device private + * + * Initialize everything that is a "SW-only" state, that is state not + * requiring accessing the device or exposing the driver via kernel internal + * or userspace interfaces. Example steps belonging here: lock initialization, + * system memory allocation, setting up device specific attributes and + * function hooks not requiring accessing the device. + */ +static int i915_driver_init_early(struct drm_i915_private *dev_priv, + struct drm_device *dev, + struct intel_device_info *info) +{ + struct intel_device_info *device_info; + int ret = 0; + + if (i915_inject_load_failure()) + return -ENODEV; + + /* Setup the write-once "constant" device info */ + device_info = (struct intel_device_info *)&dev_priv->info; + memcpy(device_info, info, sizeof(dev_priv->info)); + device_info->device_id = dev->pdev->device; + + spin_lock_init(&dev_priv->irq_lock); + spin_lock_init(&dev_priv->gpu_error.lock); + mutex_init(&dev_priv->backlight_lock); + spin_lock_init(&dev_priv->uncore.lock); + spin_lock_init(&dev_priv->mm.object_stat_lock); + spin_lock_init(&dev_priv->mmio_flip_lock); + mutex_init(&dev_priv->sb_lock); + mutex_init(&dev_priv->modeset_restore_lock); + mutex_init(&dev_priv->av_mutex); + mutex_init(&dev_priv->wm.wm_mutex); + mutex_init(&dev_priv->pps_mutex); + + ret = i915_workqueues_init(dev_priv); + if (ret < 0) + return ret; + + /* This must be called before any calls to HAS_PCH_* */ + intel_detect_pch(dev); + + intel_pm_setup(dev); + intel_init_dpio(dev_priv); + intel_power_domains_init(dev_priv); + intel_irq_init(dev_priv); + intel_init_display_hooks(dev_priv); + intel_init_clock_gating_hooks(dev_priv); + intel_init_audio_hooks(dev_priv); + i915_gem_load_init(dev); + + intel_display_crc_init(dev); + + i915_dump_device_info(dev_priv); + + /* Not all pre-production machines fall into this category, only the + * very first ones. Almost everything should work, except for maybe + * suspend/resume. And we don't implement workarounds that affect only + * pre-production machines. */ + if (IS_HSW_EARLY_SDV(dev)) + DRM_INFO("This is an early pre-production Haswell machine. 
" + "It may not be fully functional.\n"); + + return 0; +} + +/** + * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early() + * @dev_priv: device private + */ +static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv) +{ + i915_gem_load_cleanup(dev_priv->dev); + i915_workqueues_cleanup(dev_priv); +} + static int i915_mmio_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); @@ -970,97 +1117,87 @@ static void i915_mmio_cleanup(struct drm_device *dev) } /** - * i915_driver_load - setup chip and create an initial config - * @dev: DRM device - * @flags: startup flags + * i915_driver_init_mmio - setup device MMIO + * @dev_priv: device private * - * The driver load routine has to do several things: - * - drive output discovery via intel_modeset_init() - * - initialize the memory manager - * - allocate initial config memory - * - setup the DRM framebuffer with the allocated memory + * Setup minimal device state necessary for MMIO accesses later in the + * initialization sequence. The setup here should avoid any other device-wide + * side effects or exposing the driver via kernel internal or user space + * interfaces. */ -int i915_driver_load(struct drm_device *dev, unsigned long flags) +static int i915_driver_init_mmio(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv; - struct intel_device_info *info, *device_info; - int ret = 0; - uint32_t aperture_size; - - info = (struct intel_device_info *) flags; - - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (dev_priv == NULL) - return -ENOMEM; - - dev->dev_private = dev_priv; - dev_priv->dev = dev; + struct drm_device *dev = dev_priv->dev; + int ret; - /* Setup the write-once "constant" device info */ - device_info = (struct intel_device_info *)&dev_priv->info; - memcpy(device_info, info, sizeof(dev_priv->info)); - device_info->device_id = dev->pdev->device; + if (i915_inject_load_failure()) + return -ENODEV; - spin_lock_init(&dev_priv->irq_lock); - spin_lock_init(&dev_priv->gpu_error.lock); - mutex_init(&dev_priv->backlight_lock); - spin_lock_init(&dev_priv->uncore.lock); - spin_lock_init(&dev_priv->mm.object_stat_lock); - spin_lock_init(&dev_priv->mmio_flip_lock); - mutex_init(&dev_priv->sb_lock); - mutex_init(&dev_priv->modeset_restore_lock); - mutex_init(&dev_priv->av_mutex); + if (i915_get_bridge_dev(dev)) + return -EIO; - ret = i915_workqueues_init(dev_priv); + ret = i915_mmio_setup(dev); if (ret < 0) - goto out_free_priv; + goto put_bridge; - intel_pm_setup(dev); + intel_uncore_init(dev); - intel_runtime_pm_get(dev_priv); + return 0; - intel_display_crc_init(dev); +put_bridge: + pci_dev_put(dev_priv->bridge_dev); - i915_dump_device_info(dev_priv); + return ret; +} - /* Not all pre-production machines fall into this category, only the - * very first ones. Almost everything should work, except for maybe - * suspend/resume. And we don't implement workarounds that affect only - * pre-production machines. */ - if (IS_HSW_EARLY_SDV(dev)) - DRM_INFO("This is an early pre-production Haswell machine. 
" - "It may not be fully functional.\n"); +/** + * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio() + * @dev_priv: device private + */ +static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; - if (i915_get_bridge_dev(dev)) { - ret = -EIO; - goto out_runtime_pm_put; - } + intel_uncore_fini(dev); + i915_mmio_cleanup(dev); + pci_dev_put(dev_priv->bridge_dev); +} - ret = i915_mmio_setup(dev); - if (ret < 0) - goto put_bridge; +/** + * i915_driver_init_hw - setup state requiring device access + * @dev_priv: device private + * + * Setup state that requires accessing the device, but doesn't require + * exposing the driver via kernel internal or userspace interfaces. + */ +static int i915_driver_init_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + uint32_t aperture_size; + int ret; - /* This must be called before any calls to HAS_PCH_* */ - intel_detect_pch(dev); + if (i915_inject_load_failure()) + return -ENODEV; - intel_uncore_init(dev); + intel_device_info_runtime_init(dev); - ret = i915_gem_gtt_init(dev); + ret = i915_ggtt_init_hw(dev); if (ret) - goto out_uncore_fini; + return ret; /* WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. */ ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_gtt; + goto out_ggtt; } ret = i915_kick_out_vgacon(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_gtt; + goto out_ggtt; } pci_set_master(dev->pdev); @@ -1080,26 +1217,27 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); - aperture_size = dev_priv->gtt.mappable_end; + aperture_size = ggtt->mappable_end; - dev_priv->gtt.mappable = - io_mapping_create_wc(dev_priv->gtt.mappable_base, + ggtt->mappable = + io_mapping_create_wc(ggtt->mappable_base, aperture_size); - if (dev_priv->gtt.mappable == NULL) { + if (!ggtt->mappable) { ret = -EIO; - goto out_gtt; + goto out_ggtt; } - dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base, + ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, aperture_size); - intel_irq_init(dev_priv); + pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + intel_uncore_sanitize(dev); intel_opregion_setup(dev); - i915_gem_load_init(dev); - i915_gem_shrinker_init(dev_priv); + i915_gem_load_init_fences(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the * integrated graphics even though the support isn't actually there @@ -1117,24 +1255,44 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) DRM_DEBUG_DRIVER("can't enable MSI"); } - intel_device_info_runtime_init(dev); + return 0; - intel_init_dpio(dev_priv); +out_ggtt: + i915_ggtt_cleanup_hw(dev); - if (INTEL_INFO(dev)->num_pipes) { - ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); - if (ret) - goto out_gem_unload; - } + return ret; +} - intel_power_domains_init(dev_priv); +/** + * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw() + * @dev_priv: device private + */ +static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + struct i915_ggtt *ggtt = &dev_priv->ggtt; - ret = i915_load_modeset_init(dev); - if 
(ret < 0) { - DRM_ERROR("failed to init modeset\n"); - goto out_power_well; - } + if (dev->pdev->msi_enabled) + pci_disable_msi(dev->pdev); + + pm_qos_remove_request(&dev_priv->pm_qos); + arch_phys_wc_del(ggtt->mtrr); + io_mapping_free(ggtt->mappable); + i915_ggtt_cleanup_hw(dev); +} + +/** + * i915_driver_register - register the driver with the rest of the system + * @dev_priv: device private + * + * Perform any steps necessary to make the driver available via kernel + * internal or userspace interfaces. + */ +static void i915_driver_register(struct drm_i915_private *dev_priv) +{ + struct drm_device *dev = dev_priv->dev; + i915_gem_shrinker_init(dev_priv); /* * Notify a valid surface after modesetting, * when running inside a VM. */ @@ -1144,48 +1302,107 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) i915_setup_sysfs(dev); - if (INTEL_INFO(dev)->num_pipes) { + if (INTEL_INFO(dev_priv)->num_pipes) { /* Must be done after probing outputs */ intel_opregion_init(dev); acpi_video_register(); } - if (IS_GEN5(dev)) + if (IS_GEN5(dev_priv)) intel_gpu_ips_init(dev_priv); - intel_runtime_pm_enable(dev_priv); - i915_audio_component_init(dev_priv); +} + +/** + * i915_driver_unregister - cleanup the registration done in i915_driver_register() + * @dev_priv: device private + */ +static void i915_driver_unregister(struct drm_i915_private *dev_priv) +{ + i915_audio_component_cleanup(dev_priv); + intel_gpu_ips_teardown(); + acpi_video_unregister(); + intel_opregion_fini(dev_priv->dev); + i915_teardown_sysfs(dev_priv->dev); + i915_gem_shrinker_cleanup(dev_priv); +} + +/** + * i915_driver_load - setup chip and create an initial config + * @dev: DRM device + * @flags: startup flags + * + * The driver load routine has to do several things: + * - drive output discovery via intel_modeset_init() + * - initialize the memory manager + * - allocate initial config memory + * - setup the DRM framebuffer with the allocated memory + */ +int i915_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct drm_i915_private *dev_priv; + int ret = 0; + + dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); + if (dev_priv == NULL) + return -ENOMEM; + + dev->dev_private = dev_priv; + /* Must be set before calling __i915_printk */ + dev_priv->dev = dev; + + ret = i915_driver_init_early(dev_priv, dev, + (struct intel_device_info *)flags); + + if (ret < 0) + goto out_free_priv; + + intel_runtime_pm_get(dev_priv); + + ret = i915_driver_init_mmio(dev_priv); + if (ret < 0) + goto out_runtime_pm_put; + + ret = i915_driver_init_hw(dev_priv); + if (ret < 0) + goto out_cleanup_mmio; + + /* + * TODO: move the vblank init and parts of modeset init steps into one + * of the i915_driver_init_/i915_driver_register functions according + * to the role/effect of the given init step. 
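+ *
+ * (Editorial aside, not part of the patch: the init phases introduced
+ * in this file pair with their cleanups in strict reverse order, so
+ * each error label undoes exactly the phases that completed:
+ *
+ *	i915_driver_init_early() <-> i915_driver_cleanup_early()
+ *	i915_driver_init_mmio()  <-> i915_driver_cleanup_mmio()
+ *	i915_driver_init_hw()    <-> i915_driver_cleanup_hw()
+ *	i915_driver_register()   <-> i915_driver_unregister()
+ *
+ * This is the goto ladder at the end of this function and, mirrored,
+ * the body of i915_driver_unload().)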
+ */ + if (INTEL_INFO(dev)->num_pipes) { + ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes); + if (ret) + goto out_cleanup_hw; + } + + ret = i915_load_modeset_init(dev); + if (ret < 0) + goto out_cleanup_vblank; + + i915_driver_register(dev_priv); + + intel_runtime_pm_enable(dev_priv); intel_runtime_pm_put(dev_priv); return 0; -out_power_well: - intel_power_domains_fini(dev_priv); +out_cleanup_vblank: drm_vblank_cleanup(dev); -out_gem_unload: - i915_gem_shrinker_cleanup(dev_priv); - - if (dev->pdev->msi_enabled) - pci_disable_msi(dev->pdev); - - intel_teardown_mchbar(dev); - pm_qos_remove_request(&dev_priv->pm_qos); - arch_phys_wc_del(dev_priv->gtt.mtrr); - io_mapping_free(dev_priv->gtt.mappable); -out_gtt: - i915_global_gtt_cleanup(dev); -out_uncore_fini: - intel_uncore_fini(dev); - i915_mmio_cleanup(dev); -put_bridge: - pci_dev_put(dev_priv->bridge_dev); - i915_gem_load_cleanup(dev); +out_cleanup_hw: + i915_driver_cleanup_hw(dev_priv); +out_cleanup_mmio: + i915_driver_cleanup_mmio(dev_priv); out_runtime_pm_put: intel_runtime_pm_put(dev_priv); - i915_workqueues_cleanup(dev_priv); + i915_driver_cleanup_early(dev_priv); out_free_priv: + i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret); + kfree(dev_priv); return ret; @@ -1198,26 +1415,15 @@ int i915_driver_unload(struct drm_device *dev) intel_fbdev_fini(dev); - i915_audio_component_cleanup(dev_priv); - ret = i915_gem_suspend(dev); if (ret) { DRM_ERROR("failed to idle hardware: %d\n", ret); return ret; } - intel_power_domains_fini(dev_priv); - - intel_gpu_ips_teardown(); + intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - i915_teardown_sysfs(dev); - - i915_gem_shrinker_cleanup(dev_priv); - - io_mapping_free(dev_priv->gtt.mappable); - arch_phys_wc_del(dev_priv->gtt.mtrr); - - acpi_video_unregister(); + i915_driver_unregister(dev_priv); drm_vblank_cleanup(dev); @@ -1246,31 +1452,24 @@ int i915_driver_unload(struct drm_device *dev) cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); i915_destroy_error_state(dev); - if (dev->pdev->msi_enabled) - pci_disable_msi(dev->pdev); - - intel_opregion_fini(dev); - /* Flush any outstanding unpin_work. 
*/ flush_workqueue(dev_priv->wq); intel_guc_ucode_fini(dev); mutex_lock(&dev->struct_mutex); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); i915_gem_context_fini(dev); mutex_unlock(&dev->struct_mutex); intel_fbc_cleanup_cfb(dev_priv); - pm_qos_remove_request(&dev_priv->pm_qos); + intel_power_domains_fini(dev_priv); - i915_global_gtt_cleanup(dev); + i915_driver_cleanup_hw(dev_priv); + i915_driver_cleanup_mmio(dev_priv); - intel_uncore_fini(dev); - i915_mmio_cleanup(dev); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); - i915_gem_load_cleanup(dev); - pci_dev_put(dev_priv->bridge_dev); - i915_workqueues_cleanup(dev_priv); + i915_driver_cleanup_early(dev_priv); kfree(dev_priv); return 0; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 20e82008b8b6..29b4e79c85a6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -66,6 +66,11 @@ static struct drm_driver driver; #define IVB_CURSOR_OFFSETS \ .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET } +#define BDW_COLORS \ + .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 } +#define CHV_COLORS \ + .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 } + static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, .has_overlay = 1, .overlay_needs_physical = 1, @@ -288,24 +293,28 @@ static const struct intel_device_info intel_haswell_m_info = { .is_mobile = 1, }; +#define BDW_FEATURES \ + HSW_FEATURES, \ + BDW_COLORS + static const struct intel_device_info intel_broadwell_d_info = { - HSW_FEATURES, + BDW_FEATURES, .gen = 8, }; static const struct intel_device_info intel_broadwell_m_info = { - HSW_FEATURES, + BDW_FEATURES, .gen = 8, .is_mobile = 1, }; static const struct intel_device_info intel_broadwell_gt3d_info = { - HSW_FEATURES, + BDW_FEATURES, .gen = 8, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; static const struct intel_device_info intel_broadwell_gt3m_info = { - HSW_FEATURES, + BDW_FEATURES, .gen = 8, .is_mobile = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, }; @@ -318,16 +327,17 @@ static const struct intel_device_info intel_cherryview_info = { .display_mmio_offset = VLV_DISPLAY_BASE, GEN_CHV_PIPEOFFSETS, CURSOR_OFFSETS, + CHV_COLORS, }; static const struct intel_device_info intel_skylake_info = { - HSW_FEATURES, + BDW_FEATURES, .is_skylake = 1, .gen = 9, }; static const struct intel_device_info intel_skylake_gt3_info = { - HSW_FEATURES, + BDW_FEATURES, .is_skylake = 1, .gen = 9, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, @@ -345,18 +355,17 @@ static const struct intel_device_info intel_broxton_info = { .has_fbc = 1, GEN_DEFAULT_PIPEOFFSETS, IVB_CURSOR_OFFSETS, + BDW_COLORS, }; static const struct intel_device_info intel_kabylake_info = { - HSW_FEATURES, - .is_preliminary = 1, + BDW_FEATURES, .is_kabylake = 1, .gen = 9, }; static const struct intel_device_info intel_kabylake_gt3_info = { - HSW_FEATURES, - .is_preliminary = 1, + BDW_FEATURES, .is_kabylake = 1, .gen = 9, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, @@ -504,6 +513,7 @@ void intel_detect_pch(struct drm_device *dev) WARN_ON(!IS_SKYLAKE(dev) && !IS_KABYLAKE(dev)); } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || + (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && pch->subsystem_vendor == 0x1af4 && pch->subsystem_device == 
0x1100)) { @@ -758,10 +768,10 @@ static int i915_drm_resume(struct drm_device *dev) dev_priv->display.hpd_irq_setup(dev); spin_unlock_irq(&dev_priv->irq_lock); - intel_display_resume(dev); - intel_dp_mst_resume(dev); + intel_display_resume(dev); + /* * ... but also need to make sure that hotplug processing * doesn't cause havoc. Like in the driver load code we don't @@ -881,7 +891,7 @@ int i915_reset(struct drm_device *dev) simulated = dev_priv->gpu_error.stop_rings != 0; - ret = intel_gpu_reset(dev); + ret = intel_gpu_reset(dev, ALL_ENGINES); /* Also reset the gpu hangman. */ if (simulated) { @@ -1390,7 +1400,7 @@ static int vlv_suspend_complete(struct drm_i915_private *dev_priv) if (err) goto err2; - if (!IS_CHERRYVIEW(dev_priv->dev)) + if (!IS_CHERRYVIEW(dev_priv)) vlv_save_gunit_s0ix_state(dev_priv); err = vlv_force_gfx_clock(dev_priv, false); @@ -1422,7 +1432,7 @@ static int vlv_resume_prepare(struct drm_i915_private *dev_priv, */ ret = vlv_force_gfx_clock(dev_priv, true); - if (!IS_CHERRYVIEW(dev_priv->dev)) + if (!IS_CHERRYVIEW(dev_priv)) vlv_restore_gunit_s0ix_state(dev_priv); err = vlv_allow_gt_wake(dev_priv, true); @@ -1742,10 +1752,8 @@ static int __init i915_init(void) if (i915.modeset == 0) driver.driver_features &= ~DRIVER_MODESET; -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && i915.modeset == -1) driver.driver_features &= ~DRIVER_MODESET; -#endif if (!(driver.driver_features & DRIVER_MODESET)) { /* Silently fail loading to not upset userspace. */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 10480939159c..d1e6e580a92d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -53,13 +53,14 @@ #include <linux/kref.h> #include <linux/pm_qos.h> #include "intel_guc.h" +#include "intel_dpll_mgr.h" /* General customization: */ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20160229" +#define DRIVER_DATE "20160411" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -97,6 +98,10 @@ #define I915_STATE_WARN_ON(x) \ I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")") +bool __i915_inject_load_failure(const char *func, int line); +#define i915_inject_load_failure() \ + __i915_inject_load_failure(__func__, __LINE__) + static inline const char *yesno(bool v) { return v ? 
"yes" : "no"; @@ -122,9 +127,35 @@ enum transcoder { TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP, + TRANSCODER_DSI_A, + TRANSCODER_DSI_C, I915_MAX_TRANSCODERS }; -#define transcoder_name(t) ((t) + 'A') + +static inline const char *transcoder_name(enum transcoder transcoder) +{ + switch (transcoder) { + case TRANSCODER_A: + return "A"; + case TRANSCODER_B: + return "B"; + case TRANSCODER_C: + return "C"; + case TRANSCODER_EDP: + return "EDP"; + case TRANSCODER_DSI_A: + return "DSI A"; + case TRANSCODER_DSI_C: + return "DSI C"; + default: + return "<invalid>"; + } +} + +static inline bool transcoder_is_dsi(enum transcoder transcoder) +{ + return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C; +} /* * I915_MAX_PLANES in the enum below is the maximum (across all platforms) @@ -176,6 +207,8 @@ enum intel_display_power_domain { POWER_DOMAIN_TRANSCODER_B, POWER_DOMAIN_TRANSCODER_C, POWER_DOMAIN_TRANSCODER_EDP, + POWER_DOMAIN_TRANSCODER_DSI_A, + POWER_DOMAIN_TRANSCODER_DSI_C, POWER_DOMAIN_PORT_DDI_A_LANES, POWER_DOMAIN_PORT_DDI_B_LANES, POWER_DOMAIN_PORT_DDI_C_LANES, @@ -273,6 +306,10 @@ struct i915_hotplug { (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ (__s)++) +#define for_each_port_masked(__port, __ports_mask) \ + for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ + for_each_if ((__ports_mask) & (1 << (__port))) + #define for_each_crtc(dev, crtc) \ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) @@ -340,81 +377,6 @@ struct drm_i915_file_private { unsigned int bsd_ring; }; -enum intel_dpll_id { - DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ - /* real shared dpll ids must be >= 0 */ - DPLL_ID_PCH_PLL_A = 0, - DPLL_ID_PCH_PLL_B = 1, - /* hsw/bdw */ - DPLL_ID_WRPLL1 = 0, - DPLL_ID_WRPLL2 = 1, - DPLL_ID_SPLL = 2, - - /* skl */ - DPLL_ID_SKL_DPLL1 = 0, - DPLL_ID_SKL_DPLL2 = 1, - DPLL_ID_SKL_DPLL3 = 2, -}; -#define I915_NUM_PLLS 3 - -struct intel_dpll_hw_state { - /* i9xx, pch plls */ - uint32_t dpll; - uint32_t dpll_md; - uint32_t fp0; - uint32_t fp1; - - /* hsw, bdw */ - uint32_t wrpll; - uint32_t spll; - - /* skl */ - /* - * DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in - * lower part of ctrl1 and they get shifted into position when writing - * the register. This allows us to easily compare the state to share - * the DPLL. - */ - uint32_t ctrl1; - /* HDMI only, 0 when used for DP */ - uint32_t cfgcr1, cfgcr2; - - /* bxt */ - uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, - pcsdw12; -}; - -struct intel_shared_dpll_config { - unsigned crtc_mask; /* mask of CRTCs sharing this PLL */ - struct intel_dpll_hw_state hw_state; -}; - -struct intel_shared_dpll { - struct intel_shared_dpll_config config; - - int active; /* count of number of active CRTCs (i.e. DPMS on) */ - bool on; /* is the PLL actually active? Disabled during modeset */ - const char *name; - /* should match the index in the dev_priv->shared_dplls array */ - enum intel_dpll_id id; - /* The mode_set hook is optional and should be used together with the - * intel_prepare_shared_dpll function. 
*/ - void (*mode_set)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - void (*enable)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - void (*disable)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - bool (*get_hw_state)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state); -}; - -#define SKL_DPLL0 0 -#define SKL_DPLL1 1 -#define SKL_DPLL2 2 -#define SKL_DPLL3 3 - /* Used by dp and fdi links */ struct intel_link_m_n { uint32_t tu; @@ -533,7 +495,8 @@ struct drm_i915_error_state { u32 cpu_ring_head; u32 cpu_ring_tail; - u32 semaphore_seqno[I915_NUM_RINGS - 1]; + u32 last_seqno; + u32 semaphore_seqno[I915_NUM_ENGINES - 1]; /* Register state */ u32 start; @@ -553,7 +516,7 @@ struct drm_i915_error_state { u32 fault_reg; u64 faddr; u32 rc_psmi; /* sleep state */ - u32 semaphore_mboxes[I915_NUM_RINGS - 1]; + u32 semaphore_mboxes[I915_NUM_ENGINES - 1]; struct drm_i915_error_object { int page_count; @@ -561,6 +524,8 @@ struct drm_i915_error_state { u32 *pages[0]; } *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; + struct drm_i915_error_object *wa_ctx; + struct drm_i915_error_request { long jiffies; u32 seqno; @@ -577,12 +542,12 @@ struct drm_i915_error_state { pid_t pid; char comm[TASK_COMM_LEN]; - } ring[I915_NUM_RINGS]; + } ring[I915_NUM_ENGINES]; struct drm_i915_error_buffer { u32 size; u32 name; - u32 rseqno[I915_NUM_RINGS], wseqno; + u32 rseqno[I915_NUM_ENGINES], wseqno; u64 gtt_offset; u32 read_domains; u32 write_domain; @@ -611,27 +576,12 @@ struct dpll; struct drm_i915_display_funcs { int (*get_display_clock_speed)(struct drm_device *dev); int (*get_fifo_size)(struct drm_device *dev, int plane); - /** - * find_dpll() - Find the best values for the PLL - * @limit: limits for the PLL - * @crtc: current CRTC - * @target: target frequency in kHz - * @refclk: reference clock frequency in kHz - * @match_clock: if provided, @best_clock P divider must - * match the P divider from @match_clock - * used for LVDS downclocking - * @best_clock: best PLL values found - * - * Returns true on success, false on failure. 
- */ - bool (*find_dpll)(const struct intel_limit *limit, - struct intel_crtc_state *crtc_state, - int target, int refclk, - struct dpll *match_clock, - struct dpll *best_clock); - int (*compute_pipe_wm)(struct intel_crtc *crtc, - struct drm_atomic_state *state); - void (*program_watermarks)(struct intel_crtc_state *cstate); + int (*compute_pipe_wm)(struct intel_crtc_state *cstate); + int (*compute_intermediate_wm)(struct drm_device *dev, + struct intel_crtc *intel_crtc, + struct intel_crtc_state *newstate); + void (*initial_watermarks)(struct intel_crtc_state *cstate); + void (*optimize_watermarks)(struct intel_crtc_state *cstate); void (*update_wm)(struct drm_crtc *crtc); int (*modeset_calc_cdclk)(struct drm_atomic_state *state); void (*modeset_commit_cdclk)(struct drm_atomic_state *state); @@ -662,6 +612,9 @@ struct drm_i915_display_funcs { /* render clock increase/decrease */ /* display clock increase/decrease */ /* pll clock increase/decrease */ + + void (*load_csc_matrix)(struct drm_crtc_state *crtc_state); + void (*load_luts)(struct drm_crtc_state *crtc_state); }; enum forcewake_domain_id { @@ -681,6 +634,13 @@ enum forcewake_domains { FORCEWAKE_MEDIA) }; +#define FW_REG_READ (1) +#define FW_REG_WRITE (2) + +enum forcewake_domains +intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, + i915_reg_t reg, unsigned int op); + struct intel_uncore_funcs { void (*force_wake_get)(struct drm_i915_private *dev_priv, enum forcewake_domains domains); @@ -713,8 +673,9 @@ struct intel_uncore { struct intel_uncore_forcewake_domain { struct drm_i915_private *i915; enum forcewake_domain_id id; + enum forcewake_domains mask; unsigned wake_count; - struct timer_list timer; + struct hrtimer timer; i915_reg_t reg_set; u32 val_set; u32 val_clear; @@ -727,14 +688,14 @@ struct intel_uncore { }; /* Iterate over initialised fw domains */ -#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \ - for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ - (i__) < FW_DOMAIN_ID_COUNT; \ - (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \ - for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__))) +#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \ + for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \ + (domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \ + (domain__)++) \ + for_each_if ((mask__) & (domain__)->mask) -#define for_each_fw_domain(domain__, dev_priv__, i__) \ - for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__) +#define for_each_fw_domain(domain__, dev_priv__) \ + for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__) #define CSR_VERSION(major, minor) ((major) << 16 | (minor)) #define CSR_VERSION_MAJOR(version) ((version) >> 16) @@ -750,6 +711,7 @@ struct intel_csr { i915_reg_t mmioaddr[8]; uint32_t mmiodata[8]; uint32_t dc_state; + uint32_t allowed_dc_mask; }; #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ @@ -779,6 +741,7 @@ struct intel_csr { func(overlay_needs_physical) sep \ func(supports_tv) sep \ func(has_llc) sep \ + func(has_snoop) sep \ func(has_ddi) sep \ func(has_fpga_dbg) @@ -810,6 +773,11 @@ struct intel_device_info { u8 has_slice_pg:1; u8 has_subslice_pg:1; u8 has_eu_pg:1; + + struct color_luts { + u16 degamma_lut_size; + u16 gamma_lut_size; + } color; }; #undef DEFINE_FLAG @@ -891,7 +859,7 @@ struct intel_context { struct i915_vma *lrc_vma; u64 lrc_desc; uint32_t *lrc_reg_state; - } engine[I915_NUM_RINGS]; + } engine[I915_NUM_ENGINES]; struct list_head link; }; @@ 
-1036,6 +1004,7 @@ struct intel_fbc_work; struct intel_gmbus { struct i2c_adapter adapter; +#define GMBUS_FORCE_BIT_RETRY (1U << 31) u32 force_bit; u32 reg0; i915_reg_t gpio_reg; @@ -1159,6 +1128,7 @@ struct intel_gen6_power_mgmt { u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp0_freq; /* Non-overclocked max frequency. */ + u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ u8 up_threshold; /* Current %busy required to uplock */ u8 down_threshold; /* Current %busy required to downclock */ @@ -1298,6 +1268,7 @@ struct i915_gem_mm { struct i915_hw_ppgtt *aliasing_ppgtt; struct notifier_block oom_notifier; + struct notifier_block vmap_notifier; struct shrinker shrinker; bool shrinker_no_lock_stealing; @@ -1482,21 +1453,23 @@ struct intel_vbt_data { unsigned int lvds_use_ssc:1; unsigned int display_clock_mode:1; unsigned int fdi_rx_polarity_inverted:1; - unsigned int has_mipi:1; + unsigned int panel_type:4; int lvds_ssc_freq; unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ enum drrs_support_type drrs_type; - /* eDP */ - int edp_rate; - int edp_lanes; - int edp_preemphasis; - int edp_vswing; - bool edp_initialized; - bool edp_support; - int edp_bpp; - struct edp_power_seq edp_pps; + struct { + int rate; + int lanes; + int preemphasis; + int vswing; + bool low_vswing; + bool initialized; + bool support; + int bpp; + struct edp_power_seq pps; + } edp; struct { bool full_link; @@ -1516,7 +1489,6 @@ struct intel_vbt_data { /* MIPI DSI */ struct { - u16 port; u16 panel_id; struct mipi_config *config; struct mipi_pps_data *pps; @@ -1532,6 +1504,7 @@ struct intel_vbt_data { union child_device_config *child_dev; struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS]; + struct sdvo_device_mapping sdvo_mappings[2]; }; enum intel_ddb_partitioning { @@ -1706,7 +1679,7 @@ struct i915_wa_reg { struct i915_workarounds { struct i915_wa_reg reg[I915_MAX_WA_REGS]; u32 count; - u32 hw_whitelist_count[I915_NUM_RINGS]; + u32 hw_whitelist_count[I915_NUM_ENGINES]; }; struct i915_virtual_gpu { @@ -1719,7 +1692,7 @@ struct i915_execbuffer_params { uint32_t dispatch_flags; uint32_t args_batch_start_offset; uint64_t batch_obj_vm_offset; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct drm_i915_gem_object *batch_obj; struct intel_context *ctx; struct drm_i915_gem_request *request; @@ -1771,7 +1744,7 @@ struct drm_i915_private { wait_queue_head_t gmbus_wait_queue; struct pci_dev *bridge_dev; - struct intel_engine_cs ring[I915_NUM_RINGS]; + struct intel_engine_cs engine[I915_NUM_ENGINES]; struct drm_i915_gem_object *semaphore_obj; uint32_t last_seqno, next_seqno; @@ -1829,6 +1802,7 @@ struct drm_i915_private { unsigned int skl_boot_cdclk; unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; unsigned int max_dotclk_freq; + unsigned int rawclk_freq; unsigned int hpll_freq; unsigned int czclk_freq; @@ -1855,7 +1829,7 @@ struct drm_i915_private { struct drm_atomic_state *modeset_restore_state; struct list_head vm_list; /* Global list of all address spaces */ - struct i915_gtt gtt; /* VM representing the global address space */ + struct i915_ggtt ggtt; /* VM representing the global address space */ struct i915_gem_mm mm; DECLARE_HASHTABLE(mm_structs, 7); @@ -1863,8 +1837,6 @@ struct drm_i915_private { /* Kernel Modesetting */ - struct sdvo_device_mapping sdvo_mappings[2]; - struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES]; 
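+
+	/*
+	 * Illustrative only (hypothetical call site): with sdvo_mappings
+	 * moved from here into intel_vbt_data above, a lookup changes from
+	 *
+	 *	mapping = &dev_priv->sdvo_mappings[i];
+	 *
+	 * to
+	 *
+	 *	mapping = &dev_priv->vbt.sdvo_mappings[i];
+	 */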
wait_queue_head_t pending_flip_queue; @@ -1876,6 +1848,14 @@ struct drm_i915_private { /* dpll and cdclk state is protected by connection_mutex */ int num_shared_dpll; struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; + const struct intel_dpll_mgr *dpll_mgr; + + /* + * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll. + * Must be global rather than per dpll, because on some platforms + * plls share registers. + */ + struct mutex dpll_lock; unsigned int active_crtcs; unsigned int min_pixclk[I915_MAX_PIPES]; @@ -1884,9 +1864,6 @@ struct drm_i915_private { struct i915_workarounds workarounds; - /* Reclocking support */ - bool render_reclock_avail; - struct i915_frontbuffer_tracking fb_tracking; u16 orig_clock; @@ -1936,7 +1913,14 @@ struct drm_i915_private { u32 fdi_rx_config; + /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */ u32 chv_phy_control; + /* + * Shadows for CHV DPLL_MD regs to keep the state + * checker somewhat working in the presence of hardware + * crappiness (can't read out DPLL_MD for pipes B & C). + */ + u32 chv_dpll_md[I915_MAX_PIPES]; u32 suspend_count; bool suspended_to_idle; @@ -1980,6 +1964,13 @@ struct drm_i915_private { }; uint8_t max_level; + + /* + * Should be held around atomic WM register writing; also + * protects intel_crtc->wm.active and + * cstate->wm.need_postvbl_update. + */ + struct mutex wm_mutex; } wm; struct i915_runtime_pm pm; @@ -1989,15 +1980,13 @@ struct drm_i915_private { int (*execbuf_submit)(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); - int (*init_rings)(struct drm_device *dev); - void (*cleanup_ring)(struct intel_engine_cs *ring); - void (*stop_ring)(struct intel_engine_cs *ring); + int (*init_engines)(struct drm_device *dev); + void (*cleanup_engine)(struct intel_engine_cs *engine); + void (*stop_engine)(struct intel_engine_cs *engine); } gt; struct intel_context *kernel_context; - bool edp_low_vswing; - /* perform PHY state sanity checks?
*/ bool chv_phy_assert[2]; @@ -2024,10 +2013,28 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc) return container_of(guc, struct drm_i915_private, guc); } -/* Iterate over initialised rings */ -#define for_each_ring(ring__, dev_priv__, i__) \ - for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \ - for_each_if ((((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))) +/* Simple iterator over all initialised engines */ +#define for_each_engine(engine__, dev_priv__) \ + for ((engine__) = &(dev_priv__)->engine[0]; \ + (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ + (engine__)++) \ + for_each_if (intel_engine_initialized(engine__)) + +/* Iterator with engine_id */ +#define for_each_engine_id(engine__, dev_priv__, id__) \ + for ((engine__) = &(dev_priv__)->engine[0], (id__) = 0; \ + (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ + (engine__)++) \ + for_each_if (((id__) = (engine__)->id, \ + intel_engine_initialized(engine__))) + +/* Iterator over subset of engines selected by mask */ +#define for_each_engine_masked(engine__, dev_priv__, mask__) \ + for ((engine__) = &(dev_priv__)->engine[0]; \ + (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \ + (engine__)++) \ + for_each_if (((mask__) & intel_engine_flag(engine__)) && \ + intel_engine_initialized(engine__)) enum hdmi_force_audio { HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */ @@ -2097,7 +2104,7 @@ struct drm_i915_gem_object { struct drm_mm_node *stolen; struct list_head global_list; - struct list_head ring_list[I915_NUM_RINGS]; + struct list_head engine_list[I915_NUM_ENGINES]; /** Used in execbuf to temporarily hold a ref */ struct list_head obj_exec_link; @@ -2108,7 +2115,7 @@ * rendering and so a non-zero seqno), and is not set if it is on * inactive (ready to be unbound) list. */ - unsigned int active:I915_NUM_RINGS; + unsigned int active:I915_NUM_ENGINES; /** * This is set if the object has been written to since last bound @@ -2172,10 +2179,7 @@ struct drm_i915_gem_object { struct scatterlist *sg; int last; } get_page; - - /* prime dma-buf support */ - void *dma_buf_vmapping; - int vmapping_count; + void *mapping; /** Breadcrumb of last rendering to the buffer. * There can only be one writer, but we allow for multiple readers. @@ -2187,7 +2191,7 @@ struct drm_i915_gem_object { * read request. This allows for the CPU to read from an active * buffer by only waiting for the write to complete. * */ - struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS]; + struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES]; struct drm_i915_gem_request *last_write_req; /** Breadcrumb of last fenced GPU access to the buffer. */ struct drm_i915_gem_request *last_fenced_req; @@ -2242,7 +2246,7 @@ struct drm_i915_gem_request { /** On Which ring this request was generated */ struct drm_i915_private *i915; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; /** GEM sequence number associated with the previous request, * when the HWS breadcrumb is equal to this the GPU is processing @@ -2335,9 +2339,9 @@ i915_gem_request_get_seqno(struct drm_i915_gem_request *req) } static inline struct intel_engine_cs * -i915_gem_request_get_ring(struct drm_i915_gem_request *req) +i915_gem_request_get_engine(struct drm_i915_gem_request *req) { - return req ? req->ring : NULL; + return req ?
req->engine : NULL; } static inline struct drm_i915_gem_request * @@ -2351,7 +2355,7 @@ i915_gem_request_reference(struct drm_i915_gem_request *req) static inline void i915_gem_request_unreference(struct drm_i915_gem_request *req) { - WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex)); kref_put(&req->ref, i915_gem_request_free); } @@ -2363,7 +2367,7 @@ i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req) if (!req) return; - dev = req->ring->dev; + dev = req->engine->dev; if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex)) mutex_unlock(&dev->struct_mutex); } @@ -2493,6 +2497,7 @@ struct drm_i915_cmd_table { __p; \ }) #define INTEL_INFO(p) (&__I915__(p)->info) +#define INTEL_GEN(p) (INTEL_INFO(p)->gen) #define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) #define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision) @@ -2611,11 +2616,14 @@ struct drm_i915_cmd_table { #define BLT_RING (1<<BCS) #define VEBOX_RING (1<<VECS) #define BSD2_RING (1<<VCS2) +#define ALL_ENGINES (~0) + #define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) #define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) #define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) #define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) +#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) #define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ __I915__(dev)->ellc_size) #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) @@ -2634,8 +2642,9 @@ struct drm_i915_cmd_table { /* WaRsDisableCoarsePowerGating:skl,bxt */ #define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ - ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \ - IS_SKL_REVID(dev, 0, SKL_REVID_F0))) + IS_SKL_GT3(dev) || \ + IS_SKL_GT4(dev)) + /* * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts * even when in MSI mode. This results in spurious interrupt warnings if the @@ -2695,6 +2704,7 @@ struct drm_i915_cmd_table { #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 +#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) @@ -2726,6 +2736,13 @@ extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); extern int i915_resume_switcheroo(struct drm_device *dev); /* i915_dma.c */ +void __printf(3, 4) +__i915_printk(struct drm_i915_private *dev_priv, const char *level, + const char *fmt, ...); + +#define i915_report_error(dev_priv, fmt, ...) 
\ + __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) + extern int i915_driver_load(struct drm_device *, unsigned long flags); extern int i915_driver_unload(struct drm_device *); extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); @@ -2738,9 +2755,11 @@ extern void i915_driver_postclose(struct drm_device *dev, extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); #endif -extern int intel_gpu_reset(struct drm_device *dev); +extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); extern bool intel_has_gpu_reset(struct drm_device *dev); extern int i915_reset(struct drm_device *dev); +extern int intel_guc_reset(struct drm_i915_private *dev_priv); +extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); @@ -2757,7 +2776,7 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); /* i915_irq.c */ void i915_queue_hangcheck(struct drm_device *dev); __printf(3, 4) -void i915_handle_error(struct drm_device *dev, bool wedged, +void i915_handle_error(struct drm_device *dev, u32 engine_mask, const char *fmt, ...); extern void intel_irq_init(struct drm_i915_private *dev_priv); @@ -2893,6 +2912,7 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load_init(struct drm_device *dev); void i915_gem_load_cleanup(struct drm_device *dev); +void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, @@ -2977,12 +2997,44 @@ static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj) BUG_ON(obj->pages == NULL); obj->pages_pin_count++; } + static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) { BUG_ON(obj->pages_pin_count == 0); obj->pages_pin_count--; } +/** + * i915_gem_object_pin_map - return a contiguous mapping of the entire object + * @obj: the object to map into kernel address space + * + * Calls i915_gem_object_pin_pages() to prevent reaping of the object's + * pages and then returns a contiguous mapping of the backing storage into + * the kernel address space. + * + * The caller must hold the struct_mutex. + * + * Returns the pointer through which to access the backing storage. + */ +void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj); + +/** + * i915_gem_object_unpin_map - releases an earlier mapping + * @obj: the object to unmap + * + * After pinning the object and mapping its pages, once you are finished + * with your access, call i915_gem_object_unpin_map() to release the pin + * upon the mapping. Once the pin count reaches zero, that mapping may be + * removed. + * + * The caller must hold the struct_mutex.
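+ *
+ * A minimal usage sketch (hypothetical caller; "data" and "size" are
+ * assumed here, and struct_mutex must already be held):
+ *
+ *	void *vaddr = i915_gem_object_pin_map(obj);
+ *	if (IS_ERR(vaddr))
+ *		return PTR_ERR(vaddr);
+ *	memcpy(vaddr, data, size);
+ *	i915_gem_object_unpin_map(obj);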
+ */ +static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj) +{ + lockdep_assert_held(&obj->base.dev->struct_mutex); + i915_gem_object_unpin_pages(obj); +} + int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); int i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_engine_cs *to, @@ -3006,25 +3058,29 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2) static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, bool lazy_coherency) { - u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); - return i915_seqno_passed(seqno, req->previous_seqno); + if (!lazy_coherency && req->engine->irq_seqno_barrier) + req->engine->irq_seqno_barrier(req->engine); + return i915_seqno_passed(req->engine->get_seqno(req->engine), + req->previous_seqno); } static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, bool lazy_coherency) { - u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency); - return i915_seqno_passed(seqno, req->seqno); + if (!lazy_coherency && req->engine->irq_seqno_barrier) + req->engine->irq_seqno_barrier(req->engine); + return i915_seqno_passed(req->engine->get_seqno(req->engine), + req->seqno); } int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); struct drm_i915_gem_request * -i915_gem_find_active_request(struct intel_engine_cs *ring); +i915_gem_find_active_request(struct intel_engine_cs *engine); bool i915_gem_retire_requests(struct drm_device *dev); -void i915_gem_retire_requests_ring(struct intel_engine_cs *ring); +void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, bool interruptible); @@ -3059,11 +3115,11 @@ static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv) void i915_gem_reset(struct drm_device *dev); bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); int __must_check i915_gem_init(struct drm_device *dev); -int i915_gem_init_rings(struct drm_device *dev); +int i915_gem_init_engines(struct drm_device *dev); int __must_check i915_gem_init_hw(struct drm_device *dev); int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice); void i915_gem_init_swizzling(struct drm_device *dev); -void i915_gem_cleanup_ringbuffer(struct drm_device *dev); +void i915_gem_cleanup_engines(struct drm_device *dev); int __must_check i915_gpu_idle(struct drm_device *dev); int __must_check i915_gem_suspend(struct drm_device *dev); void __i915_add_request(struct drm_i915_gem_request *req, @@ -3154,13 +3210,9 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); /* Some GGTT VM helpers */ -#define i915_obj_to_ggtt(obj) \ - (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base) - static inline struct i915_hw_ppgtt * i915_vm_to_ppgtt(struct i915_address_space *vm) { - WARN_ON(i915_is_ggtt(vm)); return container_of(vm, struct i915_hw_ppgtt, base); } @@ -3173,7 +3225,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) static inline unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) { - return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; + + return i915_gem_obj_size(obj, &ggtt->base); } static inline int __must_check @@ -3181,7 +3236,10 @@ 
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment, unsigned flags) { - return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj), + struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; + + return i915_gem_object_pin(obj, &ggtt->base, alignment, flags | PIN_GLOBAL); } @@ -3296,6 +3354,7 @@ unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, #define I915_SHRINK_UNBOUND 0x2 #define I915_SHRINK_BOUND 0x4 #define I915_SHRINK_ACTIVE 0x8 +#define I915_SHRINK_VMAPS 0x10 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv); @@ -3342,7 +3401,7 @@ static inline void i915_error_state_buf_release( { kfree(eb->buf); } -void i915_capture_error_state(struct drm_device *dev, bool wedge, +void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, const char *error_msg); void i915_error_state_get(struct drm_device *dev, struct i915_error_state_file_priv *error_priv); @@ -3354,10 +3413,10 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* i915_cmd_parser.c */ int i915_cmd_parser_get_version(void); -int i915_cmd_parser_init_ring(struct intel_engine_cs *ring); -void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring); -bool i915_needs_cmd_parser(struct intel_engine_cs *ring); -int i915_parse_cmds(struct intel_engine_cs *ring, +int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); +void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); +bool i915_needs_cmd_parser(struct intel_engine_cs *engine); +int i915_parse_cmds(struct intel_engine_cs *engine, struct drm_i915_gem_object *batch_obj, struct drm_i915_gem_object *shadow_batch_obj, u32 batch_start_offset, @@ -3391,6 +3450,12 @@ extern void intel_i2c_reset(struct drm_device *dev); /* intel_bios.c */ int intel_bios_init(struct drm_i915_private *dev_priv); bool intel_bios_is_valid_vbt(const void *buf, size_t size); +bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); +bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); +bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); +bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); +bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, + enum port port); /* intel_opregion.c */ #ifdef CONFIG_ACPI @@ -3402,6 +3467,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable); extern int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state); +extern int intel_opregion_get_panel_type(struct drm_device *dev); #else static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } static inline void intel_opregion_init(struct drm_device *dev) { return; } @@ -3417,6 +3483,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) { return 0; } +static inline int intel_opregion_get_panel_type(struct drm_device *dev) +{ + return -ENODEV; +} #endif /* intel_acpi.c */ @@ -3628,11 +3698,11 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) } } -static inline void i915_trace_irq_get(struct intel_engine_cs *ring, +static inline void i915_trace_irq_get(struct intel_engine_cs *engine, struct drm_i915_gem_request *req) { - if (ring->trace_irq_req == NULL && ring->irq_get(ring)) - 
i915_gem_request_assign(&ring->trace_irq_req, req); + if (engine->trace_irq_req == NULL && engine->irq_get(engine)) + i915_gem_request_assign(&engine->trace_irq_req, req); } #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3d31d3ac589e..b37ffea8b458 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -130,9 +130,9 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_get_aperture *args = data; - struct i915_gtt *ggtt = &dev_priv->gtt; struct i915_vma *vma; size_t pinned; @@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, pinned += vma->node.size; mutex_unlock(&dev->struct_mutex); - args->aper_size = dev_priv->gtt.base.total; + args->aper_size = ggtt->base.total; args->aper_available_size = args->aper_size - pinned; return 0; @@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) drm_clflush_virt_range(vaddr, PAGE_SIZE); kunmap_atomic(src); - page_cache_release(page); + put_page(page); vaddr += PAGE_SIZE; } @@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj) set_page_dirty(page); if (obj->madv == I915_MADV_WILLNEED) mark_page_accessed(page); - page_cache_release(page); + put_page(page); vaddr += PAGE_SIZE; } obj->dirty = 0; @@ -765,7 +765,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_i915_gem_pwrite *args, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; ssize_t remain; loff_t offset, page_base; char __user *user_data; @@ -807,7 +808,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, * source page isn't available. Return the error and we'll * retry in the slow path. */ - if (fast_user_write(dev_priv->gtt.mappable, page_base, + if (fast_user_write(ggtt->mappable, page_base, page_offset, user_data, page_length)) { ret = -EFAULT; goto out_flush; @@ -1141,9 +1142,9 @@ static void fake_irq(unsigned long data) } static bool missed_irq(struct drm_i915_private *dev_priv, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { - return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings); + return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings); } static unsigned long local_clock_us(unsigned *cpu) @@ -1193,7 +1194,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state) * takes to sleep on a request, on the order of a microsecond. */ - if (req->ring->irq_refcount) + if (req->engine->irq_refcount) return -EBUSY; /* Only spin if we know the GPU is processing this request */ @@ -1243,11 +1244,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req, s64 *timeout, struct intel_rps_client *rps) { - struct intel_engine_cs *ring = i915_gem_request_get_ring(req); - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = i915_gem_request_get_engine(req); + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; const bool irq_test_in_progress = - ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring); + ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine); int state = interruptible ? 
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; DEFINE_WAIT(wait); unsigned long timeout_expire; @@ -1288,7 +1289,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, if (ret == 0) goto out; - if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) { + if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) { ret = -ENODEV; goto out; } @@ -1296,7 +1297,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req, for (;;) { struct timer_list timer; - prepare_to_wait(&ring->irq_queue, &wait, state); + prepare_to_wait(&engine->irq_queue, &wait, state); /* We need to check whether any gpu reset happened in between * the caller grabbing the seqno and now ... */ @@ -1325,11 +1326,11 @@ int __i915_wait_request(struct drm_i915_gem_request *req, } timer.function = NULL; - if (timeout || missed_irq(dev_priv, ring)) { + if (timeout || missed_irq(dev_priv, engine)) { unsigned long expire; setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); - expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire; + expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire; mod_timer(&timer, expire); } @@ -1341,9 +1342,9 @@ int __i915_wait_request(struct drm_i915_gem_request *req, } } if (!irq_test_in_progress) - ring->irq_put(ring); + engine->irq_put(engine); - finish_wait(&ring->irq_queue, &wait); + finish_wait(&engine->irq_queue, &wait); out: trace_i915_gem_request_wait_end(req); @@ -1370,7 +1371,6 @@ out: int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, struct drm_file *file) { - struct drm_i915_private *dev_private; struct drm_i915_file_private *file_priv; WARN_ON(!req || !file || req->file_priv); @@ -1381,7 +1381,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, if (req->file_priv) return -EINVAL; - dev_private = req->ring->dev->dev_private; file_priv = file->driver_priv; spin_lock(&file_priv->mm.lock); @@ -1434,7 +1433,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request) static void __i915_gem_request_retire__upto(struct drm_i915_gem_request *req) { - struct intel_engine_cs *engine = req->ring; + struct intel_engine_cs *engine = req->engine; struct drm_i915_gem_request *tmp; lockdep_assert_held(&engine->dev->struct_mutex); @@ -1466,7 +1465,7 @@ i915_wait_request(struct drm_i915_gem_request *req) BUG_ON(req == NULL); - dev = req->ring->dev; + dev = req->engine->dev; dev_priv = dev->dev_private; interruptible = dev_priv->mm.interruptible; @@ -1505,14 +1504,14 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, if (ret) return ret; - i = obj->last_write_req->ring->id; + i = obj->last_write_req->engine->id; if (obj->last_read_req[i] == obj->last_write_req) i915_gem_object_retire__read(obj, i); else i915_gem_object_retire__write(obj); } } else { - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = 0; i < I915_NUM_ENGINES; i++) { if (obj->last_read_req[i] == NULL) continue; @@ -1532,7 +1531,7 @@ static void i915_gem_object_retire_request(struct drm_i915_gem_object *obj, struct drm_i915_gem_request *req) { - int ring = req->ring->id; + int ring = req->engine->id; if (obj->last_read_req[ring] == req) i915_gem_object_retire__read(obj, ring); @@ -1552,7 +1551,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_request *requests[I915_NUM_RINGS]; + struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; unsigned reset_counter; int ret, i, n = 
0; @@ -1577,7 +1576,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj, requests[n++] = i915_gem_request_reference(req); } else { - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = 0; i < I915_NUM_ENGINES; i++) { struct drm_i915_gem_request *req; req = obj->last_read_req[i]; @@ -1792,7 +1791,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data); struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt_view view = i915_ggtt_view_normal; pgoff_t page_offset; unsigned long pfn; @@ -1827,7 +1827,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } /* Use a partial view if the object is bigger than the aperture. */ - if (obj->base.size >= dev_priv->gtt.mappable_end && + if (obj->base.size >= ggtt->mappable_end && obj->tiling_mode == I915_TILING_NONE) { static const unsigned int chunk_size = 256; // 1 MiB @@ -1855,7 +1855,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) goto unpin; /* Finally, remap it using the new GTT offset */ - pfn = dev_priv->gtt.mappable_base + + pfn = ggtt->mappable_base + i915_gem_obj_ggtt_offset_view(obj, &view); pfn >>= PAGE_SHIFT; @@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) if (obj->madv == I915_MADV_WILLNEED) mark_page_accessed(page); - page_cache_release(page); + put_page(page); } obj->dirty = 0; @@ -2232,6 +2232,14 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) * lists early. */ list_del(&obj->global_list); + if (obj->mapping) { + if (is_vmalloc_addr(obj->mapping)) + vunmap(obj->mapping); + else + kunmap(kmap_to_page(obj->mapping)); + obj->mapping = NULL; + } + ops->put_pages(obj); obj->pages = NULL; @@ -2346,7 +2354,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) err_pages: sg_mark_end(sg); for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) - page_cache_release(sg_page_iter_page(&sg_iter)); + put_page(sg_page_iter_page(&sg_iter)); sg_free_table(st); kfree(st); @@ -2400,21 +2408,64 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj) return 0; } +void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) +{ + int ret; + + lockdep_assert_held(&obj->base.dev->struct_mutex); + + ret = i915_gem_object_get_pages(obj); + if (ret) + return ERR_PTR(ret); + + i915_gem_object_pin_pages(obj); + + if (obj->mapping == NULL) { + struct page **pages; + + pages = NULL; + if (obj->base.size == PAGE_SIZE) + obj->mapping = kmap(sg_page(obj->pages->sgl)); + else + pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT, + sizeof(*pages), + GFP_TEMPORARY); + if (pages != NULL) { + struct sg_page_iter sg_iter; + int n; + + n = 0; + for_each_sg_page(obj->pages->sgl, &sg_iter, + obj->pages->nents, 0) + pages[n++] = sg_page_iter_page(&sg_iter); + + obj->mapping = vmap(pages, n, 0, PAGE_KERNEL); + drm_free_large(pages); + } + if (obj->mapping == NULL) { + i915_gem_object_unpin_pages(obj); + return ERR_PTR(-ENOMEM); + } + } + + return obj->mapping; +} + void i915_vma_move_to_active(struct i915_vma *vma, struct drm_i915_gem_request *req) { struct drm_i915_gem_object *obj = vma->obj; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; - ring = i915_gem_request_get_ring(req); + engine = i915_gem_request_get_engine(req); /* Add a reference if we're newly entering the active list. 
*/ if (obj->active == 0) drm_gem_object_reference(&obj->base); - obj->active |= intel_ring_flag(ring); + obj->active |= intel_engine_flag(engine); - list_move_tail(&obj->ring_list[ring->id], &ring->active_list); - i915_gem_request_assign(&obj->last_read_req[ring->id], req); + list_move_tail(&obj->engine_list[engine->id], &engine->active_list); + i915_gem_request_assign(&obj->last_read_req[engine->id], req); list_move_tail(&vma->vm_link, &vma->vm->active_list); } @@ -2423,7 +2474,7 @@ static void i915_gem_object_retire__write(struct drm_i915_gem_object *obj) { RQ_BUG_ON(obj->last_write_req == NULL); - RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring))); + RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine))); i915_gem_request_assign(&obj->last_write_req, NULL); intel_fb_obj_flush(obj, true, ORIGIN_CS); @@ -2437,10 +2488,10 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring) { RQ_BUG_ON(obj->last_read_req[ring] == NULL); RQ_BUG_ON(!(obj->active & (1 << ring))); - list_del_init(&obj->ring_list[ring]); + list_del_init(&obj->engine_list[ring]); i915_gem_request_assign(&obj->last_read_req[ring], NULL); - if (obj->last_write_req && obj->last_write_req->ring->id == ring) + if (obj->last_write_req && obj->last_write_req->engine->id == ring) i915_gem_object_retire__write(obj); obj->active &= ~(1 << ring); @@ -2467,24 +2518,20 @@ static int i915_gem_init_seqno(struct drm_device *dev, u32 seqno) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int ret, i, j; + struct intel_engine_cs *engine; + int ret; /* Carefully retire all requests without writing to the rings */ - for_each_ring(ring, dev_priv, i) { - ret = intel_ring_idle(ring); + for_each_engine(engine, dev_priv) { + ret = intel_engine_idle(engine); if (ret) return ret; } i915_gem_retire_requests(dev); /* Finally reset hw state */ - for_each_ring(ring, dev_priv, i) { - intel_ring_init_seqno(ring, seqno); - - for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++) - ring->semaphore.sync_seqno[j] = 0; - } + for_each_engine(engine, dev_priv) + intel_ring_init_seqno(engine, seqno); return 0; } @@ -2542,7 +2589,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, struct drm_i915_gem_object *obj, bool flush_caches) { - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct drm_i915_private *dev_priv; struct intel_ringbuffer *ringbuf; u32 request_start; @@ -2551,8 +2598,8 @@ void __i915_add_request(struct drm_i915_gem_request *request, if (WARN_ON(request == NULL)) return; - ring = request->ring; - dev_priv = ring->dev->dev_private; + engine = request->engine; + dev_priv = request->i915; ringbuf = request->ringbuf; /* @@ -2579,6 +2626,28 @@ void __i915_add_request(struct drm_i915_gem_request *request, WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret); } + trace_i915_gem_request_add(request); + + request->head = request_start; + + /* Whilst this request exists, batch_obj will be on the + * active_list, and so will hold the active reference. Only when this + * request is retired will the batch_obj be moved onto the + * inactive_list and lose its active reference. Hence we do not need + * to explicitly hold another reference here. + */ + request->batch_obj = obj; + + /* Seal the request and mark it as pending execution. Note that + * we may inspect this state, without holding any locks, during + * hangcheck.
Hence we apply the barrier to ensure that we do not + * see a more recent value in the hws than we are tracking. + */ + request->emitted_jiffies = jiffies; + request->previous_seqno = engine->last_submitted_seqno; + smp_store_mb(engine->last_submitted_seqno, request->seqno); + list_add_tail(&request->list, &engine->request_list); + /* Record the position of the start of the request so that * should we detect the updated seqno part-way through the * GPU processing the request, we never over-estimate the @@ -2587,33 +2656,16 @@ void __i915_add_request(struct drm_i915_gem_request *request, request->postfix = intel_ring_get_tail(ringbuf); if (i915.enable_execlists) - ret = ring->emit_request(request); + ret = engine->emit_request(request); else { - ret = ring->add_request(request); + ret = engine->add_request(request); request->tail = intel_ring_get_tail(ringbuf); } /* Not allowed to fail! */ WARN(ret, "emit|add_request failed: %d!\n", ret); - request->head = request_start; - - /* Whilst this request exists, batch_obj will be on the - * active_list, and so will hold the active reference. Only when this - * request is retired will the the batch_obj be moved onto the - * inactive_list and lose its active reference. Hence we do not need - * to explicitly hold another reference here. - */ - request->batch_obj = obj; - - request->emitted_jiffies = jiffies; - request->previous_seqno = ring->last_submitted_seqno; - ring->last_submitted_seqno = request->seqno; - list_add_tail(&request->list, &ring->request_list); - - trace_i915_gem_request_add(request); - - i915_queue_hangcheck(ring->dev); + i915_queue_hangcheck(engine->dev); queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, @@ -2680,7 +2732,7 @@ void i915_gem_request_free(struct kref *req_ref) if (ctx) { if (i915.enable_execlists && ctx != req->i915->kernel_context) - intel_lr_context_unpin(ctx, req->ring); + intel_lr_context_unpin(ctx, req->engine); i915_gem_context_unreference(ctx); } @@ -2689,11 +2741,11 @@ void i915_gem_request_free(struct kref *req_ref) } static inline int -__i915_gem_request_alloc(struct intel_engine_cs *ring, +__i915_gem_request_alloc(struct intel_engine_cs *engine, struct intel_context *ctx, struct drm_i915_gem_request **req_out) { - struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct drm_i915_private *dev_priv = to_i915(engine->dev); struct drm_i915_gem_request *req; int ret; @@ -2706,13 +2758,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *ring, if (req == NULL) return -ENOMEM; - ret = i915_gem_get_seqno(ring->dev, &req->seqno); + ret = i915_gem_get_seqno(engine->dev, &req->seqno); if (ret) goto err; kref_init(&req->ref); req->i915 = dev_priv; - req->ring = ring; + req->engine = engine; req->ctx = ctx; i915_gem_context_reference(req->ctx); @@ -2787,11 +2839,11 @@ void i915_gem_request_cancel(struct drm_i915_gem_request *req) } struct drm_i915_gem_request * -i915_gem_find_active_request(struct intel_engine_cs *ring) +i915_gem_find_active_request(struct intel_engine_cs *engine) { struct drm_i915_gem_request *request; - list_for_each_entry(request, &ring->request_list, list) { + list_for_each_entry(request, &engine->request_list, list) { if (i915_gem_request_completed(request, false)) continue; @@ -2801,38 +2853,38 @@ i915_gem_find_active_request(struct intel_engine_cs *ring) return NULL; } -static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv, - struct intel_engine_cs *ring) +static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv, + struct intel_engine_cs 
*engine) { struct drm_i915_gem_request *request; bool ring_hung; - request = i915_gem_find_active_request(ring); + request = i915_gem_find_active_request(engine); if (request == NULL) return; - ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; + ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; i915_set_reset_status(dev_priv, request->ctx, ring_hung); - list_for_each_entry_continue(request, &ring->request_list, list) + list_for_each_entry_continue(request, &engine->request_list, list) i915_set_reset_status(dev_priv, request->ctx, false); } -static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, - struct intel_engine_cs *ring) +static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, + struct intel_engine_cs *engine) { struct intel_ringbuffer *buffer; - while (!list_empty(&ring->active_list)) { + while (!list_empty(&engine->active_list)) { struct drm_i915_gem_object *obj; - obj = list_first_entry(&ring->active_list, + obj = list_first_entry(&engine->active_list, struct drm_i915_gem_object, - ring_list[ring->id]); + engine_list[engine->id]); - i915_gem_object_retire__read(obj, ring->id); + i915_gem_object_retire__read(obj, engine->id); } /* @@ -2842,14 +2894,16 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, */ if (i915.enable_execlists) { - spin_lock_irq(&ring->execlist_lock); + /* Ensure irq handler finishes or is cancelled. */ + tasklet_kill(&engine->irq_tasklet); + spin_lock_bh(&engine->execlist_lock); /* list_splice_tail_init checks for empty lists */ - list_splice_tail_init(&ring->execlist_queue, - &ring->execlist_retired_req_list); + list_splice_tail_init(&engine->execlist_queue, + &engine->execlist_retired_req_list); + spin_unlock_bh(&engine->execlist_lock); - spin_unlock_irq(&ring->execlist_lock); - intel_execlists_retire_requests(ring); + intel_execlists_retire_requests(engine); } /* @@ -2859,10 +2913,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, * implicit references on things like e.g. ppgtt address spaces through * the request. */ - while (!list_empty(&ring->request_list)) { + while (!list_empty(&engine->request_list)) { struct drm_i915_gem_request *request; - request = list_first_entry(&ring->request_list, + request = list_first_entry(&engine->request_list, struct drm_i915_gem_request, list); @@ -2876,28 +2930,29 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, * upon reset is less than when we start. Do one more pass over * all the ringbuffers to reset last_retired_head. */ - list_for_each_entry(buffer, &ring->buffers, link) { + list_for_each_entry(buffer, &engine->buffers, link) { buffer->last_retired_head = buffer->tail; intel_ring_update_space(buffer); } + + intel_ring_init_seqno(engine, engine->last_submitted_seqno); } void i915_gem_reset(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; /* * Before we free the objects from the requests, we need to inspect * them for finding the guilty party. As the requests only borrow * their reference to the objects, the inspection must be done first. 
*/ - for_each_ring(ring, dev_priv, i) - i915_gem_reset_ring_status(dev_priv, ring); + for_each_engine(engine, dev_priv) + i915_gem_reset_engine_status(dev_priv, engine); - for_each_ring(ring, dev_priv, i) - i915_gem_reset_ring_cleanup(dev_priv, ring); + for_each_engine(engine, dev_priv) + i915_gem_reset_engine_cleanup(dev_priv, engine); i915_gem_context_reset(dev); @@ -2910,19 +2965,19 @@ void i915_gem_reset(struct drm_device *dev) * This function clears the request list as sequence numbers are passed. */ void -i915_gem_retire_requests_ring(struct intel_engine_cs *ring) +i915_gem_retire_requests_ring(struct intel_engine_cs *engine) { - WARN_ON(i915_verify_lists(ring->dev)); + WARN_ON(i915_verify_lists(engine->dev)); /* Retire requests first as we use it above for the early return. * If we retire requests last, we may use a later seqno and so clear * the requests lists without clearing the active list, leading to * confusion. */ - while (!list_empty(&ring->request_list)) { + while (!list_empty(&engine->request_list)) { struct drm_i915_gem_request *request; - request = list_first_entry(&ring->request_list, + request = list_first_entry(&engine->request_list, struct drm_i915_gem_request, list); @@ -2936,45 +2991,44 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) * by the ringbuffer to the flushing/inactive lists as appropriate, * before we free the context associated with the requests. */ - while (!list_empty(&ring->active_list)) { + while (!list_empty(&engine->active_list)) { struct drm_i915_gem_object *obj; - obj = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list[ring->id]); + obj = list_first_entry(&engine->active_list, + struct drm_i915_gem_object, + engine_list[engine->id]); - if (!list_empty(&obj->last_read_req[ring->id]->list)) + if (!list_empty(&obj->last_read_req[engine->id]->list)) break; - i915_gem_object_retire__read(obj, ring->id); + i915_gem_object_retire__read(obj, engine->id); } - if (unlikely(ring->trace_irq_req && - i915_gem_request_completed(ring->trace_irq_req, true))) { - ring->irq_put(ring); - i915_gem_request_assign(&ring->trace_irq_req, NULL); + if (unlikely(engine->trace_irq_req && + i915_gem_request_completed(engine->trace_irq_req, true))) { + engine->irq_put(engine); + i915_gem_request_assign(&engine->trace_irq_req, NULL); } - WARN_ON(i915_verify_lists(ring->dev)); + WARN_ON(i915_verify_lists(engine->dev)); } bool i915_gem_retire_requests(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; bool idle = true; - int i; - for_each_ring(ring, dev_priv, i) { - i915_gem_retire_requests_ring(ring); - idle &= list_empty(&ring->request_list); + for_each_engine(engine, dev_priv) { + i915_gem_retire_requests_ring(engine); + idle &= list_empty(&engine->request_list); if (i915.enable_execlists) { - spin_lock_irq(&ring->execlist_lock); - idle &= list_empty(&ring->execlist_queue); - spin_unlock_irq(&ring->execlist_lock); + spin_lock_bh(&engine->execlist_lock); + idle &= list_empty(&engine->execlist_queue); + spin_unlock_bh(&engine->execlist_lock); - intel_execlists_retire_requests(ring); + intel_execlists_retire_requests(engine); } } @@ -3011,25 +3065,21 @@ i915_gem_idle_work_handler(struct work_struct *work) struct drm_i915_private *dev_priv = container_of(work, typeof(*dev_priv), mm.idle_work.work); struct drm_device *dev = dev_priv->dev; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, 
i) - if (!list_empty(&ring->request_list)) + for_each_engine(engine, dev_priv) + if (!list_empty(&engine->request_list)) return; /* we probably should sync with hangcheck here, using cancel_work_sync. - * Also locking seems to be fubar here, ring->request_list is protected + * Also locking seems to be fubar here, engine->request_list is protected * by dev->struct_mutex. */ intel_mark_idle(dev); if (mutex_trylock(&dev->struct_mutex)) { - struct intel_engine_cs *ring; - int i; - - for_each_ring(ring, dev_priv, i) - i915_gem_batch_pool_fini(&ring->batch_pool); + for_each_engine(engine, dev_priv) + i915_gem_batch_pool_fini(&engine->batch_pool); mutex_unlock(&dev->struct_mutex); } @@ -3048,7 +3098,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj) if (!obj->active) return 0; - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = 0; i < I915_NUM_ENGINES; i++) { struct drm_i915_gem_request *req; req = obj->last_read_req[i]; @@ -3096,7 +3146,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_wait *args = data; struct drm_i915_gem_object *obj; - struct drm_i915_gem_request *req[I915_NUM_RINGS]; + struct drm_i915_gem_request *req[I915_NUM_ENGINES]; unsigned reset_counter; int i, n = 0; int ret; @@ -3133,7 +3183,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) drm_gem_object_unreference(&obj->base); reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = 0; i < I915_NUM_ENGINES; i++) { if (obj->last_read_req[i] == NULL) continue; @@ -3166,7 +3216,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj, struct intel_engine_cs *from; int ret; - from = i915_gem_request_get_ring(from_req); + from = i915_gem_request_get_engine(from_req); if (to == from) return 0; @@ -3260,7 +3310,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, struct drm_i915_gem_request **to_req) { const bool readonly = obj->base.pending_write_domain == 0; - struct drm_i915_gem_request *req[I915_NUM_RINGS]; + struct drm_i915_gem_request *req[I915_NUM_ENGINES]; int ret, i, n; if (!obj->active) @@ -3274,7 +3324,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj, if (obj->last_write_req) req[n++] = obj->last_write_req; } else { - for (i = 0; i < I915_NUM_RINGS; i++) + for (i = 0; i < I915_NUM_ENGINES; i++) if (obj->last_read_req[i]) req[n++] = obj->last_read_req[i]; } @@ -3391,15 +3441,15 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma) int i915_gpu_idle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int ret, i; + struct intel_engine_cs *engine; + int ret; /* Flush everything onto the inactive list. 
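 * In legacy (non-execlists) mode, idling means queueing one last
 * request per engine to switch back to the default context and then
 * draining it, roughly (the i915_switch_context() step is assumed from
 * the elided hunk):
 *
 *	req = i915_gem_request_alloc(engine, NULL);
 *	i915_switch_context(req);
 *	i915_add_request_no_flush(req);
 *	intel_engine_idle(engine);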
*/ - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { if (!i915.enable_execlists) { struct drm_i915_gem_request *req; - req = i915_gem_request_alloc(ring, NULL); + req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) return PTR_ERR(req); @@ -3412,7 +3462,7 @@ int i915_gpu_idle(struct drm_device *dev) i915_add_request_no_flush(req); } - ret = intel_ring_idle(ring); + ret = intel_engine_idle(engine); if (ret) return ret; } @@ -3466,7 +3516,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, uint64_t flags) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; u32 fence_alignment, unfenced_alignment; u32 search_flag, alloc_flag; u64 start, end; @@ -3513,7 +3564,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; end = vm->total; if (flags & PIN_MAPPABLE) - end = min_t(u64, end, dev_priv->gtt.mappable_end); + end = min_t(u64, end, ggtt->mappable_end); if (flags & PIN_ZONE_4G) end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE); @@ -3720,6 +3771,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj) int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) { + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; uint32_t old_write_domain, old_read_domains; struct i915_vma *vma; int ret; @@ -3774,7 +3828,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) vma = i915_gem_obj_to_ggtt(obj); if (vma && drm_mm_node_allocated(&vma->node) && !obj->active) list_move_tail(&vma->vm_link, - &to_i915(obj->base.dev)->gtt.base.inactive_list); + &ggtt->base.inactive_list); return 0; } @@ -3949,7 +4003,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, * cacheline, whereas normally such cachelines would get * invalidated. */ - if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) + if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) return -ENODEV; level = I915_CACHE_LLC; @@ -4211,7 +4265,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) (vma->node.start & (fence_alignment - 1)) == 0); mappable = (vma->node.start + fence_size <= - to_i915(obj->base.dev)->gtt.mappable_end); + to_i915(obj->base.dev)->ggtt.mappable_end); obj->map_and_fenceable = mappable && fenceable; } @@ -4243,9 +4297,6 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj, vma = ggtt_view ? 
i915_gem_obj_to_ggtt_view(obj, ggtt_view) : i915_gem_obj_to_vma(obj, vm); - if (IS_ERR(vma)) - return PTR_ERR(vma); - if (vma) { if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; @@ -4308,10 +4359,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, uint32_t alignment, uint64_t flags) { - if (WARN_ONCE(!view, "no view specified")) - return -EINVAL; + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; + + BUG_ON(!view); - return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view, + return i915_gem_object_do_pin(obj, &ggtt->base, view, alignment, flags | PIN_GLOBAL); } @@ -4359,15 +4413,15 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, if (obj->active) { int i; - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = 0; i < I915_NUM_ENGINES; i++) { struct drm_i915_gem_request *req; req = obj->last_read_req[i]; if (req) - args->busy |= 1 << (16 + req->ring->exec_id); + args->busy |= 1 << (16 + req->engine->exec_id); } if (obj->last_write_req) - args->busy |= obj->last_write_req->ring->exec_id; + args->busy |= obj->last_write_req->engine->exec_id; } unref: @@ -4447,8 +4501,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, int i; INIT_LIST_HEAD(&obj->global_list); - for (i = 0; i < I915_NUM_RINGS; i++) - INIT_LIST_HEAD(&obj->ring_list[i]); + for (i = 0; i < I915_NUM_ENGINES; i++) + INIT_LIST_HEAD(&obj->engine_list[i]); INIT_LIST_HEAD(&obj->obj_exec_link); INIT_LIST_HEAD(&obj->vma_list); INIT_LIST_HEAD(&obj->batch_pool_link); @@ -4623,14 +4677,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { - struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; - if (WARN_ONCE(!view, "no view specified")) - return ERR_PTR(-EINVAL); + BUG_ON(!view); list_for_each_entry(vma, &obj->vma_list, obj_link) - if (vma->vm == ggtt && + if (vma->vm == &ggtt->base && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma; return NULL; @@ -4653,14 +4708,13 @@ void i915_gem_vma_destroy(struct i915_vma *vma) } static void -i915_gem_stop_ringbuffers(struct drm_device *dev) +i915_gem_stop_engines(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, i) - dev_priv->gt.stop_ring(ring); + for_each_engine(engine, dev_priv) + dev_priv->gt.stop_engine(engine); } int @@ -4676,7 +4730,7 @@ i915_gem_suspend(struct drm_device *dev) i915_gem_retire_requests(dev); - i915_gem_stop_ringbuffers(dev); + i915_gem_stop_engines(dev); mutex_unlock(&dev->struct_mutex); cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); @@ -4697,8 +4751,8 @@ err: int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) { - struct intel_engine_cs *ring = req->ring; - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = req->engine; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; u32 *remap_info = dev_priv->l3_parity.remap_info[slice]; int i, ret; @@ -4716,12 +4770,12 @@ int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice) * at initialization time. 
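 * The loop below emits one LRI triplet per remap dword, so the ring
 * stream ends up looking roughly like:
 *
 *	MI_LOAD_REGISTER_IMM(1), GEN7_L3LOG(slice, 0), remap_info[0]
 *	MI_LOAD_REGISTER_IMM(1), GEN7_L3LOG(slice, 1), remap_info[1]
 *	...	(GEN7_L3LOG_SIZE / 4 triplets in total)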
*/ for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) { - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i)); - intel_ring_emit(ring, remap_info[i]); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i)); + intel_ring_emit(engine, remap_info[i]); } - intel_ring_advance(ring); + intel_ring_advance(engine); return ret; } @@ -4778,7 +4832,7 @@ static void init_unused_rings(struct drm_device *dev) } } -int i915_gem_init_rings(struct drm_device *dev) +int i915_gem_init_engines(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -4814,13 +4868,13 @@ int i915_gem_init_rings(struct drm_device *dev) return 0; cleanup_vebox_ring: - intel_cleanup_ring_buffer(&dev_priv->ring[VECS]); + intel_cleanup_engine(&dev_priv->engine[VECS]); cleanup_blt_ring: - intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); + intel_cleanup_engine(&dev_priv->engine[BCS]); cleanup_bsd_ring: - intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); + intel_cleanup_engine(&dev_priv->engine[VCS]); cleanup_render_ring: - intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); + intel_cleanup_engine(&dev_priv->engine[RCS]); return ret; } @@ -4829,8 +4883,8 @@ int i915_gem_init_hw(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int ret, i, j; + struct intel_engine_cs *engine; + int ret, j; if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; @@ -4876,8 +4930,8 @@ i915_gem_init_hw(struct drm_device *dev) } /* Need to do basic initialisation of all rings first: */ - for_each_ring(ring, dev_priv, i) { - ret = ring->init_hw(ring); + for_each_engine(engine, dev_priv) { + ret = engine->init_hw(engine); if (ret) goto out; } @@ -4901,34 +4955,36 @@ i915_gem_init_hw(struct drm_device *dev) goto out; /* Now it is safe to go back round and do everything else: */ - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { struct drm_i915_gem_request *req; - req = i915_gem_request_alloc(ring, NULL); + req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) { ret = PTR_ERR(req); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); goto out; } - if (ring->id == RCS) { + if (engine->id == RCS) { for (j = 0; j < NUM_L3_SLICES(dev); j++) i915_gem_l3_remap(req, j); } ret = i915_ppgtt_init_ring(req); if (ret && ret != -EIO) { - DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret); + DRM_ERROR("PPGTT enable %s failed %d\n", + engine->name, ret); i915_gem_request_cancel(req); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); goto out; } ret = i915_gem_context_enable(req); if (ret && ret != -EIO) { - DRM_ERROR("Context enable ring #%d failed %d\n", i, ret); + DRM_ERROR("Context enable %s failed %d\n", + engine->name, ret); i915_gem_request_cancel(req); - i915_gem_cleanup_ringbuffer(dev); + i915_gem_cleanup_engines(dev); goto out; } @@ -4952,14 +5008,14 @@ int i915_gem_init(struct drm_device *dev) if (!i915.enable_execlists) { dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission; - dev_priv->gt.init_rings = i915_gem_init_rings; - dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer; - dev_priv->gt.stop_ring = intel_stop_ring_buffer; + dev_priv->gt.init_engines = i915_gem_init_engines; + dev_priv->gt.cleanup_engine = intel_cleanup_engine; + dev_priv->gt.stop_engine = intel_stop_engine; } else { dev_priv->gt.execbuf_submit = intel_execlists_submission; - dev_priv->gt.init_rings = intel_logical_rings_init; 
- dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup; - dev_priv->gt.stop_ring = intel_logical_ring_stop; + dev_priv->gt.init_engines = intel_logical_rings_init; + dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup; + dev_priv->gt.stop_engine = intel_logical_ring_stop; } /* This is just a security blanket to placate dragons. @@ -4974,13 +5030,13 @@ int i915_gem_init(struct drm_device *dev) if (ret) goto out_unlock; - i915_gem_init_global_gtt(dev); + i915_gem_init_ggtt(dev); ret = i915_gem_context_init(dev); if (ret) goto out_unlock; - ret = dev_priv->gt.init_rings(dev); + ret = dev_priv->gt.init_engines(dev); if (ret) goto out_unlock; @@ -5003,29 +5059,52 @@ out_unlock: } void -i915_gem_cleanup_ringbuffer(struct drm_device *dev) +i915_gem_cleanup_engines(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, i) - dev_priv->gt.cleanup_ring(ring); + for_each_engine(engine, dev_priv) + dev_priv->gt.cleanup_engine(engine); - if (i915.enable_execlists) - /* - * Neither the BIOS, ourselves or any other kernel - * expects the system to be in execlists mode on startup, - * so we need to reset the GPU back to legacy mode. - */ - intel_gpu_reset(dev); + if (i915.enable_execlists) + /* + * Neither the BIOS, ourselves or any other kernel + * expects the system to be in execlists mode on startup, + * so we need to reset the GPU back to legacy mode. + */ + intel_gpu_reset(dev, ALL_ENGINES); } static void -init_ring_lists(struct intel_engine_cs *ring) +init_engine_lists(struct intel_engine_cs *engine) +{ + INIT_LIST_HEAD(&engine->active_list); + INIT_LIST_HEAD(&engine->request_list); +} + +void +i915_gem_load_init_fences(struct drm_i915_private *dev_priv) { - INIT_LIST_HEAD(&ring->active_list); - INIT_LIST_HEAD(&ring->request_list); + struct drm_device *dev = dev_priv->dev; + + if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && + !IS_CHERRYVIEW(dev_priv)) + dev_priv->num_fence_regs = 32; + else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) || + IS_I945GM(dev_priv) || IS_G33(dev_priv)) + dev_priv->num_fence_regs = 16; + else + dev_priv->num_fence_regs = 8; + + if (intel_vgpu_active(dev)) + dev_priv->num_fence_regs = + I915_READ(vgtif_reg(avail_rs.fence_num)); + + /* Initialize fence registers to zero */ + i915_gem_restore_fences(dev); + + i915_gem_detect_bit_6_swizzle(dev); } void @@ -5055,8 +5134,8 @@ i915_gem_load_init(struct drm_device *dev) INIT_LIST_HEAD(&dev_priv->mm.unbound_list); INIT_LIST_HEAD(&dev_priv->mm.bound_list); INIT_LIST_HEAD(&dev_priv->mm.fence_list); - for (i = 0; i < I915_NUM_RINGS; i++) - init_ring_lists(&dev_priv->ring[i]); + for (i = 0; i < I915_NUM_ENGINES; i++) + init_engine_lists(&dev_priv->engine[i]); for (i = 0; i < I915_MAX_NUM_FENCES; i++) INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); INIT_DELAYED_WORK(&dev_priv->mm.retire_work, @@ -5067,17 +5146,6 @@ i915_gem_load_init(struct drm_device *dev) dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; - if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) - dev_priv->num_fence_regs = 32; - else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) - dev_priv->num_fence_regs = 16; - else - dev_priv->num_fence_regs = 8; - - if (intel_vgpu_active(dev)) - dev_priv->num_fence_regs = - I915_READ(vgtif_reg(avail_rs.fence_num)); - /* * Set initial sequence number for requests. 
* Using this number allows the wraparound to happen early, @@ -5086,11 +5154,8 @@ i915_gem_load_init(struct drm_device *dev) dev_priv->next_seqno = ((u32)~0 - 0x1100); dev_priv->last_seqno = ((u32)~0 - 0x1101); - /* Initialize fence registers to zero */ INIT_LIST_HEAD(&dev_priv->mm.fence_list); - i915_gem_restore_fences(dev); - i915_gem_detect_bit_6_swizzle(dev); init_waitqueue_head(&dev_priv->pending_flip_queue); dev_priv->mm.interruptible = true; @@ -5213,11 +5278,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct i915_address_space *ggtt = i915_obj_to_ggtt(o); + struct drm_i915_private *dev_priv = to_i915(o->base.dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == ggtt && + if (vma->vm == &ggtt->base && i915_ggtt_view_equal(&vma->ggtt_view, view)) return vma->node.start; @@ -5244,11 +5310,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o, bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, const struct i915_ggtt_view *view) { - struct i915_address_space *ggtt = i915_obj_to_ggtt(o); + struct drm_i915_private *dev_priv = to_i915(o->base.dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, obj_link) - if (vma->vm == ggtt && + if (vma->vm == &ggtt->base && i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 5dd84e148bba..91028d9c6269 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -342,15 +342,15 @@ void i915_gem_context_reset(struct drm_device *dev) struct intel_context *ctx; list_for_each_entry(ctx, &dev_priv->context_list, link) - intel_lr_context_reset(dev, ctx); + intel_lr_context_reset(dev_priv, ctx); } - for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_engine_cs *ring = &dev_priv->ring[i]; + for (i = 0; i < I915_NUM_ENGINES; i++) { + struct intel_engine_cs *engine = &dev_priv->engine[i]; - if (ring->last_context) { - i915_gem_context_unpin(ring->last_context, ring); - ring->last_context = NULL; + if (engine->last_context) { + i915_gem_context_unpin(engine->last_context, engine); + engine->last_context = NULL; } } @@ -413,7 +413,7 @@ void i915_gem_context_fini(struct drm_device *dev) /* The only known way to stop the gpu from accessing the hw context is * to reset it. Do this as the very last operation to avoid confusing * other code, leading to spurious errors. */ - intel_gpu_reset(dev); + intel_gpu_reset(dev, ALL_ENGINES); /* When default context is created and switched to, base object refcount * will be 2 (+1 from object creation and +1 from do_switch()). @@ -421,17 +421,17 @@ void i915_gem_context_fini(struct drm_device *dev) * to default context. So we need to unreference the base object once * to offset the do_switch part, so that i915_gem_context_unreference() * can then free the base object correctly. 
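 * As a ledger of the scheme described above:
 *
 *	object creation				-> refcount 1
 *	do_switch()				-> refcount 2
 *	the extra unreference here		-> refcount 1
 *	final i915_gem_context_unreference()	-> refcount 0, freed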
*/ - WARN_ON(!dev_priv->ring[RCS].last_context); + WARN_ON(!dev_priv->engine[RCS].last_context); i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state); } - for (i = I915_NUM_RINGS; --i >= 0;) { - struct intel_engine_cs *ring = &dev_priv->ring[i]; + for (i = I915_NUM_ENGINES; --i >= 0;) { + struct intel_engine_cs *engine = &dev_priv->engine[i]; - if (ring->last_context) { - i915_gem_context_unpin(ring->last_context, ring); - ring->last_context = NULL; + if (engine->last_context) { + i915_gem_context_unpin(engine->last_context, engine); + engine->last_context = NULL; } } @@ -441,14 +441,14 @@ void i915_gem_context_fini(struct drm_device *dev) int i915_gem_context_enable(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; if (i915.enable_execlists) { - if (ring->init_context == NULL) + if (engine->init_context == NULL) return 0; - ret = ring->init_context(req); + ret = engine->init_context(req); } else ret = i915_switch_context(req); @@ -510,35 +510,35 @@ i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id) static inline int mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; u32 flags = hw_flags | MI_MM_SPACE_GTT; const int num_rings = /* Use an extended w/a on ivb+ if signalling from other rings */ - i915_semaphore_is_enabled(ring->dev) ? - hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 : + i915_semaphore_is_enabled(engine->dev) ? + hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : 0; - int len, i, ret; + int len, ret; /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value * explicitly, so we rely on the value at ring init, stored in * itlb_before_ctx_switch. */ - if (IS_GEN6(ring->dev)) { - ret = ring->flush(req, I915_GEM_GPU_DOMAINS, 0); + if (IS_GEN6(engine->dev)) { + ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); if (ret) return ret; } /* These flags are for resource streamer on HSW+ */ - if (IS_HASWELL(ring->dev) || INTEL_INFO(ring->dev)->gen >= 8) + if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); - else if (INTEL_INFO(ring->dev)->gen < 8) + else if (INTEL_INFO(engine->dev)->gen < 8) flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); len = 4; - if (INTEL_INFO(ring->dev)->gen >= 7) + if (INTEL_INFO(engine->dev)->gen >= 7) len += 2 + (num_rings ? 
4*num_rings + 2 : 0); ret = intel_ring_begin(req, len); @@ -546,54 +546,61 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) return ret; /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ - if (INTEL_INFO(ring->dev)->gen >= 7) { - intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE); + if (INTEL_INFO(engine->dev)->gen >= 7) { + intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); if (num_rings) { struct intel_engine_cs *signaller; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_ring(signaller, to_i915(ring->dev), i) { - if (signaller == ring) + intel_ring_emit(engine, + MI_LOAD_REGISTER_IMM(num_rings)); + for_each_engine(signaller, to_i915(engine->dev)) { + if (signaller == engine) continue; - intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base)); - intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); + intel_ring_emit_reg(engine, + RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit(engine, + _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } } } - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | + intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(engine, MI_SET_CONTEXT); + intel_ring_emit(engine, + i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | flags); /* * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP * WaMiSetContext_Hang:snb,ivb,vlv */ - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, MI_NOOP); - if (INTEL_INFO(ring->dev)->gen >= 7) { + if (INTEL_INFO(engine->dev)->gen >= 7) { if (num_rings) { struct intel_engine_cs *signaller; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings)); - for_each_ring(signaller, to_i915(ring->dev), i) { - if (signaller == ring) + intel_ring_emit(engine, + MI_LOAD_REGISTER_IMM(num_rings)); + for_each_engine(signaller, to_i915(engine->dev)) { + if (signaller == engine) continue; - intel_ring_emit_reg(ring, RING_PSMI_CTL(signaller->mmio_base)); - intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); + intel_ring_emit_reg(engine, + RING_PSMI_CTL(signaller->mmio_base)); + intel_ring_emit(engine, + _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); } } - intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE); + intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE); } - intel_ring_advance(ring); + intel_ring_advance(engine); return ret; } -static inline bool should_skip_switch(struct intel_engine_cs *ring, +static inline bool should_skip_switch(struct intel_engine_cs *engine, struct intel_context *from, struct intel_context *to) { @@ -601,42 +608,42 @@ static inline bool should_skip_switch(struct intel_engine_cs *ring, return false; if (to->ppgtt && from == to && - !(intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) + !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) return true; return false; } static bool -needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to) +needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; if (!to->ppgtt) return false; - if (INTEL_INFO(ring->dev)->gen < 8) + if (INTEL_INFO(engine->dev)->gen < 8) return true; - if (ring != &dev_priv->ring[RCS]) + if (engine != &dev_priv->engine[RCS]) return true; return false; } static bool -needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to, - u32 
hw_flags) +needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to, + u32 hw_flags) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; if (!to->ppgtt) return false; - if (!IS_GEN8(ring->dev)) + if (!IS_GEN8(engine->dev)) return false; - if (ring != &dev_priv->ring[RCS]) + if (engine != &dev_priv->engine[RCS]) return false; if (hw_flags & MI_RESTORE_INHIBIT) @@ -648,25 +655,26 @@ needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to, static int do_switch(struct drm_i915_gem_request *req) { struct intel_context *to = req->ctx; - struct intel_engine_cs *ring = req->ring; - struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct intel_context *from = ring->last_context; + struct intel_engine_cs *engine = req->engine; + struct drm_i915_private *dev_priv = req->i915; + struct intel_context *from = engine->last_context; u32 hw_flags = 0; bool uninitialized = false; int ret, i; - if (from != NULL && ring == &dev_priv->ring[RCS]) { + if (from != NULL && engine == &dev_priv->engine[RCS]) { BUG_ON(from->legacy_hw_ctx.rcs_state == NULL); BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); } - if (should_skip_switch(ring, from, to)) + if (should_skip_switch(engine, from, to)) return 0; /* Trying to pin first makes error handling easier. */ - if (ring == &dev_priv->ring[RCS]) { + if (engine == &dev_priv->engine[RCS]) { ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, - get_context_alignment(ring->dev), 0); + get_context_alignment(engine->dev), + 0); if (ret) return ret; } @@ -676,23 +684,23 @@ static int do_switch(struct drm_i915_gem_request *req) * evict_everything - as a last ditch gtt defrag effort that also * switches to the default context. Hence we need to reload from here. */ - from = ring->last_context; + from = engine->last_context; - if (needs_pd_load_pre(ring, to)) { + if (needs_pd_load_pre(engine, to)) { /* Older GENs and non render rings still want the load first, * "PP_DCLV followed by PP_DIR_BASE register through Load * Register Immediate commands in Ring Buffer before submitting * a context."*/ - trace_switch_mm(ring, to); + trace_switch_mm(engine, to); ret = to->ppgtt->switch_mm(to->ppgtt, req); if (ret) goto unpin_out; /* Doing a PD load always reloads the page dirs */ - to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring); + to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } - if (ring != &dev_priv->ring[RCS]) { + if (engine != &dev_priv->engine[RCS]) { if (from) i915_gem_context_unreference(from); goto done; @@ -717,14 +725,14 @@ static int do_switch(struct drm_i915_gem_request *req) * space. This means we must enforce that a page table load * occur when this occurs. */ } else if (to->ppgtt && - (intel_ring_flag(ring) & to->ppgtt->pd_dirty_rings)) { + (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) { hw_flags |= MI_FORCE_RESTORE; - to->ppgtt->pd_dirty_rings &= ~intel_ring_flag(ring); + to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); } /* We should never emit switch_mm more than once */ - WARN_ON(needs_pd_load_pre(ring, to) && - needs_pd_load_post(ring, to, hw_flags)); + WARN_ON(needs_pd_load_pre(engine, to) && + needs_pd_load_post(engine, to, hw_flags)); ret = mi_set_context(req, hw_flags); if (ret) @@ -733,8 +741,8 @@ static int do_switch(struct drm_i915_gem_request *req) /* GEN8 does *not* require an explicit reload if the PDPs have been * setup, and we do not wish to move them. 
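 * Summarising needs_pd_load_pre() and needs_pd_load_post() above (both
 * additionally require a PPGTT):
 *
 *	pre-load:  gen < 8, or any engine other than RCS
 *	post-load: gen8 RCS only, and only when MI_RESTORE_INHIBIT is set
 *
 * The two are mutually exclusive, hence the WARN_ON before
 * mi_set_context() above.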
*/ - if (needs_pd_load_post(ring, to, hw_flags)) { - trace_switch_mm(ring, to); + if (needs_pd_load_post(engine, to, hw_flags)) { + trace_switch_mm(engine, to); ret = to->ppgtt->switch_mm(to->ppgtt, req); /* The hardware context switch is emitted, but we haven't * actually changed the state - so it's probably safe to bail @@ -787,11 +795,11 @@ static int do_switch(struct drm_i915_gem_request *req) done: i915_gem_context_reference(to); - ring->last_context = to; + engine->last_context = to; if (uninitialized) { - if (ring->init_context) { - ret = ring->init_context(req); + if (engine->init_context) { + ret = engine->init_context(req); if (ret) DRM_ERROR("ring init context: %d\n", ret); } @@ -800,7 +808,7 @@ done: return 0; unpin_out: - if (ring->id == RCS) + if (engine->id == RCS) i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); return ret; } @@ -820,18 +828,18 @@ unpin_out: */ int i915_switch_context(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct intel_engine_cs *engine = req->engine; + struct drm_i915_private *dev_priv = req->i915; WARN_ON(i915.enable_execlists); WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */ - if (req->ctx != ring->last_context) { + if (req->ctx != engine->last_context) { i915_gem_context_reference(req->ctx); - if (ring->last_context) - i915_gem_context_unreference(ring->last_context); - ring->last_context = req->ctx; + if (engine->last_context) + i915_gem_context_unreference(engine->last_context); + engine->last_context = req->ctx; } return 0; } @@ -937,7 +945,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, else if (to_i915(dev)->mm.aliasing_ppgtt) args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total; else - args->value = to_i915(dev)->gtt.base.total; + args->value = to_i915(dev)->ggtt.base.total; break; default: ret = -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index 17299d04189f..a56516482394 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c @@ -36,29 +36,29 @@ i915_verify_lists(struct drm_device *dev) static int warned; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; int err = 0; - int i; if (warned) return 0; - for_each_ring(ring, dev_priv, i) { - list_for_each_entry(obj, &ring->active_list, ring_list[ring->id]) { + for_each_engine(engine, dev_priv) { + list_for_each_entry(obj, &engine->active_list, + engine_list[engine->id]) { if (obj->base.dev != dev || !atomic_read(&obj->base.refcount.refcount)) { DRM_ERROR("%s: freed active obj %p\n", - ring->name, obj); + engine->name, obj); err++; break; } else if (!obj->active || - obj->last_read_req[ring->id] == NULL) { + obj->last_read_req[engine->id] == NULL) { DRM_ERROR("%s: invalid active obj %p\n", - ring->name, obj); + engine->name, obj); err++; } else if (obj->base.write_domain) { DRM_ERROR("%s: invalid write obj %p (w %x)\n", - ring->name, + engine->name, obj, obj->base.write_domain); err++; } diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c index 0506016e18e0..80bbe43a2e92 100644 --- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c @@ -95,14 +95,12 @@ static void i915_gem_unmap_dma_buf(struct 
dma_buf_attachment *attachment, { struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf); - mutex_lock(&obj->base.dev->struct_mutex); - dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir); sg_free_table(sg); kfree(sg); + mutex_lock(&obj->base.dev->struct_mutex); i915_gem_object_unpin_pages(obj); - mutex_unlock(&obj->base.dev->struct_mutex); } @@ -110,51 +108,17 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf) { struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf); struct drm_device *dev = obj->base.dev; - struct sg_page_iter sg_iter; - struct page **pages; - int ret, i; + void *addr; + int ret; ret = i915_mutex_lock_interruptible(dev); if (ret) return ERR_PTR(ret); - if (obj->dma_buf_vmapping) { - obj->vmapping_count++; - goto out_unlock; - } - - ret = i915_gem_object_get_pages(obj); - if (ret) - goto err; - - i915_gem_object_pin_pages(obj); - - ret = -ENOMEM; - - pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); - if (pages == NULL) - goto err_unpin; - - i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) - pages[i++] = sg_page_iter_page(&sg_iter); - - obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL); - drm_free_large(pages); - - if (!obj->dma_buf_vmapping) - goto err_unpin; - - obj->vmapping_count = 1; -out_unlock: + addr = i915_gem_object_pin_map(obj); mutex_unlock(&dev->struct_mutex); - return obj->dma_buf_vmapping; -err_unpin: - i915_gem_object_unpin_pages(obj); -err: - mutex_unlock(&dev->struct_mutex); - return ERR_PTR(ret); + return addr; } static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) @@ -163,12 +127,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr) struct drm_device *dev = obj->base.dev; mutex_lock(&dev->struct_mutex); - if (--obj->vmapping_count == 0) { - vunmap(obj->dma_buf_vmapping); - obj->dma_buf_vmapping = NULL; - - i915_gem_object_unpin_pages(obj); - } + i915_gem_object_unpin_map(obj); mutex_unlock(&dev->struct_mutex); } diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1328bc5021b4..6ee4f00f620c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, uint64_t target_offset) { struct drm_device *dev = obj->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; uint64_t delta = relocation_target(reloc, target_offset); uint64_t offset; void __iomem *reloc_page; @@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, /* Map the page containing the relocation we're going to perform. 
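 * Note that for a 64-bit relocation the two dwords may straddle a page
 * boundary: the low dword is written first and, if advancing the
 * offset lands on a fresh page (offset_in_page(offset) == 0), the code
 * below atomically remaps before writing the upper half of delta.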
*/ offset = i915_gem_obj_ggtt_offset(obj); offset += reloc->offset; - reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + reloc_page = io_mapping_map_atomic_wc(ggtt->mappable, offset & PAGE_MASK); iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset)); @@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, if (offset_in_page(offset) == 0) { io_mapping_unmap_atomic(reloc_page); reloc_page = - io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + io_mapping_map_atomic_wc(ggtt->mappable, offset); } @@ -599,7 +600,7 @@ static bool only_mappable_for_reloc(unsigned int flags) static int i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, - struct intel_engine_cs *ring, + struct intel_engine_cs *engine, bool *need_reloc) { struct drm_i915_gem_object *obj = vma->obj; @@ -713,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma) } static int -i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, +i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, struct list_head *vmas, struct intel_context *ctx, bool *need_relocs) @@ -723,10 +724,10 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, struct i915_address_space *vm; struct list_head ordered_vmas; struct list_head pinned_vmas; - bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4; + bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; int retry; - i915_gem_retire_requests_ring(ring); + i915_gem_retire_requests_ring(engine); vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm; @@ -788,7 +789,9 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, if (eb_vma_misplaced(vma)) ret = i915_vma_unbind(vma); else - ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_vma(vma, + engine, + need_relocs); if (ret) goto err; } @@ -798,7 +801,8 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *ring, if (drm_mm_node_allocated(&vma->node)) continue; - ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs); + ret = i915_gem_execbuffer_reserve_vma(vma, engine, + need_relocs); if (ret) goto err; } @@ -821,7 +825,7 @@ static int i915_gem_execbuffer_relocate_slow(struct drm_device *dev, struct drm_i915_gem_execbuffer2 *args, struct drm_file *file, - struct intel_engine_cs *ring, + struct intel_engine_cs *engine, struct eb_vmas *eb, struct drm_i915_gem_exec_object2 *exec, struct intel_context *ctx) @@ -910,7 +914,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, goto err; need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); + ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx, + &need_relocs); if (ret) goto err; @@ -938,7 +943,7 @@ static int i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, struct list_head *vmas) { - const unsigned other_rings = ~intel_ring_flag(req->ring); + const unsigned other_rings = ~intel_engine_flag(req->engine); struct i915_vma *vma; uint32_t flush_domains = 0; bool flush_chipset = false; @@ -948,7 +953,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, struct drm_i915_gem_object *obj = vma->obj; if (obj->active & other_rings) { - ret = i915_gem_object_sync(obj, req->ring, &req); + ret = i915_gem_object_sync(obj, req->engine, &req); if (ret) return ret; } @@ -960,7 +965,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req, } if (flush_chipset) - i915_gem_chipset_flush(req->ring->dev); + i915_gem_chipset_flush(req->engine->dev); if (flush_domains & 
I915_GEM_DOMAIN_GTT) wmb(); @@ -1062,12 +1067,12 @@ validate_exec_list(struct drm_device *dev, static struct intel_context * i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, - struct intel_engine_cs *ring, const u32 ctx_id) + struct intel_engine_cs *engine, const u32 ctx_id) { struct intel_context *ctx = NULL; struct i915_ctx_hang_stats *hs; - if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) + if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) return ERR_PTR(-EINVAL); ctx = i915_gem_context_get(file->driver_priv, ctx_id); @@ -1080,8 +1085,8 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, return ERR_PTR(-EIO); } - if (i915.enable_execlists && !ctx->engine[ring->id].state) { - int ret = intel_lr_context_deferred_alloc(ctx, ring); + if (i915.enable_execlists && !ctx->engine[engine->id].state) { + int ret = intel_lr_context_deferred_alloc(ctx, engine); if (ret) { DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret); return ERR_PTR(ret); @@ -1095,7 +1100,7 @@ void i915_gem_execbuffer_move_to_active(struct list_head *vmas, struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = i915_gem_request_get_ring(req); + struct intel_engine_cs *engine = i915_gem_request_get_engine(req); struct i915_vma *vma; list_for_each_entry(vma, vmas, exec_list) { @@ -1122,7 +1127,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { i915_gem_request_assign(&obj->last_fenced_req, req); if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { - struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct drm_i915_private *dev_priv = to_i915(engine->dev); list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, &dev_priv->mm.fence_list); } @@ -1136,7 +1141,7 @@ void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params) { /* Unconditionally force add_request to emit a full flush. 
*/ - params->ring->gpu_caches_dirty = true; + params->engine->gpu_caches_dirty = true; /* Add a breadcrumb for the completion of the batch buffer */ __i915_add_request(params->request, params->batch_obj, true); @@ -1146,11 +1151,11 @@ static int i915_reset_gen7_sol_offsets(struct drm_device *dev, struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct drm_i915_private *dev_priv = dev->dev_private; int ret, i; - if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) { + if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { DRM_DEBUG("sol reset is gen7/rcs only\n"); return -EINVAL; } @@ -1160,18 +1165,18 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev, return ret; for (i = 0; i < 4; i++) { - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i)); - intel_ring_emit(ring, 0); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i)); + intel_ring_emit(engine, 0); } - intel_ring_advance(ring); + intel_ring_advance(engine); return 0; } static struct drm_i915_gem_object* -i915_gem_execbuffer_parse(struct intel_engine_cs *ring, +i915_gem_execbuffer_parse(struct intel_engine_cs *engine, struct drm_i915_gem_exec_object2 *shadow_exec_entry, struct eb_vmas *eb, struct drm_i915_gem_object *batch_obj, @@ -1183,12 +1188,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *ring, struct i915_vma *vma; int ret; - shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool, + shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool, PAGE_ALIGN(batch_len)); if (IS_ERR(shadow_batch_obj)) return shadow_batch_obj; - ret = i915_parse_cmds(ring, + ret = i915_parse_cmds(engine, batch_obj, shadow_batch_obj, batch_start_offset, @@ -1229,7 +1234,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, struct list_head *vmas) { struct drm_device *dev = params->dev; - struct intel_engine_cs *ring = params->ring; + struct intel_engine_cs *engine = params->engine; struct drm_i915_private *dev_priv = dev->dev_private; u64 exec_start, exec_len; int instp_mode; @@ -1244,8 +1249,8 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, if (ret) return ret; - WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id), - "%s didn't clear reload\n", ring->name); + WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id), + "%s didn't clear reload\n", engine->name); instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; instp_mask = I915_EXEC_CONSTANTS_MASK; @@ -1253,7 +1258,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, case I915_EXEC_CONSTANTS_REL_GENERAL: case I915_EXEC_CONSTANTS_ABSOLUTE: case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); return -EINVAL; } @@ -1280,17 +1285,17 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, return -EINVAL; } - if (ring == &dev_priv->ring[RCS] && + if (engine == &dev_priv->engine[RCS] && instp_mode != dev_priv->relative_constants_mode) { ret = intel_ring_begin(params->request, 4); if (ret) return ret; - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, INSTPM); - intel_ring_emit(ring, instp_mask << 16 | instp_mode); - intel_ring_advance(ring); + intel_ring_emit(engine, 
MI_NOOP); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(engine, INSTPM); + intel_ring_emit(engine, instp_mask << 16 | instp_mode); + intel_ring_advance(engine); dev_priv->relative_constants_mode = instp_mode; } @@ -1308,7 +1313,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params, if (exec_len == 0) exec_len = params->batch_obj->base.size; - ret = ring->dispatch_execbuffer(params->request, + ret = engine->dispatch_execbuffer(params->request, exec_start, exec_len, params->dispatch_flags); if (ret) @@ -1365,7 +1370,7 @@ eb_get_batch(struct eb_vmas *eb) #define I915_USER_RINGS (4) -static const enum intel_ring_id user_ring_map[I915_USER_RINGS + 1] = { +static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = { [I915_EXEC_DEFAULT] = RCS, [I915_EXEC_RENDER] = RCS, [I915_EXEC_BLT] = BCS, @@ -1408,12 +1413,12 @@ eb_select_ring(struct drm_i915_private *dev_priv, return -EINVAL; } - *ring = &dev_priv->ring[_VCS(bsd_idx)]; + *ring = &dev_priv->engine[_VCS(bsd_idx)]; } else { - *ring = &dev_priv->ring[user_ring_map[user_ring_id]]; + *ring = &dev_priv->engine[user_ring_map[user_ring_id]]; } - if (!intel_ring_initialized(*ring)) { + if (!intel_engine_initialized(*ring)) { DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id); return -EINVAL; } @@ -1427,12 +1432,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, struct drm_i915_gem_execbuffer2 *args, struct drm_i915_gem_exec_object2 *exec) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_request *req = NULL; struct eb_vmas *eb; struct drm_i915_gem_object *batch_obj; struct drm_i915_gem_exec_object2 shadow_exec_entry; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct intel_context *ctx; struct i915_address_space *vm; struct i915_execbuffer_params params_master; /* XXX: will be removed later */ @@ -1459,7 +1465,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (args->flags & I915_EXEC_IS_PINNED) dispatch_flags |= I915_DISPATCH_PINNED; - ret = eb_select_ring(dev_priv, file, args, &ring); + ret = eb_select_ring(dev_priv, file, args, &engine); if (ret) return ret; @@ -1473,9 +1479,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n"); return -EINVAL; } - if (ring->id != RCS) { + if (engine->id != RCS) { DRM_DEBUG("RS is not available on %s\n", - ring->name); + engine->name); return -EINVAL; } @@ -1488,7 +1494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ret) goto pre_mutex_err; - ctx = i915_gem_validate_context(dev, file, ring, ctx_id); + ctx = i915_gem_validate_context(dev, file, engine, ctx_id); if (IS_ERR(ctx)) { mutex_unlock(&dev->struct_mutex); ret = PTR_ERR(ctx); @@ -1500,7 +1506,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, if (ctx->ppgtt) vm = &ctx->ppgtt->base; else - vm = &dev_priv->gtt.base; + vm = &ggtt->base; memset(¶ms_master, 0x00, sizeof(params_master)); @@ -1522,7 +1528,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, /* Move the objects en-masse into the GTT, evicting if necessary. 
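 * The fallback ladder below, roughly:
 *
 *	i915_gem_execbuffer_reserve()		pin into the VM, evicting
 *						if necessary
 *	i915_gem_execbuffer_relocate()		fast path; faulting on the
 *						user relocations gives -EFAULT
 *	i915_gem_execbuffer_relocate_slow()	drops struct_mutex, copies
 *						the relocations in, retries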
*/ need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; - ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs); + ret = i915_gem_execbuffer_reserve(engine, &eb->vmas, ctx, + &need_relocs); if (ret) goto err; @@ -1531,7 +1538,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ret = i915_gem_execbuffer_relocate(eb); if (ret) { if (ret == -EFAULT) { - ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, + ret = i915_gem_execbuffer_relocate_slow(dev, args, file, + engine, eb, exec, ctx); BUG_ON(!mutex_is_locked(&dev->struct_mutex)); } @@ -1547,16 +1555,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, } params->args_batch_start_offset = args->batch_start_offset; - if (i915_needs_cmd_parser(ring) && args->batch_len) { + if (i915_needs_cmd_parser(engine) && args->batch_len) { struct drm_i915_gem_object *parsed_batch_obj; - parsed_batch_obj = i915_gem_execbuffer_parse(ring, - &shadow_exec_entry, - eb, - batch_obj, - args->batch_start_offset, - args->batch_len, - file->is_master); + parsed_batch_obj = i915_gem_execbuffer_parse(engine, + &shadow_exec_entry, + eb, + batch_obj, + args->batch_start_offset, + args->batch_len, + file->is_master); if (IS_ERR(parsed_batch_obj)) { ret = PTR_ERR(parsed_batch_obj); goto err; @@ -1608,7 +1616,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm); /* Allocate a request for this batch buffer nice and early. */ - req = i915_gem_request_alloc(ring, ctx); + req = i915_gem_request_alloc(engine, ctx); if (IS_ERR(req)) { ret = PTR_ERR(req); goto err_batch_unpin; @@ -1626,7 +1634,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, */ params->dev = dev; params->file = file; - params->ring = ring; + params->engine = engine; params->dispatch_flags = dispatch_flags; params->batch_obj = batch_obj; params->ctx = ctx; @@ -1775,11 +1783,9 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, return -EINVAL; } - exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, - GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); - if (exec2_list == NULL) - exec2_list = drm_malloc_ab(sizeof(*exec2_list), - args->buffer_count); + exec2_list = drm_malloc_gfp(args->buffer_count, + sizeof(*exec2_list), + GFP_TEMPORARY); if (exec2_list == NULL) { DRM_DEBUG("Failed to allocate exec list for %d buffers\n", args->buffer_count); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 49e4f26b79d8..c5cb04907525 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -658,7 +658,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, unsigned entry, dma_addr_t addr) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; BUG_ON(entry >= 4); @@ -667,13 +667,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(ring, entry)); - intel_ring_emit(ring, upper_32_bits(addr)); - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(ring, entry)); - intel_ring_emit(ring, lower_32_bits(addr)); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry)); + intel_ring_emit(engine, upper_32_bits(addr)); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(engine, 
GEN8_RING_PDP_LDW(engine, entry)); + intel_ring_emit(engine, lower_32_bits(addr)); + intel_ring_advance(engine); return 0; } @@ -706,8 +706,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm, uint64_t length, gen8_pte_t scratch_pte) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen8_pte_t *pt_vaddr; unsigned pdpe = gen8_pdpe_index(start); unsigned pde = gen8_pde_index(start); @@ -762,8 +761,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, uint64_t length, bool use_scratch) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page), I915_CACHE_LLC, use_scratch); @@ -788,8 +786,7 @@ gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level cache_level) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen8_pte_t *pt_vaddr; unsigned pdpe = gen8_pdpe_index(start); unsigned pde = gen8_pde_index(start); @@ -829,8 +826,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, enum i915_cache_level cache_level, u32 unused) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct sg_page_iter sg_iter; __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0); @@ -981,8 +977,7 @@ static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt) static void gen8_ppgtt_cleanup(struct i915_address_space *vm) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); if (intel_vgpu_active(vm->dev)) gen8_ppgtt_notify_vgt(ppgtt, false); @@ -1216,8 +1211,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm, uint64_t start, uint64_t length) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); unsigned long *new_page_dirs, *new_page_tables; struct drm_device *dev = vm->dev; struct i915_page_directory *pd; @@ -1329,8 +1323,7 @@ static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm, uint64_t length) { DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4); - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_directory_pointer *pdp; uint64_t pml4e; int ret = 0; @@ -1376,8 +1369,7 @@ err_out: static int gen8_alloc_va_range(struct i915_address_space *vm, uint64_t start, uint64_t length) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); if (USES_FULL_48BIT_PPGTT(vm->dev)) return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length); @@ -1629,6 +1621,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv, struct i915_page_directory *pd, uint32_t start, uint32_t length) { + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_page_table *pt; uint32_t pde, temp; @@ -1637,7 +1630,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv, /* Make sure write is complete before other code can use this page * table. 
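 * (The readl() of ggtt->gsm below serves as the posting read that
 * flushes these PTE writes.)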
Also required for WC mapped PTEs */ - readl(dev_priv->gtt.gsm); + readl(ggtt->gsm); } static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) @@ -1650,11 +1643,11 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; /* NB: TLBs must be flushed and invalidated before a switch */ - ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); if (ret) return ret; @@ -1662,13 +1655,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring)); - intel_ring_emit(ring, PP_DIR_DCLV_2G); - intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring)); - intel_ring_emit(ring, get_pd_offset(ppgtt)); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); + intel_ring_emit(engine, PP_DIR_DCLV_2G); + intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); + intel_ring_emit(engine, get_pd_offset(ppgtt)); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return 0; } @@ -1676,22 +1669,22 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt, static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev); - I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); + I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); return 0; } static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; /* NB: TLBs must be flushed and invalidated before a switch */ - ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); if (ret) return ret; @@ -1699,17 +1692,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, if (ret) return ret; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2)); - intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(ring)); - intel_ring_emit(ring, PP_DIR_DCLV_2G); - intel_ring_emit_reg(ring, RING_PP_DIR_BASE(ring)); - intel_ring_emit(ring, get_pd_offset(ppgtt)); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2)); + intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine)); + intel_ring_emit(engine, PP_DIR_DCLV_2G); + intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine)); + intel_ring_emit(engine, get_pd_offset(ppgtt)); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); /* XXX: RCS is the only one to auto invalidate the TLBs? 
*/ - if (ring->id != RCS) { - ret = ring->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); + if (engine->id != RCS) { + ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); if (ret) return ret; } @@ -1720,15 +1713,15 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G); - I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt)); + I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); + I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); - POSTING_READ(RING_PP_DIR_DCLV(ring)); + POSTING_READ(RING_PP_DIR_DCLV(engine)); return 0; } @@ -1736,12 +1729,11 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt, static void gen8_ppgtt_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int j; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, j) { + for_each_engine(engine, dev_priv) { u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0; - I915_WRITE(RING_MODE_GEN7(ring), + I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); } } @@ -1749,9 +1741,8 @@ static void gen8_ppgtt_enable(struct drm_device *dev) static void gen7_ppgtt_enable(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; uint32_t ecochk, ecobits; - int i; ecobits = I915_READ(GAC_ECO_BITS); I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); @@ -1765,9 +1756,9 @@ static void gen7_ppgtt_enable(struct drm_device *dev) } I915_WRITE(GAM_ECOCHK, ecochk); - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { /* GFX_MODE is per-ring on gen7+ */ - I915_WRITE(RING_MODE_GEN7(ring), + I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); } } @@ -1796,8 +1787,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, uint64_t length, bool use_scratch) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen6_pte_t *pt_vaddr, scratch_pte; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; @@ -1831,8 +1821,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level cache_level, u32 flags) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); gen6_pte_t *pt_vaddr; unsigned first_entry = start >> PAGE_SHIFT; unsigned act_pt = first_entry / GEN6_PTES; @@ -1864,9 +1853,9 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, { DECLARE_BITMAP(new_page_tables, I915_PDES); struct drm_device *dev = vm->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_table *pt; uint32_t start, length, start_save, length_save; uint32_t pde, temp; @@ -1932,7 +1921,7 @@ static int gen6_alloc_va_range(struct 
i915_address_space *vm, /* Make sure write is complete before other code can use this page * table. Also required for WC mapped PTEs */ - readl(dev_priv->gtt.gsm); + readl(ggtt->gsm); mark_tlbs_dirty(ppgtt); return 0; @@ -1978,8 +1967,7 @@ static void gen6_free_scratch(struct i915_address_space *vm) static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, base); + struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); struct i915_page_table *pt; uint32_t pde; @@ -1997,7 +1985,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) { struct i915_address_space *vm = &ppgtt->base; struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; bool retried = false; int ret; @@ -2005,23 +1994,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); + BUG_ON(!drm_mm_initialized(&ggtt->base.mm)); ret = gen6_init_scratch(vm); if (ret) return ret; alloc: - ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, + ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm, &ppgtt->node, GEN6_PD_SIZE, GEN6_PD_ALIGN, 0, - 0, dev_priv->gtt.base.total, + 0, ggtt->base.total, DRM_MM_TOPDOWN); if (ret == -ENOSPC && !retried) { - ret = i915_gem_evict_something(dev, &dev_priv->gtt.base, + ret = i915_gem_evict_something(dev, &ggtt->base, GEN6_PD_SIZE, GEN6_PD_ALIGN, I915_CACHE_NONE, - 0, dev_priv->gtt.base.total, + 0, ggtt->base.total, 0); if (ret) goto err_out; @@ -2034,7 +2023,7 @@ alloc: goto err_out; - if (ppgtt->node.start < dev_priv->gtt.mappable_end) + if (ppgtt->node.start < ggtt->mappable_end) DRM_DEBUG("Forced to use aperture for PDEs\n"); return 0; @@ -2062,10 +2051,11 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) { struct drm_device *dev = ppgtt->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; - ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode; + ppgtt->base.pte_encode = ggtt->base.pte_encode; if (IS_GEN6(dev)) { ppgtt->switch_mm = gen6_mm_switch; } else if (IS_HASWELL(dev)) { @@ -2095,7 +2085,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) ppgtt->pd.base.ggtt_offset = ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); - ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + + ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); @@ -2192,7 +2182,7 @@ int i915_ppgtt_init_hw(struct drm_device *dev) int i915_ppgtt_init_ring(struct drm_i915_gem_request *req) { - struct drm_i915_private *dev_priv = req->ring->dev->dev_private; + struct drm_i915_private *dev_priv = req->i915; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; if (i915.enable_execlists) @@ -2263,9 +2253,10 @@ static bool needs_idle_maps(struct drm_device *dev) static bool do_idling(struct drm_i915_private *dev_priv) { + struct i915_ggtt *ggtt = &dev_priv->ggtt; bool ret = dev_priv->mm.interruptible; - if (unlikely(dev_priv->gtt.do_idle_maps)) { + if 
(unlikely(ggtt->do_idle_maps)) { dev_priv->mm.interruptible = false; if (i915_gpu_idle(dev_priv->dev)) { DRM_ERROR("Couldn't idle GPU\n"); @@ -2279,22 +2270,23 @@ static bool do_idling(struct drm_i915_private *dev_priv) static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) { - if (unlikely(dev_priv->gtt.do_idle_maps)) + struct i915_ggtt *ggtt = &dev_priv->ggtt; + + if (unlikely(ggtt->do_idle_maps)) dev_priv->mm.interruptible = interruptible; } void i915_check_and_clear_faults(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; if (INTEL_INFO(dev)->gen < 6) return; - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { u32 fault_reg; - fault_reg = I915_READ(RING_FAULT_REG(ring)); + fault_reg = I915_READ(RING_FAULT_REG(engine)); if (fault_reg & RING_FAULT_VALID) { DRM_DEBUG_DRIVER("Unexpected fault\n" "\tAddr: 0x%08lx\n" @@ -2305,16 +2297,16 @@ void i915_check_and_clear_faults(struct drm_device *dev) fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", RING_FAULT_SRCID(fault_reg), RING_FAULT_FAULT_TYPE(fault_reg)); - I915_WRITE(RING_FAULT_REG(ring), + I915_WRITE(RING_FAULT_REG(engine), fault_reg & ~RING_FAULT_VALID); } } - POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS])); + POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS])); } static void i915_ggtt_flush(struct drm_i915_private *dev_priv) { - if (INTEL_INFO(dev_priv->dev)->gen < 6) { + if (INTEL_INFO(dev_priv)->gen < 6) { intel_gtt_chipset_flush(); } else { I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN); @@ -2324,7 +2316,8 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv) void i915_gem_suspend_gtt_mappings(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; /* Don't bother messing with faults pre GEN6 as we have little * documentation supporting that it's a good idea. 
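/*
 * The for_each_ring() to for_each_engine() conversions above also drop
 * the caller-supplied index variable. A sketch of the iterator shape this
 * patch assumes, built on drm's dangling-else-safe for_each_if(); the
 * exact macro lives in the i915 headers and may differ in detail:
 */
#define for_each_engine(engine__, dev_priv__) \
	for ((engine__) = &(dev_priv__)->engine[0]; \
	     (engine__) < &(dev_priv__)->engine[I915_NUM_ENGINES]; \
	     (engine__)++) \
		for_each_if (intel_engine_initialized(engine__))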
@@ -2334,10 +2327,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev) i915_check_and_clear_faults(dev); - dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - dev_priv->gtt.base.start, - dev_priv->gtt.base.total, - true); + ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, + true); i915_ggtt_flush(dev_priv); } @@ -2367,10 +2358,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level level, u32 unused) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned first_entry = start >> PAGE_SHIFT; gen8_pte_t __iomem *gtt_entries = - (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; dma_addr_t addr = 0; /* shut up gcc */ @@ -2444,10 +2436,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, uint64_t start, enum i915_cache_level level, u32 flags) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned first_entry = start >> PAGE_SHIFT; gen6_pte_t __iomem *gtt_entries = - (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + (gen6_pte_t __iomem *)ggtt->gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; dma_addr_t addr = 0; @@ -2487,12 +2480,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, uint64_t length, bool use_scratch) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen8_pte_t scratch_pte, __iomem *gtt_base = - (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; - const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; + (gen8_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; int i; int rpm_atomic_seq; @@ -2518,12 +2512,13 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, uint64_t length, bool use_scratch) { - struct drm_i915_private *dev_priv = vm->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; gen6_pte_t scratch_pte, __iomem *gtt_base = - (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; - const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; + (gen6_pte_t __iomem *)ggtt->gsm + first_entry; + const int max_entries = ggtt_total_entries(ggtt) - first_entry; int i; int rpm_atomic_seq; @@ -2613,32 +2608,31 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - struct drm_device *dev = vma->vm->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj = vma->obj; - struct sg_table *pages = obj->pages; - u32 pte_flags = 0; + u32 pte_flags; int ret; ret = i915_get_ggtt_vma_pages(vma); if (ret) return ret; - pages = vma->ggtt_view.pages; /* Currently applicable only to VLV */ - if (obj->gt_ro) + pte_flags = 0; + if (vma->obj->gt_ro) pte_flags |= PTE_READ_ONLY; if (flags & GLOBAL_BIND) { - vma->vm->insert_entries(vma->vm, pages, + vma->vm->insert_entries(vma->vm, + 
vma->ggtt_view.pages, vma->node.start, cache_level, pte_flags); } if (flags & LOCAL_BIND) { - struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; - appgtt->base.insert_entries(&appgtt->base, pages, + struct i915_hw_ppgtt *appgtt = + to_i915(vma->vm->dev)->mm.aliasing_ppgtt; + appgtt->base.insert_entries(&appgtt->base, + vma->ggtt_view.pages, vma->node.start, cache_level, pte_flags); } @@ -2717,8 +2711,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, * aperture. One page should be enough to keep any prefetching inside * of the aperture. */ - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_mm_node *entry; struct drm_i915_gem_object *obj; unsigned long hole_start, hole_end; @@ -2726,13 +2720,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, BUG_ON(mappable_end > end); - ggtt_vm->start = start; + ggtt->base.start = start; /* Subtract the guard page before address space initialization to * shrink the range used by drm_mm */ - ggtt_vm->total = end - start - PAGE_SIZE; - i915_address_space_init(ggtt_vm, dev_priv); - ggtt_vm->total += PAGE_SIZE; + ggtt->base.total = end - start - PAGE_SIZE; + i915_address_space_init(&ggtt->base, dev_priv); + ggtt->base.total += PAGE_SIZE; if (intel_vgpu_active(dev)) { ret = intel_vgt_balloon(dev); @@ -2741,36 +2735,36 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, } if (!HAS_LLC(dev)) - ggtt_vm->mm.color_adjust = i915_gtt_color_adjust; + ggtt->base.mm.color_adjust = i915_gtt_color_adjust; /* Mark any preallocated objects as occupied */ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); + struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base); DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n", i915_gem_obj_ggtt_offset(obj), obj->base.size); WARN_ON(i915_gem_obj_ggtt_bound(obj)); - ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node); + ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); if (ret) { DRM_DEBUG_KMS("Reservation failed: %i\n", ret); return ret; } vma->bound |= GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); - list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list); + list_add_tail(&vma->vm_link, &ggtt->base.inactive_list); } /* Clear any non-preallocated blocks */ - drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) { + drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", hole_start, hole_end); - ggtt_vm->clear_range(ggtt_vm, hole_start, + ggtt->base.clear_range(&ggtt->base, hole_start, hole_end - hole_start, true); } /* And finally clear the reserved guard page */ - ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true); + ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true); if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) { struct i915_hw_ppgtt *ppgtt; @@ -2801,28 +2795,33 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, true); dev_priv->mm.aliasing_ppgtt = ppgtt; - WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma); - dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma; + WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma); + ggtt->base.bind_vma = aliasing_gtt_bind_vma; } return 0; } -void i915_gem_init_global_gtt(struct drm_device *dev) +/** + * i915_gem_init_ggtt - Initialize GEM for Global GTT + * @dev: DRM 
device + */ +void i915_gem_init_ggtt(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - u64 gtt_size, mappable_size; - - gtt_size = dev_priv->gtt.base.total; - mappable_size = dev_priv->gtt.mappable_end; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; - i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size); + i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total); } -void i915_global_gtt_cleanup(struct drm_device *dev) +/** + * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization + * @dev: DRM device + */ +void i915_ggtt_cleanup_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *vm = &dev_priv->gtt.base; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; if (dev_priv->mm.aliasing_ppgtt) { struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; @@ -2832,15 +2831,15 @@ void i915_global_gtt_cleanup(struct drm_device *dev) i915_gem_cleanup_stolen(dev); - if (drm_mm_initialized(&vm->mm)) { + if (drm_mm_initialized(&ggtt->base.mm)) { if (intel_vgpu_active(dev)) intel_vgt_deballoon(); - drm_mm_takedown(&vm->mm); - list_del(&vm->global_link); + drm_mm_takedown(&ggtt->base.mm); + list_del(&ggtt->base.global_link); } - vm->cleanup(vm); + ggtt->base.cleanup(&ggtt->base); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -2924,13 +2923,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) static int ggtt_probe_common(struct drm_device *dev, size_t gtt_size) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_page_scratch *scratch_page; - phys_addr_t gtt_phys_addr; + phys_addr_t ggtt_phys_addr; /* For Modern GENs the PTEs and register space are split in the BAR */ - gtt_phys_addr = pci_resource_start(dev->pdev, 0) + - (pci_resource_len(dev->pdev, 0) / 2); + ggtt_phys_addr = pci_resource_start(dev->pdev, 0) + + (pci_resource_len(dev->pdev, 0) / 2); /* * On BXT writes larger than 64 bit to the GTT pagetable range will be @@ -2940,10 +2940,10 @@ static int ggtt_probe_common(struct drm_device *dev, * readback check when writing GTT PTE entries. */ if (IS_BROXTON(dev)) - dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size); + ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size); else - dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size); - if (!dev_priv->gtt.gsm) { + ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size); + if (!ggtt->gsm) { DRM_ERROR("Failed to map the gtt page table\n"); return -ENOMEM; } @@ -2952,11 +2952,11 @@ static int ggtt_probe_common(struct drm_device *dev, if (IS_ERR(scratch_page)) { DRM_ERROR("Scratch setup failed\n"); /* iounmap will also get called at remove, but meh */ - iounmap(dev_priv->gtt.gsm); + iounmap(ggtt->gsm); return PTR_ERR(scratch_page); } - dev_priv->gtt.base.scratch_page = scratch_page; + ggtt->base.scratch_page = scratch_page; return 0; } @@ -2977,7 +2977,7 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - if (!USES_PPGTT(dev_priv->dev)) + if (!USES_PPGTT(dev_priv)) /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, * so RTL will always use the value corresponding to * pat_sel = 000". 
@@ -3034,20 +3034,16 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); } -static int gen8_gmch_probe(struct drm_device *dev, - u64 *gtt_total, - size_t *stolen, - phys_addr_t *mappable_base, - u64 *mappable_end) +static int gen8_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = dev->dev_private; - u64 gtt_size; + struct drm_device *dev = ggtt->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); u16 snb_gmch_ctl; int ret; /* TODO: We're not aware of mappable constraints on gen8 yet */ - *mappable_base = pci_resource_start(dev->pdev, 2); - *mappable_end = pci_resource_len(dev->pdev, 2); + ggtt->mappable_base = pci_resource_start(dev->pdev, 2); + ggtt->mappable_end = pci_resource_len(dev->pdev, 2); if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39))) pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39)); @@ -3055,56 +3051,50 @@ static int gen8_gmch_probe(struct drm_device *dev, pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); if (INTEL_INFO(dev)->gen >= 9) { - *stolen = gen9_get_stolen_size(snb_gmch_ctl); - gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); + ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl); + ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl); } else if (IS_CHERRYVIEW(dev)) { - *stolen = chv_get_stolen_size(snb_gmch_ctl); - gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); + ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl); + ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl); } else { - *stolen = gen8_get_stolen_size(snb_gmch_ctl); - gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); + ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl); + ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl); } - *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT; + ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT; if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) chv_setup_private_ppat(dev_priv); else bdw_setup_private_ppat(dev_priv); - ret = ggtt_probe_common(dev, gtt_size); - - dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range; - dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries; - dev_priv->gtt.base.bind_vma = ggtt_bind_vma; - dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; + ret = ggtt_probe_common(dev, ggtt->size); + ggtt->base.clear_range = gen8_ggtt_clear_range; if (IS_CHERRYVIEW(dev_priv)) - dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL; + ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL; + else + ggtt->base.insert_entries = gen8_ggtt_insert_entries; + ggtt->base.bind_vma = ggtt_bind_vma; + ggtt->base.unbind_vma = ggtt_unbind_vma; return ret; } -static int gen6_gmch_probe(struct drm_device *dev, - u64 *gtt_total, - size_t *stolen, - phys_addr_t *mappable_base, - u64 *mappable_end) +static int gen6_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned int gtt_size; + struct drm_device *dev = ggtt->base.dev; u16 snb_gmch_ctl; int ret; - *mappable_base = pci_resource_start(dev->pdev, 2); - *mappable_end = pci_resource_len(dev->pdev, 2); + ggtt->mappable_base = pci_resource_start(dev->pdev, 2); + ggtt->mappable_end = pci_resource_len(dev->pdev, 2); /* 64/512MB is the current min/max we actually know of, but this is just * a coarse sanity check. 
*/ - if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { - DRM_ERROR("Unknown GMADR size (%llx)\n", - dev_priv->gtt.mappable_end); + if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) { + DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end); return -ENXIO; } @@ -3112,37 +3102,32 @@ static int gen6_gmch_probe(struct drm_device *dev, pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); - *stolen = gen6_get_stolen_size(snb_gmch_ctl); + ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); + ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl); + ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT; - gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT; + ret = ggtt_probe_common(dev, ggtt->size); - ret = ggtt_probe_common(dev, gtt_size); - - dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range; - dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries; - dev_priv->gtt.base.bind_vma = ggtt_bind_vma; - dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; + ggtt->base.clear_range = gen6_ggtt_clear_range; + ggtt->base.insert_entries = gen6_ggtt_insert_entries; + ggtt->base.bind_vma = ggtt_bind_vma; + ggtt->base.unbind_vma = ggtt_unbind_vma; return ret; } static void gen6_gmch_remove(struct i915_address_space *vm) { + struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base); - struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base); - - iounmap(gtt->gsm); + iounmap(ggtt->gsm); free_scratch_page(vm->dev, vm->scratch_page); } -static int i915_gmch_probe(struct drm_device *dev, - u64 *gtt_total, - size_t *stolen, - phys_addr_t *mappable_base, - u64 *mappable_end) +static int i915_gmch_probe(struct i915_ggtt *ggtt) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_device *dev = ggtt->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); int ret; ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); @@ -3151,15 +3136,16 @@ static int i915_gmch_probe(struct drm_device *dev, return -EIO; } - intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); + intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, + &ggtt->mappable_base, &ggtt->mappable_end); - dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); - dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries; - dev_priv->gtt.base.clear_range = i915_ggtt_clear_range; - dev_priv->gtt.base.bind_vma = ggtt_bind_vma; - dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma; + ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); + ggtt->base.insert_entries = i915_ggtt_insert_entries; + ggtt->base.clear_range = i915_ggtt_clear_range; + ggtt->base.bind_vma = ggtt_bind_vma; + ggtt->base.unbind_vma = ggtt_unbind_vma; - if (unlikely(dev_priv->gtt.do_idle_maps)) + if (unlikely(ggtt->do_idle_maps)) DRM_INFO("applying Ironlake quirks for intel_iommu\n"); return 0; @@ -3170,41 +3156,52 @@ static void i915_gmch_remove(struct i915_address_space *vm) intel_gmch_remove(); } -int i915_gem_gtt_init(struct drm_device *dev) +/** + * i915_ggtt_init_hw - Initialize GGTT hardware + * @dev: DRM device + */ +int i915_ggtt_init_hw(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_gtt *gtt = &dev_priv->gtt; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; int ret; if (INTEL_INFO(dev)->gen <= 5) { - gtt->gtt_probe = i915_gmch_probe; - 
gtt->base.cleanup = i915_gmch_remove; + ggtt->probe = i915_gmch_probe; + ggtt->base.cleanup = i915_gmch_remove; } else if (INTEL_INFO(dev)->gen < 8) { - gtt->gtt_probe = gen6_gmch_probe; - gtt->base.cleanup = gen6_gmch_remove; + ggtt->probe = gen6_gmch_probe; + ggtt->base.cleanup = gen6_gmch_remove; if (IS_HASWELL(dev) && dev_priv->ellc_size) - gtt->base.pte_encode = iris_pte_encode; + ggtt->base.pte_encode = iris_pte_encode; else if (IS_HASWELL(dev)) - gtt->base.pte_encode = hsw_pte_encode; + ggtt->base.pte_encode = hsw_pte_encode; else if (IS_VALLEYVIEW(dev)) - gtt->base.pte_encode = byt_pte_encode; + ggtt->base.pte_encode = byt_pte_encode; else if (INTEL_INFO(dev)->gen >= 7) - gtt->base.pte_encode = ivb_pte_encode; + ggtt->base.pte_encode = ivb_pte_encode; else - gtt->base.pte_encode = snb_pte_encode; + ggtt->base.pte_encode = snb_pte_encode; } else { - dev_priv->gtt.gtt_probe = gen8_gmch_probe; - dev_priv->gtt.base.cleanup = gen6_gmch_remove; + ggtt->probe = gen8_gmch_probe; + ggtt->base.cleanup = gen6_gmch_remove; } - gtt->base.dev = dev; - gtt->base.is_ggtt = true; + ggtt->base.dev = dev; + ggtt->base.is_ggtt = true; - ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size, - &gtt->mappable_base, &gtt->mappable_end); + ret = ggtt->probe(ggtt); if (ret) return ret; + if ((ggtt->base.total - 1) >> 32) { + DRM_ERROR("We never expected a Global GTT with more than 32bits " "of address space! Found %lldM!\n", + ggtt->base.total >> 20); + ggtt->base.total = 1ULL << 32; + ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total); + } + /* * Initialise stolen early so that we may reserve preallocated * objects for the BIOS to KMS transition. @@ -3215,9 +3212,9 @@ int i915_gem_gtt_init(struct drm_device *dev) /* GMADR is the PCI mmio aperture into the global GTT. */ DRM_INFO("Memory usable by graphics device = %lluM\n", - gtt->base.total >> 20); - DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20); - DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20); + ggtt->base.total >> 20); + DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20); + DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20); #ifdef CONFIG_INTEL_IOMMU if (intel_iommu_gfx_mapped) DRM_INFO("VT-d active for gfx access\n"); @@ -3234,33 +3231,30 @@ int i915_gem_gtt_init(struct drm_device *dev) return 0; out_gtt_cleanup: - gtt->base.cleanup(&dev_priv->gtt.base); + ggtt->base.cleanup(&ggtt->base); return ret; } void i915_gem_restore_gtt_mappings(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj; - struct i915_address_space *vm; struct i915_vma *vma; bool flush; i915_check_and_clear_faults(dev); /* First fill our portion of the GTT with scratch pages */ - dev_priv->gtt.base.clear_range(&dev_priv->gtt.base, - dev_priv->gtt.base.start, - dev_priv->gtt.base.total, - true); + ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, + true); /* Cache flush objects bound into GGTT and rebind them. 
*/ - vm = &dev_priv->gtt.base; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { flush = false; list_for_each_entry(vma, &obj->vma_list, obj_link) { - if (vma->vm != vm) + if (vma->vm != &ggtt->base) continue; WARN_ON(i915_vma_bind(vma, obj->cache_level, @@ -3283,15 +3277,17 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) } if (USES_PPGTT(dev)) { + struct i915_address_space *vm; + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { /* TODO: Perhaps it shouldn't be gen6 specific */ - struct i915_hw_ppgtt *ppgtt = - container_of(vm, struct i915_hw_ppgtt, - base); + struct i915_hw_ppgtt *ppgtt; - if (i915_is_ggtt(vm)) + if (vm->is_ggtt) ppgtt = dev_priv->mm.aliasing_ppgtt; + else + ppgtt = i915_vm_to_ppgtt(vm); gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); @@ -3350,19 +3346,13 @@ struct i915_vma * i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { - struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); - struct i915_vma *vma; - - if (WARN_ON(!view)) - return ERR_PTR(-EINVAL); - - vma = i915_gem_obj_to_ggtt_view(obj, view); - - if (IS_ERR(vma)) - return vma; + struct drm_device *dev = obj->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); if (!vma) - vma = __i915_gem_vma_create(obj, ggtt, view); + vma = __i915_gem_vma_create(obj, &ggtt->base, view); return vma; @@ -3377,11 +3367,6 @@ rotate_pages(const dma_addr_t *in, unsigned int offset, unsigned int column, row; unsigned int src_idx; - if (!sg) { - st->nents = 0; - sg = st->sgl; - } - for (column = 0; column < width; column++) { src_idx = stride * (height - 1) + column; for (row = 0; row < height; row++) { @@ -3405,7 +3390,7 @@ static struct sg_table * intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, struct drm_i915_gem_object *obj) { - unsigned int size_pages = rot_info->size >> PAGE_SHIFT; + unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; unsigned int size_pages_uv; struct sg_page_iter sg_iter; unsigned long i; @@ -3416,14 +3401,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, int ret = -ENOMEM; /* Allocate a temporary list of source pages for random access. */ - page_addr_list = drm_malloc_ab(obj->base.size / PAGE_SIZE, - sizeof(dma_addr_t)); + page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, + sizeof(dma_addr_t), + GFP_TEMPORARY); if (!page_addr_list) return ERR_PTR(ret); /* Account for UV plane with NV12. */ if (rot_info->pixel_format == DRM_FORMAT_NV12) - size_pages_uv = rot_info->size_uv >> PAGE_SHIFT; + size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height; else size_pages_uv = 0; @@ -3443,11 +3429,14 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, i++; } + st->nents = 0; + sg = st->sgl; + /* Rotate the pages. */ sg = rotate_pages(page_addr_list, 0, - rot_info->width_pages, rot_info->height_pages, - rot_info->width_pages, - st, NULL); + rot_info->plane[0].width, rot_info->plane[0].height, + rot_info->plane[0].width, + st, sg); /* Append the UV plane if NV12. 
*/ if (rot_info->pixel_format == DRM_FORMAT_NV12) { @@ -3459,18 +3448,15 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, rot_info->uv_start_page = uv_start_page; - rotate_pages(page_addr_list, uv_start_page, - rot_info->width_pages_uv, - rot_info->height_pages_uv, - rot_info->width_pages_uv, - st, sg); + sg = rotate_pages(page_addr_list, rot_info->uv_start_page, + rot_info->plane[1].width, rot_info->plane[1].height, + rot_info->plane[1].width, + st, sg); } - DRM_DEBUG_KMS( - "Created rotated page mapping for object size %zu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0)).\n", - obj->base.size, rot_info->pitch, rot_info->height, - rot_info->pixel_format, rot_info->width_pages, - rot_info->height_pages, size_pages + size_pages_uv, + DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n", + obj->base.size, rot_info->plane[0].width, + rot_info->plane[0].height, size_pages + size_pages_uv, size_pages); drm_free_large(page_addr_list); @@ -3482,11 +3468,9 @@ err_sg_alloc: err_st_alloc: drm_free_large(page_addr_list); - DRM_DEBUG_KMS( - "Failed to create rotated mapping for object size %zu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %u pages (%u plane 0))\n", - obj->base.size, ret, rot_info->pitch, rot_info->height, - rot_info->pixel_format, rot_info->width_pages, - rot_info->height_pages, size_pages + size_pages_uv, + DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n", + obj->base.size, ret, rot_info->plane[0].width, + rot_info->plane[0].height, size_pages + size_pages_uv, size_pages); return ERR_PTR(ret); } @@ -3634,7 +3618,7 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj, if (view->type == I915_GGTT_VIEW_NORMAL) { return obj->base.size; } else if (view->type == I915_GGTT_VIEW_ROTATED) { - return view->params.rotated.size; + return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT; } else if (view->type == I915_GGTT_VIEW_PARTIAL) { return view->params.partial.size << PAGE_SHIFT; } else { diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index 8774f1ba46e7..d7dd3d8a8758 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t; typedef uint64_t gen8_ppgtt_pdpe_t; typedef uint64_t gen8_ppgtt_pml4e_t; -#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) +#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT) /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) @@ -135,16 +135,13 @@ enum i915_ggtt_view_type { }; struct intel_rotation_info { - unsigned int height; - unsigned int pitch; unsigned int uv_offset; uint32_t pixel_format; - uint64_t fb_modifier; - unsigned int width_pages, height_pages; - uint64_t size; - unsigned int width_pages_uv, height_pages_uv; - uint64_t size_uv; unsigned int uv_start_page; + struct { + /* tiles */ + unsigned int width, height; + } plane[2]; }; struct i915_ggtt_view { @@ -342,13 +339,14 @@ struct i915_address_space { * and correct (in cases like swizzling). That region is referred to as GMADR in * the spec. 
*/ -struct i915_gtt { +struct i915_ggtt { struct i915_address_space base; size_t stolen_size; /* Total size of stolen memory */ size_t stolen_usable_size; /* Total size minus BIOS reserved */ size_t stolen_reserved_base; size_t stolen_reserved_size; + size_t size; /* Total size of Global GTT */ u64 mappable_end; /* End offset that we can CPU map */ struct io_mapping *mappable; /* Mapping to our CPU mappable region */ phys_addr_t mappable_base; /* PA of our GMADR */ @@ -360,10 +358,7 @@ struct i915_gtt { int mtrr; - /* global gtt ops */ - int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total, - size_t *stolen, phys_addr_t *mappable_base, - u64 *mappable_end); + int (*probe)(struct i915_ggtt *ggtt); }; struct i915_hw_ppgtt { @@ -518,10 +513,9 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n) px_dma(ppgtt->base.scratch_pd); } -int i915_gem_gtt_init(struct drm_device *dev); -void i915_gem_init_global_gtt(struct drm_device *dev); -void i915_global_gtt_cleanup(struct drm_device *dev); - +int i915_ggtt_init_hw(struct drm_device *dev); +void i915_gem_init_ggtt(struct drm_device *dev); +void i915_ggtt_cleanup_hw(struct drm_device *dev); int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt); int i915_ppgtt_init_hw(struct drm_device *dev); diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c index fc7e6d5c6251..71611bf21fca 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.c +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c @@ -169,15 +169,15 @@ void i915_gem_render_state_fini(struct render_state *so) drm_gem_object_unreference(&so->obj->base); } -int i915_gem_render_state_prepare(struct intel_engine_cs *ring, +int i915_gem_render_state_prepare(struct intel_engine_cs *engine, struct render_state *so) { int ret; - if (WARN_ON(ring->id != RCS)) + if (WARN_ON(engine->id != RCS)) return -ENOENT; - ret = render_state_init(so, ring->dev); + ret = render_state_init(so, engine->dev); if (ret) return ret; @@ -198,21 +198,21 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req) struct render_state so; int ret; - ret = i915_gem_render_state_prepare(req->ring, &so); + ret = i915_gem_render_state_prepare(req->engine, &so); if (ret) return ret; if (so.rodata == NULL) return 0; - ret = req->ring->dispatch_execbuffer(req, so.ggtt_offset, + ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset, so.rodata->batch_items * 4, I915_DISPATCH_SECURE); if (ret) goto out; if (so.aux_batch_size > 8) { - ret = req->ring->dispatch_execbuffer(req, + ret = req->engine->dispatch_execbuffer(req, (so.ggtt_offset + so.aux_batch_offset), so.aux_batch_size, diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h index e641bb093a90..6aaa3a10a630 100644 --- a/drivers/gpu/drm/i915/i915_gem_render_state.h +++ b/drivers/gpu/drm/i915/i915_gem_render_state.h @@ -43,7 +43,7 @@ struct render_state { int i915_gem_render_state_init(struct drm_i915_gem_request *req); void i915_gem_render_state_fini(struct render_state *so); -int i915_gem_render_state_prepare(struct intel_engine_cs *ring, +int i915_gem_render_state_prepare(struct intel_engine_cs *engine, struct render_state *so); #endif /* _I915_GEM_RENDER_STATE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index d3c473ffb90a..d46388f25e04 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -28,6 +28,7 @@ #include 
<linux/swap.h> #include <linux/pci.h> #include <linux/dma-buf.h> +#include <linux/vmalloc.h> #include <drm/drmP.h> #include <drm/i915_drm.h> @@ -166,6 +167,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv, obj->madv != I915_MADV_DONTNEED) continue; + if (flags & I915_SHRINK_VMAPS && + !is_vmalloc_addr(obj->mapping)) + continue; + if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active) continue; @@ -246,7 +251,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) count = 0; list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) - if (obj->pages_pin_count == 0) + if (can_release_pages(obj)) count += obj->base.size >> PAGE_SHIFT; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { @@ -288,35 +293,56 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) return freed; } +struct shrinker_lock_uninterruptible { + bool was_interruptible; + bool unlock; +}; + +static bool +i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv, + struct shrinker_lock_uninterruptible *slu, + int timeout_ms) +{ + unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1; + + while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) { + schedule_timeout_killable(1); + if (fatal_signal_pending(current)) + return false; + if (--timeout == 0) { + pr_err("Unable to lock GPU to purge memory.\n"); + return false; + } + } + + slu->was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + return true; +} + +static void +i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv, + struct shrinker_lock_uninterruptible *slu) +{ + dev_priv->mm.interruptible = slu->was_interruptible; + if (slu->unlock) + mutex_unlock(&dev_priv->dev->struct_mutex); +} + static int i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) { struct drm_i915_private *dev_priv = container_of(nb, struct drm_i915_private, mm.oom_notifier); - struct drm_device *dev = dev_priv->dev; + struct shrinker_lock_uninterruptible slu; struct drm_i915_gem_object *obj; - unsigned long timeout = msecs_to_jiffies(5000) + 1; unsigned long pinned, bound, unbound, freed_pages; - bool was_interruptible; - bool unlock; - while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { - schedule_timeout_killable(1); - if (fatal_signal_pending(current)) - return NOTIFY_DONE; - } - if (timeout == 0) { - pr_err("Unable to purge GPU memory due lock contention.\n"); + if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) return NOTIFY_DONE; - } - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; freed_pages = i915_gem_shrink_all(dev_priv); - dev_priv->mm.interruptible = was_interruptible; - /* Because we may be allocating inside our own driver, we cannot * assert that there are no objects with pinned pages that are not * being pointed to by hardware. 
@@ -341,8 +367,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) bound += obj->base.size; } - if (unlock) - mutex_unlock(&dev->struct_mutex); + i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); if (freed_pages || unbound || bound) pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", @@ -356,6 +381,29 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) return NOTIFY_DONE; } +static int +i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct drm_i915_private *dev_priv = + container_of(nb, struct drm_i915_private, mm.vmap_notifier); + struct shrinker_lock_uninterruptible slu; + unsigned long freed_pages; + + if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) + return NOTIFY_DONE; + + freed_pages = i915_gem_shrink(dev_priv, -1UL, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_ACTIVE | + I915_SHRINK_VMAPS); + + i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); + + *(unsigned long *)ptr += freed_pages; + return NOTIFY_DONE; +} + /** * i915_gem_shrinker_init - Initialize i915 shrinker * @dev_priv: i915 device @@ -371,6 +419,9 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; WARN_ON(register_oom_notifier(&dev_priv->mm.oom_notifier)); + + dev_priv->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap; + WARN_ON(register_vmap_purge_notifier(&dev_priv->mm.vmap_notifier)); } /** @@ -381,6 +432,7 @@ void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) */ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv) { + WARN_ON(unregister_vmap_purge_notifier(&dev_priv->mm.vmap_notifier)); WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); unregister_shrinker(&dev_priv->mm.shrinker); } diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 2e6e9fb6f80d..ea06da012d32 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -72,9 +72,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node, u64 size, unsigned alignment) { + struct i915_ggtt *ggtt = &dev_priv->ggtt; + return i915_gem_stolen_insert_node_in_range(dev_priv, node, size, - alignment, 0, - dev_priv->gtt.stolen_usable_size); + alignment, 0, + ggtt->stolen_usable_size); } void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, @@ -87,7 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, static unsigned long i915_stolen_to_physical(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct resource *r; u32 base; @@ -134,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) I85X_DRB3, &tmp); tom = tmp * MB(32); - base = tom - tseg_size - dev_priv->gtt.stolen_size; + base = tom - tseg_size - ggtt->stolen_size; } else if (IS_845G(dev)) { u32 tseg_size = 0; u32 tom; @@ -158,7 +161,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) I830_DRB3, &tmp); tom = tmp * MB(32); - base = tom - tseg_size - dev_priv->gtt.stolen_size; + base = tom - tseg_size - ggtt->stolen_size; } else if (IS_I830(dev)) { u32 tseg_size = 0; u32 tom; @@ -178,7 +181,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) I830_DRB3, &tmp); tom = tmp * 
MB(32); - base = tom - tseg_size - dev_priv->gtt.stolen_size; + base = tom - tseg_size - ggtt->stolen_size; } if (base == 0) @@ -189,41 +192,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) struct { u32 start, end; } stolen[2] = { - { .start = base, .end = base + dev_priv->gtt.stolen_size, }, - { .start = base, .end = base + dev_priv->gtt.stolen_size, }, + { .start = base, .end = base + ggtt->stolen_size, }, + { .start = base, .end = base + ggtt->stolen_size, }, }; - u64 gtt_start, gtt_end; + u64 ggtt_start, ggtt_end; - gtt_start = I915_READ(PGTBL_CTL); + ggtt_start = I915_READ(PGTBL_CTL); if (IS_GEN4(dev)) - gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) | - (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28; + ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) | + (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28; else - gtt_start &= PGTBL_ADDRESS_LO_MASK; - gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4; + ggtt_start &= PGTBL_ADDRESS_LO_MASK; + ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4; - if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end) - stolen[0].end = gtt_start; - if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end) - stolen[1].start = gtt_end; + if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end) + stolen[0].end = ggtt_start; + if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end) + stolen[1].start = ggtt_end; /* pick the larger of the two chunks */ if (stolen[0].end - stolen[0].start > stolen[1].end - stolen[1].start) { base = stolen[0].start; - dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start; + ggtt->stolen_size = stolen[0].end - stolen[0].start; } else { base = stolen[1].start; - dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start; + ggtt->stolen_size = stolen[1].end - stolen[1].start; } if (stolen[0].start != stolen[1].start || stolen[0].end != stolen[1].end) { DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n", - (unsigned long long) gtt_start, - (unsigned long long) gtt_end - 1); + (unsigned long long)ggtt_start, + (unsigned long long)ggtt_end - 1); DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n", - base, base + (u32) dev_priv->gtt.stolen_size - 1); + base, base + (u32)ggtt->stolen_size - 1); } } @@ -233,7 +236,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) * kernel. So if the region is already marked as busy, something * is seriously wrong. */ - r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, + r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size, "Graphics Stolen Memory"); if (r == NULL) { /* @@ -245,7 +248,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) * reservation starting from 1 instead of 0. */ r = devm_request_mem_region(dev->dev, base + 1, - dev_priv->gtt.stolen_size - 1, + ggtt->stolen_size - 1, "Graphics Stolen Memory"); /* * GEN3 firmware likes to smash pci bridges into the stolen @@ -253,7 +256,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) */ if (r == NULL && !IS_GEN3(dev)) { DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", - base, base + (uint32_t)dev_priv->gtt.stolen_size); + base, base + (uint32_t)ggtt->stolen_size); base = 0; } } @@ -274,11 +277,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev) static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv, unsigned long *base, unsigned long *size) { + struct i915_ggtt *ggtt = &dev_priv->ggtt; uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ? 
CTG_STOLEN_RESERVED : ELK_STOLEN_RESERVED); unsigned long stolen_top = dev_priv->mm.stolen_base + - dev_priv->gtt.stolen_size; + ggtt->stolen_size; *base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16; @@ -369,10 +373,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv, static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, unsigned long *base, unsigned long *size) { + struct i915_ggtt *ggtt = &dev_priv->ggtt; uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); unsigned long stolen_top; - stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size; + stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; @@ -388,7 +393,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, int i915_gem_init_stolen(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long reserved_total, reserved_base = 0, reserved_size; unsigned long stolen_top; @@ -401,14 +407,14 @@ int i915_gem_init_stolen(struct drm_device *dev) } #endif - if (dev_priv->gtt.stolen_size == 0) + if (ggtt->stolen_size == 0) return 0; dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); if (dev_priv->mm.stolen_base == 0) return 0; - stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size; + stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size; switch (INTEL_INFO(dev_priv)->gen) { case 2: @@ -458,19 +464,18 @@ int i915_gem_init_stolen(struct drm_device *dev) return 0; } - dev_priv->gtt.stolen_reserved_base = reserved_base; - dev_priv->gtt.stolen_reserved_size = reserved_size; + ggtt->stolen_reserved_base = reserved_base; + ggtt->stolen_reserved_size = reserved_size; /* It is possible for the reserved area to end before the end of stolen * memory, so just consider the start. */ reserved_total = stolen_top - reserved_base; DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n", - dev_priv->gtt.stolen_size >> 10, - (dev_priv->gtt.stolen_size - reserved_total) >> 10); + ggtt->stolen_size >> 10, + (ggtt->stolen_size - reserved_total) >> 10); - dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size - - reserved_total; + ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total; /* * Basic memrange allocator for stolen space. @@ -483,7 +488,7 @@ int i915_gem_init_stolen(struct drm_device *dev) * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon * problem later. 
*/ - drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size); + drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size); return 0; } @@ -492,12 +497,13 @@ static struct sg_table * i915_pages_create_for_stolen(struct drm_device *dev, u32 offset, u32 size) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct sg_table *st; struct scatterlist *sg; DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); - BUG_ON(offset > dev_priv->gtt.stolen_size - size); + BUG_ON(offset > ggtt->stolen_size - size); /* We hide that we have no struct page backing our stolen object * by wrapping the contiguous physical allocation with a fake @@ -628,8 +634,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 gtt_offset, u32 size) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct i915_address_space *ggtt = &dev_priv->gtt.base; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; struct i915_vma *vma; @@ -675,7 +681,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, if (gtt_offset == I915_GTT_OFFSET_NONE) return obj; - vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt); + vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; @@ -688,8 +694,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, */ vma->node.start = gtt_offset; vma->node.size = size; - if (drm_mm_initialized(&ggtt->mm)) { - ret = drm_mm_reserve_node(&ggtt->mm, &vma->node); + if (drm_mm_initialized(&ggtt->base.mm)) { + ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node); if (ret) { DRM_DEBUG_KMS("failed to allocate stolen GTT space\n"); goto err; @@ -697,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, vma->bound |= GLOBAL_BIND; __i915_vma_set_map_and_fenceable(vma); - list_add_tail(&vma->vm_link, &ggtt->inactive_list); + list_add_tail(&vma->vm_link, &ggtt->base.inactive_list); } list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 6be40f3ba2c7..bebaf75d5348 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -34,7 +34,7 @@ struct i915_mm_struct { struct mm_struct *mm; - struct drm_device *dev; + struct drm_i915_private *i915; struct i915_mmu_notifier *mn; struct hlist_node node; struct kref kref; @@ -49,6 +49,7 @@ struct i915_mmu_notifier { struct hlist_node node; struct mmu_notifier mn; struct rb_root objects; + struct workqueue_struct *wq; }; struct i915_mmu_object { @@ -60,6 +61,40 @@ struct i915_mmu_object { bool attached; }; +static void wait_rendering(struct drm_i915_gem_object *obj) +{ + struct drm_device *dev = obj->base.dev; + struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; + unsigned reset_counter; + int i, n; + + if (!obj->active) + return; + + n = 0; + for (i = 0; i < I915_NUM_ENGINES; i++) { + struct drm_i915_gem_request *req; + + req = obj->last_read_req[i]; + if (req == NULL) + continue; + + requests[n++] = i915_gem_request_reference(req); + } + + reset_counter = atomic_read(&to_i915(dev)->gpu_error.reset_counter); + mutex_unlock(&dev->struct_mutex); + + for (i = 0; i < n; i++) + __i915_wait_request(requests[i], reset_counter, false, + NULL, NULL); + + 
mutex_lock(&dev->struct_mutex); + + for (i = 0; i < n; i++) + i915_gem_request_unreference(requests[i]); +} + static void cancel_userptr(struct work_struct *work) { struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); @@ -75,6 +110,8 @@ static void cancel_userptr(struct work_struct *work) struct i915_vma *vma, *tmp; bool was_interruptible; + wait_rendering(obj); + was_interruptible = dev_priv->mm.interruptible; dev_priv->mm.interruptible = false; @@ -140,7 +177,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, */ mo = container_of(it, struct i915_mmu_object, it); if (kref_get_unless_zero(&mo->obj->base.refcount)) - schedule_work(&mo->work); + queue_work(mn->wq, &mo->work); list_add(&mo->link, &cancelled); it = interval_tree_iter_next(it, start, end); @@ -148,6 +185,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, list_for_each_entry(mo, &cancelled, link) del_object(mo); spin_unlock(&mn->lock); + + flush_workqueue(mn->wq); } static const struct mmu_notifier_ops i915_gem_userptr_notifier = { @@ -167,10 +206,16 @@ i915_mmu_notifier_create(struct mm_struct *mm) spin_lock_init(&mn->lock); mn->mn.ops = &i915_gem_userptr_notifier; mn->objects = RB_ROOT; + mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0); + if (mn->wq == NULL) { + kfree(mn); + return ERR_PTR(-ENOMEM); + } /* Protected by mmap_sem (write-lock) */ ret = __mmu_notifier_register(&mn->mn, mm); if (ret) { + destroy_workqueue(mn->wq); kfree(mn); return ERR_PTR(ret); } @@ -205,13 +250,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm) return mn; down_write(&mm->mm->mmap_sem); - mutex_lock(&to_i915(mm->dev)->mm_lock); + mutex_lock(&mm->i915->mm_lock); if ((mn = mm->mn) == NULL) { mn = i915_mmu_notifier_create(mm->mm); if (!IS_ERR(mn)) mm->mn = mn; } - mutex_unlock(&to_i915(mm->dev)->mm_lock); + mutex_unlock(&mm->i915->mm_lock); up_write(&mm->mm->mmap_sem); return mn; @@ -256,6 +301,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn, return; mmu_notifier_unregister(&mn->mn, mm); + destroy_workqueue(mn->wq); kfree(mn); } @@ -327,7 +373,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) } kref_init(&mm->kref); - mm->dev = obj->base.dev; + mm->i915 = to_i915(obj->base.dev); mm->mm = current->mm; atomic_inc(&current->mm->mm_count); @@ -362,7 +408,7 @@ __i915_mm_struct_free(struct kref *kref) /* Protected by dev_priv->mm_lock */ hash_del(&mm->node); - mutex_unlock(&to_i915(mm->dev)->mm_lock); + mutex_unlock(&mm->i915->mm_lock); INIT_WORK(&mm->work, __i915_mm_struct_free__worker); schedule_work(&mm->work); @@ -494,26 +540,28 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) ret = -ENOMEM; pinned = 0; - pvec = kmalloc(npages*sizeof(struct page *), - GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); - if (pvec == NULL) - pvec = drm_malloc_ab(npages, sizeof(struct page *)); + pvec = drm_malloc_gfp(npages, sizeof(struct page *), GFP_TEMPORARY); if (pvec != NULL) { struct mm_struct *mm = obj->userptr.mm->mm; - down_read(&mm->mmap_sem); - while (pinned < npages) { - ret = get_user_pages_remote(work->task, mm, - obj->userptr.ptr + pinned * PAGE_SIZE, - npages - pinned, - !obj->userptr.read_only, 0, - pvec + pinned, NULL); - if (ret < 0) - break; - - pinned += ret; + ret = -EFAULT; + if (atomic_inc_not_zero(&mm->mm_users)) { + down_read(&mm->mmap_sem); + while (pinned < npages) { + ret = get_user_pages_remote + (work->task, mm, + obj->userptr.ptr + pinned * PAGE_SIZE, + npages - pinned, + !obj->userptr.read_only, 0,
+ pvec + pinned, NULL); + if (ret < 0) + break; + + pinned += ret; + } + up_read(&mm->mmap_sem); + mmput(mm); } - up_read(&mm->mmap_sem); } mutex_lock(&dev->struct_mutex); @@ -634,14 +682,11 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) pvec = NULL; pinned = 0; if (obj->userptr.mm->mm == current->mm) { - pvec = kmalloc(num_pages*sizeof(struct page *), - GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); + pvec = drm_malloc_gfp(num_pages, sizeof(struct page *), + GFP_TEMPORARY); if (pvec == NULL) { - pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); - if (pvec == NULL) { - __i915_gem_userptr_set_active(obj, false); - return -ENOMEM; - } + __i915_gem_userptr_set_active(obj, false); + return -ENOMEM; } pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, @@ -683,7 +728,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) set_page_dirty(page); mark_page_accessed(page); - page_cache_release(page); + put_page(page); } obj->dirty = 0; @@ -758,6 +803,13 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file int ret; u32 handle; + if (!HAS_LLC(dev) && !HAS_SNOOP(dev)) { + /* We cannot support coherent userptr objects on hw without + * LLC and broken snooping. + */ + return -ENODEV; + } + if (args->flags & ~(I915_USERPTR_READ_ONLY | I915_USERPTR_UNSYNCHRONIZED)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 831895b8cb75..89725c9efc25 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -198,7 +198,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m, err->size, err->read_domains, err->write_domain); - for (i = 0; i < I915_NUM_RINGS; i++) + for (i = 0; i < I915_NUM_ENGINES; i++) err_printf(m, "%02x ", err->rseqno[i]); err_printf(m, "] %02x", err->wseqno); @@ -230,8 +230,6 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a) return "wait"; case HANGCHECK_ACTIVE: return "active"; - case HANGCHECK_ACTIVE_LOOP: - return "active (loop)"; case HANGCHECK_KICK: return "kick"; case HANGCHECK_HUNG: @@ -298,6 +296,7 @@ static void i915_ring_error_state(struct drm_i915_error_state_buf *m, } } err_printf(m, " seqno: 0x%08x\n", ring->seqno); + err_printf(m, " last_seqno: 0x%08x\n", ring->last_seqno); err_printf(m, " waiting: %s\n", yesno(ring->waiting)); err_printf(m, " ring->head: 0x%08x\n", ring->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ring->cpu_ring_tail); @@ -433,7 +432,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, for (i = 0; i < ARRAY_SIZE(error->ring); i++) { obj = error->ring[i].batchbuffer; if (obj) { - err_puts(m, dev_priv->ring[i].name); + err_puts(m, dev_priv->engine[i].name); if (error->ring[i].pid != -1) err_printf(m, " (submitted by %s [%d])", error->ring[i].comm, @@ -447,14 +446,14 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, obj = error->ring[i].wa_batchbuffer; if (obj) { err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n", - dev_priv->ring[i].name, + dev_priv->engine[i].name, lower_32_bits(obj->gtt_offset)); print_error_obj(m, obj); } if (error->ring[i].num_requests) { err_printf(m, "%s --- %d requests\n", - dev_priv->ring[i].name, + dev_priv->engine[i].name, error->ring[i].num_requests); for (j = 0; j < error->ring[i].num_requests; j++) { err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n", @@ -466,7 +465,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if ((obj = 
error->ring[i].ringbuffer)) { err_printf(m, "%s --- ringbuffer = 0x%08x\n", - dev_priv->ring[i].name, + dev_priv->engine[i].name, lower_32_bits(obj->gtt_offset)); print_error_obj(m, obj); } @@ -480,7 +479,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, hws_page = &obj->pages[LRC_PPHWSP_PN][0]; } err_printf(m, "%s --- HW Status = 0x%08llx\n", - dev_priv->ring[i].name, hws_offset); + dev_priv->engine[i].name, hws_offset); offset = 0; for (elt = 0; elt < PAGE_SIZE/16; elt += 4) { err_printf(m, "[%04x] %08x %08x %08x %08x\n", @@ -493,9 +492,31 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, } } + obj = error->ring[i].wa_ctx; + if (obj) { + u64 wa_ctx_offset = obj->gtt_offset; + u32 *wa_ctx_page = &obj->pages[0][0]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; + u32 wa_ctx_size = (engine->wa_ctx.indirect_ctx.size + + engine->wa_ctx.per_ctx.size); + + err_printf(m, "%s --- WA ctx batch buffer = 0x%08llx\n", + dev_priv->engine[i].name, wa_ctx_offset); + offset = 0; + for (elt = 0; elt < wa_ctx_size; elt += 4) { + err_printf(m, "[%04x] %08x %08x %08x %08x\n", + offset, + wa_ctx_page[elt + 0], + wa_ctx_page[elt + 1], + wa_ctx_page[elt + 2], + wa_ctx_page[elt + 3]); + offset += 16; + } + } + if ((obj = error->ring[i].ctx)) { err_printf(m, "%s --- HW Context = 0x%08x\n", - dev_priv->ring[i].name, + dev_priv->engine[i].name, lower_32_bits(obj->gtt_offset)); print_error_obj(m, obj); } @@ -585,6 +606,7 @@ static void i915_error_state_free(struct kref *error_ref) i915_error_object_free(error->ring[i].hws_page); i915_error_object_free(error->ring[i].ctx); kfree(error->ring[i].requests); + i915_error_object_free(error->ring[i].wa_ctx); } i915_error_object_free(error->semaphore_obj); @@ -606,6 +628,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, struct drm_i915_gem_object *src, struct i915_address_space *vm) { + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_error_object *dst; struct i915_vma *vma = NULL; int num_pages; @@ -632,7 +655,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, vma = i915_gem_obj_to_ggtt(src); use_ggtt = (src->cache_level == I915_CACHE_NONE && vma && (vma->bound & GLOBAL_BIND) && - reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end); + reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end); /* Cannot access stolen address directly, try to use the aperture */ if (src->stolen) { @@ -642,12 +665,13 @@ i915_error_object_create(struct drm_i915_private *dev_priv, goto unwind; reloc_offset = i915_gem_obj_ggtt_offset(src); - if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end) + if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end) goto unwind; } /* Cannot access snooped pages through the aperture */ - if (use_ggtt && src->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv->dev)) + if (use_ggtt && src->cache_level != I915_CACHE_NONE && + !HAS_LLC(dev_priv)) goto unwind; dst->page_count = num_pages; @@ -668,7 +692,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv, * captures what the GPU read. 
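i915_error_object_create() captures each page by mapping one source page at a time (through the GTT aperture when permitted) and copying it out with memcpy_fromio(), as the code just below does. The same window-copy loop in a standalone sketch, with plain memcpy and placeholder map/unmap helpers standing in for the io_mapping API (all names here are invented):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define FAKE_PAGE_SIZE 4096

    /* Placeholder stand-ins for io_mapping_map_atomic_wc() and
     * io_mapping_unmap_atomic(). */
    static void *fake_map_page(uint8_t *base, size_t offset)
    {
        return base + offset;
    }

    static void fake_unmap_page(void *p)
    {
        (void)p;
    }

    /* Copy num_pages out of src, one mapping window at a time. */
    static int copy_pages(uint8_t *src, uint8_t *dst[], int num_pages)
    {
        for (int i = 0; i < num_pages; i++) {
            uint8_t *d = malloc(FAKE_PAGE_SIZE);
            if (d == NULL)
                return -1;                  /* caller unwinds, as in the patch */
            void *s = fake_map_page(src, (size_t)i * FAKE_PAGE_SIZE);
            memcpy(d, s, FAKE_PAGE_SIZE);   /* memcpy_fromio() in the driver */
            fake_unmap_page(s);
            dst[i] = d;
        }
        return 0;
    }

    int main(void)
    {
        static uint8_t src[2 * FAKE_PAGE_SIZE];
        uint8_t *dst[2];

        return copy_pages(src, dst, 2) ? 1 : 0;
    }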
*/ - s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + s = io_mapping_map_atomic_wc(ggtt->mappable, reloc_offset); memcpy_fromio(d, s, PAGE_SIZE); io_mapping_unmap_atomic(s); @@ -701,7 +725,7 @@ unwind: return NULL; } #define i915_error_ggtt_object_create(dev_priv, src) \ - i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base) + i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base) static void capture_bo(struct drm_i915_error_buffer *err, struct i915_vma *vma) @@ -711,7 +735,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->size = obj->base.size; err->name = obj->base.name; - for (i = 0; i < I915_NUM_RINGS; i++) + for (i = 0; i < I915_NUM_ENGINES; i++) err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]); err->wseqno = i915_gem_request_get_seqno(obj->last_write_req); err->gtt_offset = vma->node.start; @@ -726,7 +750,7 @@ static void capture_bo(struct drm_i915_error_buffer *err, err->purgeable = obj->madv != I915_MADV_WILLNEED; err->userptr = obj->userptr.mm != NULL; err->ring = obj->last_write_req ? - i915_gem_request_get_ring(obj->last_write_req)->id : -1; + i915_gem_request_get_engine(obj->last_write_req)->id : -1; err->cache_level = obj->cache_level; } @@ -788,7 +812,7 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv, * synchronization commands which almost always appear in the case * strictly a client bug. Use instdone to differentiate those some. */ - for (i = 0; i < I915_NUM_RINGS; i++) { + for (i = 0; i < I915_NUM_ENGINES; i++) { if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) { if (ring_id) *ring_id = i; @@ -821,11 +845,11 @@ static void i915_gem_record_fences(struct drm_device *dev, static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, struct drm_i915_error_state *error, - struct intel_engine_cs *ring, + struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { struct intel_engine_cs *to; - int i; + enum intel_engine_id id; if (!i915_semaphore_is_enabled(dev_priv->dev)) return; @@ -835,68 +859,69 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv, i915_error_ggtt_object_create(dev_priv, dev_priv->semaphore_obj); - for_each_ring(to, dev_priv, i) { + for_each_engine_id(to, dev_priv, id) { int idx; u16 signal_offset; u32 *tmp; - if (ring == to) + if (engine == to) continue; - signal_offset = (GEN8_SIGNAL_OFFSET(ring, i) & (PAGE_SIZE - 1)) + signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4; tmp = error->semaphore_obj->pages[0]; - idx = intel_ring_sync_index(ring, to); + idx = intel_ring_sync_index(engine, to); ering->semaphore_mboxes[idx] = tmp[signal_offset]; - ering->semaphore_seqno[idx] = ring->semaphore.sync_seqno[idx]; + ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx]; } } static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv, - struct intel_engine_cs *ring, + struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { - ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(ring->mmio_base)); - ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(ring->mmio_base)); - ering->semaphore_seqno[0] = ring->semaphore.sync_seqno[0]; - ering->semaphore_seqno[1] = ring->semaphore.sync_seqno[1]; + ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base)); + ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base)); + ering->semaphore_seqno[0] = engine->semaphore.sync_seqno[0]; + ering->semaphore_seqno[1] = engine->semaphore.sync_seqno[1]; - if 
(HAS_VEBOX(dev_priv->dev)) { + if (HAS_VEBOX(dev_priv)) { ering->semaphore_mboxes[2] = - I915_READ(RING_SYNC_2(ring->mmio_base)); - ering->semaphore_seqno[2] = ring->semaphore.sync_seqno[2]; + I915_READ(RING_SYNC_2(engine->mmio_base)); + ering->semaphore_seqno[2] = engine->semaphore.sync_seqno[2]; } } static void i915_record_ring_state(struct drm_device *dev, struct drm_i915_error_state *error, - struct intel_engine_cs *ring, + struct intel_engine_cs *engine, struct drm_i915_error_ring *ering) { struct drm_i915_private *dev_priv = dev->dev_private; if (INTEL_INFO(dev)->gen >= 6) { - ering->rc_psmi = I915_READ(RING_PSMI_CTL(ring->mmio_base)); - ering->fault_reg = I915_READ(RING_FAULT_REG(ring)); + ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); + ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); if (INTEL_INFO(dev)->gen >= 8) - gen8_record_semaphore_state(dev_priv, error, ring, ering); + gen8_record_semaphore_state(dev_priv, error, engine, + ering); else - gen6_record_semaphore_state(dev_priv, ring, ering); + gen6_record_semaphore_state(dev_priv, engine, ering); } if (INTEL_INFO(dev)->gen >= 4) { - ering->faddr = I915_READ(RING_DMA_FADD(ring->mmio_base)); - ering->ipeir = I915_READ(RING_IPEIR(ring->mmio_base)); - ering->ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); - ering->instdone = I915_READ(RING_INSTDONE(ring->mmio_base)); - ering->instps = I915_READ(RING_INSTPS(ring->mmio_base)); - ering->bbaddr = I915_READ(RING_BBADDR(ring->mmio_base)); + ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); + ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); + ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); + ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); + ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); + ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); if (INTEL_INFO(dev)->gen >= 8) { - ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(ring->mmio_base)) << 32; - ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(ring->mmio_base)) << 32; + ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; + ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; } - ering->bbstate = I915_READ(RING_BBSTATE(ring->mmio_base)); + ering->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base)); } else { ering->faddr = I915_READ(DMA_FADD_I8XX); ering->ipeir = I915_READ(IPEIR); @@ -904,20 +929,21 @@ static void i915_record_ring_state(struct drm_device *dev, ering->instdone = I915_READ(GEN2_INSTDONE); } - ering->waiting = waitqueue_active(&ring->irq_queue); - ering->instpm = I915_READ(RING_INSTPM(ring->mmio_base)); - ering->seqno = ring->get_seqno(ring, false); - ering->acthd = intel_ring_get_active_head(ring); - ering->start = I915_READ_START(ring); - ering->head = I915_READ_HEAD(ring); - ering->tail = I915_READ_TAIL(ring); - ering->ctl = I915_READ_CTL(ring); + ering->waiting = waitqueue_active(&engine->irq_queue); + ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); + ering->acthd = intel_ring_get_active_head(engine); + ering->seqno = engine->get_seqno(engine); + ering->last_seqno = engine->last_submitted_seqno; + ering->start = I915_READ_START(engine); + ering->head = I915_READ_HEAD(engine); + ering->tail = I915_READ_TAIL(engine); + ering->ctl = I915_READ_CTL(engine); if (I915_NEED_GFX_HWS(dev)) { i915_reg_t mmio; if (IS_GEN7(dev)) { - switch (ring->id) { + switch (engine->id) { default: case RCS: mmio = RENDER_HWS_PGA_GEN7; @@ -932,51 +958,51 @@ static void i915_record_ring_state(struct 
drm_device *dev, mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(ring->dev)) { - mmio = RING_HWS_PGA_GEN6(ring->mmio_base); + } else if (IS_GEN6(engine->dev)) { + mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ - mmio = RING_HWS_PGA(ring->mmio_base); + mmio = RING_HWS_PGA(engine->mmio_base); } ering->hws = I915_READ(mmio); } - ering->hangcheck_score = ring->hangcheck.score; - ering->hangcheck_action = ring->hangcheck.action; + ering->hangcheck_score = engine->hangcheck.score; + ering->hangcheck_action = engine->hangcheck.action; if (USES_PPGTT(dev)) { int i; - ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); + ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); if (IS_GEN6(dev)) ering->vm_info.pp_dir_base = - I915_READ(RING_PP_DIR_BASE_READ(ring)); + I915_READ(RING_PP_DIR_BASE_READ(engine)); else if (IS_GEN7(dev)) ering->vm_info.pp_dir_base = - I915_READ(RING_PP_DIR_BASE(ring)); + I915_READ(RING_PP_DIR_BASE(engine)); else if (INTEL_INFO(dev)->gen >= 8) for (i = 0; i < 4; i++) { ering->vm_info.pdp[i] = - I915_READ(GEN8_RING_PDP_UDW(ring, i)); + I915_READ(GEN8_RING_PDP_UDW(engine, i)); ering->vm_info.pdp[i] <<= 32; ering->vm_info.pdp[i] |= - I915_READ(GEN8_RING_PDP_LDW(ring, i)); + I915_READ(GEN8_RING_PDP_LDW(engine, i)); } } } -static void i915_gem_record_active_context(struct intel_engine_cs *ring, +static void i915_gem_record_active_context(struct intel_engine_cs *engine, struct drm_i915_error_state *error, struct drm_i915_error_ring *ering) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; struct drm_i915_gem_object *obj; /* Currently render ring is the only HW context user */ - if (ring->id != RCS || !error->ccid) + if (engine->id != RCS || !error->ccid) return; list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { @@ -993,30 +1019,31 @@ static void i915_gem_record_active_context(struct intel_engine_cs *ring, static void i915_gem_record_rings(struct drm_device *dev, struct drm_i915_error_state *error) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_request *request; int i, count; - for (i = 0; i < I915_NUM_RINGS; i++) { - struct intel_engine_cs *ring = &dev_priv->ring[i]; + for (i = 0; i < I915_NUM_ENGINES; i++) { + struct intel_engine_cs *engine = &dev_priv->engine[i]; struct intel_ringbuffer *rbuf; error->ring[i].pid = -1; - if (ring->dev == NULL) + if (engine->dev == NULL) continue; error->ring[i].valid = true; - i915_record_ring_state(dev, error, ring, &error->ring[i]); + i915_record_ring_state(dev, error, engine, &error->ring[i]); - request = i915_gem_find_active_request(ring); + request = i915_gem_find_active_request(engine); if (request) { struct i915_address_space *vm; vm = request->ctx && request->ctx->ppgtt ? 
&request->ctx->ppgtt->base : - &dev_priv->gtt.base; + &ggtt->base; /* We need to copy these to an anonymous buffer * as the simplest method to avoid being overwritten @@ -1027,10 +1054,10 @@ static void i915_gem_record_rings(struct drm_device *dev, request->batch_obj, vm); - if (HAS_BROKEN_CS_TLB(dev_priv->dev)) + if (HAS_BROKEN_CS_TLB(dev_priv)) error->ring[i].wa_batchbuffer = i915_error_ggtt_object_create(dev_priv, - ring->scratch.obj); + engine->scratch.obj); if (request->pid) { struct task_struct *task; @@ -1052,11 +1079,11 @@ static void i915_gem_record_rings(struct drm_device *dev, * executed). */ if (request) - rbuf = request->ctx->engine[ring->id].ringbuf; + rbuf = request->ctx->engine[engine->id].ringbuf; else - rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf; + rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf; } else - rbuf = ring->buffer; + rbuf = engine->buffer; error->ring[i].cpu_ring_head = rbuf->head; error->ring[i].cpu_ring_tail = rbuf->tail; @@ -1065,12 +1092,19 @@ static void i915_gem_record_rings(struct drm_device *dev, i915_error_ggtt_object_create(dev_priv, rbuf->obj); error->ring[i].hws_page = - i915_error_ggtt_object_create(dev_priv, ring->status_page.obj); + i915_error_ggtt_object_create(dev_priv, + engine->status_page.obj); + + if (engine->wa_ctx.obj) { + error->ring[i].wa_ctx = + i915_error_ggtt_object_create(dev_priv, + engine->wa_ctx.obj); + } - i915_gem_record_active_context(ring, error, &error->ring[i]); + i915_gem_record_active_context(engine, error, &error->ring[i]); count = 0; - list_for_each_entry(request, &ring->request_list, list) + list_for_each_entry(request, &engine->request_list, list) count++; error->ring[i].num_requests = count; @@ -1083,7 +1117,7 @@ static void i915_gem_record_rings(struct drm_device *dev, } count = 0; - list_for_each_entry(request, &ring->request_list, list) { + list_for_each_entry(request, &engine->request_list, list) { struct drm_i915_error_request *erq; if (count >= error->ring[i].num_requests) { @@ -1272,7 +1306,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, static void i915_error_capture_msg(struct drm_device *dev, struct drm_i915_error_state *error, - bool wedged, + u32 engine_mask, const char *error_msg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1295,7 +1329,7 @@ static void i915_error_capture_msg(struct drm_device *dev, scnprintf(error->error_msg + len, sizeof(error->error_msg) - len, ", reason: %s, action: %s", error_msg, - wedged ? "reset" : "continue"); + engine_mask ? "reset" : "continue"); } static void i915_capture_gen_state(struct drm_i915_private *dev_priv, @@ -1318,7 +1352,7 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv, * out a structure which becomes available in debugfs for user level tools * to pick up. 
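i915_error_capture_msg() above assembles its one-line summary incrementally: generate the base message, then append the reason and the action derived from the engine mask into the remaining buffer space. The append step on its own (snprintf standing in for the kernel's scnprintf; the base message is invented for the example):

    #include <stdio.h>
    #include <string.h>

    static void capture_msg(char *buf, size_t len, unsigned engine_mask,
                            const char *reason)
    {
        size_t used = strlen(buf);

        snprintf(buf + used, len - used, ", reason: %s, action: %s",
                 reason, engine_mask ? "reset" : "continue");
    }

    int main(void)
    {
        char msg[128] = "GPU HANG: ecode 8:0:0xfffffffe";

        capture_msg(msg, sizeof(msg), 0x1, "Engine(s) hung");
        puts(msg);
        return 0;
    }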
*/ -void i915_capture_error_state(struct drm_device *dev, bool wedged, +void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, const char *error_msg) { static bool warned; @@ -1346,7 +1380,7 @@ void i915_capture_error_state(struct drm_device *dev, bool wedged, error->overlay = intel_overlay_capture_error_state(dev); error->display = intel_display_capture_error_state(dev); - i915_error_capture_msg(dev, error, wedged, error_msg); + i915_error_capture_msg(dev, error, engine_mask, error_msg); DRM_INFO("%s\n", error->error_msg); spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h index e4ba5822289b..80786d9f9ad3 100644 --- a/drivers/gpu/drm/i915/i915_guc_reg.h +++ b/drivers/gpu/drm/i915/i915_guc_reg.h @@ -27,9 +27,12 @@ /* Definitions of GuC H/W registers, bits, etc */ #define GUC_STATUS _MMIO(0xc000) +#define GS_RESET_SHIFT 0 +#define GS_MIA_IN_RESET (0x01 << GS_RESET_SHIFT) #define GS_BOOTROM_SHIFT 1 #define GS_BOOTROM_MASK (0x7F << GS_BOOTROM_SHIFT) #define GS_BOOTROM_RSA_FAILED (0x50 << GS_BOOTROM_SHIFT) +#define GS_BOOTROM_JUMP_PASSED (0x76 << GS_BOOTROM_SHIFT) #define GS_UKERNEL_SHIFT 8 #define GS_UKERNEL_MASK (0xFF << GS_UKERNEL_SHIFT) #define GS_UKERNEL_LAPIC_DONE (0x30 << GS_UKERNEL_SHIFT) @@ -37,7 +40,13 @@ #define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) #define GS_MIA_SHIFT 16 #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) -#define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) +#define GS_MIA_CORE_STATE (0x01 << GS_MIA_SHIFT) +#define GS_MIA_HALT_REQUESTED (0x02 << GS_MIA_SHIFT) +#define GS_MIA_ISR_ENTRY (0x04 << GS_MIA_SHIFT) +#define GS_AUTH_STATUS_SHIFT 30 +#define GS_AUTH_STATUS_MASK (0x03 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_BAD (0x01 << GS_AUTH_STATUS_SHIFT) +#define GS_AUTH_STATUS_GOOD (0x02 << GS_AUTH_STATUS_SHIFT) #define SOFT_SCRATCH(n) _MMIO(0xc180 + (n) * 4) #define SOFT_SCRATCH_COUNT 16 diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c index d7543efc8a5e..da86bdbba275 100644 --- a/drivers/gpu/drm/i915/i915_guc_submission.c +++ b/drivers/gpu/drm/i915/i915_guc_submission.c @@ -377,11 +377,11 @@ static void guc_init_ctx_desc(struct intel_guc *guc, struct i915_guc_client *client) { struct drm_i915_private *dev_priv = guc_to_i915(guc); - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct intel_context *ctx = client->owner; struct guc_context_desc desc; struct sg_table *sg; - int i; + enum intel_engine_id id; memset(&desc, 0, sizeof(desc)); @@ -390,8 +390,8 @@ static void guc_init_ctx_desc(struct intel_guc *guc, desc.priority = client->priority; desc.db_id = client->doorbell_id; - for_each_ring(ring, dev_priv, i) { - struct guc_execlist_context *lrc = &desc.lrc[ring->guc_id]; + for_each_engine_id(engine, dev_priv, id) { + struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; struct drm_i915_gem_object *obj; uint64_t ctx_desc; @@ -402,27 +402,27 @@ static void guc_init_ctx_desc(struct intel_guc *guc, * for now who owns a GuC client. But for future owner of GuC * client, need to make sure lrc is pinned prior to enter here. */ - obj = ctx->engine[i].state; + obj = ctx->engine[id].state; if (!obj) break; /* XXX: continue? 
*/ - ctx_desc = intel_lr_context_descriptor(ctx, ring); + ctx_desc = intel_lr_context_descriptor(ctx, engine); lrc->context_desc = (u32)ctx_desc; /* The state page is after PPHWSP */ lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) + LRC_STATE_PN * PAGE_SIZE; lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | - (ring->guc_id << GUC_ELC_ENGINE_OFFSET); + (engine->guc_id << GUC_ELC_ENGINE_OFFSET); - obj = ctx->engine[i].ringbuf->obj; + obj = ctx->engine[id].ringbuf->obj; lrc->ring_begin = i915_gem_obj_ggtt_offset(obj); lrc->ring_end = lrc->ring_begin + obj->base.size - 1; lrc->ring_next_free_location = lrc->ring_begin; lrc->ring_current_tail_pointer_value = 0; - desc.engines_used |= (1 << ring->guc_id); + desc.engines_used |= (1 << engine->guc_id); } WARN_ON(desc.engines_used == 0); @@ -542,11 +542,12 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc, wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1; wqi->header = WQ_TYPE_INORDER | (wq_len << WQ_LEN_SHIFT) | - (rq->ring->guc_id << WQ_TARGET_SHIFT) | + (rq->engine->guc_id << WQ_TARGET_SHIFT) | WQ_NO_WCFLUSH_WAIT; /* The GuC wants only the low-order word of the context descriptor */ - wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring); + wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, + rq->engine); /* The GuC firmware wants the tail index in QWords, not bytes */ tail = rq->ringbuf->tail >> 3; @@ -569,7 +570,7 @@ int i915_guc_submit(struct i915_guc_client *client, struct drm_i915_gem_request *rq) { struct intel_guc *guc = client->guc; - unsigned int engine_id = rq->ring->guc_id; + unsigned int engine_id = rq->engine->guc_id; int q_ret, b_ret; q_ret = guc_add_workqueue_item(client, rq); @@ -839,9 +840,9 @@ static void guc_create_ads(struct intel_guc *guc) struct guc_ads *ads; struct guc_policies *policies; struct guc_mmio_reg_state *reg_state; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct page *page; - u32 size, i; + u32 size; /* The ads obj includes the struct itself and buffers passed to GuC */ size = sizeof(struct guc_ads) + sizeof(struct guc_policies) + @@ -867,11 +868,11 @@ static void guc_create_ads(struct intel_guc *guc) * so its address won't change after we've told the GuC where * to find it. */ - ring = &dev_priv->ring[RCS]; - ads->golden_context_lrca = ring->status_page.gfx_addr; + engine = &dev_priv->engine[RCS]; + ads->golden_context_lrca = engine->status_page.gfx_addr; - for_each_ring(ring, dev_priv, i) - ads->eng_state_size[ring->guc_id] = intel_lr_context_size(ring); + for_each_engine(engine, dev_priv) + ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine); /* GuC scheduling policies */ policies = (void *)ads + sizeof(struct guc_ads); @@ -883,12 +884,12 @@ static void guc_create_ads(struct intel_guc *guc) /* MMIO reg state */ reg_state = (void *)policies + sizeof(struct guc_policies); - for_each_ring(ring, dev_priv, i) { - reg_state->mmio_white_list[ring->guc_id].mmio_start = - ring->mmio_base + GUC_MMIO_WHITE_LIST_START; + for_each_engine(engine, dev_priv) { + reg_state->mmio_white_list[engine->guc_id].mmio_start = + engine->mmio_base + GUC_MMIO_WHITE_LIST_START; /* Nothing to be saved or restored for now. 
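Two small conversions in the GuC submission code above are easy to get wrong: the firmware wants the ring tail expressed in qwords rather than bytes (hence the >> 3), and context_id packs two fields into a single dword. Both in a self-contained sketch; the shift values below are placeholders, not the real GUC_ELC_* constants:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field positions only; the real offsets live in the
     * driver headers. */
    #define CTXID_OFFSET  0
    #define ENGINE_OFFSET 29

    static uint32_t pack_context_id(uint32_t ctx_index, uint32_t guc_id)
    {
        return (ctx_index << CTXID_OFFSET) | (guc_id << ENGINE_OFFSET);
    }

    int main(void)
    {
        uint32_t tail_bytes = 0x120;
        uint32_t tail_qwords = tail_bytes >> 3; /* GuC wants qwords, not bytes */

        assert(tail_qwords == 0x24);
        printf("context_id=0x%08x tail=%u qwords\n",
               (unsigned)pack_context_id(42, 1), (unsigned)tail_qwords);
        return 0;
    }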
*/ - reg_state->mmio_white_list[ring->guc_id].count = 0; + reg_state->mmio_white_list[engine->guc_id].count = 0; } ads->reg_state_addr = ads->scheduler_policies + diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index d1a46ef5ab3f..247d962afabb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -994,14 +994,15 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev) return; } -static void notify_ring(struct intel_engine_cs *ring) +static void notify_ring(struct intel_engine_cs *engine) { - if (!intel_ring_initialized(ring)) + if (!intel_engine_initialized(engine)) return; - trace_i915_gem_request_notify(ring); + trace_i915_gem_request_notify(engine); + engine->user_interrupts++; - wake_up_all(&ring->irq_queue); + wake_up_all(&engine->irq_queue); } static void vlv_c0_read(struct drm_i915_private *dev_priv, @@ -1079,11 +1080,10 @@ static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) static bool any_waiters(struct drm_i915_private *dev_priv) { - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, i) - if (ring->irq_refcount) + for_each_engine(engine, dev_priv) + if (engine->irq_refcount) return true; return false; @@ -1219,7 +1219,7 @@ static void ivybridge_parity_work(struct work_struct *work) i915_reg_t reg; slice--; - if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev))) + if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) break; dev_priv->l3_parity.which_slice &= ~(1<<slice); @@ -1258,7 +1258,7 @@ static void ivybridge_parity_work(struct work_struct *work) out: WARN_ON(dev_priv->l3_parity.which_slice); spin_lock_irq(&dev_priv->irq_lock); - gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); + gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); spin_unlock_irq(&dev_priv->irq_lock); mutex_unlock(&dev_priv->dev->struct_mutex); @@ -1291,9 +1291,9 @@ static void ilk_gt_irq_handler(struct drm_device *dev, { if (gt_iir & (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) - notify_ring(&dev_priv->ring[RCS]); + notify_ring(&dev_priv->engine[RCS]); if (gt_iir & ILK_BSD_USER_INTERRUPT) - notify_ring(&dev_priv->ring[VCS]); + notify_ring(&dev_priv->engine[VCS]); } static void snb_gt_irq_handler(struct drm_device *dev, @@ -1303,11 +1303,11 @@ static void snb_gt_irq_handler(struct drm_device *dev, if (gt_iir & (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) - notify_ring(&dev_priv->ring[RCS]); + notify_ring(&dev_priv->engine[RCS]); if (gt_iir & GT_BSD_USER_INTERRUPT) - notify_ring(&dev_priv->ring[VCS]); + notify_ring(&dev_priv->engine[VCS]); if (gt_iir & GT_BLT_USER_INTERRUPT) - notify_ring(&dev_priv->ring[BCS]); + notify_ring(&dev_priv->engine[BCS]); if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | GT_BSD_CS_ERROR_INTERRUPT | @@ -1319,12 +1319,12 @@ static void snb_gt_irq_handler(struct drm_device *dev, } static __always_inline void -gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift) +gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) { if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) - notify_ring(ring); + notify_ring(engine); if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) - intel_lrc_irq_handler(ring); + tasklet_schedule(&engine->irq_tasklet); } static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, @@ -1338,11 +1338,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, I915_WRITE_FW(GEN8_GT_IIR(0), iir); 
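gen8_cs_irq_handler() above takes a per-engine shift so that one GT IIR dword can be decoded for the two engines sharing the register; note also that the context-switch bit now schedules engine->irq_tasklet instead of running intel_lrc_irq_handler() in hard-irq context. The decode pattern in miniature (bit positions are illustrative, loosely following the GT_*_INTERRUPT and GEN8_*_IRQ_SHIFT scheme):

    #include <stdio.h>

    #define USER_INTERRUPT (1u << 0)
    #define CONTEXT_SWITCH (1u << 8)
    #define RCS_SHIFT 0
    #define BCS_SHIFT 16

    static void cs_irq(unsigned iir, int shift, const char *name)
    {
        if (iir & (USER_INTERRUPT << shift))
            printf("%s: wake waiters\n", name);
        if (iir & (CONTEXT_SWITCH << shift))
            printf("%s: schedule execlist tasklet\n", name);
    }

    int main(void)
    {
        unsigned iir = (USER_INTERRUPT << RCS_SHIFT) |
                       (CONTEXT_SWITCH << BCS_SHIFT);

        cs_irq(iir, RCS_SHIFT, "rcs");
        cs_irq(iir, BCS_SHIFT, "bcs");
        return 0;
    }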
ret = IRQ_HANDLED; - gen8_cs_irq_handler(&dev_priv->ring[RCS], - iir, GEN8_RCS_IRQ_SHIFT); + gen8_cs_irq_handler(&dev_priv->engine[RCS], + iir, GEN8_RCS_IRQ_SHIFT); - gen8_cs_irq_handler(&dev_priv->ring[BCS], - iir, GEN8_BCS_IRQ_SHIFT); + gen8_cs_irq_handler(&dev_priv->engine[BCS], + iir, GEN8_BCS_IRQ_SHIFT); } else DRM_ERROR("The master control interrupt lied (GT0)!\n"); } @@ -1353,11 +1353,11 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, I915_WRITE_FW(GEN8_GT_IIR(1), iir); ret = IRQ_HANDLED; - gen8_cs_irq_handler(&dev_priv->ring[VCS], - iir, GEN8_VCS1_IRQ_SHIFT); + gen8_cs_irq_handler(&dev_priv->engine[VCS], + iir, GEN8_VCS1_IRQ_SHIFT); - gen8_cs_irq_handler(&dev_priv->ring[VCS2], - iir, GEN8_VCS2_IRQ_SHIFT); + gen8_cs_irq_handler(&dev_priv->engine[VCS2], + iir, GEN8_VCS2_IRQ_SHIFT); } else DRM_ERROR("The master control interrupt lied (GT1)!\n"); } @@ -1368,8 +1368,8 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv, I915_WRITE_FW(GEN8_GT_IIR(3), iir); ret = IRQ_HANDLED; - gen8_cs_irq_handler(&dev_priv->ring[VECS], - iir, GEN8_VECS_IRQ_SHIFT); + gen8_cs_irq_handler(&dev_priv->engine[VECS], + iir, GEN8_VECS_IRQ_SHIFT); } else DRM_ERROR("The master control interrupt lied (GT3)!\n"); } @@ -1627,9 +1627,9 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) if (INTEL_INFO(dev_priv)->gen >= 8) return; - if (HAS_VEBOX(dev_priv->dev)) { + if (HAS_VEBOX(dev_priv)) { if (pm_iir & PM_VEBOX_USER_INTERRUPT) - notify_ring(&dev_priv->ring[VECS]); + notify_ring(&dev_priv->engine[VECS]); if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); @@ -1829,7 +1829,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ disable_rpm_wakeref_asserts(dev_priv); - for (;;) { + do { master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; iir = I915_READ(VLV_IIR); @@ -1857,7 +1857,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); POSTING_READ(GEN8_MASTER_IRQ); - } + } while (0); enable_rpm_wakeref_asserts(dev_priv); @@ -2449,8 +2449,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) static void i915_error_wake_up(struct drm_i915_private *dev_priv, bool reset_completed) { - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; /* * Notify all waiters for GPU completion events that reset state has @@ -2460,8 +2459,8 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv, */ /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ - for_each_ring(ring, dev_priv, i) - wake_up_all(&ring->irq_queue); + for_each_engine(engine, dev_priv) + wake_up_all(&engine->irq_queue); /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ wake_up_all(&dev_priv->pending_flip_queue); @@ -2653,14 +2652,14 @@ static void i915_report_and_clear_eir(struct drm_device *dev) /** * i915_handle_error - handle a gpu error * @dev: drm device - * + * @engine_mask: mask representing engines that are hung * Do some basic checking of register state at error time and * dump it to the syslog. Also call i915_capture_error_state() to make * sure we get a record and make it available in debugfs. Fire a uevent * so userspace knows something bad happened (should trigger collection * of a ring dump etc.). 
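The updated kerneldoc above describes the new @engine_mask parameter; the signature change itself follows just below. The semantics in miniature: callers build a mask of hung engines, and any non-zero mask keeps the old wedged=true meaning of "reset" (engine ids are illustrative):

    #include <stdio.h>

    enum { RCS, VCS, BCS, VECS };

    static void handle_error(unsigned engine_mask, const char *msg)
    {
        printf("%s, action: %s\n", msg, engine_mask ? "reset" : "continue");
    }

    int main(void)
    {
        unsigned hung = 0;

        hung |= 1u << RCS;   /* rings_hung |= intel_engine_flag(engine) */
        hung |= 1u << VECS;
        handle_error(hung, "Engine(s) hung");  /* was: wedged = true */
        handle_error(0, "Kicking stuck wait"); /* was: wedged = false */
        return 0;
    }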
*/ -void i915_handle_error(struct drm_device *dev, bool wedged, +void i915_handle_error(struct drm_device *dev, u32 engine_mask, const char *fmt, ...) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2671,10 +2670,10 @@ void i915_handle_error(struct drm_device *dev, bool wedged, vscnprintf(error_msg, sizeof(error_msg), fmt, args); va_end(args); - i915_capture_error_state(dev, wedged, error_msg); + i915_capture_error_state(dev, engine_mask, error_msg); i915_report_and_clear_eir(dev); - if (wedged) { + if (engine_mask) { atomic_or(I915_RESET_IN_PROGRESS_FLAG, &dev_priv->gpu_error.reset_counter); @@ -2805,10 +2804,10 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) } static bool -ring_idle(struct intel_engine_cs *ring, u32 seqno) +ring_idle(struct intel_engine_cs *engine, u32 seqno) { - return (list_empty(&ring->request_list) || - i915_seqno_passed(seqno, ring->last_submitted_seqno)); + return i915_seqno_passed(seqno, + READ_ONCE(engine->last_submitted_seqno)); } static bool @@ -2824,42 +2823,42 @@ ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) } static struct intel_engine_cs * -semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) +semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, + u64 offset) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; struct intel_engine_cs *signaller; - int i; - if (INTEL_INFO(dev_priv->dev)->gen >= 8) { - for_each_ring(signaller, dev_priv, i) { - if (ring == signaller) + if (INTEL_INFO(dev_priv)->gen >= 8) { + for_each_engine(signaller, dev_priv) { + if (engine == signaller) continue; - if (offset == signaller->semaphore.signal_ggtt[ring->id]) + if (offset == signaller->semaphore.signal_ggtt[engine->id]) return signaller; } } else { u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; - for_each_ring(signaller, dev_priv, i) { - if(ring == signaller) + for_each_engine(signaller, dev_priv) { + if(engine == signaller) continue; - if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) + if (sync_bits == signaller->semaphore.mbox.wait[engine->id]) return signaller; } } DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", - ring->id, ipehr, offset); + engine->id, ipehr, offset); return NULL; } static struct intel_engine_cs * -semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) +semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; u32 cmd, ipehr, head; u64 offset = 0; int i, backwards; @@ -2881,11 +2880,11 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) * Therefore, this function does not support execlist mode in its * current form. Just return NULL and move on. */ - if (ring->buffer == NULL) + if (engine->buffer == NULL) return NULL; - ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); - if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) + ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); + if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) return NULL; /* @@ -2896,8 +2895,8 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) * point at at batch, and semaphores are always emitted into the * ringbuffer itself. */ - head = I915_READ_HEAD(ring) & HEAD_ADDR; - backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 
5 : 4; + head = I915_READ_HEAD(engine) & HEAD_ADDR; + backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; for (i = backwards; i; --i) { /* @@ -2905,10 +2904,10 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) * our ring is smaller than what the hardware (and hence * HEAD_ADDR) allows. Also handles wrap-around. */ - head &= ring->buffer->size - 1; + head &= engine->buffer->size - 1; /* This here seems to blow up */ - cmd = ioread32(ring->buffer->virtual_start + head); + cmd = ioread32(engine->buffer->virtual_start + head); if (cmd == ipehr) break; @@ -2918,32 +2917,32 @@ semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) if (!i) return NULL; - *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; - if (INTEL_INFO(ring->dev)->gen >= 8) { - offset = ioread32(ring->buffer->virtual_start + head + 12); + *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; + if (INTEL_INFO(engine->dev)->gen >= 8) { + offset = ioread32(engine->buffer->virtual_start + head + 12); offset <<= 32; - offset = ioread32(ring->buffer->virtual_start + head + 8); + offset = ioread32(engine->buffer->virtual_start + head + 8); } - return semaphore_wait_to_signaller_ring(ring, ipehr, offset); + return semaphore_wait_to_signaller_ring(engine, ipehr, offset); } -static int semaphore_passed(struct intel_engine_cs *ring) +static int semaphore_passed(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; struct intel_engine_cs *signaller; u32 seqno; - ring->hangcheck.deadlock++; + engine->hangcheck.deadlock++; - signaller = semaphore_waits_for(ring, &seqno); + signaller = semaphore_waits_for(engine, &seqno); if (signaller == NULL) return -1; /* Prevent pathological recursion due to driver bugs */ - if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) + if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) return -1; - if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) + if (i915_seqno_passed(signaller->get_seqno(signaller), seqno)) return 1; /* cursory check for an unkickable deadlock */ @@ -2956,23 +2955,22 @@ static int semaphore_passed(struct intel_engine_cs *ring) static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) { - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, i) - ring->hangcheck.deadlock = 0; + for_each_engine(engine, dev_priv) + engine->hangcheck.deadlock = 0; } -static bool subunits_stuck(struct intel_engine_cs *ring) +static bool subunits_stuck(struct intel_engine_cs *engine) { u32 instdone[I915_NUM_INSTDONE_REG]; bool stuck; int i; - if (ring->id != RCS) + if (engine->id != RCS) return true; - i915_get_extra_instdone(ring->dev, instdone); + i915_get_extra_instdone(engine->dev, instdone); /* There might be unstable subunit states even when * actual head is not moving. 
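semaphore_waits_for() above walks backwards from HEAD looking for the semaphore-wait command, masking with (size - 1) on every step so the walk wraps cleanly around the power-of-two ring. The scan in isolation (word-indexed here, where the driver steps in bytes; the command value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define RING_WORDS 16 /* must be a power of two */

    static int find_cmd_backwards(const uint32_t *ring, uint32_t head,
                                  uint32_t cmd, int max_steps)
    {
        for (int i = max_steps; i; --i) {
            head &= RING_WORDS - 1;  /* handles wrap-around (and underflow) */
            if (ring[head] == cmd)
                return (int)head;
            head--;                  /* unsigned: wraps, masked next pass */
        }
        return -1;
    }

    int main(void)
    {
        uint32_t ring[RING_WORDS] = { 0 };

        ring[14] = 0xdeadbeef;       /* pretend semaphore-wait command */
        printf("found at %d\n", find_cmd_backwards(ring, 1, 0xdeadbeef, 4));
        return 0;
    }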
Filter out the unstable ones by @@ -2981,49 +2979,44 @@ static bool subunits_stuck(struct intel_engine_cs *ring) */ stuck = true; for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { - const u32 tmp = instdone[i] | ring->hangcheck.instdone[i]; + const u32 tmp = instdone[i] | engine->hangcheck.instdone[i]; - if (tmp != ring->hangcheck.instdone[i]) + if (tmp != engine->hangcheck.instdone[i]) stuck = false; - ring->hangcheck.instdone[i] |= tmp; + engine->hangcheck.instdone[i] |= tmp; } return stuck; } static enum intel_ring_hangcheck_action -head_stuck(struct intel_engine_cs *ring, u64 acthd) +head_stuck(struct intel_engine_cs *engine, u64 acthd) { - if (acthd != ring->hangcheck.acthd) { + if (acthd != engine->hangcheck.acthd) { /* Clear subunit states on head movement */ - memset(ring->hangcheck.instdone, 0, - sizeof(ring->hangcheck.instdone)); + memset(engine->hangcheck.instdone, 0, + sizeof(engine->hangcheck.instdone)); - if (acthd > ring->hangcheck.max_acthd) { - ring->hangcheck.max_acthd = acthd; - return HANGCHECK_ACTIVE; - } - - return HANGCHECK_ACTIVE_LOOP; + return HANGCHECK_ACTIVE; } - if (!subunits_stuck(ring)) + if (!subunits_stuck(engine)) return HANGCHECK_ACTIVE; return HANGCHECK_HUNG; } static enum intel_ring_hangcheck_action -ring_stuck(struct intel_engine_cs *ring, u64 acthd) +ring_stuck(struct intel_engine_cs *engine, u64 acthd) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; enum intel_ring_hangcheck_action ha; u32 tmp; - ha = head_stuck(ring, acthd); + ha = head_stuck(engine, acthd); if (ha != HANGCHECK_HUNG) return ha; @@ -3035,24 +3028,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) * and break the hang. This should work on * all but the second generation chipsets. */ - tmp = I915_READ_CTL(ring); + tmp = I915_READ_CTL(engine); if (tmp & RING_WAIT) { - i915_handle_error(dev, false, + i915_handle_error(dev, 0, "Kicking stuck wait on %s", - ring->name); - I915_WRITE_CTL(ring, tmp); + engine->name); + I915_WRITE_CTL(engine, tmp); return HANGCHECK_KICK; } if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { - switch (semaphore_passed(ring)) { + switch (semaphore_passed(engine)) { default: return HANGCHECK_HUNG; case 1: - i915_handle_error(dev, false, + i915_handle_error(dev, 0, "Kicking stuck semaphore on %s", - ring->name); - I915_WRITE_CTL(ring, tmp); + engine->name); + I915_WRITE_CTL(engine, tmp); return HANGCHECK_KICK; case 0: return HANGCHECK_WAIT; @@ -3062,6 +3055,24 @@ ring_stuck(struct intel_engine_cs *ring, u64 acthd) return HANGCHECK_HUNG; } +static unsigned kick_waiters(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = to_i915(engine->dev); + unsigned user_interrupts = READ_ONCE(engine->user_interrupts); + + if (engine->hangcheck.user_interrupts == user_interrupts && + !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { + if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) + DRM_ERROR("Hangcheck timer elapsed... %s idle\n", + engine->name); + else + DRM_INFO("Fake missed irq on %s\n", + engine->name); + wake_up_all(&engine->irq_queue); + } + + return user_interrupts; +} /* * This is called when the chip hasn't reported back with completed * batchbuffers in a long time. 
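kick_waiters() above spots a missed user interrupt by sampling a counter that notify_ring() bumps: if the count has not moved since the previous hangcheck tick while somebody is still waiting, the waiters are woken by hand and the event is reported once per engine. The sampling logic alone (single-threaded stand-in; a plain variable replaces the READ_ONCE'd engine->user_interrupts):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned user_interrupts;  /* bumped by notify_ring() in the driver */
    static unsigned hangcheck_sample; /* engine->hangcheck.user_interrupts */

    static void hangcheck_tick(bool waiters)
    {
        unsigned seen = user_interrupts; /* READ_ONCE() in the driver */

        if (waiters && seen == hangcheck_sample)
            puts("no irq since last tick: kick waiters");
        hangcheck_sample = seen;
    }

    int main(void)
    {
        hangcheck_tick(true);  /* kicks: counter never moved */
        user_interrupts++;     /* an interrupt arrived */
        hangcheck_tick(true);  /* quiet: progress was observed */
        hangcheck_tick(true);  /* kicks again */
        return 0;
    }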
We keep track per ring seqno progress and @@ -3076,13 +3087,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work) container_of(work, typeof(*dev_priv), gpu_error.hangcheck_work.work); struct drm_device *dev = dev_priv->dev; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; + enum intel_engine_id id; int busy_count = 0, rings_hung = 0; - bool stuck[I915_NUM_RINGS] = { 0 }; + bool stuck[I915_NUM_ENGINES] = { 0 }; #define BUSY 1 #define KICK 5 #define HUNG 20 +#define ACTIVE_DECAY 15 if (!i915.enable_hangcheck) return; @@ -3100,33 +3112,37 @@ static void i915_hangcheck_elapsed(struct work_struct *work) */ intel_uncore_arm_unclaimed_mmio_detection(dev_priv); - for_each_ring(ring, dev_priv, i) { + for_each_engine_id(engine, dev_priv, id) { u64 acthd; u32 seqno; + unsigned user_interrupts; bool busy = true; semaphore_clear_deadlocks(dev_priv); - seqno = ring->get_seqno(ring, false); - acthd = intel_ring_get_active_head(ring); - - if (ring->hangcheck.seqno == seqno) { - if (ring_idle(ring, seqno)) { - ring->hangcheck.action = HANGCHECK_IDLE; - - if (waitqueue_active(&ring->irq_queue)) { - /* Issue a wake-up to catch stuck h/w. */ - if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { - if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) - DRM_ERROR("Hangcheck timer elapsed... %s idle\n", - ring->name); - else - DRM_INFO("Fake missed irq on %s\n", - ring->name); - wake_up_all(&ring->irq_queue); - } + /* We don't strictly need an irq-barrier here, as we are not + * serving an interrupt request, be paranoid in case the + * barrier has side-effects (such as preventing a broken + * cacheline snoop) and so be sure that we can see the seqno + * advance. If the seqno should stick, due to a stale + * cacheline, we would erroneously declare the GPU hung. + */ + if (engine->irq_seqno_barrier) + engine->irq_seqno_barrier(engine); + + acthd = intel_ring_get_active_head(engine); + seqno = engine->get_seqno(engine); + + /* Reset stuck interrupts between batch advances */ + user_interrupts = 0; + + if (engine->hangcheck.seqno == seqno) { + if (ring_idle(engine, seqno)) { + engine->hangcheck.action = HANGCHECK_IDLE; + if (waitqueue_active(&engine->irq_queue)) { /* Safeguard against driver failure */ - ring->hangcheck.score += BUSY; + user_interrupts = kick_waiters(engine); + engine->hangcheck.score += BUSY; } else busy = false; } else { @@ -3145,58 +3161,60 @@ static void i915_hangcheck_elapsed(struct work_struct *work) * being repeatedly kicked and so responsible * for stalling the machine. */ - ring->hangcheck.action = ring_stuck(ring, - acthd); + engine->hangcheck.action = ring_stuck(engine, + acthd); - switch (ring->hangcheck.action) { + switch (engine->hangcheck.action) { case HANGCHECK_IDLE: case HANGCHECK_WAIT: - case HANGCHECK_ACTIVE: break; - case HANGCHECK_ACTIVE_LOOP: - ring->hangcheck.score += BUSY; + case HANGCHECK_ACTIVE: + engine->hangcheck.score += BUSY; break; case HANGCHECK_KICK: - ring->hangcheck.score += KICK; + engine->hangcheck.score += KICK; break; case HANGCHECK_HUNG: - ring->hangcheck.score += HUNG; - stuck[i] = true; + engine->hangcheck.score += HUNG; + stuck[id] = true; break; } } } else { - ring->hangcheck.action = HANGCHECK_ACTIVE; + engine->hangcheck.action = HANGCHECK_ACTIVE; /* Gradually reduce the count so that we catch DoS * attempts across multiple batches. 
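The comment above pairs with the new ACTIVE_DECAY subtraction applied just below: a tick that observes progress now sheds 15 points rather than 1, clamped at zero, so only a persistent stall accumulates enough to cross the hang threshold. A worked run of the accounting (the threshold is assumed to match the driver's HANGCHECK_SCORE_RING_HUNG, 31):

    #include <stdio.h>

    #define BUSY 1
    #define KICK 5
    #define HUNG 20
    #define ACTIVE_DECAY 15
    #define SCORE_RING_HUNG 31 /* HANGCHECK_SCORE_RING_HUNG in the driver */

    int main(void)
    {
        /* One hangcheck tick per entry: stalls add HUNG, a tick that saw
         * progress subtracts ACTIVE_DECAY, and the score clamps at zero. */
        const int ticks[] = { HUNG, -ACTIVE_DECAY, -ACTIVE_DECAY, HUNG, HUNG };
        int score = 0;

        for (unsigned i = 0; i < sizeof(ticks) / sizeof(ticks[0]); i++) {
            score += ticks[i];
            if (score < 0)
                score = 0;
            printf("tick %u: score=%d%s\n", i, score,
                   score >= SCORE_RING_HUNG ? " -> declare hung" : "");
        }
        return 0;
    }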
*/ - if (ring->hangcheck.score > 0) - ring->hangcheck.score--; + if (engine->hangcheck.score > 0) + engine->hangcheck.score -= ACTIVE_DECAY; + if (engine->hangcheck.score < 0) + engine->hangcheck.score = 0; /* Clear head and subunit states on seqno movement */ - ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; + acthd = 0; - memset(ring->hangcheck.instdone, 0, - sizeof(ring->hangcheck.instdone)); + memset(engine->hangcheck.instdone, 0, + sizeof(engine->hangcheck.instdone)); } - ring->hangcheck.seqno = seqno; - ring->hangcheck.acthd = acthd; + engine->hangcheck.seqno = seqno; + engine->hangcheck.acthd = acthd; + engine->hangcheck.user_interrupts = user_interrupts; busy_count += busy; } - for_each_ring(ring, dev_priv, i) { - if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { + for_each_engine_id(engine, dev_priv, id) { + if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { DRM_INFO("%s on %s\n", - stuck[i] ? "stuck" : "no progress", - ring->name); - rings_hung++; + stuck[id] ? "stuck" : "no progress", + engine->name); + rings_hung |= intel_engine_flag(engine); } } if (rings_hung) { - i915_handle_error(dev, true, "Ring hung"); + i915_handle_error(dev, rings_hung, "Engine(s) hung"); goto out; } @@ -3267,6 +3285,55 @@ static void gen5_gt_irq_reset(struct drm_device *dev) GEN5_IRQ_RESET(GEN6_PM); } +static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) +{ + enum pipe pipe; + + if (IS_CHERRYVIEW(dev_priv)) + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); + else + I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); + + i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); + I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + + for_each_pipe(dev_priv, pipe) { + I915_WRITE(PIPESTAT(pipe), + PIPE_FIFO_UNDERRUN_STATUS | + PIPESTAT_INT_STATUS_MASK); + dev_priv->pipestat_irq_mask[pipe] = 0; + } + + GEN5_IRQ_RESET(VLV_); + dev_priv->irq_mask = ~0; +} + +static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) +{ + u32 pipestat_mask; + u32 enable_mask; + enum pipe pipe; + + pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | + PIPE_CRC_DONE_INTERRUPT_STATUS; + + i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + for_each_pipe(dev_priv, pipe) + i915_enable_pipestat(dev_priv, pipe, pipestat_mask); + + enable_mask = I915_DISPLAY_PORT_INTERRUPT | + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + if (IS_CHERRYVIEW(dev_priv)) + enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; + + WARN_ON(dev_priv->irq_mask != ~0); + + dev_priv->irq_mask = ~enable_mask; + + GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); +} + /* drm_dma.h hooks */ static void ironlake_irq_reset(struct drm_device *dev) @@ -3284,34 +3351,16 @@ static void ironlake_irq_reset(struct drm_device *dev) ibx_irq_reset(dev); } -static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) -{ - enum pipe pipe; - - i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); - - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), 0xffff); - - GEN5_IRQ_RESET(VLV_); -} - static void valleyview_irq_preinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - /* VLV magic */ - I915_WRITE(VLV_IMR, 0); - I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); - I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); - I915_WRITE(RING_IMR(BLT_RING_BASE), 0); - gen5_gt_irq_reset(dev); - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); - - vlv_display_irq_reset(dev_priv); 
+ spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + vlv_display_irq_reset(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); } static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) @@ -3384,9 +3433,10 @@ static void cherryview_irq_preinstall(struct drm_device *dev) GEN5_IRQ_RESET(GEN8_PCU_); - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); - - vlv_display_irq_reset(dev_priv); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + vlv_display_irq_reset(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); } static u32 intel_hpd_enabled_irqs(struct drm_device *dev, @@ -3506,6 +3556,26 @@ static void bxt_hpd_irq_setup(struct drm_device *dev) hotplug = I915_READ(PCH_PORT_HOTPLUG); hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; + + DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", + hotplug, enabled_irqs); + hotplug &= ~BXT_DDI_HPD_INVERT_MASK; + + /* + * For BXT invert bit has to be set based on AOB design + * for HPD detection logic, update it based on VBT fields. + */ + + if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && + intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) + hotplug |= BXT_DDIA_HPD_INVERT; + if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && + intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) + hotplug |= BXT_DDIB_HPD_INVERT; + if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && + intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) + hotplug |= BXT_DDIC_HPD_INVERT; + I915_WRITE(PCH_PORT_HOTPLUG, hotplug); } @@ -3613,74 +3683,6 @@ static int ironlake_irq_postinstall(struct drm_device *dev) return 0; } -static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) -{ - u32 pipestat_mask; - u32 iir_mask; - enum pipe pipe; - - pipestat_mask = PIPESTAT_INT_STATUS_MASK | - PIPE_FIFO_UNDERRUN_STATUS; - - for_each_pipe(dev_priv, pipe) - I915_WRITE(PIPESTAT(pipe), pipestat_mask); - POSTING_READ(PIPESTAT(PIPE_A)); - - pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | - PIPE_CRC_DONE_INTERRUPT_STATUS; - - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(dev_priv, pipe) - i915_enable_pipestat(dev_priv, pipe, pipestat_mask); - - iir_mask = I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; - if (IS_CHERRYVIEW(dev_priv)) - iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; - dev_priv->irq_mask &= ~iir_mask; - - I915_WRITE(VLV_IIR, iir_mask); - I915_WRITE(VLV_IIR, iir_mask); - I915_WRITE(VLV_IER, ~dev_priv->irq_mask); - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - POSTING_READ(VLV_IMR); -} - -static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) -{ - u32 pipestat_mask; - u32 iir_mask; - enum pipe pipe; - - iir_mask = I915_DISPLAY_PORT_INTERRUPT | - I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | - I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; - if (IS_CHERRYVIEW(dev_priv)) - iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; - - dev_priv->irq_mask |= iir_mask; - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - I915_WRITE(VLV_IER, ~dev_priv->irq_mask); - I915_WRITE(VLV_IIR, iir_mask); - I915_WRITE(VLV_IIR, iir_mask); - POSTING_READ(VLV_IIR); - - pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | - PIPE_CRC_DONE_INTERRUPT_STATUS; - - i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(dev_priv, pipe) - i915_disable_pipestat(dev_priv, pipe, pipestat_mask); - - pipestat_mask = PIPESTAT_INT_STATUS_MASK | - PIPE_FIFO_UNDERRUN_STATUS; - - for_each_pipe(dev_priv, pipe) - 
I915_WRITE(PIPESTAT(pipe), pipestat_mask); - POSTING_READ(PIPESTAT(PIPE_A)); -} - void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) { assert_spin_locked(&dev_priv->irq_lock); @@ -3690,8 +3692,10 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = true; - if (intel_irqs_enabled(dev_priv)) - valleyview_display_irqs_install(dev_priv); + if (intel_irqs_enabled(dev_priv)) { + vlv_display_irq_reset(dev_priv); + vlv_display_irq_postinstall(dev_priv); + } } void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) @@ -3704,43 +3708,20 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) dev_priv->display_irqs_enabled = false; if (intel_irqs_enabled(dev_priv)) - valleyview_display_irqs_uninstall(dev_priv); + vlv_display_irq_reset(dev_priv); } -static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) -{ - dev_priv->irq_mask = ~0; - - i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - POSTING_READ(PORT_HOTPLUG_EN); - - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IIR, 0xffffffff); - I915_WRITE(VLV_IER, ~dev_priv->irq_mask); - I915_WRITE(VLV_IMR, dev_priv->irq_mask); - POSTING_READ(VLV_IMR); - - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked check happy. */ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) - valleyview_display_irqs_install(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); -} static int valleyview_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - vlv_display_irq_postinstall(dev_priv); - gen5_gt_irq_postinstall(dev); - /* ack & enable invalid PTE error interrupts */ -#if 0 /* FIXME: add support to irq handler for checking these bits */ - I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); - I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); -#endif + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + vlv_display_irq_postinstall(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); @@ -3842,10 +3823,13 @@ static int cherryview_irq_postinstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - vlv_display_irq_postinstall(dev_priv); - gen8_gt_irq_postinstall(dev_priv); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + vlv_display_irq_postinstall(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); + I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); POSTING_READ(GEN8_MASTER_IRQ); @@ -3862,20 +3846,6 @@ static void gen8_irq_uninstall(struct drm_device *dev) gen8_irq_reset(dev); } -static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) -{ - /* Interrupt setup is already guaranteed to be single-threaded, this is - * just to make the assert_spin_locked check happy. 
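After this cleanup, every VLV/CHV preinstall, uninstall and enable/disable path above funnels through the same vlv_display_irq_reset()/vlv_display_irq_postinstall() pair, always under irq_lock and only while display interrupts are currently enabled. The shape of that guard, modeled with a pthread mutex in place of the spinlock (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool display_irqs_enabled = true;

    static void display_irq_reset(void)
    {
        puts("reset display interrupt state");
    }

    static void guarded_display_reset(void)
    {
        pthread_mutex_lock(&irq_lock);
        if (display_irqs_enabled)
            display_irq_reset();
        pthread_mutex_unlock(&irq_lock);
    }

    int main(void)
    {
        guarded_display_reset();      /* runs: display irqs enabled */
        display_irqs_enabled = false;
        guarded_display_reset();      /* skipped: nothing to reset */
        return 0;
    }

Centralizing the guard in the callers rather than in a wrapper is what lets the patch delete valleyview_display_irqs_install()/_uninstall() and vlv_display_irq_uninstall() outright.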
*/ - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display_irqs_enabled) - valleyview_display_irqs_uninstall(dev_priv); - spin_unlock_irq(&dev_priv->irq_lock); - - vlv_display_irq_reset(dev_priv); - - dev_priv->irq_mask = ~0; -} - static void valleyview_irq_uninstall(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -3889,7 +3859,10 @@ static void valleyview_irq_uninstall(struct drm_device *dev) I915_WRITE(HWSTAM, 0xffffffff); - vlv_display_irq_uninstall(dev_priv); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + vlv_display_irq_reset(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); } static void cherryview_irq_uninstall(struct drm_device *dev) @@ -3906,7 +3879,10 @@ static void cherryview_irq_uninstall(struct drm_device *dev) GEN5_IRQ_RESET(GEN8_PCU_); - vlv_display_irq_uninstall(dev_priv); + spin_lock_irq(&dev_priv->irq_lock); + if (dev_priv->display_irqs_enabled) + vlv_display_irq_reset(dev_priv); + spin_unlock_irq(&dev_priv->irq_lock); } static void ironlake_irq_uninstall(struct drm_device *dev) @@ -4044,7 +4020,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) new_iir = I915_READ16(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(&dev_priv->ring[RCS]); + notify_ring(&dev_priv->engine[RCS]); for_each_pipe(dev_priv, pipe) { int plane = pipe; @@ -4240,7 +4216,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(&dev_priv->ring[RCS]); + notify_ring(&dev_priv->engine[RCS]); for_each_pipe(dev_priv, pipe) { int plane = pipe; @@ -4470,9 +4446,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) new_iir = I915_READ(IIR); /* Flush posted writes */ if (iir & I915_USER_INTERRUPT) - notify_ring(&dev_priv->ring[RCS]); + notify_ring(&dev_priv->engine[RCS]); if (iir & I915_BSD_USER_INTERRUPT) - notify_ring(&dev_priv->ring[VCS]); + notify_ring(&dev_priv->engine[VCS]); for_each_pipe(dev_priv, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && @@ -4567,8 +4543,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, i915_hangcheck_elapsed); - pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - if (IS_GEN2(dev_priv)) { dev->max_vblank_count = 0; dev->driver->get_vblank_counter = i8xx_get_vblank_counter; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 278c9c40c2e0..1779f02e6df8 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -56,6 +56,8 @@ struct i915_params i915 __read_mostly = { .edp_vswing = 0, .enable_guc_submission = false, .guc_log_level = -1, + .enable_dp_mst = true, + .inject_load_failure = 0, }; module_param_named(modeset, i915.modeset, int, 0400); @@ -201,3 +203,10 @@ MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)") module_param_named(guc_log_level, i915.guc_log_level, int, 0400); MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (-1:disabled (default), 0-3:enabled)"); + +module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600); +MODULE_PARM_DESC(enable_dp_mst, + "Enable multi-stream transport (MST) for new DisplayPort sinks. 
(default: true)"); +module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); +MODULE_PARM_DESC(inject_load_failure, + "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index bd5026b15d3e..02bc27804291 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -49,6 +49,7 @@ struct i915_params { int use_mmio_flip; int mmio_debug; int edp_vswing; + unsigned int inject_load_failure; /* leave bools at the end to not create holes */ bool enable_hangcheck; bool fastboot; @@ -59,6 +60,7 @@ struct i915_params { bool enable_guc_submission; bool verbose_state_checks; bool nuclear_pageflip; + bool enable_dp_mst; }; extern struct i915_params i915 __read_mostly; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f76cbf3e5d1e..cea5a390d8c9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -164,6 +164,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN6_GRDOM_RENDER (1 << 1) #define GEN6_GRDOM_MEDIA (1 << 2) #define GEN6_GRDOM_BLT (1 << 3) +#define GEN6_GRDOM_VECS (1 << 4) +#define GEN9_GRDOM_GUC (1 << 5) +#define GEN8_GRDOM_MEDIA2 (1 << 7) #define RING_PP_DIR_BASE(ring) _MMIO((ring)->mmio_base+0x228) #define RING_PP_DIR_BASE_READ(ring) _MMIO((ring)->mmio_base+0x518) @@ -586,6 +589,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504) #define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508) +/* There are the 16 64-bit CS General Purpose Registers */ +#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) +#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) + #define OACONTROL _MMIO(0x2360) #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 @@ -621,6 +628,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IOSF_PORT_GPIO_SC 0x48 #define IOSF_PORT_GPIO_SUS 0xa8 #define IOSF_PORT_CCU 0xa9 +#define CHV_IOSF_PORT_GPIO_N 0x13 +#define CHV_IOSF_PORT_GPIO_SE 0x48 +#define CHV_IOSF_PORT_GPIO_E 0xa8 +#define CHV_IOSF_PORT_GPIO_SW 0xb2 #define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) @@ -785,7 +796,9 @@ enum skl_disp_power_wells { #define DSI_PLL_M1_DIV_SHIFT 0 #define DSI_PLL_M1_DIV_MASK (0x1ff << 0) #define CCK_CZ_CLOCK_CONTROL 0x62 +#define CCK_GPLL_CLOCK_CONTROL 0x67 #define CCK_DISPLAY_CLOCK_CONTROL 0x6b +#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c #define CCK_TRUNK_FORCE_ON (1 << 17) #define CCK_TRUNK_FORCE_OFF (1 << 16) #define CCK_FREQUENCY_STATUS (0x1f << 8) @@ -1317,6 +1330,7 @@ enum skl_disp_power_wells { #define _PORT_CL1CM_DW0_A 0x162000 #define _PORT_CL1CM_DW0_BC 0x6C000 #define PHY_POWER_GOOD (1 << 16) +#define PHY_RESERVED (1 << 7) #define BXT_PORT_CL1CM_DW0(phy) _BXT_PHY((phy), _PORT_CL1CM_DW0_BC, \ _PORT_CL1CM_DW0_A) @@ -1776,6 +1790,18 @@ enum skl_disp_power_wells { #define GEN9_IZ_HASHING_MASK(slice) (0x3 << ((slice) * 2)) #define GEN9_IZ_HASHING(slice, val) ((val) << ((slice) * 2)) +/* WaClearTdlStateAckDirtyBits */ +#define GEN8_STATE_ACK _MMIO(0x20F0) +#define GEN9_STATE_ACK_SLICE1 _MMIO(0x20F8) +#define GEN9_STATE_ACK_SLICE2 _MMIO(0x2100) +#define GEN9_STATE_ACK_TDL0 (1 << 12) +#define GEN9_STATE_ACK_TDL1 (1 << 13) +#define GEN9_STATE_ACK_TDL2 (1 << 14) +#define GEN9_STATE_ACK_TDL3 (1 << 15) +#define GEN9_SUBSLICE_TDL_ACK_BITS \ + (GEN9_STATE_ACK_TDL3 | GEN9_STATE_ACK_TDL2 | \ + 
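Both new parameters land in struct i915_params. enable_dp_mst simply gates MST probing for new DisplayPort sinks; inject_load_failure is a failure-injection counter, where a consumer typically bumps a checkpoint count and fakes an error once the Nth checkpoint is reached. A sketch of such a checkpoint helper, assuming only the i915.inject_load_failure field added above (helper name and the -ENODEV choice are illustrative):

static unsigned int failure_checkpoints;        /* checkpoints passed so far */

static bool i915_should_fail_load(void)
{
        if (i915.inject_load_failure == 0)      /* 0: injection disabled */
                return false;
        return ++failure_checkpoints == i915.inject_load_failure;
}

/* at each load checkpoint: if (i915_should_fail_load()) return -ENODEV; */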
GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0) + #define GFX_MODE _MMIO(0x2520) #define GFX_MODE_GEN7 _MMIO(0x229c) #define RING_MODE_GEN7(ring) _MMIO((ring)->mmio_base+0x29c) @@ -1795,6 +1821,7 @@ enum skl_disp_power_wells { #define VLV_DISPLAY_BASE 0x180000 #define VLV_MIPI_BASE VLV_DISPLAY_BASE +#define BXT_MIPI_BASE 0x60000 #define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) @@ -4777,6 +4804,10 @@ enum skl_disp_power_wells { #define CBR_PND_DEADLINE_DISABLE (1<<31) #define CBR_PWM_CLOCK_MUX_SELECT (1<<30) +#define CBR4_VLV _MMIO(VLV_DISPLAY_BASE + 0x70450) +#define CBR_DPLLBMD_PIPE_C (1<<29) +#define CBR_DPLLBMD_PIPE_B (1<<18) + /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64 @@ -6177,6 +6208,7 @@ enum skl_disp_power_wells { /* digital port hotplug */ #define PCH_PORT_HOTPLUG _MMIO(0xc4030) /* SHOTPLUG_CTL */ #define PORTA_HOTPLUG_ENABLE (1 << 28) /* LPT:LP+ & BXT */ +#define BXT_DDIA_HPD_INVERT (1 << 27) #define PORTA_HOTPLUG_STATUS_MASK (3 << 24) /* SPT+ & BXT */ #define PORTA_HOTPLUG_NO_DETECT (0 << 24) /* SPT+ & BXT */ #define PORTA_HOTPLUG_SHORT_DETECT (1 << 24) /* SPT+ & BXT */ @@ -6192,6 +6224,7 @@ enum skl_disp_power_wells { #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) #define PORTD_HOTPLUG_LONG_DETECT (2 << 16) #define PORTC_HOTPLUG_ENABLE (1 << 12) +#define BXT_DDIC_HPD_INVERT (1 << 11) #define PORTC_PULSE_DURATION_2ms (0 << 10) /* pre-LPT */ #define PORTC_PULSE_DURATION_4_5ms (1 << 10) /* pre-LPT */ #define PORTC_PULSE_DURATION_6ms (2 << 10) /* pre-LPT */ @@ -6202,6 +6235,7 @@ enum skl_disp_power_wells { #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) #define PORTC_HOTPLUG_LONG_DETECT (2 << 8) #define PORTB_HOTPLUG_ENABLE (1 << 4) +#define BXT_DDIB_HPD_INVERT (1 << 3) #define PORTB_PULSE_DURATION_2ms (0 << 2) /* pre-LPT */ #define PORTB_PULSE_DURATION_4_5ms (1 << 2) /* pre-LPT */ #define PORTB_PULSE_DURATION_6ms (2 << 2) /* pre-LPT */ @@ -6211,6 +6245,9 @@ enum skl_disp_power_wells { #define PORTB_HOTPLUG_NO_DETECT (0 << 0) #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) #define PORTB_HOTPLUG_LONG_DETECT (2 << 0) +#define BXT_DDI_HPD_INVERT_MASK (BXT_DDIA_HPD_INVERT | \ + BXT_DDIB_HPD_INVERT | \ + BXT_DDIC_HPD_INVERT) #define PCH_PORT_HOTPLUG2 _MMIO(0xc403C) /* SHOTPLUG_CTL2 SPT+ */ #define PORTE_HOTPLUG_ENABLE (1 << 4) @@ -7102,6 +7139,7 @@ enum skl_disp_power_wells { #define GEN9_CCS_TLB_PREFETCH_ENABLE (1<<3) #define GEN8_ROW_CHICKEN _MMIO(0xe4f0) +#define FLOW_CONTROL_ENABLE (1<<15) #define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) #define STALL_DOP_GATING_DISABLE (1<<5) @@ -7362,9 +7400,11 @@ enum skl_disp_power_wells { /* SBI offsets */ #define SBI_SSCDIVINTPHASE 0x0200 #define SBI_SSCDIVINTPHASE6 0x0600 -#define SBI_SSCDIVINTPHASE_DIVSEL_MASK ((0x7f)<<1) +#define SBI_SSCDIVINTPHASE_DIVSEL_SHIFT 1 +#define SBI_SSCDIVINTPHASE_DIVSEL_MASK (0x7f<<1) #define SBI_SSCDIVINTPHASE_DIVSEL(x) ((x)<<1) -#define SBI_SSCDIVINTPHASE_INCVAL_MASK ((0x7f)<<8) +#define SBI_SSCDIVINTPHASE_INCVAL_SHIFT 8 +#define SBI_SSCDIVINTPHASE_INCVAL_MASK (0x7f<<8) #define SBI_SSCDIVINTPHASE_INCVAL(x) ((x)<<8) #define SBI_SSCDIVINTPHASE_DIR(x) ((x)<<15) #define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0) @@ -7374,6 +7414,8 @@ enum skl_disp_power_wells { #define SBI_SSCCTL_PATHALT (1<<3) #define SBI_SSCCTL_DISABLE (1<<0) #define SBI_SSCAUXDIV6 0x0610 +#define SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT 4 +#define SBI_SSCAUXDIV_FINALDIV2SEL_MASK (1<<4) #define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4) #define SBI_DBUFF0 0x2a00 #define 
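On the HSW_CS_GPR()/HSW_CS_GPR_UDW() additions a little above: the command streamer's sixteen general purpose registers sit as consecutive 64-bit slots starting at 0x2600, low dword first, so the macros decode as, for example:

/*
 * HSW_CS_GPR(0)      = 0x2600    GPR0, bits 31:0
 * HSW_CS_GPR_UDW(0)  = 0x2604    GPR0, bits 63:32
 * HSW_CS_GPR(3)      = 0x2618    0x2600 + 3 * 8
 * HSW_CS_GPR_UDW(15) = 0x267c    last dword of the 128-byte block
 */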
SBI_GEN0 0x1f00 @@ -7651,6 +7693,59 @@ enum skl_disp_power_wells { #define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) #define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) +/* pipe degamma/gamma LUTs on IVB+ */ +#define _PAL_PREC_INDEX_A 0x4A400 +#define _PAL_PREC_INDEX_B 0x4AC00 +#define _PAL_PREC_INDEX_C 0x4B400 +#define PAL_PREC_10_12_BIT (0 << 31) +#define PAL_PREC_SPLIT_MODE (1 << 31) +#define PAL_PREC_AUTO_INCREMENT (1 << 15) +#define _PAL_PREC_DATA_A 0x4A404 +#define _PAL_PREC_DATA_B 0x4AC04 +#define _PAL_PREC_DATA_C 0x4B404 +#define _PAL_PREC_GC_MAX_A 0x4A410 +#define _PAL_PREC_GC_MAX_B 0x4AC10 +#define _PAL_PREC_GC_MAX_C 0x4B410 +#define _PAL_PREC_EXT_GC_MAX_A 0x4A420 +#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20 +#define _PAL_PREC_EXT_GC_MAX_C 0x4B420 + +#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B) +#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B) +#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4) +#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4) + +/* pipe CSC & degamma/gamma LUTs on CHV */ +#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900) +#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904) +#define _CGM_PIPE_A_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x67908) +#define _CGM_PIPE_A_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6790C) +#define _CGM_PIPE_A_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x67910) +#define _CGM_PIPE_A_DEGAMMA (VLV_DISPLAY_BASE + 0x66000) +#define _CGM_PIPE_A_GAMMA (VLV_DISPLAY_BASE + 0x67000) +#define _CGM_PIPE_A_MODE (VLV_DISPLAY_BASE + 0x67A00) +#define CGM_PIPE_MODE_GAMMA (1 << 2) +#define CGM_PIPE_MODE_CSC (1 << 1) +#define CGM_PIPE_MODE_DEGAMMA (1 << 0) + +#define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900) +#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904) +#define _CGM_PIPE_B_CSC_COEFF45 (VLV_DISPLAY_BASE + 0x69908) +#define _CGM_PIPE_B_CSC_COEFF67 (VLV_DISPLAY_BASE + 0x6990C) +#define _CGM_PIPE_B_CSC_COEFF8 (VLV_DISPLAY_BASE + 0x69910) +#define _CGM_PIPE_B_DEGAMMA (VLV_DISPLAY_BASE + 0x68000) +#define _CGM_PIPE_B_GAMMA (VLV_DISPLAY_BASE + 0x69000) +#define _CGM_PIPE_B_MODE (VLV_DISPLAY_BASE + 0x69A00) + +#define CGM_PIPE_CSC_COEFF01(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF01, _CGM_PIPE_B_CSC_COEFF01) +#define CGM_PIPE_CSC_COEFF23(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF23, _CGM_PIPE_B_CSC_COEFF23) +#define CGM_PIPE_CSC_COEFF45(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF45, _CGM_PIPE_B_CSC_COEFF45) +#define CGM_PIPE_CSC_COEFF67(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF67, _CGM_PIPE_B_CSC_COEFF67) +#define CGM_PIPE_CSC_COEFF8(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_CSC_COEFF8, _CGM_PIPE_B_CSC_COEFF8) +#define CGM_PIPE_DEGAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_DEGAMMA, _CGM_PIPE_B_DEGAMMA) + (i) * 8 + (w) * 4) +#define CGM_PIPE_GAMMA(pipe, i, w) _MMIO(_PIPE(pipe, _CGM_PIPE_A_GAMMA, _CGM_PIPE_B_GAMMA) + (i) * 8 + (w) * 4) +#define CGM_PIPE_MODE(pipe) _MMIO_PIPE(pipe, _CGM_PIPE_A_MODE, _CGM_PIPE_B_MODE) + /* MIPI DSI registers */ #define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ @@ -7665,58 +7760,62 @@ enum skl_disp_power_wells { #define BXT_MIPI_DIV_SHIFT(port) \ _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \ BXT_MIPI2_DIV_SHIFT) -/* Var clock divider to generate TX source. 
Result must be < 39.5 M */ -#define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26) -#define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10) -#define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \ - _MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \ - BXT_MIPI2_ESCLK_VAR_DIV_MASK) - -#define BXT_MIPI_ESCLK_VAR_DIV(port, val) \ - (val << BXT_MIPI_DIV_SHIFT(port)) + /* TX control divider to select actual TX clock output from (8x/var) */ -#define BXT_MIPI1_TX_ESCLK_SHIFT 21 -#define BXT_MIPI2_TX_ESCLK_SHIFT 5 +#define BXT_MIPI1_TX_ESCLK_SHIFT 26 +#define BXT_MIPI2_TX_ESCLK_SHIFT 10 #define BXT_MIPI_TX_ESCLK_SHIFT(port) \ _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \ BXT_MIPI2_TX_ESCLK_SHIFT) -#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21) -#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5) +#define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (0x3F << 26) +#define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (0x3F << 10) #define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \ _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \ - BXT_MIPI2_TX_ESCLK_FIXDIV_MASK) -#define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \ - (0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port)) -#define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \ - (0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port)) -#define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \ - (0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port)) -/* RX control divider to select actual RX clock output from 8x*/ -#define BXT_MIPI1_RX_ESCLK_SHIFT 19 -#define BXT_MIPI2_RX_ESCLK_SHIFT 3 -#define BXT_MIPI_RX_ESCLK_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \ - BXT_MIPI2_RX_ESCLK_SHIFT) -#define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19) -#define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3) -#define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \ - (3 << BXT_MIPI_RX_ESCLK_SHIFT(port)) -#define BXT_MIPI_RX_ESCLK_8X_BY2(port) \ - (1 << BXT_MIPI_RX_ESCLK_SHIFT(port)) -#define BXT_MIPI_RX_ESCLK_8X_BY3(port) \ - (2 << BXT_MIPI_RX_ESCLK_SHIFT(port)) -#define BXT_MIPI_RX_ESCLK_8X_BY4(port) \ - (3 << BXT_MIPI_RX_ESCLK_SHIFT(port)) -/* BXT-A WA: Always prog DPHY dividers to 00 */ -#define BXT_MIPI1_DPHY_DIV_SHIFT 16 -#define BXT_MIPI2_DPHY_DIV_SHIFT 0 -#define BXT_MIPI_DPHY_DIV_SHIFT(port) \ - _MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \ - BXT_MIPI2_DPHY_DIV_SHIFT) -#define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16) -#define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0) -#define BXT_MIPI_DPHY_DIVIDER_MASK(port) \ - (3 << BXT_MIPI_DPHY_DIV_SHIFT(port)) + BXT_MIPI2_TX_ESCLK_FIXDIV_MASK) +#define BXT_MIPI_TX_ESCLK_DIVIDER(port, val) \ + ((val & 0x3F) << BXT_MIPI_TX_ESCLK_SHIFT(port)) +/* RX upper control divider to select actual RX clock output from 8x */ +#define BXT_MIPI1_RX_ESCLK_UPPER_SHIFT 21 +#define BXT_MIPI2_RX_ESCLK_UPPER_SHIFT 5 +#define BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_SHIFT, \ + BXT_MIPI2_RX_ESCLK_UPPER_SHIFT) +#define BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 21) +#define BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK (3 << 5) +#define BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_UPPER_FIXDIV_MASK, \ + BXT_MIPI2_RX_ESCLK_UPPER_FIXDIV_MASK) +#define BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, val) \ + ((val & 3) << BXT_MIPI_RX_ESCLK_UPPER_SHIFT(port)) +/* 8/3X divider to select the actual 8/3X clock output from 8x */ +#define BXT_MIPI1_8X_BY3_SHIFT 19 +#define BXT_MIPI2_8X_BY3_SHIFT 3 +#define BXT_MIPI_8X_BY3_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_8X_BY3_SHIFT, \ + BXT_MIPI2_8X_BY3_SHIFT) +#define BXT_MIPI1_8X_BY3_DIVIDER_MASK (3 << 19) +#define BXT_MIPI2_8X_BY3_DIVIDER_MASK (3 << 3) +#define BXT_MIPI_8X_BY3_DIVIDER_MASK(port) \ + 
_MIPI_PORT(port, BXT_MIPI1_8X_BY3_DIVIDER_MASK, \ + BXT_MIPI2_8X_BY3_DIVIDER_MASK) +#define BXT_MIPI_8X_BY3_DIVIDER(port, val) \ + ((val & 3) << BXT_MIPI_8X_BY3_SHIFT(port)) +/* RX lower control divider to select actual RX clock output from 8x */ +#define BXT_MIPI1_RX_ESCLK_LOWER_SHIFT 16 +#define BXT_MIPI2_RX_ESCLK_LOWER_SHIFT 0 +#define BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_SHIFT, \ + BXT_MIPI2_RX_ESCLK_LOWER_SHIFT) +#define BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 16) +#define BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK (3 << 0) +#define BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port) \ + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_LOWER_FIXDIV_MASK, \ + BXT_MIPI2_RX_ESCLK_LOWER_FIXDIV_MASK) +#define BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, val) \ + ((val & 3) << BXT_MIPI_RX_ESCLK_LOWER_SHIFT(port)) + +#define RX_DIVIDER_BIT_1_2 0x3 +#define RX_DIVIDER_BIT_3_4 0xC /* BXT MIPI mode configure */ #define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 @@ -7741,9 +7840,11 @@ enum skl_disp_power_wells { #define BXT_DSIC_16X_BY2 (1 << 10) #define BXT_DSIC_16X_BY3 (2 << 10) #define BXT_DSIC_16X_BY4 (3 << 10) +#define BXT_DSIC_16X_MASK (3 << 10) #define BXT_DSIA_16X_BY2 (1 << 8) #define BXT_DSIA_16X_BY3 (2 << 8) #define BXT_DSIA_16X_BY4 (3 << 8) +#define BXT_DSIA_16X_MASK (3 << 8) #define BXT_DSI_FREQ_SEL_SHIFT 8 #define BXT_DSI_FREQ_SEL_MASK (0xF << BXT_DSI_FREQ_SEL_SHIFT) @@ -7878,8 +7979,8 @@ enum skl_disp_power_wells { #define VID_MODE_FORMAT_MASK (0xf << 7) #define VID_MODE_NOT_SUPPORTED (0 << 7) #define VID_MODE_FORMAT_RGB565 (1 << 7) -#define VID_MODE_FORMAT_RGB666 (2 << 7) -#define VID_MODE_FORMAT_RGB666_LOOSE (3 << 7) +#define VID_MODE_FORMAT_RGB666_PACKED (2 << 7) +#define VID_MODE_FORMAT_RGB666 (3 << 7) #define VID_MODE_FORMAT_RGB888 (4 << 7) #define CMD_MODE_CHANNEL_NUMBER_SHIFT 5 #define CMD_MODE_CHANNEL_NUMBER_MASK (3 << 5) @@ -8135,6 +8236,7 @@ enum skl_disp_power_wells { #define READ_REQUEST_PRIORITY_HIGH (3 << 3) #define RGB_FLIP_TO_BGR (1 << 2) +#define BXT_PIPE_SELECT_SHIFT 7 #define BXT_PIPE_SELECT_MASK (7 << 7) #define BXT_PIPE_SELECT(pipe) ((pipe) << 7) diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index c6188dddb341..2d576b7ff299 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -370,6 +370,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, flush_delayed_work(&dev_priv->rps.delayed_resume_work); + intel_runtime_pm_get(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); val = intel_freq_opcode(dev_priv, val); @@ -378,6 +380,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, val > dev_priv->rps.max_freq || val < dev_priv->rps.min_freq_softlimit) { mutex_unlock(&dev_priv->rps.hw_lock); + intel_runtime_pm_put(dev_priv); return -EINVAL; } @@ -398,6 +401,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, mutex_unlock(&dev_priv->rps.hw_lock); + intel_runtime_pm_put(dev_priv); + return count; } @@ -433,6 +438,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, flush_delayed_work(&dev_priv->rps.delayed_resume_work); + intel_runtime_pm_get(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); val = intel_freq_opcode(dev_priv, val); @@ -441,6 +448,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, val > dev_priv->rps.max_freq || val > dev_priv->rps.max_freq_softlimit) { mutex_unlock(&dev_priv->rps.hw_lock); + intel_runtime_pm_put(dev_priv); return -EINVAL; } @@ -457,6 +465,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, 
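The i915_sysfs.c hunks here (gt_max_freq_mhz_store above, gt_min_freq_mhz_store continuing just below) wrap the rps access in a runtime-PM reference, and the wakeref has to be released on every exit path, including the -EINVAL bail-out. The shape, reduced to a skeleton (freq_in_range() stands in for the real limit checks):

static int freq_store_skeleton(struct drm_i915_private *dev_priv, u32 val)
{
        intel_runtime_pm_get(dev_priv);         /* keep the device awake */
        mutex_lock(&dev_priv->rps.hw_lock);

        if (!freq_in_range(dev_priv, val)) {    /* hypothetical validation */
                mutex_unlock(&dev_priv->rps.hw_lock);
                intel_runtime_pm_put(dev_priv); /* pair on the error path too */
                return -EINVAL;
        }

        /* ... program the new frequency limit ... */

        mutex_unlock(&dev_priv->rps.hw_lock);
        intel_runtime_pm_put(dev_priv);
        return 0;
}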
mutex_unlock(&dev_priv->rps.hw_lock); + intel_runtime_pm_put(dev_priv); + return count; } diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index fa09e5581137..dc0def210097 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -464,7 +464,7 @@ TRACE_EVENT(i915_gem_ring_sync_to, TP_fast_assign( __entry->dev = from->dev->primary->index; __entry->sync_from = from->id; - __entry->sync_to = to_req->ring->id; + __entry->sync_to = to_req->engine->id; __entry->seqno = i915_gem_request_get_seqno(req); ), @@ -486,13 +486,13 @@ TRACE_EVENT(i915_gem_ring_dispatch, ), TP_fast_assign( - struct intel_engine_cs *ring = - i915_gem_request_get_ring(req); - __entry->dev = ring->dev->primary->index; - __entry->ring = ring->id; + struct intel_engine_cs *engine = + i915_gem_request_get_engine(req); + __entry->dev = engine->dev->primary->index; + __entry->ring = engine->id; __entry->seqno = i915_gem_request_get_seqno(req); __entry->flags = flags; - i915_trace_irq_get(ring, req); + i915_trace_irq_get(engine, req); ), TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", @@ -511,8 +511,8 @@ TRACE_EVENT(i915_gem_ring_flush, ), TP_fast_assign( - __entry->dev = req->ring->dev->primary->index; - __entry->ring = req->ring->id; + __entry->dev = req->engine->dev->primary->index; + __entry->ring = req->engine->id; __entry->invalidate = invalidate; __entry->flush = flush; ), @@ -533,10 +533,10 @@ DECLARE_EVENT_CLASS(i915_gem_request, ), TP_fast_assign( - struct intel_engine_cs *ring = - i915_gem_request_get_ring(req); - __entry->dev = ring->dev->primary->index; - __entry->ring = ring->id; + struct intel_engine_cs *engine = + i915_gem_request_get_engine(req); + __entry->dev = engine->dev->primary->index; + __entry->ring = engine->id; __entry->seqno = i915_gem_request_get_seqno(req); ), @@ -550,8 +550,8 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add, ); TRACE_EVENT(i915_gem_request_notify, - TP_PROTO(struct intel_engine_cs *ring), - TP_ARGS(ring), + TP_PROTO(struct intel_engine_cs *engine), + TP_ARGS(engine), TP_STRUCT__entry( __field(u32, dev) @@ -560,9 +560,9 @@ TRACE_EVENT(i915_gem_request_notify, ), TP_fast_assign( - __entry->dev = ring->dev->primary->index; - __entry->ring = ring->id; - __entry->seqno = ring->get_seqno(ring, false); + __entry->dev = engine->dev->primary->index; + __entry->ring = engine->id; + __entry->seqno = engine->get_seqno(engine); ), TP_printk("dev=%u, ring=%u, seqno=%u", @@ -597,13 +597,13 @@ TRACE_EVENT(i915_gem_request_wait_begin, * less desirable. */ TP_fast_assign( - struct intel_engine_cs *ring = - i915_gem_request_get_ring(req); - __entry->dev = ring->dev->primary->index; - __entry->ring = ring->id; + struct intel_engine_cs *engine = + i915_gem_request_get_engine(req); + __entry->dev = engine->dev->primary->index; + __entry->ring = engine->id; __entry->seqno = i915_gem_request_get_seqno(req); __entry->blocking = - mutex_is_locked(&ring->dev->struct_mutex); + mutex_is_locked(&engine->dev->struct_mutex); ), TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", @@ -777,9 +777,9 @@ DEFINE_EVENT(i915_context, i915_context_free, * called only if full ppgtt is enabled. 
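Note the pattern through the i915_trace.h hunks here: the tracepoints now read the device index and id from req->engine (via i915_gem_request_get_engine()), but the recorded field and the TP_printk output keep the name "ring", so the trace format seen by userspace tools is unchanged.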
*/ TRACE_EVENT(switch_mm, - TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to), + TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), - TP_ARGS(ring, to), + TP_ARGS(engine, to), TP_STRUCT__entry( __field(u32, ring) @@ -789,10 +789,10 @@ TRACE_EVENT(switch_mm, ), TP_fast_assign( - __entry->ring = ring->id; + __entry->ring = engine->id; __entry->to = to; __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; - __entry->dev = ring->dev->primary->index; + __entry->dev = engine->dev->primary->index; ), TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c index dea7429be4d0..d02efb8cad4d 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.c +++ b/drivers/gpu/drm/i915/i915_vgpu.c @@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm, int intel_vgt_balloon(struct drm_device *dev) { struct drm_i915_private *dev_priv = to_i915(dev); - struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; - unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total; + struct i915_ggtt *ggtt = &dev_priv->ggtt; + unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; unsigned long mappable_base, mappable_size, mappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end; @@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev) DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n", unmappable_base, unmappable_size / 1024); - if (mappable_base < ggtt_vm->start || - mappable_end > dev_priv->gtt.mappable_end || - unmappable_base < dev_priv->gtt.mappable_end || - unmappable_end > ggtt_vm_end) { + if (mappable_base < ggtt->base.start || + mappable_end > ggtt->mappable_end || + unmappable_base < ggtt->mappable_end || + unmappable_end > ggtt_end) { DRM_ERROR("Invalid ballooning configuration!\n"); return -EINVAL; } /* Unmappable graphic memory ballooning */ - if (unmappable_base > dev_priv->gtt.mappable_end) { - ret = vgt_balloon_space(&ggtt_vm->mm, + if (unmappable_base > ggtt->mappable_end) { + ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[2], - dev_priv->gtt.mappable_end, + ggtt->mappable_end, unmappable_base); if (ret) @@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev) * No need to partition out the last physical page, * because it is reserved to the guard page. 
*/ - if (unmappable_end < ggtt_vm_end - PAGE_SIZE) { - ret = vgt_balloon_space(&ggtt_vm->mm, + if (unmappable_end < ggtt_end - PAGE_SIZE) { + ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[3], unmappable_end, - ggtt_vm_end - PAGE_SIZE); + ggtt_end - PAGE_SIZE); if (ret) goto err; } /* Mappable graphic memory ballooning */ - if (mappable_base > ggtt_vm->start) { - ret = vgt_balloon_space(&ggtt_vm->mm, + if (mappable_base > ggtt->base.start) { + ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[0], - ggtt_vm->start, mappable_base); + ggtt->base.start, mappable_base); if (ret) goto err; } - if (mappable_end < dev_priv->gtt.mappable_end) { - ret = vgt_balloon_space(&ggtt_vm->mm, + if (mappable_end < ggtt->mappable_end) { + ret = vgt_balloon_space(&ggtt->base.mm, &bl_info.space[1], mappable_end, - dev_priv->gtt.mappable_end); + ggtt->mappable_end); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 8e579a8505ac..50ff90aea721 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -96,8 +96,11 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->update_pipe = false; crtc_state->disable_lp_wm = false; crtc_state->disable_cxsr = false; - crtc_state->wm_changed = false; + crtc_state->update_wm_pre = false; + crtc_state->update_wm_post = false; crtc_state->fb_changed = false; + crtc_state->wm.need_postvbl_update = false; + crtc_state->fb_bits = 0; return &crtc_state->base; } diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index e0b851a0004a..7de7721f65bc 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -195,12 +195,10 @@ static void intel_plane_atomic_update(struct drm_plane *plane, struct intel_plane_state *intel_state = to_intel_plane_state(plane->state); struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc; - struct drm_crtc_state *crtc_state = - drm_atomic_get_existing_crtc_state(old_state->state, crtc); if (intel_state->visible) intel_plane->update_plane(plane, - to_intel_crtc_state(crtc_state), + to_intel_crtc_state(crtc->state), intel_state); else intel_plane->disable_plane(plane, crtc); diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 30f921421b0c..56ba8765816e 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -373,7 +373,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder) if (WARN_ON(port == PORT_A)) return; - if (HAS_PCH_IBX(dev_priv->dev)) { + if (HAS_PCH_IBX(dev_priv)) { aud_config = IBX_AUD_CFG(pipe); aud_cntrl_st2 = IBX_AUD_CNTL_ST2; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { @@ -564,23 +564,21 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder) } /** - * intel_init_audio - Set up chip specific audio functions - * @dev: drm device + * intel_init_audio_hooks - Set up chip specific audio hooks + * @dev_priv: device private */ -void intel_init_audio(struct drm_device *dev) +void intel_init_audio_hooks(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (IS_G4X(dev)) { + if (IS_G4X(dev_priv)) { dev_priv->display.audio_codec_enable = g4x_audio_codec_enable; dev_priv->display.audio_codec_disable = g4x_audio_codec_disable; - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 
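The i915_vgpu.c changes above are a mechanical move from dev_priv->gtt to the new dev_priv->ggtt embedding; the ballooning logic itself is unchanged. For orientation, a sketch of the four reserved ranges intel_vgt_balloon() may carve out of the global GTT, as implied by the bounds checks above:

/*
 *  base.start     mappable_base       mappable_end    ggtt->mappable_end
 *      |<-space[0]->|<-guest mappable->|<--space[1]-->|
 *
 *  ggtt->mappable_end  unmappable_base  unmappable_end        ggtt_end
 *      |<-space[2]->|<-guest unmappable->|<--space[3]-->|<-guard page->|
 *
 * Each space[i] is reserved via vgt_balloon_space() so the guest never
 * allocates from it; the final page is skipped, as the code notes, for
 * the guard page.
 */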
dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; - } else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) { + } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) { dev_priv->display.audio_codec_enable = hsw_audio_codec_enable; dev_priv->display.audio_codec_disable = hsw_audio_codec_disable; - } else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev_priv)) { dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; } diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index bf62a19c8f69..e72dd9a8d6bf 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -29,7 +29,9 @@ #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" -#include "intel_bios.h" + +#define _INTEL_BIOS_PRIVATE +#include "intel_vbt_defs.h" /** * DOC: Video BIOS Table (VBT) @@ -56,8 +58,6 @@ #define SLAVE_ADDR1 0x70 #define SLAVE_ADDR2 0x72 -static int panel_type; - /* Get BDB block size given a pointer to Block ID. */ static u32 _get_blocksize(const u8 *block_base) { @@ -203,17 +203,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, const struct lvds_dvo_timing *panel_dvo_timing; const struct lvds_fp_timing *fp_timing; struct drm_display_mode *panel_fixed_mode; + int panel_type; int drrs_mode; + int ret; lvds_options = find_section(bdb, BDB_LVDS_OPTIONS); if (!lvds_options) return; dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; - if (lvds_options->panel_type == 0xff) - return; - panel_type = lvds_options->panel_type; + ret = intel_opregion_get_panel_type(dev_priv->dev); + if (ret >= 0) { + WARN_ON(ret > 0xf); + panel_type = ret; + DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type); + } else { + if (lvds_options->panel_type > 0xf) { + DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n", + lvds_options->panel_type); + return; + } + panel_type = lvds_options->panel_type; + DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type); + } + + dev_priv->vbt.panel_type = panel_type; drrs_mode = (lvds_options->dps_panel_type_bits >> (panel_type * 2)) & MODE_MASK; @@ -249,7 +264,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data, lvds_lfp_data_ptrs, - lvds_options->panel_type); + panel_type); panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL); if (!panel_fixed_mode) @@ -264,7 +279,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, lvds_lfp_data_ptrs, - lvds_options->panel_type); + panel_type); if (fp_timing) { /* check the resolution, just to be sure */ if (fp_timing->x_res == panel_fixed_mode->hdisplay && @@ -282,6 +297,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, { const struct bdb_lfp_backlight_data *backlight_data; const struct bdb_lfp_backlight_data_entry *entry; + int panel_type = dev_priv->vbt.panel_type; backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); if (!backlight_data) @@ -480,7 +496,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, child->slave_addr, (child->dvo_port == DEVICE_PORT_DVOB) ? 
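On the parse_lfp_panel_data() hunk above: the panel type now prefers the OpRegion value when intel_opregion_get_panel_type() returns one, falling back to the range-checked VBT field. The decision ladder, as a sketch (helper name illustrative; the real code WARNs on an OpRegion value above 0xf but still uses it):

static int resolve_panel_type(int opregion_type, u8 vbt_panel_type)
{
        if (opregion_type >= 0)         /* OpRegion wins when present */
                return opregion_type;
        if (vbt_panel_type > 0xf)       /* reject out-of-range VBT data */
                return -1;
        return vbt_panel_type;
}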
"SDVOB" : "SDVOC"); - p_mapping = &(dev_priv->sdvo_mappings[child->dvo_port - 1]); + p_mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1]; if (!p_mapping->initialized) { p_mapping->dvo_port = child->dvo_port; p_mapping->slave_addr = child->slave_addr; @@ -525,10 +541,7 @@ parse_driver_features(struct drm_i915_private *dev_priv, return; if (driver->lvds_config == BDB_DRIVER_FEATURE_EDP) - dev_priv->vbt.edp_support = 1; - - if (driver->dual_frequency) - dev_priv->render_reclock_avail = true; + dev_priv->vbt.edp.support = 1; DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled); /* @@ -547,23 +560,24 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) const struct bdb_edp *edp; const struct edp_power_seq *edp_pps; const struct edp_link_params *edp_link_params; + int panel_type = dev_priv->vbt.panel_type; edp = find_section(bdb, BDB_EDP); if (!edp) { - if (dev_priv->vbt.edp_support) + if (dev_priv->vbt.edp.support) DRM_DEBUG_KMS("No eDP BDB found but eDP panel supported.\n"); return; } switch ((edp->color_depth >> (panel_type * 2)) & 3) { case EDP_18BPP: - dev_priv->vbt.edp_bpp = 18; + dev_priv->vbt.edp.bpp = 18; break; case EDP_24BPP: - dev_priv->vbt.edp_bpp = 24; + dev_priv->vbt.edp.bpp = 24; break; case EDP_30BPP: - dev_priv->vbt.edp_bpp = 30; + dev_priv->vbt.edp.bpp = 30; break; } @@ -571,14 +585,14 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) edp_pps = &edp->power_seqs[panel_type]; edp_link_params = &edp->link_params[panel_type]; - dev_priv->vbt.edp_pps = *edp_pps; + dev_priv->vbt.edp.pps = *edp_pps; switch (edp_link_params->rate) { case EDP_RATE_1_62: - dev_priv->vbt.edp_rate = DP_LINK_BW_1_62; + dev_priv->vbt.edp.rate = DP_LINK_BW_1_62; break; case EDP_RATE_2_7: - dev_priv->vbt.edp_rate = DP_LINK_BW_2_7; + dev_priv->vbt.edp.rate = DP_LINK_BW_2_7; break; default: DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n", @@ -588,13 +602,13 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) switch (edp_link_params->lanes) { case EDP_LANE_1: - dev_priv->vbt.edp_lanes = 1; + dev_priv->vbt.edp.lanes = 1; break; case EDP_LANE_2: - dev_priv->vbt.edp_lanes = 2; + dev_priv->vbt.edp.lanes = 2; break; case EDP_LANE_4: - dev_priv->vbt.edp_lanes = 4; + dev_priv->vbt.edp.lanes = 4; break; default: DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n", @@ -604,16 +618,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) switch (edp_link_params->preemphasis) { case EDP_PREEMPHASIS_NONE: - dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0; + dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_0; break; case EDP_PREEMPHASIS_3_5dB: - dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1; + dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_1; break; case EDP_PREEMPHASIS_6dB: - dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2; + dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_2; break; case EDP_PREEMPHASIS_9_5dB: - dev_priv->vbt.edp_preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; + dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3; break; default: DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n", @@ -623,16 +637,16 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) switch (edp_link_params->vswing) { case EDP_VSWING_0_4V: - dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0; + dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_0; break; case EDP_VSWING_0_6V: - dev_priv->vbt.edp_vswing = 
DP_TRAIN_VOLTAGE_SWING_LEVEL_1; + dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_1; break; case EDP_VSWING_0_8V: - dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2; + dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_2; break; case EDP_VSWING_1_2V: - dev_priv->vbt.edp_vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; break; default: DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n", @@ -645,10 +659,10 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) /* Don't read from VBT if module parameter has valid value*/ if (i915.edp_vswing) { - dev_priv->edp_low_vswing = i915.edp_vswing == 1; + dev_priv->vbt.edp.low_vswing = i915.edp_vswing == 1; } else { vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF; - dev_priv->edp_low_vswing = vswing == 0; + dev_priv->vbt.edp.low_vswing = vswing == 0; } } } @@ -658,6 +672,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { const struct bdb_psr *psr; const struct psr_table *psr_table; + int panel_type = dev_priv->vbt.panel_type; psr = find_section(bdb, BDB_PSR); if (!psr) { @@ -704,9 +719,10 @@ parse_mipi_config(struct drm_i915_private *dev_priv, const struct bdb_mipi_config *start; const struct mipi_config *config; const struct mipi_pps_data *pps; + int panel_type = dev_priv->vbt.panel_type; /* parse MIPI blocks only if LFP type is MIPI */ - if (!dev_priv->vbt.has_mipi) + if (!intel_bios_is_dsi_present(dev_priv, NULL)) return; /* Initialize this to undefined indicating no generic MIPI support */ @@ -911,6 +927,7 @@ static void parse_mipi_sequence(struct drm_i915_private *dev_priv, const struct bdb_header *bdb) { + int panel_type = dev_priv->vbt.panel_type; const struct bdb_mipi_sequence *sequence; const u8 *seq_data; u32 seq_size; @@ -1124,7 +1141,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, } /* Parse the I_boost config for SKL and above */ - if (bdb->version >= 196 && (child->common.flags_1 & IBOOST_ENABLE)) { + if (bdb->version >= 196 && child->common.iboost) { info->dp_boost_level = translate_iboost(child->common.iboost_level & 0xF); DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n", port_name(port), info->dp_boost_level); @@ -1232,14 +1249,6 @@ parse_device_mapping(struct drm_i915_private *dev_priv, continue; } - if (p_child->common.dvo_port >= DVO_PORT_MIPIA - && p_child->common.dvo_port <= DVO_PORT_MIPID - &&p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT) { - DRM_DEBUG_KMS("Found MIPI as LFP\n"); - dev_priv->vbt.has_mipi = 1; - dev_priv->vbt.dsi.port = p_child->common.dvo_port; - } - child_dev_ptr = dev_priv->vbt.child_dev + count; count++; @@ -1250,6 +1259,19 @@ parse_device_mapping(struct drm_i915_private *dev_priv, */ memcpy(child_dev_ptr, p_child, min_t(size_t, p_defs->child_dev_size, sizeof(*p_child))); + + /* + * copied full block, now init values when they are not + * available in current version + */ + if (bdb->version < 196) { + /* Set default values for bits added from v196 */ + child_dev_ptr->common.iboost = 0; + child_dev_ptr->common.hpd_invert = 0; + } + + if (bdb->version < 192) + child_dev_ptr->common.lspcon = 0; } return; } @@ -1431,3 +1453,210 @@ intel_bios_init(struct drm_i915_private *dev_priv) return 0; } + +/** + * intel_bios_is_tv_present - is integrated TV present in VBT + * @dev_priv: i915 device instance + * + * Return true if TV is present. If no child devices were parsed from VBT, + * assume TV is present. 
+ */
+bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv)
+{
+        union child_device_config *p_child;
+        int i;
+
+        if (!dev_priv->vbt.int_tv_support)
+                return false;
+
+        if (!dev_priv->vbt.child_dev_num)
+                return true;
+
+        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+                p_child = dev_priv->vbt.child_dev + i;
+                /*
+                 * If the device type is not TV, continue.
+                 */
+                switch (p_child->old.device_type) {
+                case DEVICE_TYPE_INT_TV:
+                case DEVICE_TYPE_TV:
+                case DEVICE_TYPE_TV_SVIDEO_COMPOSITE:
+                        break;
+                default:
+                        continue;
+                }
+                /* Only when the addin_offset is non-zero, it is regarded
+                 * as present.
+                 */
+                if (p_child->old.addin_offset)
+                        return true;
+        }
+
+        return false;
+}
+
+/**
+ * intel_bios_is_lvds_present - is LVDS present in VBT
+ * @dev_priv: i915 device instance
+ * @i2c_pin: i2c pin for LVDS if present
+ *
+ * Return true if LVDS is present. If no child devices were parsed from VBT,
+ * assume LVDS is present.
+ */
+bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
+{
+        int i;
+
+        if (!dev_priv->vbt.child_dev_num)
+                return true;
+
+        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+                union child_device_config *uchild = dev_priv->vbt.child_dev + i;
+                struct old_child_dev_config *child = &uchild->old;
+
+                /* If the device type is not LFP, continue.
+                 * We have to check both the new identifiers as well as the
+                 * old for compatibility with some BIOSes.
+                 */
+                if (child->device_type != DEVICE_TYPE_INT_LFP &&
+                    child->device_type != DEVICE_TYPE_LFP)
+                        continue;
+
+                if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin))
+                        *i2c_pin = child->i2c_pin;
+
+                /* However, we cannot trust the BIOS writers to populate
+                 * the VBT correctly. Since LVDS requires additional
+                 * information from AIM blocks, a non-zero addin offset is
+                 * a good indicator that the LVDS is actually present.
+                 */
+                if (child->addin_offset)
+                        return true;
+
+                /* But even then some BIOS writers perform some black magic
+                 * and instantiate the device without reference to any
+                 * additional data. Trust that if the VBT was written into
+                 * the OpRegion then they have validated the LVDS's existence.
+                 */
+                if (dev_priv->opregion.vbt)
+                        return true;
+        }
+
+        return false;
+}
+
+/**
+ * intel_bios_is_port_edp - is the device in given port eDP
+ * @dev_priv: i915 device instance
+ * @port: port to check
+ *
+ * Return true if the device in %port is eDP.
+ */
+bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
+{
+        union child_device_config *p_child;
+        static const short port_mapping[] = {
+                [PORT_B] = DVO_PORT_DPB,
+                [PORT_C] = DVO_PORT_DPC,
+                [PORT_D] = DVO_PORT_DPD,
+                [PORT_E] = DVO_PORT_DPE,
+        };
+        int i;
+
+        if (!dev_priv->vbt.child_dev_num)
+                return false;
+
+        for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+                p_child = dev_priv->vbt.child_dev + i;
+
+                if (p_child->common.dvo_port == port_mapping[port] &&
+                    (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
+                    (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
+                        return true;
+        }
+
+        return false;
+}
+
+/**
+ * intel_bios_is_dsi_present - is DSI present in VBT
+ * @dev_priv: i915 device instance
+ * @port: port for DSI if present
+ *
+ * Return true if DSI is present, and return the port in %port.
+ */ +bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, + enum port *port) +{ + union child_device_config *p_child; + u8 dvo_port; + int i; + + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + p_child = dev_priv->vbt.child_dev + i; + + if (!(p_child->common.device_type & DEVICE_TYPE_MIPI_OUTPUT)) + continue; + + dvo_port = p_child->common.dvo_port; + + switch (dvo_port) { + case DVO_PORT_MIPIA: + case DVO_PORT_MIPIC: + if (port) + *port = dvo_port - DVO_PORT_MIPIA; + return true; + case DVO_PORT_MIPIB: + case DVO_PORT_MIPID: + DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n", + port_name(dvo_port - DVO_PORT_MIPIA)); + break; + } + } + + return false; +} + +/** + * intel_bios_is_port_hpd_inverted - is HPD inverted for %port + * @dev_priv: i915 device instance + * @port: port to check + * + * Return true if HPD should be inverted for %port. + */ +bool +intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, + enum port port) +{ + int i; + + if (WARN_ON_ONCE(!IS_BROXTON(dev_priv))) + return false; + + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { + if (!dev_priv->vbt.child_dev[i].common.hpd_invert) + continue; + + switch (dev_priv->vbt.child_dev[i].common.dvo_port) { + case DVO_PORT_DPA: + case DVO_PORT_HDMIA: + if (port == PORT_A) + return true; + break; + case DVO_PORT_DPB: + case DVO_PORT_HDMIB: + if (port == PORT_B) + return true; + break; + case DVO_PORT_DPC: + case DVO_PORT_HDMIC: + if (port == PORT_C) + return true; + break; + default: + break; + } + } + + return false; +} diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index 350d4e0f75a4..ab0ea315eddb 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h @@ -1,5 +1,5 @@ /* - * Copyright © 2006 Intel Corporation + * Copyright © 2016 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -19,543 +19,16 @@ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
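intel_bios_is_port_hpd_inverted() above matches VBT DVO port codes to driver ports case by case. The same mapping can equally be read as a small translation helper, assuming the DVO_PORT_* codes from these headers (the -1 sentinel is illustrative, not part of the patch):

static int bxt_dvo_to_port(u8 dvo_port)
{
        switch (dvo_port) {
        case DVO_PORT_DPA:
        case DVO_PORT_HDMIA:
                return PORT_A;
        case DVO_PORT_DPB:
        case DVO_PORT_HDMIB:
                return PORT_B;
        case DVO_PORT_DPC:
        case DVO_PORT_HDMIC:
                return PORT_C;
        default:
                return -1;      /* no HPD-invert mapping for this code */
        }
}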
- * - * Authors: - * Eric Anholt <eric@anholt.net> - * - */ - -#ifndef _INTEL_BIOS_H_ -#define _INTEL_BIOS_H_ - -/** - * struct vbt_header - VBT Header structure - * @signature: VBT signature, always starts with "$VBT" - * @version: Version of this structure - * @header_size: Size of this structure - * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks) - * @vbt_checksum: Checksum - * @reserved0: Reserved - * @bdb_offset: Offset of &struct bdb_header from beginning of VBT - * @aim_offset: Offsets of add-in data blocks from beginning of VBT - */ -struct vbt_header { - u8 signature[20]; - u16 version; - u16 header_size; - u16 vbt_size; - u8 vbt_checksum; - u8 reserved0; - u32 bdb_offset; - u32 aim_offset[4]; -} __packed; - -/** - * struct bdb_header - BDB Header structure - * @signature: BDB signature "BIOS_DATA_BLOCK" - * @version: Version of the data block definitions - * @header_size: Size of this structure - * @bdb_size: Size of BDB (BDB Header and data blocks) - */ -struct bdb_header { - u8 signature[16]; - u16 version; - u16 header_size; - u16 bdb_size; -} __packed; - -/* strictly speaking, this is a "skip" block, but it has interesting info */ -struct vbios_data { - u8 type; /* 0 == desktop, 1 == mobile */ - u8 relstage; - u8 chipset; - u8 lvds_present:1; - u8 tv_present:1; - u8 rsvd2:6; /* finish byte */ - u8 rsvd3[4]; - u8 signon[155]; - u8 copyright[61]; - u16 code_segment; - u8 dos_boot_mode; - u8 bandwidth_percent; - u8 rsvd4; /* popup memory size */ - u8 resize_pci_bios; - u8 rsvd5; /* is crt already on ddc2 */ -} __packed; - -/* - * There are several types of BIOS data blocks (BDBs), each block has - * an ID and size in the first 3 bytes (ID in first, size in next 2). - * Known types are listed below. */ -#define BDB_GENERAL_FEATURES 1 -#define BDB_GENERAL_DEFINITIONS 2 -#define BDB_OLD_TOGGLE_LIST 3 -#define BDB_MODE_SUPPORT_LIST 4 -#define BDB_GENERIC_MODE_TABLE 5 -#define BDB_EXT_MMIO_REGS 6 -#define BDB_SWF_IO 7 -#define BDB_SWF_MMIO 8 -#define BDB_PSR 9 -#define BDB_MODE_REMOVAL_TABLE 10 -#define BDB_CHILD_DEVICE_TABLE 11 -#define BDB_DRIVER_FEATURES 12 -#define BDB_DRIVER_PERSISTENCE 13 -#define BDB_EXT_TABLE_PTRS 14 -#define BDB_DOT_CLOCK_OVERRIDE 15 -#define BDB_DISPLAY_SELECT 16 -/* 17 rsvd */ -#define BDB_DRIVER_ROTATION 18 -#define BDB_DISPLAY_REMOVE 19 -#define BDB_OEM_CUSTOM 20 -#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */ -#define BDB_SDVO_LVDS_OPTIONS 22 -#define BDB_SDVO_PANEL_DTDS 23 -#define BDB_SDVO_LVDS_PNP_IDS 24 -#define BDB_SDVO_LVDS_POWER_SEQ 25 -#define BDB_TV_OPTIONS 26 -#define BDB_EDP 27 -#define BDB_LVDS_OPTIONS 40 -#define BDB_LVDS_LFP_DATA_PTRS 41 -#define BDB_LVDS_LFP_DATA 42 -#define BDB_LVDS_BACKLIGHT 43 -#define BDB_LVDS_POWER 44 -#define BDB_MIPI_CONFIG 52 -#define BDB_MIPI_SEQUENCE 53 -#define BDB_SKIP 254 /* VBIOS private block, ignore */ - -struct bdb_general_features { - /* bits 1 */ - u8 panel_fitting:2; - u8 flexaim:1; - u8 msg_enable:1; - u8 clear_screen:3; - u8 color_flip:1; - - /* bits 2 */ - u8 download_ext_vbt:1; - u8 enable_ssc:1; - u8 ssc_freq:1; - u8 enable_lfp_on_override:1; - u8 disable_ssc_ddt:1; - u8 rsvd7:1; - u8 display_clock_mode:1; - u8 rsvd8:1; /* finish byte */ - - /* bits 3 */ - u8 disable_smooth_vision:1; - u8 single_dvi:1; - u8 rsvd9:1; - u8 fdi_rx_polarity_inverted:1; - u8 rsvd10:4; /* finish byte */ - - /* bits 4 */ - u8 legacy_monitor_detect; - - /* bits 5 */ - u8 int_crt_support:1; - u8 int_tv_support:1; - u8 int_efp_support:1; - u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ - 
u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ - u8 rsvd11:3; /* finish byte */ -} __packed; - -/* pre-915 */ -#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ -#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */ -#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */ -#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */ - -/* Pre 915 */ -#define DEVICE_TYPE_NONE 0x00 -#define DEVICE_TYPE_CRT 0x01 -#define DEVICE_TYPE_TV 0x09 -#define DEVICE_TYPE_EFP 0x12 -#define DEVICE_TYPE_LFP 0x22 -/* On 915+ */ -#define DEVICE_TYPE_CRT_DPMS 0x6001 -#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001 -#define DEVICE_TYPE_TV_COMPOSITE 0x0209 -#define DEVICE_TYPE_TV_MACROVISION 0x0289 -#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c -#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609 -#define DEVICE_TYPE_TV_SCART 0x0209 -#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009 -#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012 -#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052 -#define DEVICE_TYPE_EFP_DVI_I 0x6053 -#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152 -#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2 -#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062 -#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162 -#define DEVICE_TYPE_LFP_PANELLINK 0x5012 -#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042 -#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062 -#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162 -#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2 - -#define DEVICE_CFG_NONE 0x00 -#define DEVICE_CFG_12BIT_DVOB 0x01 -#define DEVICE_CFG_12BIT_DVOC 0x02 -#define DEVICE_CFG_24BIT_DVOBC 0x09 -#define DEVICE_CFG_24BIT_DVOCB 0x0a -#define DEVICE_CFG_DUAL_DVOB 0x11 -#define DEVICE_CFG_DUAL_DVOC 0x12 -#define DEVICE_CFG_DUAL_DVOBC 0x13 -#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19 -#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a - -#define DEVICE_WIRE_NONE 0x00 -#define DEVICE_WIRE_DVOB 0x01 -#define DEVICE_WIRE_DVOC 0x02 -#define DEVICE_WIRE_DVOBC 0x03 -#define DEVICE_WIRE_DVOBB 0x05 -#define DEVICE_WIRE_DVOCC 0x06 -#define DEVICE_WIRE_DVOB_MASTER 0x0d -#define DEVICE_WIRE_DVOC_MASTER 0x0e - -#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */ -#define DEVICE_PORT_DVOB 0x01 -#define DEVICE_PORT_DVOC 0x02 /* - * We used to keep this struct but without any version control. We should avoid - * using it in the future, but it should be safe to keep using it in the old - * code. Do not change; we rely on its size. + * Please use intel_vbt_defs.h for VBT private data, to hide and abstract away + * the VBT from the rest of the driver. Add the parsed, clean data to struct + * intel_vbt_data within struct drm_i915_private. */ -struct old_child_dev_config { - u16 handle; - u16 device_type; - u8 device_id[10]; /* ascii string */ - u16 addin_offset; - u8 dvo_port; /* See Device_PORT_* above */ - u8 i2c_pin; - u8 slave_addr; - u8 ddc_pin; - u16 edid_ptr; - u8 dvo_cfg; /* See DEVICE_CFG_* above */ - u8 dvo2_port; - u8 i2c2_pin; - u8 slave2_addr; - u8 ddc2_pin; - u8 capabilities; - u8 dvo_wiring;/* See DEVICE_WIRE_* above */ - u8 dvo2_wiring; - u16 extended_type; - u8 dvo_function; -} __packed; - -/* This one contains field offsets that are known to be common for all BDB - * versions. Notice that the meaning of the contents contents may still change, - * but at least the offsets are consistent. 
*/ - -/* Definitions for flags_1 */ -#define IBOOST_ENABLE (1<<3) - -struct common_child_dev_config { - u16 handle; - u16 device_type; - u8 not_common1[12]; - u8 dvo_port; - u8 not_common2[2]; - u8 ddc_pin; - u16 edid_ptr; - u8 obsolete; - u8 flags_1; - u8 not_common3[13]; - u8 iboost_level; -} __packed; - - -/* This field changes depending on the BDB version, so the most reliable way to - * read it is by checking the BDB version and reading the raw pointer. */ -union child_device_config { - /* This one is safe to be used anywhere, but the code should still check - * the BDB version. */ - u8 raw[33]; - /* This one should only be kept for legacy code. */ - struct old_child_dev_config old; - /* This one should also be safe to use anywhere, even without version - * checks. */ - struct common_child_dev_config common; -} __packed; - -struct bdb_general_definitions { - /* DDC GPIO */ - u8 crt_ddc_gmbus_pin; - - /* DPMS bits */ - u8 dpms_acpi:1; - u8 skip_boot_crt_detect:1; - u8 dpms_aim:1; - u8 rsvd1:5; /* finish byte */ - - /* boot device bits */ - u8 boot_display[2]; - u8 child_dev_size; - - /* - * Device info: - * If TV is present, it'll be at devices[0]. - * LVDS will be next, either devices[0] or [1], if present. - * On some platforms the number of device is 6. But could be as few as - * 4 if both TV and LVDS are missing. - * And the device num is related with the size of general definition - * block. It is obtained by using the following formula: - * number = (block_size - sizeof(bdb_general_definitions))/ - * defs->child_dev_size; - */ - uint8_t devices[0]; -} __packed; - -/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */ -#define MODE_MASK 0x3 - -struct bdb_lvds_options { - u8 panel_type; - u8 rsvd1; - /* LVDS capabilities, stored in a dword */ - u8 pfit_mode:2; - u8 pfit_text_mode_enhanced:1; - u8 pfit_gfx_mode_enhanced:1; - u8 pfit_ratio_auto:1; - u8 pixel_dither:1; - u8 lvds_edid:1; - u8 rsvd2:1; - u8 rsvd4; - /* LVDS Panel channel bits stored here */ - u32 lvds_panel_channel_bits; - /* LVDS SSC (Spread Spectrum Clock) bits stored here. 
*/ - u16 ssc_bits; - u16 ssc_freq; - u16 ssc_ddt; - /* Panel color depth defined here */ - u16 panel_color_depth; - /* LVDS panel type bits stored here */ - u32 dps_panel_type_bits; - /* LVDS backlight control type bits stored here */ - u32 blt_control_type_bits; -} __packed; - -/* LFP pointer table contains entries to the struct below */ -struct bdb_lvds_lfp_data_ptr { - u16 fp_timing_offset; /* offsets are from start of bdb */ - u8 fp_table_size; - u16 dvo_timing_offset; - u8 dvo_table_size; - u16 panel_pnp_id_offset; - u8 pnp_table_size; -} __packed; - -struct bdb_lvds_lfp_data_ptrs { - u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ - struct bdb_lvds_lfp_data_ptr ptr[16]; -} __packed; - -/* LFP data has 3 blocks per entry */ -struct lvds_fp_timing { - u16 x_res; - u16 y_res; - u32 lvds_reg; - u32 lvds_reg_val; - u32 pp_on_reg; - u32 pp_on_reg_val; - u32 pp_off_reg; - u32 pp_off_reg_val; - u32 pp_cycle_reg; - u32 pp_cycle_reg_val; - u32 pfit_reg; - u32 pfit_reg_val; - u16 terminator; -} __packed; - -struct lvds_dvo_timing { - u16 clock; /**< In 10khz */ - u8 hactive_lo; - u8 hblank_lo; - u8 hblank_hi:4; - u8 hactive_hi:4; - u8 vactive_lo; - u8 vblank_lo; - u8 vblank_hi:4; - u8 vactive_hi:4; - u8 hsync_off_lo; - u8 hsync_pulse_width; - u8 vsync_pulse_width:4; - u8 vsync_off:4; - u8 rsvd0:6; - u8 hsync_off_hi:2; - u8 h_image; - u8 v_image; - u8 max_hv; - u8 h_border; - u8 v_border; - u8 rsvd1:3; - u8 digital:2; - u8 vsync_positive:1; - u8 hsync_positive:1; - u8 rsvd2:1; -} __packed; - -struct lvds_pnp_id { - u16 mfg_name; - u16 product_code; - u32 serial; - u8 mfg_week; - u8 mfg_year; -} __packed; - -struct bdb_lvds_lfp_data_entry { - struct lvds_fp_timing fp_timing; - struct lvds_dvo_timing dvo_timing; - struct lvds_pnp_id pnp_id; -} __packed; - -struct bdb_lvds_lfp_data { - struct bdb_lvds_lfp_data_entry data[16]; -} __packed; - -#define BDB_BACKLIGHT_TYPE_NONE 0 -#define BDB_BACKLIGHT_TYPE_PWM 2 - -struct bdb_lfp_backlight_data_entry { - u8 type:2; - u8 active_low_pwm:1; - u8 obsolete1:5; - u16 pwm_freq_hz; - u8 min_brightness; - u8 obsolete2; - u8 obsolete3; -} __packed; - -struct bdb_lfp_backlight_data { - u8 entry_size; - struct bdb_lfp_backlight_data_entry data[16]; - u8 level[16]; -} __packed; - -struct aimdb_header { - char signature[16]; - char oem_device[20]; - u16 aimdb_version; - u16 aimdb_header_size; - u16 aimdb_size; -} __packed; - -struct aimdb_block { - u8 aimdb_id; - u16 aimdb_size; -} __packed; -struct vch_panel_data { - u16 fp_timing_offset; - u8 fp_timing_size; - u16 dvo_timing_offset; - u8 dvo_timing_size; - u16 text_fitting_offset; - u8 text_fitting_size; - u16 graphics_fitting_offset; - u8 graphics_fitting_size; -} __packed; - -struct vch_bdb_22 { - struct aimdb_block aimdb_block; - struct vch_panel_data panels[16]; -} __packed; - -struct bdb_sdvo_lvds_options { - u8 panel_backlight; - u8 h40_set_panel_type; - u8 panel_type; - u8 ssc_clk_freq; - u16 als_low_trip; - u16 als_high_trip; - u8 sclalarcoeff_tab_row_num; - u8 sclalarcoeff_tab_row_size; - u8 coefficient[8]; - u8 panel_misc_bits_1; - u8 panel_misc_bits_2; - u8 panel_misc_bits_3; - u8 panel_misc_bits_4; -} __packed; - - -#define BDB_DRIVER_FEATURE_NO_LVDS 0 -#define BDB_DRIVER_FEATURE_INT_LVDS 1 -#define BDB_DRIVER_FEATURE_SDVO_LVDS 2 -#define BDB_DRIVER_FEATURE_EDP 3 - -struct bdb_driver_features { - u8 boot_dev_algorithm:1; - u8 block_display_switch:1; - u8 allow_display_switch:1; - u8 hotplug_dvo:1; - u8 dual_view_zoom:1; - u8 int15h_hook:1; - u8 sprite_in_clone:1; - u8 
primary_lfp_id:1; - - u16 boot_mode_x; - u16 boot_mode_y; - u8 boot_mode_bpp; - u8 boot_mode_refresh; - - u16 enable_lfp_primary:1; - u16 selective_mode_pruning:1; - u16 dual_frequency:1; - u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ - u16 nt_clone_support:1; - u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ - u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ - u16 cui_aspect_scaling:1; - u16 preserve_aspect_ratio:1; - u16 sdvo_device_power_down:1; - u16 crt_hotplug:1; - u16 lvds_config:2; - u16 tv_hotplug:1; - u16 hdmi_config:2; - - u8 static_display:1; - u8 reserved2:7; - u16 legacy_crt_max_x; - u16 legacy_crt_max_y; - u8 legacy_crt_max_refresh; - - u8 hdmi_termination; - u8 custom_vbt_version; - /* Driver features data block */ - u16 rmpm_enabled:1; - u16 s2ddt_enabled:1; - u16 dpst_enabled:1; - u16 bltclt_enabled:1; - u16 adb_enabled:1; - u16 drrs_enabled:1; - u16 grs_enabled:1; - u16 gpmt_enabled:1; - u16 tbt_enabled:1; - u16 psr_enabled:1; - u16 ips_enabled:1; - u16 reserved3:4; - u16 pc_feature_valid:1; -} __packed; - -#define EDP_18BPP 0 -#define EDP_24BPP 1 -#define EDP_30BPP 2 -#define EDP_RATE_1_62 0 -#define EDP_RATE_2_7 1 -#define EDP_LANE_1 0 -#define EDP_LANE_2 1 -#define EDP_LANE_4 3 -#define EDP_PREEMPHASIS_NONE 0 -#define EDP_PREEMPHASIS_3_5dB 1 -#define EDP_PREEMPHASIS_6dB 2 -#define EDP_PREEMPHASIS_9_5dB 3 -#define EDP_VSWING_0_4V 0 -#define EDP_VSWING_0_6V 1 -#define EDP_VSWING_0_8V 2 -#define EDP_VSWING_1_2V 3 +#ifndef _INTEL_BIOS_H_ +#define _INTEL_BIOS_H_ struct edp_power_seq { u16 t1_t3; @@ -565,245 +38,37 @@ struct edp_power_seq { u16 t11_t12; } __packed; -struct edp_link_params { - u8 rate:4; - u8 lanes:4; - u8 preemphasis:4; - u8 vswing:4; -} __packed; - -struct bdb_edp { - struct edp_power_seq power_seqs[16]; - u32 color_depth; - struct edp_link_params link_params[16]; - u32 sdrrs_msa_timing_delay; - - /* ith bit indicates enabled/disabled for (i+1)th panel */ - u16 edp_s3d_feature; - u16 edp_t3_optimization; - u64 edp_vswing_preemph; /* v173 */ -} __packed; - -struct psr_table { - /* Feature bits */ - u8 full_link:1; - u8 require_aux_to_wakeup:1; - u8 feature_bits_rsvd:6; - - /* Wait times */ - u8 idle_frames:4; - u8 lines_to_wait:3; - u8 wait_times_rsvd:1; - - /* TP wake up time in multiple of 100 */ - u16 tp1_wakeup_time; - u16 tp2_tp3_wakeup_time; -} __packed; - -struct bdb_psr { - struct psr_table psr_table[16]; -} __packed; - -/* - * Driver<->VBIOS interaction occurs through scratch bits in - * GR18 & SWF*. 
- */ - -/* GR18 bits are set on display switch and hotkey events */ -#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ -#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ -#define GR18_HK_NONE (0x0<<3) -#define GR18_HK_LFP_STRETCH (0x1<<3) -#define GR18_HK_TOGGLE_DISP (0x2<<3) -#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ -#define GR18_HK_POPUP_DISABLED (0x6<<3) -#define GR18_HK_POPUP_ENABLED (0x7<<3) -#define GR18_HK_PFIT (0x8<<3) -#define GR18_HK_APM_CHANGE (0xa<<3) -#define GR18_HK_MULTIPLE (0xc<<3) -#define GR18_USER_INT_EN (1<<2) -#define GR18_A0000_FLUSH_EN (1<<1) -#define GR18_SMM_EN (1<<0) - -/* Set by driver, cleared by VBIOS */ -#define SWF00_YRES_SHIFT 16 -#define SWF00_XRES_SHIFT 0 -#define SWF00_RES_MASK 0xffff - -/* Set by VBIOS at boot time and driver at runtime */ -#define SWF01_TV2_FORMAT_SHIFT 8 -#define SWF01_TV1_FORMAT_SHIFT 0 -#define SWF01_TV_FORMAT_MASK 0xffff - -#define SWF10_VBIOS_BLC_I2C_EN (1<<29) -#define SWF10_GTT_OVERRIDE_EN (1<<28) -#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ -#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) -#define SWF10_OLD_TOGGLE 0x0 -#define SWF10_TOGGLE_LIST_1 0x1 -#define SWF10_TOGGLE_LIST_2 0x2 -#define SWF10_TOGGLE_LIST_3 0x3 -#define SWF10_TOGGLE_LIST_4 0x4 -#define SWF10_PANNING_EN (1<<23) -#define SWF10_DRIVER_LOADED (1<<22) -#define SWF10_EXTENDED_DESKTOP (1<<21) -#define SWF10_EXCLUSIVE_MODE (1<<20) -#define SWF10_OVERLAY_EN (1<<19) -#define SWF10_PLANEB_HOLDOFF (1<<18) -#define SWF10_PLANEA_HOLDOFF (1<<17) -#define SWF10_VGA_HOLDOFF (1<<16) -#define SWF10_ACTIVE_DISP_MASK 0xffff -#define SWF10_PIPEB_LFP2 (1<<15) -#define SWF10_PIPEB_EFP2 (1<<14) -#define SWF10_PIPEB_TV2 (1<<13) -#define SWF10_PIPEB_CRT2 (1<<12) -#define SWF10_PIPEB_LFP (1<<11) -#define SWF10_PIPEB_EFP (1<<10) -#define SWF10_PIPEB_TV (1<<9) -#define SWF10_PIPEB_CRT (1<<8) -#define SWF10_PIPEA_LFP2 (1<<7) -#define SWF10_PIPEA_EFP2 (1<<6) -#define SWF10_PIPEA_TV2 (1<<5) -#define SWF10_PIPEA_CRT2 (1<<4) -#define SWF10_PIPEA_LFP (1<<3) -#define SWF10_PIPEA_EFP (1<<2) -#define SWF10_PIPEA_TV (1<<1) -#define SWF10_PIPEA_CRT (1<<0) - -#define SWF11_MEMORY_SIZE_SHIFT 16 -#define SWF11_SV_TEST_EN (1<<15) -#define SWF11_IS_AGP (1<<14) -#define SWF11_DISPLAY_HOLDOFF (1<<13) -#define SWF11_DPMS_REDUCED (1<<12) -#define SWF11_IS_VBE_MODE (1<<11) -#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ -#define SWF11_DPMS_MASK 0x07 -#define SWF11_DPMS_OFF (1<<2) -#define SWF11_DPMS_SUSPEND (1<<1) -#define SWF11_DPMS_STANDBY (1<<0) -#define SWF11_DPMS_ON 0 - -#define SWF14_GFX_PFIT_EN (1<<31) -#define SWF14_TEXT_PFIT_EN (1<<30) -#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ -#define SWF14_POPUP_EN (1<<28) -#define SWF14_DISPLAY_HOLDOFF (1<<27) -#define SWF14_DISP_DETECT_EN (1<<26) -#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ -#define SWF14_DRIVER_STATUS (1<<24) -#define SWF14_OS_TYPE_WIN9X (1<<23) -#define SWF14_OS_TYPE_WINNT (1<<22) -/* 21:19 rsvd */ -#define SWF14_PM_TYPE_MASK 0x00070000 -#define SWF14_PM_ACPI_VIDEO (0x4 << 16) -#define SWF14_PM_ACPI (0x3 << 16) -#define SWF14_PM_APM_12 (0x2 << 16) -#define SWF14_PM_APM_11 (0x1 << 16) -#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ - /* if GR18 indicates a display switch */ -#define SWF14_DS_PIPEB_LFP2_EN (1<<15) -#define SWF14_DS_PIPEB_EFP2_EN (1<<14) -#define SWF14_DS_PIPEB_TV2_EN (1<<13) -#define SWF14_DS_PIPEB_CRT2_EN (1<<12) -#define 
SWF14_DS_PIPEB_LFP_EN (1<<11) -#define SWF14_DS_PIPEB_EFP_EN (1<<10) -#define SWF14_DS_PIPEB_TV_EN (1<<9) -#define SWF14_DS_PIPEB_CRT_EN (1<<8) -#define SWF14_DS_PIPEA_LFP2_EN (1<<7) -#define SWF14_DS_PIPEA_EFP2_EN (1<<6) -#define SWF14_DS_PIPEA_TV2_EN (1<<5) -#define SWF14_DS_PIPEA_CRT2_EN (1<<4) -#define SWF14_DS_PIPEA_LFP_EN (1<<3) -#define SWF14_DS_PIPEA_EFP_EN (1<<2) -#define SWF14_DS_PIPEA_TV_EN (1<<1) -#define SWF14_DS_PIPEA_CRT_EN (1<<0) - /* if GR18 indicates a panel fitting request */ -#define SWF14_PFIT_EN (1<<0) /* 0 means disable */ - /* if GR18 indicates an APM change request */ -#define SWF14_APM_HIBERNATE 0x4 -#define SWF14_APM_SUSPEND 0x3 -#define SWF14_APM_STANDBY 0x1 -#define SWF14_APM_RESTORE 0x0 - -/* Add the device class for LFP, TV, HDMI */ -#define DEVICE_TYPE_INT_LFP 0x1022 -#define DEVICE_TYPE_INT_TV 0x1009 -#define DEVICE_TYPE_HDMI 0x60D2 -#define DEVICE_TYPE_DP 0x68C6 -#define DEVICE_TYPE_eDP 0x78C6 - -#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) -#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14) -#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13) -#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12) -#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11) -#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10) -#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9) -#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8) -#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6) -#define DEVICE_TYPE_LVDS_SINGALING (1 << 5) -#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4) -#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3) -#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2) -#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) -#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) - -/* - * Bits we care about when checking for DEVICE_TYPE_eDP - * Depending on the system, the other bits may or may not - * be set for eDP outputs. - */ -#define DEVICE_TYPE_eDP_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_DUAL_CHANNEL | \ - DEVICE_TYPE_LVDS_SINGALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - -/* define the DVO port for HDMI output type */ -#define DVO_B 1 -#define DVO_C 2 -#define DVO_D 3 - -/* Possible values for the "DVO Port" field for versions >= 155: */ -#define DVO_PORT_HDMIA 0 -#define DVO_PORT_HDMIB 1 -#define DVO_PORT_HDMIC 2 -#define DVO_PORT_HDMID 3 -#define DVO_PORT_LVDS 4 -#define DVO_PORT_TV 5 -#define DVO_PORT_CRT 6 -#define DVO_PORT_DPB 7 -#define DVO_PORT_DPC 8 -#define DVO_PORT_DPD 9 -#define DVO_PORT_DPA 10 -#define DVO_PORT_DPE 11 -#define DVO_PORT_HDMIE 12 -#define DVO_PORT_MIPIA 21 -#define DVO_PORT_MIPIB 22 -#define DVO_PORT_MIPIC 23 -#define DVO_PORT_MIPID 24 +/* MIPI Sequence Block definitions */ +enum mipi_seq { + MIPI_SEQ_END = 0, + MIPI_SEQ_ASSERT_RESET, + MIPI_SEQ_INIT_OTP, + MIPI_SEQ_DISPLAY_ON, + MIPI_SEQ_DISPLAY_OFF, + MIPI_SEQ_DEASSERT_RESET, + MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ + MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ + MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ + MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */ + MIPI_SEQ_POWER_ON, /* sequence block v3+ */ + MIPI_SEQ_POWER_OFF, /* sequence block v3+ */ + MIPI_SEQ_MAX +}; -/* Block 52 contains MIPI Panel info - * 6 such entries will be there. Index into the correct - * entry is based on the panel_index in #40 LFP - */
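(Editorial aside, not part of the patch: a minimal sketch of how a driver could dispatch one element list described by the mipi_seq_element values above. The 4-byte microsecond delay payload matches the v1 layout; other payloads are deliberately left out, because v1/v2 sequence blocks carry no per-element size field and an unknown element therefore cannot be skipped safely.)

/* Hypothetical helper; relies on <linux/delay.h> and <asm/unaligned.h>. */
static void exec_mipi_sequence_elements(const u8 *data, const u8 *end)
{
	while (data < end) {
		u8 element = *data++;

		if (element == MIPI_SEQ_ELEM_END)
			break;

		switch (element) {
		case MIPI_SEQ_ELEM_DELAY: {
			/* v1: 4-byte little-endian delay in microseconds */
			u32 us = get_unaligned_le32(data);

			data += 4;
			usleep_range(us, us + 10);
			break;
		}
		default:
			/* no size field in v1/v2, so bail out */
			return;
		}
	}
}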
-#define MAX_MIPI_CONFIGURATIONS 6 +enum mipi_seq_element { + MIPI_SEQ_ELEM_END = 0, + MIPI_SEQ_ELEM_SEND_PKT, + MIPI_SEQ_ELEM_DELAY, + MIPI_SEQ_ELEM_GPIO, + MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */ + MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */ + MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */ + MIPI_SEQ_ELEM_MAX +}; #define MIPI_DSI_UNDEFINED_PANEL_ID 0 #define MIPI_DSI_GENERIC_PANEL_ID 1 -/* - * PMIC vs SoC Backlight support specified in pwm_blc - * field in mipi_config block below. -*/ -#define PPS_BLC_PMIC 0 -#define PPS_BLC_SOC 1 - struct mipi_config { u16 panel_id; @@ -821,6 +86,8 @@ struct mipi_config { u32 video_transfer_mode:2; u32 cabc_supported:1; +#define PPS_BLC_PMIC 0 +#define PPS_BLC_SOC 1 u32 pwm_blc:1; /* Bit 13:10 */ @@ -924,12 +191,7 @@ struct mipi_config { } __packed; -/* Block 52 contains MIPI configuration block - * 6 * bdb_mipi_config, followed by 6 pps data - * block below - * - * all delays have a unit of 100us - */ +/* all delays have a unit of 100us */ struct mipi_pps_data { u16 panel_on_delay; u16 bl_enable_delay; @@ -938,57 +200,4 @@ struct mipi_pps_data { u16 panel_power_cycle_delay; } __packed; -struct bdb_mipi_config { - struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; - struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; -} __packed; - -/* Block 53 contains MIPI sequences as needed by the panel - * for enabling it. This block can be variable in size and - * can be a maximum of 6 blocks - */ -struct bdb_mipi_sequence { - u8 version; - u8 data[0]; -} __packed; - -/* MIPI Sequence Block definitions */ -enum mipi_seq { - MIPI_SEQ_END = 0, - MIPI_SEQ_ASSERT_RESET, - MIPI_SEQ_INIT_OTP, - MIPI_SEQ_DISPLAY_ON, - MIPI_SEQ_DISPLAY_OFF, - MIPI_SEQ_DEASSERT_RESET, - MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ - MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ - MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ - MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */ - MIPI_SEQ_POWER_ON, /* sequence block v3+ */ - MIPI_SEQ_POWER_OFF, /* sequence block v3+ */ - MIPI_SEQ_MAX -}; - -enum mipi_seq_element { - MIPI_SEQ_ELEM_END = 0, - MIPI_SEQ_ELEM_SEND_PKT, - MIPI_SEQ_ELEM_DELAY, - MIPI_SEQ_ELEM_GPIO, - MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */ - MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */ - MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */ - MIPI_SEQ_ELEM_MAX -}; - -enum mipi_gpio_pin_index { - MIPI_GPIO_UNDEFINED = 0, - MIPI_GPIO_PANEL_ENABLE, - MIPI_GPIO_BL_ENABLE, - MIPI_GPIO_PWM_ENABLE, - MIPI_GPIO_RESET_N, - MIPI_GPIO_PWR_DOWN_R, - MIPI_GPIO_STDBY_RST_N, - MIPI_GPIO_MAX -}; - #endif /* _INTEL_BIOS_H_ */ diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c new file mode 100644 index 000000000000..1b3f97449395 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_color.c @@ -0,0 +1,553 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + */ + +#include "intel_drv.h" + +#define CTM_COEFF_SIGN (1ULL << 63) + +#define CTM_COEFF_1_0 (1ULL << 32) +#define CTM_COEFF_2_0 (CTM_COEFF_1_0 << 1) +#define CTM_COEFF_4_0 (CTM_COEFF_2_0 << 1) +#define CTM_COEFF_8_0 (CTM_COEFF_4_0 << 1) +#define CTM_COEFF_0_5 (CTM_COEFF_1_0 >> 1) +#define CTM_COEFF_0_25 (CTM_COEFF_0_5 >> 1) +#define CTM_COEFF_0_125 (CTM_COEFF_0_25 >> 1) + +#define CTM_COEFF_LIMITED_RANGE ((235ULL - 16ULL) * CTM_COEFF_1_0 / 255) + +#define CTM_COEFF_NEGATIVE(coeff) (((coeff) & CTM_COEFF_SIGN) != 0) +#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1)) + +#define LEGACY_LUT_LENGTH (sizeof(struct drm_color_lut) * 256) + +/* + * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point + * format). This macro takes the coefficient we want transformed and the + * number of fractional bits. + * + * We only have a 9 bits precision window which slides depending on the value + * of the CTM coefficient and we write the value from bit 3. We also round the + * value. + */ +#define I9XX_CSC_COEFF_FP(coeff, fbits) \ + (clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8) + +#define I9XX_CSC_COEFF_LIMITED_RANGE \ + I9XX_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9) +#define I9XX_CSC_COEFF_1_0 \ + ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) + +static bool crtc_state_is_legacy(struct drm_crtc_state *state) +{ + return !state->degamma_lut && + !state->ctm && + state->gamma_lut && + state->gamma_lut->length == LEGACY_LUT_LENGTH; +} + +/* + * When using limited range, multiply the matrix given by userspace by + * the matrix that we would use for the limited range. We do the + * multiplication in U2.30 format. + */ +static void ctm_mult_by_limited(uint64_t *result, int64_t *input) +{ + int i; + + for (i = 0; i < 9; i++) + result[i] = 0; + + for (i = 0; i < 3; i++) { + int64_t user_coeff = input[i * 3 + i]; + uint64_t limited_coeff = CTM_COEFF_LIMITED_RANGE >> 2; + uint64_t abs_coeff = clamp_val(CTM_COEFF_ABS(user_coeff), + 0, + CTM_COEFF_4_0 - 1) >> 2; + + result[i * 3 + i] = (limited_coeff * abs_coeff) >> 27; + if (CTM_COEFF_NEGATIVE(user_coeff)) + result[i * 3 + i] |= CTM_COEFF_SIGN; + } +} + +/* Set up the pipe CSC unit. */ +static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state) +{ + struct drm_crtc *crtc = crtc_state->crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int i, pipe = intel_crtc->pipe; + uint16_t coeffs[9] = { 0, }; + + if (crtc_state->ctm) { + struct drm_color_ctm *ctm = + (struct drm_color_ctm *)crtc_state->ctm->data; + uint64_t input[9] = { 0, }; + + if (intel_crtc->config->limited_color_range) { + ctm_mult_by_limited(input, ctm->matrix); + } else { + for (i = 0; i < ARRAY_SIZE(input); i++) + input[i] = ctm->matrix[i]; + } + + /* + * Convert fixed point S31.32 input to format supported by the + * hardware. 
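 * (Editorial worked example, not from the patch: with 9 fractional bits,
 * a coefficient of 0.5 (CTM_COEFF_0_5 = 1ULL << 31) evaluates as
 * ((1ULL << 31) >> (32 - 9 - 3)) + 4 = 0x804, which the clamp and the
 * final & 0xff8 mask turn into 0x800, i.e. 0.5 in the 9-bit mantissa.)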
+ */ + for (i = 0; i < ARRAY_SIZE(coeffs); i++) { + uint64_t abs_coeff = ((1ULL << 63) - 1) & input[i]; + + /* + * Clamp input value to min/max supported by + * hardware. + */ + abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1); + + /* sign bit */ + if (CTM_COEFF_NEGATIVE(input[i])) + coeffs[i] |= 1 << 15; + + if (abs_coeff < CTM_COEFF_0_125) + coeffs[i] |= (3 << 12) | + I9XX_CSC_COEFF_FP(abs_coeff, 12); + else if (abs_coeff < CTM_COEFF_0_25) + coeffs[i] |= (2 << 12) | + I9XX_CSC_COEFF_FP(abs_coeff, 11); + else if (abs_coeff < CTM_COEFF_0_5) + coeffs[i] |= (1 << 12) | + I9XX_CSC_COEFF_FP(abs_coeff, 10); + else if (abs_coeff < CTM_COEFF_1_0) + coeffs[i] |= I9XX_CSC_COEFF_FP(abs_coeff, 9); + else if (abs_coeff < CTM_COEFF_2_0) + coeffs[i] |= (7 << 12) | + I9XX_CSC_COEFF_FP(abs_coeff, 8); + else + coeffs[i] |= (6 << 12) | + I9XX_CSC_COEFF_FP(abs_coeff, 7); + } + } else { + /* + * Load an identity matrix if no coefficients are provided. + * + * TODO: Check what kind of values actually come out of the + * pipe with these coeff/postoff values and adjust to get the + * best accuracy. Perhaps we even need to take the bpc value + * into consideration. + */ + for (i = 0; i < 3; i++) { + if (intel_crtc->config->limited_color_range) + coeffs[i * 3 + i] = + I9XX_CSC_COEFF_LIMITED_RANGE; + else + coeffs[i * 3 + i] = I9XX_CSC_COEFF_1_0; + } + } + + I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]); + I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16); + + I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]); + I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16); + + I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]); + I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16); + + I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); + I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); + I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); + + if (INTEL_INFO(dev)->gen > 6) { + uint16_t postoff = 0; + + if (intel_crtc->config->limited_color_range) + postoff = (16 * (1 << 12) / 255) & 0x1fff; + + I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); + I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); + + I915_WRITE(PIPE_CSC_MODE(pipe), 0); + } else { + uint32_t mode = CSC_MODE_YUV_TO_RGB; + + if (intel_crtc->config->limited_color_range) + mode |= CSC_BLACK_SCREEN_OFFSET; + + I915_WRITE(PIPE_CSC_MODE(pipe), mode); + } +} + +/* + * Set up the pipe CSC unit on CherryView. + */ +static void cherryview_load_csc_matrix(struct drm_crtc_state *state) +{ + struct drm_crtc *crtc = state->crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int pipe = to_intel_crtc(crtc)->pipe; + uint32_t mode; + + if (state->ctm) { + struct drm_color_ctm *ctm = + (struct drm_color_ctm *) state->ctm->data; + uint16_t coeffs[9] = { 0, }; + int i; + + for (i = 0; i < ARRAY_SIZE(coeffs); i++) { + uint64_t abs_coeff = + ((1ULL << 63) - 1) & ctm->matrix[i]; + + /* Round coefficient. */ + abs_coeff += 1 << (32 - 13); + /* Clamp to hardware limits. */ + abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1); + + /* Write coefficients in S3.12 format. 
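 * (Editorial worked example, not from the patch: a CTM coefficient of
 * 1.0, i.e. 1ULL << 32 in S31.32, is rounded to 0x100080000, keeps its
 * integer bit through the clamp, and packs to (1 << 12) | 0x000 =
 * 0x1000, which is exactly 1.000 in S3.12.)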
*/ + if (ctm->matrix[i] & (1ULL << 63)) + coeffs[i] = 1 << 15; + coeffs[i] |= ((abs_coeff >> 32) & 7) << 12; + coeffs[i] |= (abs_coeff >> 20) & 0xfff; + } + + I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe), + coeffs[1] << 16 | coeffs[0]); + I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe), + coeffs[3] << 16 | coeffs[2]); + I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe), + coeffs[5] << 16 | coeffs[4]); + I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe), + coeffs[7] << 16 | coeffs[6]); + I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]); + } + + mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); + if (!crtc_state_is_legacy(state)) { + mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | + (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); + } + I915_WRITE(CGM_PIPE_MODE(pipe), mode); +} + +void intel_color_set_csc(struct drm_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc_state->crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + if (dev_priv->display.load_csc_matrix) + dev_priv->display.load_csc_matrix(crtc_state); +} + +/* Loads the legacy palette/gamma unit for the CRTC. */ +static void i9xx_load_luts_internal(struct drm_crtc *crtc, + struct drm_property_blob *blob) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; + int i; + + if (HAS_GMCH_DISPLAY(dev)) { + if (intel_crtc->config->has_dsi_encoder) + assert_dsi_pll_enabled(dev_priv); + else + assert_pll_enabled(dev_priv, pipe); + } + + if (blob) { + struct drm_color_lut *lut = (struct drm_color_lut *) blob->data; + for (i = 0; i < 256; i++) { + uint32_t word = + (drm_color_lut_extract(lut[i].red, 8) << 16) | + (drm_color_lut_extract(lut[i].green, 8) << 8) | + drm_color_lut_extract(lut[i].blue, 8); + + if (HAS_GMCH_DISPLAY(dev)) + I915_WRITE(PALETTE(pipe, i), word); + else + I915_WRITE(LGC_PALETTE(pipe, i), word); + } + } else { + for (i = 0; i < 256; i++) { + uint32_t word = (i << 16) | (i << 8) | i; + + if (HAS_GMCH_DISPLAY(dev)) + I915_WRITE(PALETTE(pipe, i), word); + else + I915_WRITE(LGC_PALETTE(pipe, i), word); + } + } +} + +static void i9xx_load_luts(struct drm_crtc_state *crtc_state) +{ + i9xx_load_luts_internal(crtc_state->crtc, crtc_state->gamma_lut); +} + +/* Loads the legacy palette/gamma unit for the CRTC on Haswell. */ +static void haswell_load_luts(struct drm_crtc_state *crtc_state) +{ + struct drm_crtc *crtc = crtc_state->crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *intel_crtc_state = + to_intel_crtc_state(crtc_state); + bool reenable_ips = false; + + /* + * Workaround : Do not read or write the pipe palette/gamma data while + * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. + */ + if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled && + (intel_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)) { + hsw_disable_ips(intel_crtc); + reenable_ips = true; + } + + intel_crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT; + I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); + + i9xx_load_luts(crtc_state); + + if (reenable_ips) + hsw_enable_ips(intel_crtc); +} + +/* Loads the palette/gamma unit for the CRTC on Broadwell+. 
*/ +static void broadwell_load_luts(struct drm_crtc_state *state) +{ + struct drm_crtc *crtc = state->crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc_state *intel_state = to_intel_crtc_state(state); + enum pipe pipe = to_intel_crtc(crtc)->pipe; + uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; + + if (crtc_state_is_legacy(state)) { + haswell_load_luts(state); + return; + } + + I915_WRITE(PREC_PAL_INDEX(pipe), + PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT); + + if (state->degamma_lut) { + struct drm_color_lut *lut = + (struct drm_color_lut *) state->degamma_lut->data; + + for (i = 0; i < lut_size; i++) { + uint32_t word = + drm_color_lut_extract(lut[i].red, 10) << 20 | + drm_color_lut_extract(lut[i].green, 10) << 10 | + drm_color_lut_extract(lut[i].blue, 10); + + I915_WRITE(PREC_PAL_DATA(pipe), word); + } + } else { + for (i = 0; i < lut_size; i++) { + uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1); + + I915_WRITE(PREC_PAL_DATA(pipe), + (v << 20) | (v << 10) | v); + } + } + + if (state->gamma_lut) { + struct drm_color_lut *lut = + (struct drm_color_lut *) state->gamma_lut->data; + + for (i = 0; i < lut_size; i++) { + uint32_t word = + (drm_color_lut_extract(lut[i].red, 10) << 20) | + (drm_color_lut_extract(lut[i].green, 10) << 10) | + drm_color_lut_extract(lut[i].blue, 10); + + I915_WRITE(PREC_PAL_DATA(pipe), word); + } + + /* Program the max register to clamp values > 1.0. */ + I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), + drm_color_lut_extract(lut[i].red, 16)); + I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), + drm_color_lut_extract(lut[i].green, 16)); + I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), + drm_color_lut_extract(lut[i].blue, 16)); + } else { + for (i = 0; i < lut_size; i++) { + uint32_t v = (i * ((1 << 10) - 1)) / (lut_size - 1); + + I915_WRITE(PREC_PAL_DATA(pipe), + (v << 20) | (v << 10) | v); + } + + I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1); + I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1); + I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1); + } + + intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT; + I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT); + POSTING_READ(GAMMA_MODE(pipe)); + + /* + * Reset the index, otherwise it prevents the legacy palette from + * being written properly. + */ + I915_WRITE(PREC_PAL_INDEX(pipe), 0); +} + +/* Loads the palette/gamma unit for the CRTC on CherryView. */ +static void cherryview_load_luts(struct drm_crtc_state *state) +{ + struct drm_crtc *crtc = state->crtc; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = to_intel_crtc(crtc)->pipe; + struct drm_color_lut *lut; + uint32_t i, lut_size; + uint32_t word0, word1; + + if (crtc_state_is_legacy(state)) { + /* Turn off degamma/gamma on CGM block. */ + I915_WRITE(CGM_PIPE_MODE(pipe), + (state->ctm ? CGM_PIPE_MODE_CSC : 0)); + i9xx_load_luts_internal(crtc, state->gamma_lut); + return; + } + + if (state->degamma_lut) { + lut = (struct drm_color_lut *) state->degamma_lut->data; + lut_size = INTEL_INFO(dev)->color.degamma_lut_size; + for (i = 0; i < lut_size; i++) { + /* Write LUT in U0.14 format. 
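 * (Editorial note: drm_color_lut_extract() rescales the 16-bit user value
 * with rounding, so full scale 0xffff maps to 0x3fff here, the U0.14
 * maximum.)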
*/ + word0 = + (drm_color_lut_extract(lut[i].green, 14) << 16) | + drm_color_lut_extract(lut[i].blue, 14); + word1 = drm_color_lut_extract(lut[i].red, 14); + + I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0), word0); + I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1), word1); + } + } + + if (state->gamma_lut) { + lut = (struct drm_color_lut *) state->gamma_lut->data; + lut_size = INTEL_INFO(dev)->color.gamma_lut_size; + for (i = 0; i < lut_size; i++) { + /* Write LUT in U0.10 format. */ + word0 = + (drm_color_lut_extract(lut[i].green, 10) << 16) | + drm_color_lut_extract(lut[i].blue, 10); + word1 = drm_color_lut_extract(lut[i].red, 10); + + I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0), word0); + I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1); + } + } + + I915_WRITE(CGM_PIPE_MODE(pipe), + (state->ctm ? CGM_PIPE_MODE_CSC : 0) | + (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | + (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0)); + + /* + * Also program a linear LUT in the legacy block (behind the + * CGM block). + */ + i9xx_load_luts_internal(crtc, NULL); +} + +void intel_color_load_luts(struct drm_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc_state->crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + dev_priv->display.load_luts(crtc_state); +} + +int intel_color_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc->dev; + size_t gamma_length, degamma_length; + + degamma_length = INTEL_INFO(dev)->color.degamma_lut_size * + sizeof(struct drm_color_lut); + gamma_length = INTEL_INFO(dev)->color.gamma_lut_size * + sizeof(struct drm_color_lut); + + /* + * We allow both degamma & gamma luts at the right size or + * NULL. + */ + if ((!crtc_state->degamma_lut || + crtc_state->degamma_lut->length == degamma_length) && + (!crtc_state->gamma_lut || + crtc_state->gamma_lut->length == gamma_length)) + return 0; + + /* + * We also allow no degamma lut and a gamma lut at the legacy + * size (256 entries). + */ + if (!crtc_state->degamma_lut && + crtc_state->gamma_lut && + crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) + return 0; + + return -EINVAL; +} + +void intel_color_init(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + + drm_mode_crtc_set_gamma_size(crtc, 256); + + if (IS_CHERRYVIEW(dev)) { + dev_priv->display.load_csc_matrix = cherryview_load_csc_matrix; + dev_priv->display.load_luts = cherryview_load_luts; + } else if (IS_HASWELL(dev)) { + dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; + dev_priv->display.load_luts = haswell_load_luts; + } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev) || + IS_BROXTON(dev) || IS_KABYLAKE(dev)) { + dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix; + dev_priv->display.load_luts = broadwell_load_luts; + } else { + dev_priv->display.load_luts = i9xx_load_luts; + } + + /* Enable color management support when we have degamma & gamma LUTs. 
*/ + if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && + INTEL_INFO(dev)->color.gamma_lut_size != 0) + drm_helper_crtc_enable_color_mgmt(crtc, + INTEL_INFO(dev)->color.degamma_lut_size, + INTEL_INFO(dev)->color.gamma_lut_size); +} diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 505fc5cf26f8..a2a31fd01d1d 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -120,22 +120,16 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) static void intel_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->base.dev; - int dotclock; - pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); - dotclock = pipe_config->port_clock; - - if (HAS_PCH_SPLIT(dev)) - ironlake_check_encoder_dotclock(pipe_config, dotclock); - - pipe_config->base.adjusted_mode.crtc_clock = dotclock; + pipe_config->base.adjusted_mode.crtc_clock = pipe_config->port_clock; } static void hsw_crt_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + intel_ddi_get_config(encoder, pipe_config); pipe_config->base.adjusted_mode.flags &= ~(DRM_MODE_FLAG_PHSYNC | @@ -143,6 +137,8 @@ static void hsw_crt_get_config(struct intel_encoder *encoder, DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC); pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder); + + pipe_config->base.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } /* Note: The caller is required to filter out dpms modes not supported by the @@ -222,18 +218,26 @@ intel_crt_mode_valid(struct drm_connector *connector, { struct drm_device *dev = connector->dev; int max_dotclk = to_i915(dev)->max_dotclk_freq; + int max_clock; - int max_clock = 0; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; if (mode->clock < 25000) return MODE_CLOCK_LOW; - if (IS_GEN2(dev)) - max_clock = 350000; - else + if (HAS_PCH_LPT(dev)) + max_clock = 180000; + else if (IS_VALLEYVIEW(dev)) + /* + * 270 MHz due to current DPLL limits, + * DAC limit supposedly 355 MHz. 
+ */ + max_clock = 270000; + else if (IS_GEN3(dev) || IS_GEN4(dev)) max_clock = 400000; + else + max_clock = 350000; if (mode->clock > max_clock) return MODE_CLOCK_HIGH; @@ -261,15 +265,9 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder, pipe_config->pipe_bpp = 24; /* FDI must always be 2.7 GHz */ - if (HAS_DDI(dev)) { - pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL; + if (HAS_DDI(dev)) pipe_config->port_clock = 135000 * 2; - pipe_config->dpll_hw_state.wrpll = 0; - pipe_config->dpll_hw_state.spll = - SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; - } - return true; } @@ -652,6 +650,8 @@ intel_crt_detect(struct drm_connector *connector, bool force) else if (INTEL_INFO(dev)->gen < 4) status = intel_crt_load_detect(crt, to_intel_crtc(connector->state->crtc)->pipe); + else if (i915.load_detect_test) + status = connector_status_disconnected; else status = connector_status_unknown; intel_release_load_detect_pipe(connector, &tmp, &ctx); diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 902054efb902..3f57cb94d9ad 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -188,28 +188,49 @@ static const struct stepping_info bxt_stepping_info[] = { {'B', '0'}, {'B', '1'}, {'B', '2'} }; -static const struct stepping_info *intel_get_stepping_info(struct drm_device *dev) +static const struct stepping_info no_stepping_info = { '*', '*' }; + +static const struct stepping_info * +intel_get_stepping_info(struct drm_i915_private *dev_priv) { const struct stepping_info *si; unsigned int size; - if (IS_KABYLAKE(dev)) { + if (IS_KABYLAKE(dev_priv)) { size = ARRAY_SIZE(kbl_stepping_info); si = kbl_stepping_info; - } else if (IS_SKYLAKE(dev)) { + } else if (IS_SKYLAKE(dev_priv)) { size = ARRAY_SIZE(skl_stepping_info); si = skl_stepping_info; - } else if (IS_BROXTON(dev)) { + } else if (IS_BROXTON(dev_priv)) { size = ARRAY_SIZE(bxt_stepping_info); si = bxt_stepping_info; } else { - return NULL; + size = 0; } - if (INTEL_REVID(dev) < size) - return si + INTEL_REVID(dev); + if (INTEL_REVID(dev_priv) < size) + return si + INTEL_REVID(dev_priv); + + return &no_stepping_info; +} + +static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) +{ + uint32_t val, mask; + + mask = DC_STATE_DEBUG_MASK_MEMORY_UP; + + if (IS_BROXTON(dev_priv)) + mask |= DC_STATE_DEBUG_MASK_CORES; - return NULL; + /* The below bit doesn't need to be cleared ever afterwards */ + val = I915_READ(DC_STATE_DEBUG); + if ((val & mask) != mask) { + val |= mask; + I915_WRITE(DC_STATE_DEBUG, val); + POSTING_READ(DC_STATE_DEBUG); + } } /** @@ -220,19 +241,19 @@ static const struct stepping_info *intel_get_stepping_info(struct drm_device *de * Every time display comes back from low power state this function is called to * copy the firmware from internal memory to registers. */
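(Editorial aside, not part of the patch: the no_stepping_info sentinel above means intel_get_stepping_info() can no longer return NULL, so callers may dereference the result unconditionally; unknown platforms or out-of-range revisions simply read back as "**". A hypothetical caller for illustration:)

static void print_stepping(struct drm_i915_private *dev_priv)
{
	const struct stepping_info *si = intel_get_stepping_info(dev_priv);

	/* no NULL check needed any more */
	DRM_DEBUG_KMS("stepping %c%c\n", si->stepping, si->substepping);
}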
-bool intel_csr_load_program(struct drm_i915_private *dev_priv) +void intel_csr_load_program(struct drm_i915_private *dev_priv) { u32 *payload = dev_priv->csr.dmc_payload; uint32_t i, fw_size; if (!IS_GEN9(dev_priv)) { DRM_ERROR("No CSR support available for this platform\n"); - return false; + return; } if (!dev_priv->csr.dmc_payload) { DRM_ERROR("Tried to program CSR with empty payload\n"); - return false; + return; } fw_size = dev_priv->csr.dmc_fw_size; @@ -246,19 +267,17 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, dev_priv->csr.dc_state = 0; - return true; + gen9_set_dc_state_debugmask(dev_priv); } static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, const struct firmware *fw) { - struct drm_device *dev = dev_priv->dev; struct intel_css_header *css_header; struct intel_package_header *package_header; struct intel_dmc_header *dmc_header; struct intel_csr *csr = &dev_priv->csr; - const struct stepping_info *stepping_info = intel_get_stepping_info(dev); - char stepping, substepping; + const struct stepping_info *si = intel_get_stepping_info(dev_priv); uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; uint32_t i; uint32_t *dmc_payload; @@ -266,14 +285,6 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, if (!fw) return NULL; - if (!stepping_info) { - DRM_ERROR("Unknown stepping info, firmware loading failed\n"); - return NULL; - } - - stepping = stepping_info->stepping; - substepping = stepping_info->substepping; - /* Extract CSS Header information */ css_header = (struct intel_css_header *)fw->data; if (sizeof(struct intel_css_header) != @@ -285,7 +296,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, csr->version = css_header->version; - if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && + if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && csr->version < SKL_CSR_VERSION_REQUIRED) { DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u," " please upgrade to v%u.%u or later" @@ -313,11 +324,11 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, /* Search for dmc_offset to find firmware binary. 
*/ for (i = 0; i < package_header->num_entries; i++) { if (package_header->fw_info[i].substepping == '*' && - stepping == package_header->fw_info[i].stepping) { + si->stepping == package_header->fw_info[i].stepping) { dmc_offset = package_header->fw_info[i].offset; break; - } else if (stepping == package_header->fw_info[i].stepping && - substepping == package_header->fw_info[i].substepping) { + } else if (si->stepping == package_header->fw_info[i].stepping && + si->substepping == package_header->fw_info[i].substepping) { dmc_offset = package_header->fw_info[i].offset; break; } else if (package_header->fw_info[i].stepping == '*' && @@ -325,7 +336,8 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, dmc_offset = package_header->fw_info[i].offset; } if (dmc_offset == CSR_DEFAULT_FW_OFFSET) { - DRM_ERROR("Firmware not supported for %c stepping\n", stepping); + DRM_ERROR("Firmware not supported for %c stepping\n", + si->stepping); return NULL; } readcount += dmc_offset; @@ -371,9 +383,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv, return NULL; } - memcpy(dmc_payload, &fw->data[readcount], nbytes); - - return dmc_payload; + return memcpy(dmc_payload, &fw->data[readcount], nbytes); } static void csr_load_work_fn(struct work_struct *work) @@ -388,18 +398,12 @@ static void csr_load_work_fn(struct work_struct *work) ret = request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->dev->pdev->dev); - if (!fw) - goto out; - - dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); - if (!dev_priv->csr.dmc_payload) - goto out; + if (fw) + dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); - /* load csr program during system boot, as needed for DC states */ - intel_csr_load_program(dev_priv); - -out: if (dev_priv->csr.dmc_payload) { + intel_csr_load_program(dev_priv); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); DRM_INFO("Finished loading %s (v%u.%u)\n", diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 62de9f4bce09..921edf183d22 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -315,6 +315,9 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, *dig_port = enc_to_mst(encoder)->primary; *port = (*dig_port)->port; break; + default: + WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); + /* fallthrough and treat as unknown */ case INTEL_OUTPUT_DISPLAYPORT: case INTEL_OUTPUT_EDP: case INTEL_OUTPUT_HDMI: @@ -326,9 +329,6 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder, *dig_port = NULL; *port = PORT_E; break; - default: - WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); - break; } } @@ -360,7 +360,7 @@ skl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries) static const struct ddi_buf_trans * skl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { - if (dev_priv->edp_low_vswing) { + if (dev_priv->vbt.edp.low_vswing) { if (IS_SKL_ULX(dev_priv) || IS_KBL_ULX(dev_priv)) { *n_entries = ARRAY_SIZE(skl_y_ddi_translations_edp); return skl_y_ddi_translations_edp; @@ -629,6 +629,10 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) break; } + rx_ctl_val &= ~FDI_RX_ENABLE; + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); + POSTING_READ(FDI_RX_CTL(PIPE_A)); + temp = I915_READ(DDI_BUF_CTL(PORT_E)); temp &= ~DDI_BUF_CTL_ENABLE; I915_WRITE(DDI_BUF_CTL(PORT_E), temp); @@ -643,10 +647,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc) intel_wait_ddi_buf_idle(dev_priv, PORT_E); - rx_ctl_val &= 
~FDI_RX_ENABLE; - I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); - POSTING_READ(FDI_RX_CTL(PIPE_A)); - /* Reset FDI_RX_MISC pwrdn lanes */ temp = I915_READ(FDI_RX_MISC(PIPE_A)); temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); @@ -724,160 +724,6 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) } #define LC_FREQ 2700 -#define LC_FREQ_2K U64_C(LC_FREQ * 2000) - -#define P_MIN 2 -#define P_MAX 64 -#define P_INC 2 - -/* Constraints for PLL good behavior */ -#define REF_MIN 48 -#define REF_MAX 400 -#define VCO_MIN 2400 -#define VCO_MAX 4800 - -#define abs_diff(a, b) ({ \ - typeof(a) __a = (a); \ - typeof(b) __b = (b); \ - (void) (&__a == &__b); \ - __a > __b ? (__a - __b) : (__b - __a); }) - -struct hsw_wrpll_rnp { - unsigned p, n2, r2; -}; - -static unsigned hsw_wrpll_get_budget_for_freq(int clock) -{ - unsigned budget; - - switch (clock) { - case 25175000: - case 25200000: - case 27000000: - case 27027000: - case 37762500: - case 37800000: - case 40500000: - case 40541000: - case 54000000: - case 54054000: - case 59341000: - case 59400000: - case 72000000: - case 74176000: - case 74250000: - case 81000000: - case 81081000: - case 89012000: - case 89100000: - case 108000000: - case 108108000: - case 111264000: - case 111375000: - case 148352000: - case 148500000: - case 162000000: - case 162162000: - case 222525000: - case 222750000: - case 296703000: - case 297000000: - budget = 0; - break; - case 233500000: - case 245250000: - case 247750000: - case 253250000: - case 298000000: - budget = 1500; - break; - case 169128000: - case 169500000: - case 179500000: - case 202000000: - budget = 2000; - break; - case 256250000: - case 262500000: - case 270000000: - case 272500000: - case 273750000: - case 280750000: - case 281250000: - case 286000000: - case 291750000: - budget = 4000; - break; - case 267250000: - case 268500000: - budget = 5000; - break; - default: - budget = 1000; - break; - } - - return budget; -} - -static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, - unsigned r2, unsigned n2, unsigned p, - struct hsw_wrpll_rnp *best) -{ - uint64_t a, b, c, d, diff, diff_best; - - /* No best (r,n,p) yet */ - if (best->p == 0) { - best->p = p; - best->n2 = n2; - best->r2 = r2; - return; - } - - /* - * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to - * freq2k. - * - * delta = 1e6 * - * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) / - * freq2k; - * - * and we would like delta <= budget. - * - * If the discrepancy is above the PPM-based budget, always prefer to - * improve upon the previous solution. However, if you're within the - * budget, try to maximize Ref * VCO, that is N / (P * R^2). - */ - a = freq2k * budget * p * r2; - b = freq2k * budget * best->p * best->r2; - diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2); - diff_best = abs_diff(freq2k * best->p * best->r2, - LC_FREQ_2K * best->n2); - c = 1000000 * diff; - d = 1000000 * diff_best; - - if (a < c && b < d) { - /* If both are above the budget, pick the closer */ - if (best->p * best->r2 * diff < p * r2 * diff_best) { - best->p = p; - best->n2 = n2; - best->r2 = r2; - } - } else if (a >= c && b < d) { - /* If A is below the threshold but B is above it? Update. 
*/ - best->p = p; - best->n2 = n2; - best->r2 = r2; - } else if (a >= c && b >= d) { - /* Both are below the limit, so pick the higher n2/(r2*r2) */ - if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) { - best->p = p; - best->n2 = n2; - best->r2 = r2; - } - } - /* Otherwise a < c && b >= d, do nothing */ -} static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv, i915_reg_t reg) @@ -1139,363 +985,20 @@ void intel_ddi_clock_get(struct intel_encoder *encoder, bxt_ddi_clock_get(encoder, pipe_config); } -static void -hsw_ddi_calculate_wrpll(int clock /* in Hz */, - unsigned *r2_out, unsigned *n2_out, unsigned *p_out) -{ - uint64_t freq2k; - unsigned p, n2, r2; - struct hsw_wrpll_rnp best = { 0, 0, 0 }; - unsigned budget; - - freq2k = clock / 100; - - budget = hsw_wrpll_get_budget_for_freq(clock); - - /* Special case handling for 540 pixel clock: bypass WR PLL entirely - * and directly pass the LC PLL to it. */ - if (freq2k == 5400000) { - *n2_out = 2; - *p_out = 1; - *r2_out = 2; - return; - } - - /* - * Ref = LC_FREQ / R, where Ref is the actual reference input seen by - * the WR PLL. - * - * We want R so that REF_MIN <= Ref <= REF_MAX. - * Injecting R2 = 2 * R gives: - * REF_MAX * r2 > LC_FREQ * 2 and - * REF_MIN * r2 < LC_FREQ * 2 - * - * Which means the desired boundaries for r2 are: - * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN - * - */ - for (r2 = LC_FREQ * 2 / REF_MAX + 1; - r2 <= LC_FREQ * 2 / REF_MIN; - r2++) { - - /* - * VCO = N * Ref, that is: VCO = N * LC_FREQ / R - * - * Once again we want VCO_MIN <= VCO <= VCO_MAX. - * Injecting R2 = 2 * R and N2 = 2 * N, we get: - * VCO_MAX * r2 > n2 * LC_FREQ and - * VCO_MIN * r2 < n2 * LC_FREQ) - * - * Which means the desired boundaries for n2 are: - * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ - */ - for (n2 = VCO_MIN * r2 / LC_FREQ + 1; - n2 <= VCO_MAX * r2 / LC_FREQ; - n2++) { - - for (p = P_MIN; p <= P_MAX; p += P_INC) - hsw_wrpll_update_rnp(freq2k, budget, - r2, n2, p, &best); - } - } - - *n2_out = best.n2; - *p_out = best.p; - *r2_out = best.r2; -} - static bool hsw_ddi_pll_select(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, struct intel_encoder *intel_encoder) { - int clock = crtc_state->port_clock; - - if (intel_encoder->type == INTEL_OUTPUT_HDMI) { - struct intel_shared_dpll *pll; - uint32_t val; - unsigned p, n2, r2; - - hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); - - val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | - WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | - WRPLL_DIVIDER_POST(p); - - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - - crtc_state->dpll_hw_state.wrpll = val; - - pll = intel_get_shared_dpll(intel_crtc, crtc_state); - if (pll == NULL) { - DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", - pipe_name(intel_crtc->pipe)); - return false; - } - - crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id); - } else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) { - struct drm_atomic_state *state = crtc_state->base.state; - struct intel_shared_dpll_config *spll = - &intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL]; - - if (spll->crtc_mask && - WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll)) - return false; - - crtc_state->shared_dpll = DPLL_ID_SPLL; - spll->hw_state.spll = crtc_state->dpll_hw_state.spll; - spll->crtc_mask |= 1 << intel_crtc->pipe; - } - - return true; -} - -struct skl_wrpll_context { - uint64_t min_deviation; /* current minimal deviation */ - uint64_t central_freq; /* chosen 
central freq */ - uint64_t dco_freq; /* chosen dco freq */ - unsigned int p; /* chosen divider */ -}; - -static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) -{ - memset(ctx, 0, sizeof(*ctx)); - - ctx->min_deviation = U64_MAX; -} - -/* DCO freq must be within +1%/-6% of the DCO central freq */ -#define SKL_DCO_MAX_PDEVIATION 100 -#define SKL_DCO_MAX_NDEVIATION 600 - -static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, - uint64_t central_freq, - uint64_t dco_freq, - unsigned int divider) -{ - uint64_t deviation; - - deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), - central_freq); - - /* positive deviation */ - if (dco_freq >= central_freq) { - if (deviation < SKL_DCO_MAX_PDEVIATION && - deviation < ctx->min_deviation) { - ctx->min_deviation = deviation; - ctx->central_freq = central_freq; - ctx->dco_freq = dco_freq; - ctx->p = divider; - } - /* negative deviation */ - } else if (deviation < SKL_DCO_MAX_NDEVIATION && - deviation < ctx->min_deviation) { - ctx->min_deviation = deviation; - ctx->central_freq = central_freq; - ctx->dco_freq = dco_freq; - ctx->p = divider; - } -} - -static void skl_wrpll_get_multipliers(unsigned int p, - unsigned int *p0 /* out */, - unsigned int *p1 /* out */, - unsigned int *p2 /* out */) -{ - /* even dividers */ - if (p % 2 == 0) { - unsigned int half = p / 2; - - if (half == 1 || half == 2 || half == 3 || half == 5) { - *p0 = 2; - *p1 = 1; - *p2 = half; - } else if (half % 2 == 0) { - *p0 = 2; - *p1 = half / 2; - *p2 = 2; - } else if (half % 3 == 0) { - *p0 = 3; - *p1 = half / 3; - *p2 = 2; - } else if (half % 7 == 0) { - *p0 = 7; - *p1 = half / 7; - *p2 = 2; - } - } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */ - *p0 = 3; - *p1 = 1; - *p2 = p / 3; - } else if (p == 5 || p == 7) { - *p0 = p; - *p1 = 1; - *p2 = 1; - } else if (p == 15) { - *p0 = 3; - *p1 = 1; - *p2 = 5; - } else if (p == 21) { - *p0 = 7; - *p1 = 1; - *p2 = 3; - } else if (p == 35) { - *p0 = 7; - *p1 = 1; - *p2 = 5; - } -} - -struct skl_wrpll_params { - uint32_t dco_fraction; - uint32_t dco_integer; - uint32_t qdiv_ratio; - uint32_t qdiv_mode; - uint32_t kdiv; - uint32_t pdiv; - uint32_t central_freq; -}; - -static void skl_wrpll_params_populate(struct skl_wrpll_params *params, - uint64_t afe_clock, - uint64_t central_freq, - uint32_t p0, uint32_t p1, uint32_t p2) -{ - uint64_t dco_freq; - - switch (central_freq) { - case 9600000000ULL: - params->central_freq = 0; - break; - case 9000000000ULL: - params->central_freq = 1; - break; - case 8400000000ULL: - params->central_freq = 3; - } - - switch (p0) { - case 1: - params->pdiv = 0; - break; - case 2: - params->pdiv = 1; - break; - case 3: - params->pdiv = 2; - break; - case 7: - params->pdiv = 4; - break; - default: - WARN(1, "Incorrect PDiv\n"); - } - - switch (p2) { - case 5: - params->kdiv = 0; - break; - case 2: - params->kdiv = 1; - break; - case 3: - params->kdiv = 2; - break; - case 1: - params->kdiv = 3; - break; - default: - WARN(1, "Incorrect KDiv\n"); - } - - params->qdiv_ratio = p1; - params->qdiv_mode = (params->qdiv_ratio == 1) ? 0 : 1; + struct intel_shared_dpll *pll; - dco_freq = p0 * p1 * p2 * afe_clock; + pll = intel_get_shared_dpll(intel_crtc, crtc_state, + intel_encoder); + if (!pll) + DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", + pipe_name(intel_crtc->pipe)); - /* - * Intermediate values are in Hz. 
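 * (Editorial worked example, not from the patch: dco_freq = 8532000000 Hz
 * yields dco_integer = 8532 / 24 = 355 and dco_fraction =
 * (355500000 - 355000000) * 0x8000 / 1000000 = 0x4000, i.e. 0.5.)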
- * Divide by MHz to match bspec - */ - params->dco_integer = div_u64(dco_freq, 24 * MHz(1)); - params->dco_fraction = - div_u64((div_u64(dco_freq, 24) - - params->dco_integer * MHz(1)) * 0x8000, MHz(1)); -} - -static bool -skl_ddi_calculate_wrpll(int clock /* in Hz */, - struct skl_wrpll_params *wrpll_params) -{ - uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */ - uint64_t dco_central_freq[3] = {8400000000ULL, - 9000000000ULL, - 9600000000ULL}; - static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, - 24, 28, 30, 32, 36, 40, 42, 44, - 48, 52, 54, 56, 60, 64, 66, 68, - 70, 72, 76, 78, 80, 84, 88, 90, - 92, 96, 98 }; - static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 }; - static const struct { - const int *list; - int n_dividers; - } dividers[] = { - { even_dividers, ARRAY_SIZE(even_dividers) }, - { odd_dividers, ARRAY_SIZE(odd_dividers) }, - }; - struct skl_wrpll_context ctx; - unsigned int dco, d, i; - unsigned int p0, p1, p2; - - skl_wrpll_context_init(&ctx); - - for (d = 0; d < ARRAY_SIZE(dividers); d++) { - for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) { - for (i = 0; i < dividers[d].n_dividers; i++) { - unsigned int p = dividers[d].list[i]; - uint64_t dco_freq = p * afe_clock; - - skl_wrpll_try_divider(&ctx, - dco_central_freq[dco], - dco_freq, - p); - /* - * Skip the remaining dividers if we're sure to - * have found the definitive divider, we can't - * improve a 0 deviation. - */ - if (ctx.min_deviation == 0) - goto skip_remaining_dividers; - } - } - -skip_remaining_dividers: - /* - * If a solution is found with an even divider, prefer - * this one. - */ - if (d == 0 && ctx.p) - break; - } - - if (!ctx.p) { - DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock); - return false; - } - - /* - * gcc incorrectly analyses that these can be used without being - * initialized. To be fair, it's hard to guess. - */ - p0 = p1 = p2 = 0; - skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2); - skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq, - p0, p1, p2); - - return true; + return pll; } static bool @@ -1504,218 +1007,23 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc, struct intel_encoder *intel_encoder) { struct intel_shared_dpll *pll; - uint32_t ctrl1, cfgcr1, cfgcr2; - int clock = crtc_state->port_clock; - - /* - * See comment in intel_dpll_hw_state to understand why we always use 0 - * as the DPLL id in this function. 
- */ - - ctrl1 = DPLL_CTRL1_OVERRIDE(0); - - if (intel_encoder->type == INTEL_OUTPUT_HDMI) { - struct skl_wrpll_params wrpll_params = { 0, }; - - ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); - - if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params)) - return false; - - cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | - DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | - wrpll_params.dco_integer; - - cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | - DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | - DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | - DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | - wrpll_params.central_freq; - } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_DP_MST) { - switch (crtc_state->port_clock / 2) { - case 81000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); - break; - case 135000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0); - break; - case 270000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0); - break; - } - - cfgcr1 = cfgcr2 = 0; - } else if (intel_encoder->type == INTEL_OUTPUT_EDP) { - return true; - } else - return false; - - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - - crtc_state->dpll_hw_state.ctrl1 = ctrl1; - crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; - crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; - pll = intel_get_shared_dpll(intel_crtc, crtc_state); + pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder); if (pll == NULL) { DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", pipe_name(intel_crtc->pipe)); return false; } - /* shared DPLL id 0 is DPLL 1 */ - crtc_state->ddi_pll_sel = pll->id + 1; - return true; } -/* bxt clock parameters */ -struct bxt_clk_div { - int clock; - uint32_t p1; - uint32_t p2; - uint32_t m2_int; - uint32_t m2_frac; - bool m2_frac_en; - uint32_t n; -}; - -/* pre-calculated values for DP linkrates */ -static const struct bxt_clk_div bxt_dp_clk_val[] = { - {162000, 4, 2, 32, 1677722, 1, 1}, - {270000, 4, 1, 27, 0, 0, 1}, - {540000, 2, 1, 27, 0, 0, 1}, - {216000, 3, 2, 32, 1677722, 1, 1}, - {243000, 4, 1, 24, 1258291, 1, 1}, - {324000, 4, 1, 32, 1677722, 1, 1}, - {432000, 3, 1, 32, 1677722, 1, 1} -}; - static bool bxt_ddi_pll_select(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, struct intel_encoder *intel_encoder) { - struct intel_shared_dpll *pll; - struct bxt_clk_div clk_div = {0}; - int vco = 0; - uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; - uint32_t lanestagger; - int clock = crtc_state->port_clock; - - if (intel_encoder->type == INTEL_OUTPUT_HDMI) { - intel_clock_t best_clock; - - /* Calculate HDMI div */ - /* - * FIXME: tie the following calculation into - * i9xx_crtc_compute_clock - */ - if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) { - DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n", - clock, pipe_name(intel_crtc->pipe)); - return false; - } - - clk_div.p1 = best_clock.p1; - clk_div.p2 = best_clock.p2; - WARN_ON(best_clock.m1 != 2); - clk_div.n = best_clock.n; - clk_div.m2_int = best_clock.m2 >> 22; - clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1); - clk_div.m2_frac_en = clk_div.m2_frac != 0; - - vco = best_clock.vco; - } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || - intel_encoder->type == INTEL_OUTPUT_EDP) { - int i; - - clk_div = bxt_dp_clk_val[0]; - for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) { - if (bxt_dp_clk_val[i].clock == clock) { - clk_div = bxt_dp_clk_val[i]; - break; - } - } - vco = clock * 10 / 2 * clk_div.p1 * 
clk_div.p2; - } - - if (vco >= 6200000 && vco <= 6700000) { - prop_coef = 4; - int_coef = 9; - gain_ctl = 3; - targ_cnt = 8; - } else if ((vco > 5400000 && vco < 6200000) || - (vco >= 4800000 && vco < 5400000)) { - prop_coef = 5; - int_coef = 11; - gain_ctl = 3; - targ_cnt = 9; - } else if (vco == 5400000) { - prop_coef = 3; - int_coef = 8; - gain_ctl = 1; - targ_cnt = 9; - } else { - DRM_ERROR("Invalid VCO\n"); - return false; - } - - memset(&crtc_state->dpll_hw_state, 0, - sizeof(crtc_state->dpll_hw_state)); - - if (clock > 270000) - lanestagger = 0x18; - else if (clock > 135000) - lanestagger = 0x0d; - else if (clock > 67000) - lanestagger = 0x07; - else if (clock > 33000) - lanestagger = 0x04; - else - lanestagger = 0x02; - - crtc_state->dpll_hw_state.ebb0 = - PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2); - crtc_state->dpll_hw_state.pll0 = clk_div.m2_int; - crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n); - crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac; - - if (clk_div.m2_frac_en) - crtc_state->dpll_hw_state.pll3 = - PORT_PLL_M2_FRAC_ENABLE; - - crtc_state->dpll_hw_state.pll6 = - prop_coef | PORT_PLL_INT_COEFF(int_coef); - crtc_state->dpll_hw_state.pll6 |= - PORT_PLL_GAIN_CTL(gain_ctl); - - crtc_state->dpll_hw_state.pll8 = targ_cnt; - - crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT; - - crtc_state->dpll_hw_state.pll10 = - PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT) - | PORT_PLL_DCO_AMP_OVR_EN_H; - - crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE; - - crtc_state->dpll_hw_state.pcsdw12 = - LANESTAGGER_STRAP_OVRD | lanestagger; - - pll = intel_get_shared_dpll(intel_crtc, crtc_state); - if (pll == NULL) { - DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", - pipe_name(intel_crtc->pipe)); - return false; - } - - /* shared DPLL id 0 is DPLL A */ - crtc_state->ddi_pll_sel = pll->id; - - return true; + return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder); } /* @@ -1753,6 +1061,8 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) uint32_t temp; if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { + WARN_ON(transcoder_is_dsi(cpu_transcoder)); + temp = TRANS_MSA_SYNC_CLK; switch (intel_crtc->config->pipe_bpp) { case 18: @@ -2121,7 +1431,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv, u32 n_entries, i; uint32_t val; - if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) { + if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); ddi_translations = bxt_ddi_translations_edp; } else if (type == INTEL_OUTPUT_DISPLAYPORT @@ -2259,24 +1569,6 @@ void intel_ddi_clk_select(struct intel_encoder *encoder, uint32_t dpll = pipe_config->ddi_pll_sel; uint32_t val; - /* - * DPLL0 is used for eDP and is the only "private" DPLL (as - * opposed to shared) on SKL - */ - if (encoder->type == INTEL_OUTPUT_EDP) { - WARN_ON(dpll != SKL_DPLL0); - - val = I915_READ(DPLL_CTRL1); - - val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | - DPLL_CTRL1_SSC(dpll) | - DPLL_CTRL1_LINK_RATE_MASK(dpll)); - val |= pipe_config->dpll_hw_state.ctrl1 << (dpll * 6); - - I915_WRITE(DPLL_CTRL1, val); - POSTING_READ(DPLL_CTRL1); - } - /* DDI -> PLL mapping */ val = I915_READ(DPLL_CTRL2); @@ -2430,251 +1722,35 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder) } } -static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); - 
POSTING_READ(WRPLL_CTL(pll->id)); - udelay(20); -} - -static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); - POSTING_READ(SPLL_CTL); - udelay(20); -} - -static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - uint32_t val; - - val = I915_READ(WRPLL_CTL(pll->id)); - I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE); - POSTING_READ(WRPLL_CTL(pll->id)); -} - -static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - uint32_t val; - - val = I915_READ(SPLL_CTL); - I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); - POSTING_READ(SPLL_CTL); -} - -static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) -{ - uint32_t val; - - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) - return false; - - val = I915_READ(WRPLL_CTL(pll->id)); - hw_state->wrpll = val; - - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); - - return val & WRPLL_PLL_ENABLE; -} - -static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) -{ - uint32_t val; - - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) - return false; - - val = I915_READ(SPLL_CTL); - hw_state->spll = val; - - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); - - return val & SPLL_PLL_ENABLE; -} - - -static const char * const hsw_ddi_pll_names[] = { - "WRPLL 1", - "WRPLL 2", - "SPLL" -}; - -static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv) -{ - int i; - - dev_priv->num_shared_dpll = 3; - - for (i = 0; i < 2; i++) { - dev_priv->shared_dplls[i].id = i; - dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; - dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable; - dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable; - dev_priv->shared_dplls[i].get_hw_state = - hsw_ddi_wrpll_get_hw_state; - } - - /* SPLL is special, but needs to be initialized anyway.. 
*/ - dev_priv->shared_dplls[i].id = i; - dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i]; - dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable; - dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable; - dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state; - -} - -static const char * const skl_ddi_pll_names[] = { - "DPLL 1", - "DPLL 2", - "DPLL 3", -}; - -struct skl_dpll_regs { - i915_reg_t ctl, cfgcr1, cfgcr2; -}; - -/* this array is indexed by the *shared* pll id */ -static const struct skl_dpll_regs skl_dpll_regs[3] = { - { - /* DPLL 1 */ - .ctl = LCPLL2_CTL, - .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), - .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), - }, - { - /* DPLL 2 */ - .ctl = WRPLL_CTL(0), - .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), - .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), - }, - { - /* DPLL 3 */ - .ctl = WRPLL_CTL(1), - .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), - .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), - }, -}; - -static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - uint32_t val; - unsigned int dpll; - const struct skl_dpll_regs *regs = skl_dpll_regs; - - /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ - dpll = pll->id + 1; - - val = I915_READ(DPLL_CTRL1); - - val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) | - DPLL_CTRL1_LINK_RATE_MASK(dpll)); - val |= pll->config.hw_state.ctrl1 << (dpll * 6); - - I915_WRITE(DPLL_CTRL1, val); - POSTING_READ(DPLL_CTRL1); - - I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1); - I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2); - POSTING_READ(regs[pll->id].cfgcr1); - POSTING_READ(regs[pll->id].cfgcr2); - - /* the enable bit is always bit 31 */ - I915_WRITE(regs[pll->id].ctl, - I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); - - if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5)) - DRM_ERROR("DPLL %d not locked\n", dpll); -} - -static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - const struct skl_dpll_regs *regs = skl_dpll_regs; - - /* the enable bit is always bit 31 */ - I915_WRITE(regs[pll->id].ctl, - I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE); - POSTING_READ(regs[pll->id].ctl); -} - -static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) -{ - uint32_t val; - unsigned int dpll; - const struct skl_dpll_regs *regs = skl_dpll_regs; - bool ret; - - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) - return false; - - ret = false; - - /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */ - dpll = pll->id + 1; - - val = I915_READ(regs[pll->id].ctl); - if (!(val & LCPLL_PLL_ENABLE)) - goto out; - - val = I915_READ(DPLL_CTRL1); - hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f; - - /* avoid reading back stale values if HDMI mode is not enabled */ - if (val & DPLL_CTRL1_HDMI_MODE(dpll)) { - hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); - hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); - } - ret = true; - -out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); - - return ret; -} - -static void skl_shared_dplls_init(struct drm_i915_private *dev_priv) -{ - int i; - - dev_priv->num_shared_dpll = 3; - - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - dev_priv->shared_dplls[i].id = i; - dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i]; - dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable; - dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable; - 
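/*
 * Aside (illustrative, inferred from the masks in the removed code):
 * skl_ddi_pll_enable() and skl_ddi_pll_get_hw_state() treat DPLL_CTRL1
 * as an array of 6-bit per-DPLL fields, hence the 0x3f mask and the
 * (dpll * 6) shift. Reading back the field for DPLL2, for example,
 * would look like:
 *
 *	u32 field = (I915_READ(DPLL_CTRL1) >> (2 * 6)) & 0x3f;
 */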
dev_priv->shared_dplls[i].get_hw_state = - skl_ddi_pll_get_hw_state; - } -} - static void broxton_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy) { enum port port; - uint32_t val; + u32 ports, val; val = I915_READ(BXT_P_CR_GT_DISP_PWRON); val |= GT_DISPLAY_POWER_ON(phy); I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); - /* Considering 10ms timeout until BSpec is updated */ - if (wait_for(I915_READ(BXT_PORT_CL1CM_DW0(phy)) & PHY_POWER_GOOD, 10)) + /* + * The PHY registers start out inaccessible and respond to reads with + * all 1s. Eventually they become accessible as they power up, then + * the reserved bit will give the default 0. Poll on the reserved bit + * becoming 0 to find when the PHY is accessible. + * HW team confirmed that the time to reach phypowergood status is + * anywhere between 50 us and 100us. + */ + if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) & + (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) { DRM_ERROR("timeout during PHY%d power on\n", phy); + } - for (port = (phy == DPIO_PHY0 ? PORT_B : PORT_A); - port <= (phy == DPIO_PHY0 ? PORT_C : PORT_A); port++) { + if (phy == DPIO_PHY0) + ports = BIT(PORT_B) | BIT(PORT_C); + else + ports = BIT(PORT_A); + + for_each_port_masked(port, ports) { int lane; for (lane = 0; lane < 4; lane++) { @@ -2783,249 +1859,6 @@ void broxton_ddi_phy_uninit(struct drm_device *dev) I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0); } -static const char * const bxt_ddi_pll_names[] = { - "PORT PLL A", - "PORT PLL B", - "PORT PLL C", -}; - -static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - uint32_t temp; - enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ - - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); - temp &= ~PORT_PLL_REF_SEL; - /* Non-SSC reference */ - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); - - /* Disable 10 bit clock */ - temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); - temp &= ~PORT_PLL_10BIT_CLK_ENABLE; - I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); - - /* Write P1 & P2 */ - temp = I915_READ(BXT_PORT_PLL_EBB_0(port)); - temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); - temp |= pll->config.hw_state.ebb0; - I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp); - - /* Write M2 integer */ - temp = I915_READ(BXT_PORT_PLL(port, 0)); - temp &= ~PORT_PLL_M2_MASK; - temp |= pll->config.hw_state.pll0; - I915_WRITE(BXT_PORT_PLL(port, 0), temp); - - /* Write N */ - temp = I915_READ(BXT_PORT_PLL(port, 1)); - temp &= ~PORT_PLL_N_MASK; - temp |= pll->config.hw_state.pll1; - I915_WRITE(BXT_PORT_PLL(port, 1), temp); - - /* Write M2 fraction */ - temp = I915_READ(BXT_PORT_PLL(port, 2)); - temp &= ~PORT_PLL_M2_FRAC_MASK; - temp |= pll->config.hw_state.pll2; - I915_WRITE(BXT_PORT_PLL(port, 2), temp); - - /* Write M2 fraction enable */ - temp = I915_READ(BXT_PORT_PLL(port, 3)); - temp &= ~PORT_PLL_M2_FRAC_ENABLE; - temp |= pll->config.hw_state.pll3; - I915_WRITE(BXT_PORT_PLL(port, 3), temp); - - /* Write coeff */ - temp = I915_READ(BXT_PORT_PLL(port, 6)); - temp &= ~PORT_PLL_PROP_COEFF_MASK; - temp &= ~PORT_PLL_INT_COEFF_MASK; - temp &= ~PORT_PLL_GAIN_CTL_MASK; - temp |= pll->config.hw_state.pll6; - I915_WRITE(BXT_PORT_PLL(port, 6), temp); - - /* Write calibration val */ - temp = I915_READ(BXT_PORT_PLL(port, 8)); - temp &= ~PORT_PLL_TARGET_CNT_MASK; - temp |= pll->config.hw_state.pll8; - I915_WRITE(BXT_PORT_PLL(port, 8), temp); - - temp = I915_READ(BXT_PORT_PLL(port, 9)); - temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; - temp |= pll->config.hw_state.pll9; - I915_WRITE(BXT_PORT_PLL(port, 9), 
temp); - - temp = I915_READ(BXT_PORT_PLL(port, 10)); - temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; - temp &= ~PORT_PLL_DCO_AMP_MASK; - temp |= pll->config.hw_state.pll10; - I915_WRITE(BXT_PORT_PLL(port, 10), temp); - - /* Recalibrate with new settings */ - temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); - temp |= PORT_PLL_RECALIBRATE; - I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); - temp &= ~PORT_PLL_10BIT_CLK_ENABLE; - temp |= pll->config.hw_state.ebb4; - I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); - - /* Enable PLL */ - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); - temp |= PORT_PLL_ENABLE; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); - POSTING_READ(BXT_PORT_PLL_ENABLE(port)); - - if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_LOCK), 200)) - DRM_ERROR("PLL %d not locked\n", port); - - /* - * While we write to the group register to program all lanes at once we - * can read only lane registers and we pick lanes 0/1 for that. - */ - temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port)); - temp &= ~LANE_STAGGER_MASK; - temp &= ~LANESTAGGER_STRAP_OVRD; - temp |= pll->config.hw_state.pcsdw12; - I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp); -} - -static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ - uint32_t temp; - - temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); - temp &= ~PORT_PLL_ENABLE; - I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); - POSTING_READ(BXT_PORT_PLL_ENABLE(port)); -} - -static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) -{ - enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ - uint32_t val; - bool ret; - - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) - return false; - - ret = false; - - val = I915_READ(BXT_PORT_PLL_ENABLE(port)); - if (!(val & PORT_PLL_ENABLE)) - goto out; - - hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); - hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; - - hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port)); - hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE; - - hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0)); - hw_state->pll0 &= PORT_PLL_M2_MASK; - - hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1)); - hw_state->pll1 &= PORT_PLL_N_MASK; - - hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2)); - hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK; - - hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3)); - hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE; - - hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6)); - hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK | - PORT_PLL_INT_COEFF_MASK | - PORT_PLL_GAIN_CTL_MASK; - - hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8)); - hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK; - - hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9)); - hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK; - - hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10)); - hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H | - PORT_PLL_DCO_AMP_MASK; - - /* - * While we write to the group register to program all lanes at once we - * can read only lane registers. We configure all lanes the same way, so - * here just read out lanes 0/1 and output a note if lanes 2/3 differ. 
- */ - hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port)); - if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12) - DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", - hw_state->pcsdw12, - I915_READ(BXT_PORT_PCS_DW12_LN23(port))); - hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; - - ret = true; - -out: - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); - - return ret; -} - -static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv) -{ - int i; - - dev_priv->num_shared_dpll = 3; - - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - dev_priv->shared_dplls[i].id = i; - dev_priv->shared_dplls[i].name = bxt_ddi_pll_names[i]; - dev_priv->shared_dplls[i].disable = bxt_ddi_pll_disable; - dev_priv->shared_dplls[i].enable = bxt_ddi_pll_enable; - dev_priv->shared_dplls[i].get_hw_state = - bxt_ddi_pll_get_hw_state; - } -} - -void intel_ddi_pll_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t val = I915_READ(LCPLL_CTL); - - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) - skl_shared_dplls_init(dev_priv); - else if (IS_BROXTON(dev)) - bxt_shared_dplls_init(dev_priv); - else - hsw_shared_dplls_init(dev_priv); - - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { - int cdclk_freq; - - cdclk_freq = dev_priv->display.get_display_clock_speed(dev); - dev_priv->skl_boot_cdclk = cdclk_freq; - if (skl_sanitize_cdclk(dev_priv)) - DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); - if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) - DRM_ERROR("LCPLL1 is disabled\n"); - } else if (IS_BROXTON(dev)) { - broxton_init_cdclk(dev); - broxton_ddi_phy_init(dev); - } else { - /* - * The LCPLL register should be turned on by the BIOS. For now - * let's just check its state and print errors in case - * something is wrong. Don't even try to turn it on. - */ - - if (val & LCPLL_CD_SOURCE_FCLK) - DRM_ERROR("CDCLK source is not LCPLL\n"); - - if (val & LCPLL_PLL_DISABLE) - DRM_ERROR("LCPLL is disabled\n"); - } -} - void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); @@ -3078,12 +1911,18 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc) struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); uint32_t val; - intel_ddi_post_disable(intel_encoder); - + /* + * Bspec lists this as both step 13 (before DDI_BUF_CTL disable) + * and step 18 (after clearing PORT_CLK_SEL). Based on a BUN, + * step 13 is the correct place for it. Step 18 is where it was + * originally before the BUN. 
+ */ val = I915_READ(FDI_RX_CTL(PIPE_A)); val &= ~FDI_RX_ENABLE; I915_WRITE(FDI_RX_CTL(PIPE_A), val); + intel_ddi_post_disable(intel_encoder); + val = I915_READ(FDI_RX_MISC(PIPE_A)); val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); @@ -3124,6 +1963,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_hdmi *intel_hdmi; u32 temp, flags = 0; + /* XXX: DSI transcoder paranoia */ + if (WARN_ON(transcoder_is_dsi(cpu_transcoder))) + return; + temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); if (temp & TRANS_DDI_PHSYNC) flags |= DRM_MODE_FLAG_PHSYNC; @@ -3178,8 +2021,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, intel_crtc); - if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp && - pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { /* * This is a big fat ugly hack. * @@ -3194,8 +2037,8 @@ void intel_ddi_get_config(struct intel_encoder *encoder, * load. */ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", - pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); - dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); + dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } intel_ddi_clock_get(encoder, pipe_config); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 6e0d8283daa6..607dc41bcc68 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -36,6 +36,7 @@ #include "intel_drv.h" #include <drm/i915_drm.h> #include "i915_drv.h" +#include "intel_dsi.h" #include "i915_trace.h" #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -96,12 +97,13 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_i915_gem_object *obj); static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); +static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc); static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n, struct intel_link_m_n *m2_n2); static void ironlake_set_pipeconf(struct drm_crtc *crtc); static void haswell_set_pipeconf(struct drm_crtc *crtc); -static void intel_set_pipe_csc(struct drm_crtc *crtc); +static void haswell_set_pipemisc(struct drm_crtc *crtc); static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, @@ -110,13 +112,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); -static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, - int num_connectors); static void skylake_pfit_enable(struct intel_crtc *crtc); static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); static void ironlake_pfit_enable(struct intel_crtc *crtc); static void intel_modeset_setup_hw_state(struct drm_device *dev); -static void intel_pre_disable_primary(struct drm_crtc *crtc); +static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); typedef struct { int min, max; @@ 
-147,15 +147,12 @@ static int valleyview_get_vco(struct drm_i915_private *dev_priv) return vco_freq[hpll_freq] * 1000; } -static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, - const char *name, u32 reg) +int vlv_get_cck_clock(struct drm_i915_private *dev_priv, + const char *name, u32 reg, int ref_freq) { u32 val; int divider; - if (dev_priv->hpll_freq == 0) - dev_priv->hpll_freq = valleyview_get_vco(dev_priv); - mutex_lock(&dev_priv->sb_lock); val = vlv_cck_read(dev_priv, reg); mutex_unlock(&dev_priv->sb_lock); @@ -166,52 +163,75 @@ static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, (divider << CCK_FREQUENCY_STATUS_SHIFT), "%s change in progress\n", name); - return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); + return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); } -int -intel_pch_rawclk(struct drm_device *dev) +static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, + const char *name, u32 reg) { - struct drm_i915_private *dev_priv = dev->dev_private; + if (dev_priv->hpll_freq == 0) + dev_priv->hpll_freq = valleyview_get_vco(dev_priv); - WARN_ON(!HAS_PCH_SPLIT(dev)); + return vlv_get_cck_clock(dev_priv, name, reg, + dev_priv->hpll_freq); +} - return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; +static int +intel_pch_rawclk(struct drm_i915_private *dev_priv) +{ + return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000; } -/* hrawclock is 1/4 the FSB frequency */ -int intel_hrawclk(struct drm_device *dev) +static int +intel_vlv_hrawclk(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - uint32_t clkcfg; + return vlv_get_cck_clock_hpll(dev_priv, "hrawclk", + CCK_DISPLAY_REF_CLOCK_CONTROL); +} - /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - return 200; +static int +intel_g4x_hrawclk(struct drm_i915_private *dev_priv) +{ + uint32_t clkcfg; + /* hrawclock is 1/4 the FSB frequency */ clkcfg = I915_READ(CLKCFG); switch (clkcfg & CLKCFG_FSB_MASK) { case CLKCFG_FSB_400: - return 100; + return 100000; case CLKCFG_FSB_533: - return 133; + return 133333; case CLKCFG_FSB_667: - return 166; + return 166667; case CLKCFG_FSB_800: - return 200; + return 200000; case CLKCFG_FSB_1067: - return 266; + return 266667; case CLKCFG_FSB_1333: - return 333; + return 333333; /* these two are just a guess; one of them might be right */ case CLKCFG_FSB_1600: case CLKCFG_FSB_1600_ALT: - return 400; + return 400000; default: - return 133; + return 133333; } } +static void intel_update_rawclk(struct drm_i915_private *dev_priv) +{ + if (HAS_PCH_SPLIT(dev_priv)) + dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv); + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv); + else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv)) + dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv); + else + return; /* no rawclk on other platforms, or no need to know it */ + + DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq); +} + static void intel_update_czclk(struct drm_i915_private *dev_priv) { if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) @@ -224,13 +244,15 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv) } static inline u32 /* units of 100MHz */ -intel_fdi_link_freq(struct drm_device *dev) +intel_fdi_link_freq(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *pipe_config) { - if (IS_GEN5(dev)) { - struct drm_i915_private *dev_priv = 
dev->dev_private; - return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; - } else - return 27; + if (HAS_DDI(dev_priv)) + return pipe_config->port_clock; /* SPLL */ + else if (IS_GEN5(dev_priv)) + return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000; + else + return 270000; } static const intel_limit_t intel_limits_i8xx_dac = { @@ -550,89 +572,6 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, return false; } -static const intel_limit_t * -intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) -{ - struct drm_device *dev = crtc_state->base.crtc->dev; - const intel_limit_t *limit; - - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_is_dual_link_lvds(dev)) { - if (refclk == 100000) - limit = &intel_limits_ironlake_dual_lvds_100m; - else - limit = &intel_limits_ironlake_dual_lvds; - } else { - if (refclk == 100000) - limit = &intel_limits_ironlake_single_lvds_100m; - else - limit = &intel_limits_ironlake_single_lvds; - } - } else - limit = &intel_limits_ironlake_dac; - - return limit; -} - -static const intel_limit_t * -intel_g4x_limit(struct intel_crtc_state *crtc_state) -{ - struct drm_device *dev = crtc_state->base.crtc->dev; - const intel_limit_t *limit; - - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { - if (intel_is_dual_link_lvds(dev)) - limit = &intel_limits_g4x_dual_channel_lvds; - else - limit = &intel_limits_g4x_single_channel_lvds; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || - intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { - limit = &intel_limits_g4x_hdmi; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { - limit = &intel_limits_g4x_sdvo; - } else /* The option is for other outputs */ - limit = &intel_limits_i9xx_sdvo; - - return limit; -} - -static const intel_limit_t * -intel_limit(struct intel_crtc_state *crtc_state, int refclk) -{ - struct drm_device *dev = crtc_state->base.crtc->dev; - const intel_limit_t *limit; - - if (IS_BROXTON(dev)) - limit = &intel_limits_bxt; - else if (HAS_PCH_SPLIT(dev)) - limit = intel_ironlake_limit(crtc_state, refclk); - else if (IS_G4X(dev)) { - limit = intel_g4x_limit(crtc_state); - } else if (IS_PINEVIEW(dev)) { - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_pineview_lvds; - else - limit = &intel_limits_pineview_sdvo; - } else if (IS_CHERRYVIEW(dev)) { - limit = &intel_limits_chv; - } else if (IS_VALLEYVIEW(dev)) { - limit = &intel_limits_vlv; - } else if (!IS_GEN2(dev)) { - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_i9xx_lvds; - else - limit = &intel_limits_i9xx_sdvo; - } else { - if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) - limit = &intel_limits_i8xx_lvds; - else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) - limit = &intel_limits_i8xx_dvo; - else - limit = &intel_limits_i8xx_dac; - } - return limit; -} - /* * Platform specific helpers to calculate the port PLL loopback- (clock.m), * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast @@ -763,6 +702,16 @@ i9xx_select_p2_div(const intel_limit_t *limit, } } +/* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + * + * Target and reference clocks are specified in kHz. 
+ * + * If match_clock is provided, then best_clock P divider must match the P + * divider from @match_clock used for LVDS downclocking. + */ static bool i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc_state *crtc_state, @@ -810,6 +759,16 @@ i9xx_find_best_dpll(const intel_limit_t *limit, return (err != target); } +/* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + * + * Target and reference clocks are specified in kHz. + * + * If match_clock is provided, then best_clock P divider must match the P + * divider from @match_clock used for LVDS downclocking. + */ static bool pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc_state *crtc_state, @@ -855,6 +814,16 @@ pnv_find_best_dpll(const intel_limit_t *limit, return (err != target); } +/* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + * + * Target and reference clocks are specified in kHz. + * + * If match_clock is provided, then best_clock P divider must match the P + * divider from @match_clock used for LVDS downclocking. + */ static bool g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc_state *crtc_state, @@ -943,6 +912,11 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, return *error_ppm + 10 < best_error_ppm; } +/* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. + */ static bool vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc_state *crtc_state, @@ -997,6 +971,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, return found; } +/* + * Returns a set of divisors for the desired target clock with the given + * refclk, or FALSE. The returned values represent the clock equation: + * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
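+ *
+ * Worked example (illustrative values, not taken from this patch): with
+ * refclk = 96000 kHz, m1 = 10, m2 = 8, n = 2 and p1 = 2, p2 = 4, the
+ * equation gives 96000 * (5 * (10 + 2) + (8 + 2)) / (2 + 2) / 2 / 4 =
+ * 210000 kHz, i.e. a 210 MHz dot clock from a 1.68 GHz VCO.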
+ */ static bool chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc_state *crtc_state, @@ -1058,9 +1037,10 @@ chv_find_best_dpll(const intel_limit_t *limit, bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, intel_clock_t *best_clock) { - int refclk = i9xx_get_refclk(crtc_state, 0); + int refclk = 100000; + const intel_limit_t *limit = &intel_limits_bxt; - return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state, + return chv_find_best_dpll(limit, crtc_state, target_clock, refclk, NULL, best_clock); } @@ -1165,7 +1145,7 @@ void assert_pll(struct drm_i915_private *dev_priv, } /* XXX: the dsi pll is shared between MIPI DSI ports */ -static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) +void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) { u32 val; bool cur_state; @@ -1179,36 +1159,6 @@ static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) "DSI PLL state assertion failure (expected %s, current %s)\n", onoff(state), onoff(cur_state)); } -#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) -#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) - -struct intel_shared_dpll * -intel_crtc_to_shared_dpll(struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - - if (crtc->config->shared_dpll < 0) - return NULL; - - return &dev_priv->shared_dplls[crtc->config->shared_dpll]; -} - -/* For ILK+ */ -void assert_shared_dpll(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - bool state) -{ - bool cur_state; - struct intel_dpll_hw_state hw_state; - - if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) - return; - - cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); - I915_STATE_WARN(cur_state != state, - "%s assertion failure (expected %s, current %s)\n", - pll->name, onoff(state), onoff(cur_state)); -} static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum pipe pipe, bool state) @@ -1217,7 +1167,7 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, pipe); - if (HAS_DDI(dev_priv->dev)) { + if (HAS_DDI(dev_priv)) { /* DDI does not have a specific FDI_TX register */ u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); @@ -1253,11 +1203,11 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, u32 val; /* ILK FDI PLL is always enabled */ - if (INTEL_INFO(dev_priv->dev)->gen == 5) + if (INTEL_INFO(dev_priv)->gen == 5) return; /* On Haswell, DDI ports are responsible for the FDI PLL setup */ - if (HAS_DDI(dev_priv->dev)) + if (HAS_DDI(dev_priv)) return; val = I915_READ(FDI_TX_CTL(pipe)); @@ -1446,21 +1396,8 @@ static void assert_vblank_disabled(struct drm_crtc *crtc) drm_crtc_vblank_put(crtc); } -static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) -{ - u32 val; - bool enabled; - - I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); - - val = I915_READ(PCH_DREF_CONTROL); - enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | - DREF_SUPERSPREAD_SOURCE_MASK)); - I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); -} - -static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, - enum pipe pipe) +void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe) { u32 val; bool enabled; @@ -1478,11 
+1415,11 @@ static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, if ((val & DP_PORT_EN) == 0) return false; - if (HAS_PCH_CPT(dev_priv->dev)) { + if (HAS_PCH_CPT(dev_priv)) { u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) return false; - } else if (IS_CHERRYVIEW(dev_priv->dev)) { + } else if (IS_CHERRYVIEW(dev_priv)) { if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) return false; } else { @@ -1498,10 +1435,10 @@ static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, if ((val & SDVO_ENABLE) == 0) return false; - if (HAS_PCH_CPT(dev_priv->dev)) { + if (HAS_PCH_CPT(dev_priv)) { if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) return false; - } else if (IS_CHERRYVIEW(dev_priv->dev)) { + } else if (IS_CHERRYVIEW(dev_priv)) { if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) return false; } else { @@ -1517,7 +1454,7 @@ static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, if ((val & LVDS_PORT_EN) == 0) return false; - if (HAS_PCH_CPT(dev_priv->dev)) { + if (HAS_PCH_CPT(dev_priv)) { if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) return false; } else { @@ -1532,7 +1469,7 @@ static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, { if ((val & ADPA_DAC_ENABLE) == 0) return false; - if (HAS_PCH_CPT(dev_priv->dev)) { + if (HAS_PCH_CPT(dev_priv)) { if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) return false; } else { @@ -1551,7 +1488,7 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", i915_mmio_reg_offset(reg), pipe_name(pipe)); - I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0 && (val & DP_PIPEB_SELECT), "IBX PCH dp port still using transcoder B\n"); } @@ -1564,7 +1501,7 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", i915_mmio_reg_offset(reg), pipe_name(pipe)); - I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 + I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0 && (val & SDVO_PIPE_B_SELECT), "IBX PCH hdmi port still using transcoder B\n"); } @@ -1598,35 +1535,24 @@ static void vlv_enable_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - i915_reg_t reg = DPLL(crtc->pipe); + enum pipe pipe = crtc->pipe; + i915_reg_t reg = DPLL(pipe); u32 dpll = pipe_config->dpll_hw_state.dpll; - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, pipe); /* PLL is protected by panel, make sure we can write it */ - if (IS_MOBILE(dev_priv->dev)) - assert_panel_unlocked(dev_priv, crtc->pipe); + assert_panel_unlocked(dev_priv, pipe); I915_WRITE(reg, dpll); POSTING_READ(reg); udelay(150); if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) - DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); - - I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md); - POSTING_READ(DPLL_MD(crtc->pipe)); + DRM_ERROR("DPLL %d failed to lock\n", pipe); - /* We do this three times for luck */ - I915_WRITE(reg, dpll); - POSTING_READ(reg); - udelay(150); /* wait for warmup */ - I915_WRITE(reg, dpll); - POSTING_READ(reg); - udelay(150); /* wait for warmup */ - I915_WRITE(reg, dpll); - POSTING_READ(reg); - udelay(150); /* wait for warmup */ + 
I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); + POSTING_READ(DPLL_MD(pipe)); } static void chv_enable_pll(struct intel_crtc *crtc, @@ -1634,11 +1560,14 @@ static void chv_enable_pll(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); u32 tmp; - assert_pipe_disabled(dev_priv, crtc->pipe); + assert_pipe_disabled(dev_priv, pipe); + + /* PLL is protected by panel, make sure we can write it */ + assert_panel_unlocked(dev_priv, pipe); mutex_lock(&dev_priv->sb_lock); @@ -1661,9 +1590,27 @@ static void chv_enable_pll(struct intel_crtc *crtc, if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) DRM_ERROR("PLL %d failed to lock\n", pipe); - /* not sure when this should be written */ - I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); - POSTING_READ(DPLL_MD(pipe)); + if (pipe != PIPE_A) { + /* + * WaPixelRepeatModeFixForC0:chv + * + * DPLLCMD is AWOL. Use chicken bits to propagate + * the value from DPLLBMD to either pipe B or C. + */ + I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C); + I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md); + I915_WRITE(CBR4_VLV, 0); + dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; + + /* + * DPLLB VGA mode also seems to cause problems. + * We should always have it disabled. + */ + WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); + } else { + I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); + POSTING_READ(DPLL_MD(pipe)); + } } static int intel_num_dvo_pipes(struct drm_device *dev) @@ -1687,9 +1634,6 @@ static void i9xx_enable_pll(struct intel_crtc *crtc) assert_pipe_disabled(dev_priv, crtc->pipe); - /* No really, not for ILK+ */ - BUG_ON(INTEL_INFO(dev)->gen >= 5); - /* PLL is protected by panel, make sure we can write it */ if (IS_MOBILE(dev) && !IS_I830(dev)) assert_panel_unlocked(dev_priv, crtc->pipe); @@ -1788,16 +1732,13 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) /* Make sure the pipe isn't still relying on us */ assert_pipe_disabled(dev_priv, pipe); - /* - * Leave integrated clock source and reference clock enabled for pipe B. - * The latter is needed for VGA hotplug / manual detection. 
- */ - val = DPLL_VGA_MODE_DIS; - if (pipe == PIPE_B) - val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV; + val = DPLL_INTEGRATED_REF_CLK_VLV | + DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; + if (pipe != PIPE_A) + val |= DPLL_INTEGRATED_CRI_CLK_VLV; + I915_WRITE(DPLL(pipe), val); POSTING_READ(DPLL(pipe)); - } static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) @@ -1808,11 +1749,11 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) /* Make sure the pipe isn't still relying on us */ assert_pipe_disabled(dev_priv, pipe); - /* Set PLL en = 0 */ val = DPLL_SSC_REF_CLK_CHV | DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; if (pipe != PIPE_A) val |= DPLL_INTEGRATED_CRI_CLK_VLV; + I915_WRITE(DPLL(pipe), val); POSTING_READ(DPLL(pipe)); @@ -1856,100 +1797,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); } -static void intel_prepare_shared_dpll(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); - - if (WARN_ON(pll == NULL)) - return; - - WARN_ON(!pll->config.crtc_mask); - if (pll->active == 0) { - DRM_DEBUG_DRIVER("setting up %s\n", pll->name); - WARN_ON(pll->on); - assert_shared_dpll_disabled(dev_priv, pll); - - pll->mode_set(dev_priv, pll); - } -} - -/** - * intel_enable_shared_dpll - enable PCH PLL - * @dev_priv: i915 private structure - * @pipe: pipe PLL to enable - * - * The PCH PLL needs to be enabled before the PCH transcoder, since it - * drives the transcoder clock. - */ -static void intel_enable_shared_dpll(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); - - if (WARN_ON(pll == NULL)) - return; - - if (WARN_ON(pll->config.crtc_mask == 0)) - return; - - DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n", - pll->name, pll->active, pll->on, - crtc->base.base.id); - - if (pll->active++) { - WARN_ON(!pll->on); - assert_shared_dpll_enabled(dev_priv, pll); - return; - } - WARN_ON(pll->on); - - intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); - - DRM_DEBUG_KMS("enabling %s\n", pll->name); - pll->enable(dev_priv, pll); - pll->on = true; -} - -static void intel_disable_shared_dpll(struct intel_crtc *crtc) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); - - /* PCH only available on ILK+ */ - if (INTEL_INFO(dev)->gen < 5) - return; - - if (pll == NULL) - return; - - if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base))))) - return; - - DRM_DEBUG_KMS("disable %s (active %d, on? 
%d) for crtc %d\n", - pll->name, pll->active, pll->on, - crtc->base.base.id); - - if (WARN_ON(pll->active == 0)) { - assert_shared_dpll_disabled(dev_priv, pll); - return; - } - - assert_shared_dpll_enabled(dev_priv, pll); - WARN_ON(!pll->on); - if (--pll->active) - return; - - DRM_DEBUG_KMS("disabling %s\n", pll->name); - pll->disable(dev_priv, pll); - pll->on = false; - - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); -} - static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -1959,12 +1806,8 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, i915_reg_t reg; uint32_t val, pipeconf_val; - /* PCH only available on ILK+ */ - BUG_ON(!HAS_PCH_SPLIT(dev)); - /* Make sure PCH DPLL is enabled */ - assert_shared_dpll_enabled(dev_priv, - intel_crtc_to_shared_dpll(intel_crtc)); + assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, pipe); @@ -1983,7 +1826,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val = I915_READ(reg); pipeconf_val = I915_READ(PIPECONF(pipe)); - if (HAS_PCH_IBX(dev_priv->dev)) { + if (HAS_PCH_IBX(dev_priv)) { /* * Make the BPC in transcoder be consistent with * that in pipeconf reg. For HDMI we must use 8bpc @@ -1998,7 +1841,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, val &= ~TRANS_INTERLACE_MASK; if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) - if (HAS_PCH_IBX(dev_priv->dev) && + if (HAS_PCH_IBX(dev_priv) && intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; else @@ -2016,9 +1859,6 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, { u32 val, pipeconf_val; - /* PCH only available on ILK+ */ - BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev)); - /* FDI must be feeding us bits for PCH ports */ assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); @@ -2113,7 +1953,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) assert_cursor_disabled(dev_priv, pipe); assert_sprites_disabled(dev_priv, pipe); - if (HAS_PCH_LPT(dev_priv->dev)) + if (HAS_PCH_LPT(dev_priv)) pch_transcoder = TRANSCODER_A; else pch_transcoder = pipe; @@ -2123,7 +1963,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc) * a plane. On ILK+ the pipe PLLs are integrated, so we don't * need the check. */ - if (HAS_GMCH_DISPLAY(dev_priv->dev)) + if (HAS_GMCH_DISPLAY(dev_priv)) if (crtc->config->has_dsi_encoder) assert_dsi_pll_enabled(dev_priv); else @@ -2225,8 +2065,8 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) return IS_GEN2(dev_priv) ? 
2048 : 4096; } -static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv, - uint64_t fb_modifier, unsigned int cpp) +static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv, + uint64_t fb_modifier, unsigned int cpp) { switch (fb_modifier) { case DRM_FORMAT_MOD_NONE: @@ -2269,7 +2109,21 @@ unsigned int intel_tile_height(const struct drm_i915_private *dev_priv, return 1; else return intel_tile_size(dev_priv) / - intel_tile_width(dev_priv, fb_modifier, cpp); + intel_tile_width_bytes(dev_priv, fb_modifier, cpp); +} + +/* Return the tile dimensions in pixel units */ +static void intel_tile_dims(const struct drm_i915_private *dev_priv, + unsigned int *tile_width, + unsigned int *tile_height, + uint64_t fb_modifier, + unsigned int cpp) +{ + unsigned int tile_width_bytes = + intel_tile_width_bytes(dev_priv, fb_modifier, cpp); + + *tile_width = tile_width_bytes / cpp; + *tile_height = intel_tile_size(dev_priv) / tile_width_bytes; } unsigned int @@ -2282,48 +2136,54 @@ intel_fb_align_height(struct drm_device *dev, unsigned int height, return ALIGN(height, tile_height); } -static void -intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, - const struct drm_plane_state *plane_state) +unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) { - struct drm_i915_private *dev_priv = to_i915(fb->dev); - struct intel_rotation_info *info = &view->params.rotated; - unsigned int tile_size, tile_width, tile_height, cpp; + unsigned int size = 0; + int i; - *view = i915_ggtt_view_normal; + for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) + size += rot_info->plane[i].width * rot_info->plane[i].height; - if (!plane_state) - return; - - if (!intel_rotation_90_or_270(plane_state->rotation)) - return; + return size; +} - *view = i915_ggtt_view_rotated; +static void +intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, + const struct drm_framebuffer *fb, + unsigned int rotation) +{ + if (intel_rotation_90_or_270(rotation)) { + *view = i915_ggtt_view_rotated; + view->params.rotated = to_intel_framebuffer(fb)->rot_info; + } else { + *view = i915_ggtt_view_normal; + } +} - info->height = fb->height; - info->pixel_format = fb->pixel_format; - info->pitch = fb->pitches[0]; - info->uv_offset = fb->offsets[1]; - info->fb_modifier = fb->modifier[0]; +static void +intel_fill_fb_info(struct drm_i915_private *dev_priv, + struct drm_framebuffer *fb) +{ + struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info; + unsigned int tile_size, tile_width, tile_height, cpp; tile_size = intel_tile_size(dev_priv); cpp = drm_format_plane_cpp(fb->pixel_format, 0); - tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp); - tile_height = tile_size / tile_width; + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier[0], cpp); - info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width); - info->height_pages = DIV_ROUND_UP(fb->height, tile_height); - info->size = info->width_pages * info->height_pages * tile_size; + info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp); + info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height); if (info->pixel_format == DRM_FORMAT_NV12) { cpp = drm_format_plane_cpp(fb->pixel_format, 1); - tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp); - tile_height = tile_size / tile_width; + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb->modifier[1], cpp); - info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width); - 
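/*
 * Aside (illustrative, not part of the patch): intel_tile_dims() above
 * converts the byte-based tile geometry into pixel units. Assuming an
 * X-tiled surface whose tile row is 512 bytes wide (a hardware detail
 * assumed here for the example, not stated in this hunk) and cpp = 4:
 *
 *	tile_width  = 512 / 4    = 128 pixels
 *	tile_height = 4096 / 512 = 8 rows
 */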
info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height); - info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size; + info->uv_offset = fb->offsets[1]; + info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp); + info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height); } } @@ -2360,9 +2220,8 @@ static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv } int -intel_pin_and_fence_fb_obj(struct drm_plane *plane, - struct drm_framebuffer *fb, - const struct drm_plane_state *plane_state) +intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, + unsigned int rotation) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -2375,7 +2234,7 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, alignment = intel_surf_alignment(dev_priv, fb->modifier[0]); - intel_fill_fb_ggtt_view(&view, fb, plane_state); + intel_fill_fb_ggtt_view(&view, fb, rotation); /* Note that the w/a also requires 64 PTE of padding following the * bo. We currently fill all unused PTE with the shadow page and so @@ -2433,15 +2292,14 @@ err_pm: return ret; } -static void intel_unpin_fb_obj(struct drm_framebuffer *fb, - const struct drm_plane_state *plane_state) +static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) { struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct i915_ggtt_view view; WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); - intel_fill_fb_ggtt_view(&view, fb, plane_state); + intel_fill_fb_ggtt_view(&view, fb, rotation); if (view.type == I915_GGTT_VIEW_NORMAL) i915_gem_object_unpin_fence(obj); @@ -2449,38 +2307,93 @@ static void intel_unpin_fb_obj(struct drm_framebuffer *fb, i915_gem_object_unpin_from_display_plane(obj, &view); } -/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel - * is assumed to be a power-of-two. */ -u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - uint64_t fb_modifier, - unsigned int cpp, - unsigned int pitch) +/* + * Adjust the tile offset by moving the difference into + * the x/y offsets. + * + * Input tile dimensions and pitch must already be + * rotated to match x and y, and in pixel units. + */ +static u32 intel_adjust_tile_offset(int *x, int *y, + unsigned int tile_width, + unsigned int tile_height, + unsigned int tile_size, + unsigned int pitch_tiles, + u32 old_offset, + u32 new_offset) +{ + unsigned int tiles; + + WARN_ON(old_offset & (tile_size - 1)); + WARN_ON(new_offset & (tile_size - 1)); + WARN_ON(new_offset > old_offset); + + tiles = (old_offset - new_offset) / tile_size; + + *y += tiles / pitch_tiles * tile_height; + *x += tiles % pitch_tiles * tile_width; + + return new_offset; +} + +/* + * Computes the linear offset to the base tile and adjusts + * x, y. bytes per pixel is assumed to be a power-of-two. + * + * In the 90/270 rotated case, x and y are assumed + * to be already rotated to match the rotated GTT view, and + * pitch is the tile_height aligned framebuffer height. 
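+ *
+ * Worked example (illustrative numbers, not from this patch): for an
+ * X-tiled surface with 128x8 pixel tiles, tile_size = 4096, cpp = 4 and
+ * pitch = 4096 bytes (so pitch_tiles = 8), the pixel at x = 200, y = 20
+ * sits in tile row 20 / 8 = 2 and tile column 200 / 128 = 1, giving a
+ * base tile offset of (2 * 8 + 1) * 4096 = 69632 with x = 72, y = 4
+ * left over.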
+ */ +u32 intel_compute_tile_offset(int *x, int *y, + const struct drm_framebuffer *fb, int plane, + unsigned int pitch, + unsigned int rotation) { + const struct drm_i915_private *dev_priv = to_i915(fb->dev); + uint64_t fb_modifier = fb->modifier[plane]; + unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + u32 offset, offset_aligned, alignment; + + alignment = intel_surf_alignment(dev_priv, fb_modifier); + if (alignment) + alignment--; + if (fb_modifier != DRM_FORMAT_MOD_NONE) { unsigned int tile_size, tile_width, tile_height; - unsigned int tile_rows, tiles; + unsigned int tile_rows, tiles, pitch_tiles; tile_size = intel_tile_size(dev_priv); - tile_width = intel_tile_width(dev_priv, fb_modifier, cpp); - tile_height = tile_size / tile_width; + intel_tile_dims(dev_priv, &tile_width, &tile_height, + fb_modifier, cpp); + + if (intel_rotation_90_or_270(rotation)) { + pitch_tiles = pitch / tile_height; + swap(tile_width, tile_height); + } else { + pitch_tiles = pitch / (tile_width * cpp); + } tile_rows = *y / tile_height; *y %= tile_height; - tiles = *x / (tile_width/cpp); - *x %= tile_width/cpp; + tiles = *x / tile_width; + *x %= tile_width; - return tile_rows * pitch * tile_height + tiles * tile_size; - } else { - unsigned int alignment = intel_linear_alignment(dev_priv) - 1; - unsigned int offset; + offset = (tile_rows * pitch_tiles + tiles) * tile_size; + offset_aligned = offset & ~alignment; + intel_adjust_tile_offset(x, y, tile_width, tile_height, + tile_size, pitch_tiles, + offset, offset_aligned); + } else { offset = *y * pitch + *x * cpp; + offset_aligned = offset & ~alignment; + *y = (offset & alignment) / pitch; *x = ((offset & alignment) - *y * pitch) / cpp; - return offset & ~alignment; } + + return offset_aligned; } static int i9xx_format_to_fourcc(int format) @@ -2536,6 +2449,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj = NULL; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; struct drm_framebuffer *fb = &plane_config->fb->base; @@ -2551,7 +2465,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, /* If the FB is too big, just don't use it since fbdev is not very * important and we should probably use that space with FBC or other * features. 
*/ - if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size) + if (size_aligned * 2 > ggtt->stolen_usable_size) return false; mutex_lock(&dev->struct_mutex); @@ -2667,7 +2581,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, */ to_intel_plane_state(plane_state)->visible = false; crtc_state->plane_mask &= ~(1 << drm_plane_index(primary)); - intel_pre_disable_primary(&intel_crtc->base); + intel_pre_disable_primary_noatomic(&intel_crtc->base); intel_plane->disable_plane(primary, &intel_crtc->base); return; @@ -2716,6 +2630,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); + unsigned int rotation = plane_state->base.rotation; int cpp = drm_format_plane_cpp(fb->pixel_format, 0); int x = plane_state->src.x1 >> 16; int y = plane_state->src.y1 >> 16; @@ -2780,15 +2695,14 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, if (INTEL_INFO(dev)->gen >= 4) { intel_crtc->dspaddr_offset = - intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], cpp, - fb->pitches[0]); + intel_compute_tile_offset(&x, &y, fb, 0, + fb->pitches[0], rotation); linear_offset -= intel_crtc->dspaddr_offset; } else { intel_crtc->dspaddr_offset = linear_offset; } - if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + if (rotation == BIT(DRM_ROTATE_180)) { dspcntr |= DISPPLANE_ROTATE_180; x += (crtc_state->pipe_src_w - 1); @@ -2846,6 +2760,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, u32 linear_offset; u32 dspcntr; i915_reg_t reg = DSPCNTR(plane); + unsigned int rotation = plane_state->base.rotation; int cpp = drm_format_plane_cpp(fb->pixel_format, 0); int x = plane_state->src.x1 >> 16; int y = plane_state->src.y1 >> 16; @@ -2887,11 +2802,10 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, linear_offset = y * fb->pitches[0] + x * cpp; intel_crtc->dspaddr_offset = - intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], cpp, - fb->pitches[0]); + intel_compute_tile_offset(&x, &y, fb, 0, + fb->pitches[0], rotation); linear_offset -= intel_crtc->dspaddr_offset; - if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + if (rotation == BIT(DRM_ROTATE_180)) { dspcntr |= DISPPLANE_ROTATE_180; if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) { @@ -2931,7 +2845,7 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, } else { int cpp = drm_format_plane_cpp(pixel_format, 0); - return intel_tile_width(dev_priv, fb_modifier, cpp); + return intel_tile_width_bytes(dev_priv, fb_modifier, cpp); } } @@ -2944,7 +2858,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane, u64 offset; intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb, - intel_plane->base.state); + intel_plane->base.state->rotation); vma = i915_gem_obj_to_ggtt_view(obj, &view); if (WARN(!vma, "ggtt vma for display object not found! 
(view=%u)\n", @@ -3314,9 +3228,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, pipe_config->pipe_src_w, pipe_config->pipe_src_h); - if (HAS_DDI(dev)) - intel_set_pipe_csc(&crtc->base); - /* * Update pipe size and adjust fitter if needed: the reason for this is * that in compute_mode_changes we check the native mode (not the pfit @@ -3955,37 +3866,35 @@ static void lpt_disable_iclkip(struct drm_i915_private *dev_priv) /* Program iCLKIP clock to the desired frequency */ static void lpt_program_iclkip(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(crtc->dev); int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock; u32 divsel, phaseinc, auxdiv, phasedir = 0; u32 temp; lpt_disable_iclkip(dev_priv); - /* 20MHz is a corner case which is out of range for the 7-bit divisor */ - if (clock == 20000) { - auxdiv = 1; - divsel = 0x41; - phaseinc = 0x20; - } else { - /* The iCLK virtual clock root frequency is in MHz, - * but the adjusted_mode->crtc_clock in in KHz. To get the - * divisors, it is necessary to divide one by another, so we - * convert the virtual clock precision to KHz here for higher - * precision. - */ + /* The iCLK virtual clock root frequency is in MHz, + * but the adjusted_mode->crtc_clock is in KHz. To get the + * divisors, it is necessary to divide one by another, so we + * convert the virtual clock precision to KHz here for higher + * precision. + */ + for (auxdiv = 0; auxdiv < 2; auxdiv++) { u32 iclk_virtual_root_freq = 172800 * 1000; u32 iclk_pi_range = 64; - u32 desired_divisor, msb_divisor_value, pi_value; + u32 desired_divisor; - desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock); - msb_divisor_value = desired_divisor / iclk_pi_range; - pi_value = desired_divisor % iclk_pi_range; + desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + clock << auxdiv); + divsel = (desired_divisor / iclk_pi_range) - 2; + phaseinc = desired_divisor % iclk_pi_range; - auxdiv = 0; - divsel = msb_divisor_value - 2; - phaseinc = pi_value; + /* + * Near 20MHz is a corner case which is + * out of range for the 7-bit divisor + */ + if (divsel <= 0x7f) + break; } /* This should not happen with any sane values */ @@ -4032,6 +3941,43 @@ static void lpt_program_iclkip(struct drm_crtc *crtc) I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); } +int lpt_get_iclkip(struct drm_i915_private *dev_priv) +{ + u32 divsel, phaseinc, auxdiv; + u32 iclk_virtual_root_freq = 172800 * 1000; + u32 iclk_pi_range = 64; + u32 desired_divisor; + u32 temp; + + if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) + return 0; + + mutex_lock(&dev_priv->sb_lock); + + temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); + if (temp & SBI_SSCCTL_DISABLE) { + mutex_unlock(&dev_priv->sb_lock); + return 0; + } + + temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); + divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> + SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; + phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> + SBI_SSCDIVINTPHASE_INCVAL_SHIFT; + + temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); + auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> + SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; + + mutex_unlock(&dev_priv->sb_lock); + + desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; + + return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, + desired_divisor << auxdiv); +} + static void 
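/*
 * Aside (derived from values already present in this hunk): the auxdiv
 * loop in lpt_program_iclkip() above reproduces the removed 20 MHz
 * special case. For clock = 20000 kHz, auxdiv = 0 yields divsel = 133
 * (> 0x7f), so the loop retries with auxdiv = 1:
 *
 *	desired_divisor = DIV_ROUND_CLOSEST(172800000, 20000 << 1) = 4320
 *	divsel   = 4320 / 64 - 2 = 65 (0x41, integer division)
 *	phaseinc = 4320 % 64     = 32 (0x20)
 *
 * matching the old hard-coded auxdiv = 1, divsel = 0x41, phaseinc = 0x20.
 */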
ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, enum pipe pch_transcoder) { @@ -4142,12 +4088,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) I915_WRITE(FDI_RX_TUSIZE1(pipe), I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); - /* - * Sometimes spurious CPU pipe underruns happen during FDI - * training, at least with VGA+HDMI cloning. Suppress them. - */ - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - /* For PCH output, training FDI link */ dev_priv->display.fdi_link_train(crtc); @@ -4159,7 +4099,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) temp = I915_READ(PCH_DPLL_SEL); temp |= TRANS_DPLL_ENABLE(pipe); sel = TRANS_DPLLB_SEL(pipe); - if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B) + if (intel_crtc->config->shared_dpll == + intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) temp |= sel; else temp &= ~sel; @@ -4181,8 +4122,6 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) intel_fdi_normal_train(crtc); - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - /* For PCH DP, enable TRANS_DP_CTL */ if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { const struct drm_display_mode *adjusted_mode = @@ -4238,113 +4177,6 @@ static void lpt_pch_enable(struct drm_crtc *crtc) lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; - struct intel_shared_dpll *pll; - struct intel_shared_dpll_config *shared_dpll; - enum intel_dpll_id i; - int max = dev_priv->num_shared_dpll; - - shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); - - if (HAS_PCH_IBX(dev_priv->dev)) { - /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ - i = (enum intel_dpll_id) crtc->pipe; - pll = &dev_priv->shared_dplls[i]; - - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); - - WARN_ON(shared_dpll[i].crtc_mask); - - goto found; - } - - if (IS_BROXTON(dev_priv->dev)) { - /* PLL is attached to port in bxt */ - struct intel_encoder *encoder; - struct intel_digital_port *intel_dig_port; - - encoder = intel_ddi_get_crtc_new_encoder(crtc_state); - if (WARN_ON(!encoder)) - return NULL; - - intel_dig_port = enc_to_dig_port(&encoder->base); - /* 1:1 mapping between ports and PLLs */ - i = (enum intel_dpll_id)intel_dig_port->port; - pll = &dev_priv->shared_dplls[i]; - DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", - crtc->base.base.id, pll->name); - WARN_ON(shared_dpll[i].crtc_mask); - - goto found; - } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) - /* Do not consider SPLL */ - max = 2; - - for (i = 0; i < max; i++) { - pll = &dev_priv->shared_dplls[i]; - - /* Only want to check enabled timings first */ - if (shared_dpll[i].crtc_mask == 0) - continue; - - if (memcmp(&crtc_state->dpll_hw_state, - &shared_dpll[i].hw_state, - sizeof(crtc_state->dpll_hw_state)) == 0) { - DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", - crtc->base.base.id, pll->name, - shared_dpll[i].crtc_mask, - pll->active); - goto found; - } - } - - /* Ok no matching timings, maybe there's a free one? 
*/ - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - pll = &dev_priv->shared_dplls[i]; - if (shared_dpll[i].crtc_mask == 0) { - DRM_DEBUG_KMS("CRTC:%d allocated %s\n", - crtc->base.base.id, pll->name); - goto found; - } - } - - return NULL; - -found: - if (shared_dpll[i].crtc_mask == 0) - shared_dpll[i].hw_state = - crtc_state->dpll_hw_state; - - crtc_state->shared_dpll = i; - DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, - pipe_name(crtc->pipe)); - - shared_dpll[i].crtc_mask |= 1 << crtc->pipe; - - return pll; -} - -static void intel_shared_dpll_commit(struct drm_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->dev); - struct intel_shared_dpll_config *shared_dpll; - struct intel_shared_dpll *pll; - enum intel_dpll_id i; - - if (!to_intel_atomic_state(state)->dpll_set) - return; - - shared_dpll = to_intel_atomic_state(state)->shared_dpll; - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - pll = &dev_priv->shared_dplls[i]; - pll->config = shared_dpll[i]; - } -} - static void cpt_verify_modeset(struct drm_device *dev, int pipe) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -4576,8 +4408,11 @@ void hsw_enable_ips(struct intel_crtc *crtc) if (!crtc->config->ips_enabled) return; - /* We can only enable IPS after we enable a plane and wait for a vblank */ - intel_wait_for_vblank(dev, crtc->pipe); + /* + * We can only enable IPS after we enable a plane and wait for a vblank. + * This function is called from post_plane_update, which is run after + * a vblank wait. + */ assert_plane_enabled(dev_priv, crtc->plane); if (IS_BROADWELL(dev)) { @@ -4626,55 +4461,6 @@ void hsw_disable_ips(struct intel_crtc *crtc) intel_wait_for_vblank(dev, crtc->pipe); } -/** Loads the palette/gamma unit for the CRTC with the prepared values */ -static void intel_crtc_load_lut(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; - int i; - bool reenable_ips = false; - - /* The clocks have to be on to load the palette. */ - if (!crtc->state->active) - return; - - if (HAS_GMCH_DISPLAY(dev_priv->dev)) { - if (intel_crtc->config->has_dsi_encoder) - assert_dsi_pll_enabled(dev_priv); - else - assert_pll_enabled(dev_priv, pipe); - } - - /* Workaround : Do not read or write the pipe palette/gamma data while - * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. - */ - if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled && - ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == - GAMMA_MODE_MODE_SPLIT)) { - hsw_disable_ips(intel_crtc); - reenable_ips = true; - } - - for (i = 0; i < 256; i++) { - i915_reg_t palreg; - - if (HAS_GMCH_DISPLAY(dev)) - palreg = PALETTE(pipe, i); - else - palreg = LGC_PALETTE(pipe, i); - - I915_WRITE(palreg, - (intel_crtc->lut_r[i] << 16) | - (intel_crtc->lut_g[i] << 8) | - intel_crtc->lut_b[i]); - } - - if (reenable_ips) - hsw_enable_ips(intel_crtc); -} - static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) { if (intel_crtc->overlay) { @@ -4734,16 +4520,7 @@ intel_post_enable_primary(struct drm_crtc *crtc) intel_check_pch_fifo_underruns(dev_priv); } -/** - * intel_pre_disable_primary - Perform operations before disabling primary plane - * @crtc: the CRTC whose primary plane is to be disabled - * - * Performs potentially sleeping operations that must be done before the - * primary plane is disabled, such as updating FBC and IPS. 
Note that this may - * be called due to an explicit primary plane update, or due to an implicit - * disable that is caused when a sprite plane completely hides the primary - * plane. - */ +/* FIXME move all this to pre_plane_update() with proper state tracking */ static void intel_pre_disable_primary(struct drm_crtc *crtc) { @@ -4762,6 +4539,26 @@ intel_pre_disable_primary(struct drm_crtc *crtc) intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); /* + * FIXME IPS should be fine as long as one plane is + * enabled, but in practice it seems to have problems + * when going from primary only to sprite only and vice + * versa. + */ + hsw_disable_ips(intel_crtc); +} + +/* FIXME get rid of this and use pre_plane_update */ +static void +intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + int pipe = intel_crtc->pipe; + + intel_pre_disable_primary(crtc); + + /* * Vblank time updates from the shadow to live plane control register * are blocked if the memory self-refresh mode is active at that * moment. So to make sure the plane gets truly disabled, disable @@ -4775,37 +4572,39 @@ intel_pre_disable_primary(struct drm_crtc *crtc) dev_priv->wm.vlv.cxsr = false; intel_wait_for_vblank(dev, pipe); } - - /* - * FIXME IPS should be fine as long as one plane is - * enabled, but in practice it seems to have problems - * when going from primary only to sprite only and vice - * versa. - */ - hsw_disable_ips(intel_crtc); } -static void intel_post_plane_update(struct intel_crtc *crtc) +static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) { - struct intel_crtc_atomic_commit *atomic = &crtc->atomic; + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct drm_atomic_state *old_state = old_crtc_state->base.state; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); struct drm_device *dev = crtc->base.dev; + struct drm_plane *primary = crtc->base.primary; + struct drm_plane_state *old_pri_state = + drm_atomic_get_existing_plane_state(old_state, primary); - intel_frontbuffer_flip(dev, atomic->fb_bits); + intel_frontbuffer_flip(dev, pipe_config->fb_bits); crtc->wm.cxsr_allowed = true; - if (pipe_config->wm_changed && pipe_config->base.active) + if (pipe_config->update_wm_post && pipe_config->base.active) intel_update_watermarks(&crtc->base); - if (atomic->update_fbc) - intel_fbc_post_update(crtc); + if (old_pri_state) { + struct intel_plane_state *primary_state = + to_intel_plane_state(primary->state); + struct intel_plane_state *old_primary_state = + to_intel_plane_state(old_pri_state); - if (atomic->post_enable_primary) - intel_post_enable_primary(&crtc->base); + intel_fbc_post_update(crtc); - memset(atomic, 0, sizeof(*atomic)); + if (primary_state->visible && + (needs_modeset(&pipe_config->base) || + !old_primary_state->visible)) + intel_post_enable_primary(&crtc->base); + } } static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) @@ -4813,7 +4612,6 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc_atomic_commit *atomic = &crtc->atomic; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->base.state); struct drm_atomic_state *old_state = 
old_crtc_state->base.state; @@ -4822,15 +4620,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) drm_atomic_get_existing_plane_state(old_state, primary); bool modeset = needs_modeset(&pipe_config->base); - if (atomic->update_fbc) - intel_fbc_pre_update(crtc); - if (old_pri_state) { struct intel_plane_state *primary_state = to_intel_plane_state(primary->state); struct intel_plane_state *old_primary_state = to_intel_plane_state(old_pri_state); + intel_fbc_pre_update(crtc); + if (old_primary_state->visible && (modeset || !primary_state->visible)) intel_pre_disable_primary(&crtc->base); @@ -4839,11 +4636,58 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state) if (pipe_config->disable_cxsr) { crtc->wm.cxsr_allowed = false; - if (old_crtc_state->base.active) + /* + * Vblank time updates from the shadow to live plane control register + * are blocked if the memory self-refresh mode is active at that + * moment. So to make sure the plane gets truly disabled, disable + * first the self-refresh mode. The self-refresh enable bit in turn + * will be checked/applied by the HW only at the next frame start + * event which is after the vblank start event, so we need to have a + * wait-for-vblank between disabling the plane and the pipe. + */ + if (old_crtc_state->base.active) { intel_set_memory_cxsr(dev_priv, false); + dev_priv->wm.vlv.cxsr = false; + intel_wait_for_vblank(dev, crtc->pipe); + } } - if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed) + /* + * IVB workaround: must disable low power watermarks for at least + * one frame before enabling scaling. LP watermarks can be re-enabled + * when scaling is disabled. + * + * WaCxSRDisabledForSpriteScaling:ivb + */ + if (pipe_config->disable_lp_wm) { + ilk_disable_lp_wm(dev); + intel_wait_for_vblank(dev, crtc->pipe); + } + + /* + * If we're doing a modeset, we're done. No need to do any pre-vblank + * watermark programming here. + */ + if (needs_modeset(&pipe_config->base)) + return; + + /* + * For platforms that support atomic watermarks, program the + * 'intermediate' watermarks immediately. On pre-gen9 platforms, these + * will be the intermediate values that are safe for both pre- and + * post-vblank; when vblank happens, the 'active' values will be set + * to the final 'target' values and we'll do this again to get the + * optimal watermarks. For gen9+ platforms, the values we program here + * will be the final target values which will get automatically latched + * at vblank time; no further programming will be necessary. + * + * If a platform hasn't been transitioned to atomic watermarks yet, + * we'll continue to update watermarks the old way, if flags tell + * us to. + */ + if (dev_priv->display.initial_watermarks != NULL) + dev_priv->display.initial_watermarks(pipe_config); + else if (pipe_config->update_wm_pre) intel_update_watermarks(&crtc->base); } @@ -4874,10 +4718,24 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe; + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->state); if (WARN_ON(intel_crtc->active)) return; + /* + * Sometimes spurious CPU pipe underruns happen during FDI + * training, at least with VGA+HDMI cloning. Suppress them. + * + * On ILK we get occasional spurious CPU pipe underruns + * between eDP port A enable and vdd enable. Also PCH port + * enable seems to result in the occasional CPU pipe underrun. 
+ * + * Spurious PCH underruns also occur during PCH enabling. + */ + if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv)) + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); if (intel_crtc->config->has_pch_encoder) intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); @@ -4888,6 +4746,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); + intel_set_pipe_src_size(intel_crtc); if (intel_crtc->config->has_pch_encoder) { intel_cpu_transcoder_set_m_n(intel_crtc, @@ -4898,8 +4757,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) intel_crtc->active = true; - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->pre_enable) encoder->pre_enable(encoder); @@ -4920,9 +4777,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_crtc_load_lut(crtc); + intel_color_load_luts(&pipe_config->base); - intel_update_watermarks(crtc); + if (dev_priv->display.initial_watermarks != NULL) + dev_priv->display.initial_watermarks(intel_crtc->config); intel_enable_pipe(intel_crtc); if (intel_crtc->config->has_pch_encoder) @@ -4940,6 +4798,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) /* Must wait for vblank to avoid spurious PCH FIFO underruns */ if (intel_crtc->config->has_pch_encoder) intel_wait_for_vblank(dev, pipe); + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } @@ -4956,6 +4815,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; int pipe = intel_crtc->pipe, hsw_workaround_pipe; + enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state); @@ -4966,16 +4826,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, false); - if (intel_crtc_to_shared_dpll(intel_crtc)) + if (intel_crtc->config->shared_dpll) intel_enable_shared_dpll(intel_crtc); if (intel_crtc->config->has_dp_encoder) intel_dp_set_m_n(intel_crtc, M1_N1); - intel_set_pipe_timings(intel_crtc); + if (!intel_crtc->config->has_dsi_encoder) + intel_set_pipe_timings(intel_crtc); - if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) { - I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder), + intel_set_pipe_src_size(intel_crtc); + + if (cpu_transcoder != TRANSCODER_EDP && + !transcoder_is_dsi(cpu_transcoder)) { + I915_WRITE(PIPE_MULT(cpu_transcoder), intel_crtc->config->pixel_multiplier - 1); } @@ -4984,9 +4848,12 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) &intel_crtc->config->fdi_m_n, NULL); } - haswell_set_pipeconf(crtc); + if (!intel_crtc->config->has_dsi_encoder) + haswell_set_pipeconf(crtc); - intel_set_pipe_csc(crtc); + haswell_set_pipemisc(crtc); + + intel_color_set_csc(&pipe_config->base); intel_crtc->active = true; @@ -5015,14 +4882,20 @@ static void haswell_crtc_enable(struct drm_crtc *crtc) * On ILK+ LUT must be loaded before the pipe is running but with * clocks enabled */ - intel_crtc_load_lut(crtc); + intel_color_load_luts(&pipe_config->base); intel_ddi_set_pipe_settings(crtc); if (!intel_crtc->config->has_dsi_encoder) intel_ddi_enable_transcoder_func(crtc); - intel_update_watermarks(crtc); - 
intel_enable_pipe(intel_crtc); + if (dev_priv->display.initial_watermarks != NULL) + dev_priv->display.initial_watermarks(pipe_config); + else + intel_update_watermarks(crtc); + + /* XXX: Do the pipe assertions at the right place for BXT DSI. */ + if (!intel_crtc->config->has_dsi_encoder) + intel_enable_pipe(intel_crtc); if (intel_crtc->config->has_pch_encoder) lpt_pch_enable(crtc); @@ -5078,8 +4951,15 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) struct intel_encoder *encoder; int pipe = intel_crtc->pipe; - if (intel_crtc->config->has_pch_encoder) + /* + * Sometimes spurious CPU pipe underruns happen when the + * pipe is already disabled, but FDI RX/TX is still enabled. + * Happens at least with VGA+HDMI cloning. Suppress them. + */ + if (intel_crtc->config->has_pch_encoder) { + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); + } for_each_encoder_on_crtc(dev, crtc, encoder) encoder->disable(encoder); @@ -5087,22 +4967,12 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); - /* - * Sometimes spurious CPU pipe underruns happen when the - * pipe is already disabled, but FDI RX/TX is still enabled. - * Happens at least with VGA+HDMI cloning. Suppress them. - */ - if (intel_crtc->config->has_pch_encoder) - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - intel_disable_pipe(intel_crtc); ironlake_pfit_disable(intel_crtc, false); - if (intel_crtc->config->has_pch_encoder) { + if (intel_crtc->config->has_pch_encoder) ironlake_fdi_disable(crtc); - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - } for_each_encoder_on_crtc(dev, crtc, encoder) if (encoder->post_disable) @@ -5132,6 +5002,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) ironlake_fdi_pll_disable(intel_crtc); } + intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); } @@ -5155,7 +5026,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) drm_crtc_vblank_off(crtc); assert_vblank_disabled(crtc); - intel_disable_pipe(intel_crtc); + /* XXX: Do the pipe assertions at the right place for BXT DSI. 
*/ + if (!intel_crtc->config->has_dsi_encoder) + intel_disable_pipe(intel_crtc); if (intel_crtc->config->dp_encoder_is_mst) intel_ddi_set_vc_payload_alloc(crtc, false); @@ -5330,6 +5203,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc, mask |= BIT(intel_display_port_power_domain(intel_encoder)); } + if (crtc_state->shared_dpll) + mask |= BIT(POWER_DOMAIN_PLLS); + return mask; } @@ -5393,6 +5269,8 @@ static void intel_update_max_cdclk(struct drm_device *dev) dev_priv->max_cdclk_freq = 450000; else dev_priv->max_cdclk_freq = 337500; + } else if (IS_BROXTON(dev)) { + dev_priv->max_cdclk_freq = 624000; } else if (IS_BROADWELL(dev)) { /* * FIXME with extra cooling we can allow @@ -6165,6 +6043,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->state); int pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) @@ -6174,6 +6054,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); + intel_set_pipe_src_size(intel_crtc); if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -6208,8 +6089,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc) i9xx_pfit_enable(intel_crtc); - intel_crtc_load_lut(crtc); + intel_color_load_luts(&pipe_config->base); + intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); assert_vblank_disabled(crtc); @@ -6234,6 +6116,8 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_encoder *encoder; + struct intel_crtc_state *pipe_config = + to_intel_crtc_state(crtc->state); int pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) @@ -6245,6 +6129,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) intel_dp_set_m_n(intel_crtc, M1_N1); intel_set_pipe_timings(intel_crtc); + intel_set_pipe_src_size(intel_crtc); i9xx_set_pipeconf(intel_crtc); @@ -6261,7 +6146,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc) i9xx_pfit_enable(intel_crtc); - intel_crtc_load_lut(crtc); + intel_color_load_luts(&pipe_config->base); intel_update_watermarks(crtc); intel_enable_pipe(intel_crtc); @@ -6299,10 +6184,9 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) /* * On gen2 planes are double buffered but the pipe isn't, so we must * wait for planes to fully turn off before disabling the pipe. - * We also need to wait on all gmch platforms because of the - * self-refresh mode constraint explained above. 
*/ - intel_wait_for_vblank(dev, pipe); + if (IS_GEN2(dev)) + intel_wait_for_vblank(dev, pipe); for_each_encoder_on_crtc(dev, crtc, encoder) encoder->disable(encoder); @@ -6337,6 +6221,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc) static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) { + struct intel_encoder *encoder; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum intel_display_power_domain domain; @@ -6348,14 +6233,27 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) if (to_intel_plane_state(crtc->primary->state)->visible) { WARN_ON(intel_crtc->unpin_work); - intel_pre_disable_primary(crtc); + intel_pre_disable_primary_noatomic(crtc); intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); to_intel_plane_state(crtc->primary->state)->visible = false; } dev_priv->display.crtc_disable(crtc); + + DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", + crtc->base.id); + + WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); + crtc->state->active = false; intel_crtc->active = false; + crtc->enabled = false; + crtc->state->connector_mask = 0; + crtc->state->encoder_mask = 0; + + for_each_encoder_on_crtc(crtc->dev, crtc, encoder) + encoder->base.crtc = NULL; + intel_fbc_disable(intel_crtc); intel_update_watermarks(crtc); intel_disable_shared_dpll(intel_crtc); @@ -6398,7 +6296,7 @@ void intel_encoder_destroy(struct drm_encoder *encoder) /* Cross check the actual hw state with our own modeset state tracking (and it's * internal consistency). */ -static void intel_connector_check_state(struct intel_connector *connector) +static void intel_connector_verify_state(struct intel_connector *connector) { struct drm_crtc *crtc = connector->base.state->crtc; @@ -6568,7 +6466,7 @@ retry: * Hence the bw of each lane in terms of the mode signal * is: */ - link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; + link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); fdi_dotclock = adjusted_mode->crtc_clock; @@ -6580,8 +6478,7 @@ retry: intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, link_bw, &pipe_config->fdi_m_n); - ret = ironlake_check_fdi_lanes(intel_crtc->base.dev, - intel_crtc->pipe, pipe_config); + ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { pipe_config->pipe_bpp -= 2*3; DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", @@ -6605,7 +6502,7 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, return false; /* HSW can handle pixel rate up to cdclk? 
*/ - if (IS_HASWELL(dev_priv->dev)) + if (IS_HASWELL(dev_priv)) return true; /* @@ -7133,30 +7030,6 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, - int num_connectors) -{ - struct drm_device *dev = crtc_state->base.crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int refclk; - - WARN_ON(!crtc_state->base.state); - - if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) { - refclk = 100000; - } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv) && num_connectors < 2) { - refclk = dev_priv->vbt.lvds_ssc_freq; - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); - } else if (!IS_GEN2(dev)) { - refclk = 96000; - } else { - refclk = 48000; - } - - return refclk; -} - static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) { return (1 << dpll->n) << 16 | dpll->m2; @@ -7300,24 +7173,27 @@ void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) static void vlv_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - u32 dpll, dpll_md; + pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | + DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | + DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV; + if (crtc->pipe != PIPE_A) + pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; - /* - * Enable DPIO clock input. We should never disable the reference - * clock for pipe B, since VGA hotplug / manual detection depends - * on it. - */ - dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV | - DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV; - /* We should never disable this, set it here for state tracking */ - if (crtc->pipe == PIPE_B) - dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; - dpll |= DPLL_VCO_ENABLE; - pipe_config->dpll_hw_state.dpll = dpll; + pipe_config->dpll_hw_state.dpll_md = + (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; +} - dpll_md = (pipe_config->pixel_multiplier - 1) - << DPLL_MD_UDI_MULTIPLIER_SHIFT; - pipe_config->dpll_hw_state.dpll_md = dpll_md; +static void chv_compute_dpll(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) +{ + pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | + DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | + DPLL_VCO_ENABLE; + if (crtc->pipe != PIPE_A) + pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; + + pipe_config->dpll_hw_state.dpll_md = + (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; } static void vlv_prepare_pll(struct intel_crtc *crtc, @@ -7411,19 +7287,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, mutex_unlock(&dev_priv->sb_lock); } -static void chv_compute_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | - DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | - DPLL_VCO_ENABLE; - if (crtc->pipe != PIPE_A) - pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; - - pipe_config->dpll_hw_state.dpll_md = - (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; -} - static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config) { @@ -7586,8 +7449,7 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe) static void i9xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock, - int 
num_connectors) + intel_clock_t *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7646,7 +7508,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv) && num_connectors < 2) + intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -7663,8 +7525,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc, static void i8xx_compute_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, - intel_clock_t *reduced_clock, - int num_connectors) + intel_clock_t *reduced_clock) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -7690,7 +7551,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc, dpll |= DPLL_DVO_2X_MODE; if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && - intel_panel_use_ssc(dev_priv) && num_connectors < 2) + intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; @@ -7759,6 +7620,14 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) (pipe == PIPE_B || pipe == PIPE_C)) I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); +} + +static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) +{ + struct drm_device *dev = intel_crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum pipe pipe = intel_crtc->pipe; + /* pipesrc controls the size that is scaled from, which should * always be the user's requested size. */ @@ -7800,6 +7669,14 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, pipe_config->base.adjusted_mode.crtc_vtotal += 1; pipe_config->base.adjusted_mode.crtc_vblank_end += 1; } +} + +static void intel_get_pipe_src_size(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u32 tmp; tmp = I915_READ(PIPESRC(crtc->pipe)); pipe_config->pipe_src_h = (tmp & 0xffff) + 1; @@ -7897,69 +7774,198 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) POSTING_READ(PIPECONF(intel_crtc->pipe)); } -static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, +static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - int refclk, num_connectors = 0; - intel_clock_t clock; - bool ok; const intel_limit_t *limit; - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - int i; + int refclk = 48000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - if (crtc_state->has_dsi_encoder) - return 0; + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) { + refclk = dev_priv->vbt.lvds_ssc_freq; + DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + } - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc == &crtc->base) - num_connectors++; + limit = &intel_limits_i8xx_lvds; + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) { + limit = &intel_limits_i8xx_dvo; + } else { + limit = &intel_limits_i8xx_dac; + } + + if (!crtc_state->clock_set 
&& + !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; } - if (!crtc_state->clock_set) { - refclk = i9xx_get_refclk(crtc_state, num_connectors); + i8xx_compute_dpll(crtc, crtc_state, NULL); - /* - * Returns a set of divisors for the desired target clock with - * the given refclk, or FALSE. The returned values represent - * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + - * 2) / p1 / p2. - */ - limit = intel_limit(crtc_state, refclk); - ok = dev_priv->display.find_dpll(limit, crtc_state, - crtc_state->port_clock, - refclk, NULL, &clock); - if (!ok) { - DRM_ERROR("Couldn't find PLL settings for mode!\n"); - return -EINVAL; + return 0; +} + +static int g4x_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const intel_limit_t *limit; + int refclk = 96000; + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) { + refclk = dev_priv->vbt.lvds_ssc_freq; + DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); } - /* Compat-code for transition, will disappear. */ - crtc_state->dpll.n = clock.n; - crtc_state->dpll.m1 = clock.m1; - crtc_state->dpll.m2 = clock.m2; - crtc_state->dpll.p1 = clock.p1; - crtc_state->dpll.p2 = clock.p2; + if (intel_is_dual_link_lvds(dev)) + limit = &intel_limits_g4x_dual_channel_lvds; + else + limit = &intel_limits_g4x_single_channel_lvds; + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { + limit = &intel_limits_g4x_hdmi; + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { + limit = &intel_limits_g4x_sdvo; + } else { + /* The option is for other outputs */ + limit = &intel_limits_i9xx_sdvo; } - if (IS_GEN2(dev)) { - i8xx_compute_dpll(crtc, crtc_state, NULL, - num_connectors); - } else if (IS_CHERRYVIEW(dev)) { - chv_compute_dpll(crtc, crtc_state); - } else if (IS_VALLEYVIEW(dev)) { - vlv_compute_dpll(crtc, crtc_state); + if (!crtc_state->clock_set && + !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + + i9xx_compute_dpll(crtc, crtc_state, NULL); + + return 0; +} + +static int pnv_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const intel_limit_t *limit; + int refclk = 96000; + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) { + refclk = dev_priv->vbt.lvds_ssc_freq; + DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + } + + limit = &intel_limits_pineview_lvds; } else { - i9xx_compute_dpll(crtc, crtc_state, NULL, - num_connectors); + limit = &intel_limits_pineview_sdvo; } + if (!crtc_state->clock_set && + !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + + i9xx_compute_dpll(crtc, crtc_state, NULL); + + return 0; +} + 
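The per-platform compute_clock helpers above all share one job: pick a reference clock and a limits table, then search for divisors satisfying the clock equation quoted in the comment removed here, dot clock = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. Below is a minimal standalone C sketch of that equation, useful for sanity-checking a divisor set by hand; it is not driver code, and the divisor values are hypothetical, chosen only to illustrate the arithmetic:

#include <stdio.h>

struct dpll_divisors {
	int n, m1, m2, p1, p2;
};

/* dot clock = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2 */
static long i9xx_dpll_dot_clock(long refclk_khz, const struct dpll_divisors *d)
{
	/* effective feedback divider built from the two m dividers */
	long m = 5 * (d->m1 + 2) + (d->m2 + 2);

	return refclk_khz * m / (d->n + 2) / d->p1 / d->p2;
}

int main(void)
{
	/* hypothetical divisors against the 96 MHz non-SSC reference */
	struct dpll_divisors d = { .n = 1, .m1 = 10, .m2 = 8, .p1 = 2, .p2 = 10 };

	printf("dot clock: %ld kHz\n", i9xx_dpll_dot_clock(96000, &d));
	return 0;
}

With these values m = 70, giving 96000 * 70 / 3 / 2 / 10 = 112000 kHz, i.e. a 112 MHz dot clock; the find_best_dpll helpers evaluate the same expression while walking the platform limit tables.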
+static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + const intel_limit_t *limit; + int refclk = 96000; + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) { + refclk = dev_priv->vbt.lvds_ssc_freq; + DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); + } + + limit = &intel_limits_i9xx_lvds; + } else { + limit = &intel_limits_i9xx_sdvo; + } + + if (!crtc_state->clock_set && + !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + + i9xx_compute_dpll(crtc, crtc_state, NULL); + + return 0; +} + +static int chv_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + int refclk = 100000; + const intel_limit_t *limit = &intel_limits_chv; + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (crtc_state->has_dsi_encoder) + return 0; + + if (!crtc_state->clock_set && + !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + + chv_compute_dpll(crtc, crtc_state); + + return 0; +} + +static int vlv_crtc_compute_clock(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + int refclk = 100000; + const intel_limit_t *limit = &intel_limits_vlv; + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (crtc_state->has_dsi_encoder) + return 0; + + if (!crtc_state->clock_set && + !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { + DRM_ERROR("Couldn't find PLL settings for mode!\n"); + return -EINVAL; + } + + vlv_compute_dpll(crtc, crtc_state); + return 0; } @@ -8003,8 +8009,8 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, u32 mdiv; int refclk = 100000; - /* In case of MIPI DPLL will not even be used */ - if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)) + /* In case of DSI, DPLL will not be used */ + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) return; mutex_lock(&dev_priv->sb_lock); @@ -8100,6 +8106,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; int refclk = 100000; + /* In case of DSI, DPLL will not be used */ + if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) + return; + mutex_lock(&dev_priv->sb_lock); cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); @@ -8133,7 +8143,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; - pipe_config->shared_dpll = DPLL_ID_PRIVATE; + pipe_config->shared_dpll = NULL; ret = false; @@ -8165,11 +8175,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; intel_get_pipe_timings(crtc, pipe_config); + intel_get_pipe_src_size(crtc, pipe_config); i9xx_get_pfit_config(crtc, pipe_config); if (INTEL_INFO(dev)->gen >= 4) { - tmp = I915_READ(DPLL_MD(crtc->pipe)); + /* No way to read it out on pipes B and C */ + if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A) + tmp 
= dev_priv->chv_dpll_md[crtc->pipe]; + else + tmp = I915_READ(DPLL_MD(crtc->pipe)); pipe_config->pixel_multiplier = ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; @@ -8638,42 +8653,6 @@ void intel_init_pch_refclk(struct drm_device *dev) lpt_init_pch_refclk(dev); } -static int ironlake_get_refclk(struct intel_crtc_state *crtc_state) -{ - struct drm_device *dev = crtc_state->base.crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_connector *connector; - struct drm_connector_state *connector_state; - struct intel_encoder *encoder; - int num_connectors = 0, i; - bool is_lvds = false; - - for_each_connector_in_state(state, connector, connector_state, i) { - if (connector_state->crtc != crtc_state->base.crtc) - continue; - - encoder = to_intel_encoder(connector_state->best_encoder); - - switch (encoder->type) { - case INTEL_OUTPUT_LVDS: - is_lvds = true; - break; - default: - break; - } - num_connectors++; - } - - if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { - DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", - dev_priv->vbt.lvds_ssc_freq); - return dev_priv->vbt.lvds_ssc_freq; - } - - return 120000; -} - static void ironlake_set_pipeconf(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = crtc->dev->dev_private; @@ -8716,82 +8695,14 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc) POSTING_READ(PIPECONF(pipe)); } -/* - * Set up the pipe CSC unit. - * - * Currently only full range RGB to limited range RGB conversion - * is supported, but eventually this should handle various - * RGB<->YCbCr scenarios as well. - */ -static void intel_set_pipe_csc(struct drm_crtc *crtc) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - uint16_t coeff = 0x7800; /* 1.0 */ - - /* - * TODO: Check what kind of values actually come out of the pipe - * with these coeff/postoff values and adjust to get the best - * accuracy. Perhaps we even need to take the bpc value into - * consideration. - */ - - if (intel_crtc->config->limited_color_range) - coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ - - /* - * GY/GU and RY/RU should be the other way around according - * to BSpec, but reality doesn't agree. Just set them up in - * a way that results in the correct picture. 
- */ - I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); - I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); - - I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); - I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); - - I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); - I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); - - I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); - I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); - - if (INTEL_INFO(dev)->gen > 6) { - uint16_t postoff = 0; - - if (intel_crtc->config->limited_color_range) - postoff = (16 * (1 << 12) / 255) & 0x1fff; - - I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); - I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); - - I915_WRITE(PIPE_CSC_MODE(pipe), 0); - } else { - uint32_t mode = CSC_MODE_YUV_TO_RGB; - - if (intel_crtc->config->limited_color_range) - mode |= CSC_BLACK_SCREEN_OFFSET; - - I915_WRITE(PIPE_CSC_MODE(pipe), mode); - } -} - static void haswell_set_pipeconf(struct drm_crtc *crtc) { - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = crtc->dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - enum pipe pipe = intel_crtc->pipe; enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; - uint32_t val; - - val = 0; + u32 val = 0; - if (IS_HASWELL(dev) && intel_crtc->config->dither) + if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) @@ -8801,12 +8712,15 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) I915_WRITE(PIPECONF(cpu_transcoder), val); POSTING_READ(PIPECONF(cpu_transcoder)); +} - I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); - POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); +static void haswell_set_pipemisc(struct drm_crtc *crtc) +{ + struct drm_i915_private *dev_priv = crtc->dev->dev_private; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) { - val = 0; + if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { + u32 val = 0; switch (intel_crtc->config->pipe_bpp) { case 18: @@ -8829,39 +8743,10 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc) if (intel_crtc->config->dither) val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; - I915_WRITE(PIPEMISC(pipe), val); + I915_WRITE(PIPEMISC(intel_crtc->pipe), val); } } -static bool ironlake_compute_clocks(struct drm_crtc *crtc, - struct intel_crtc_state *crtc_state, - intel_clock_t *clock, - bool *has_reduced_clock, - intel_clock_t *reduced_clock) -{ - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int refclk; - const intel_limit_t *limit; - bool ret; - - refclk = ironlake_get_refclk(crtc_state); - - /* - * Returns a set of divisors for the desired target clock with the given - * refclk, or FALSE. The returned values represent the clock equation: - * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 
- */ - limit = intel_limit(crtc_state, refclk); - ret = dev_priv->display.find_dpll(limit, crtc_state, - crtc_state->port_clock, - refclk, NULL, clock); - if (!ret) - return false; - - return true; -} - int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) { /* @@ -8878,10 +8763,9 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) return i9xx_dpll_compute_m(dpll) < factor * dpll->n; } -static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state, - u32 *fp, - intel_clock_t *reduced_clock, u32 *fp2) +static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, + struct intel_crtc_state *crtc_state, + intel_clock_t *reduced_clock) { struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; @@ -8890,8 +8774,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct drm_connector *connector; struct drm_connector_state *connector_state; struct intel_encoder *encoder; - uint32_t dpll; - int factor, num_connectors = 0, i; + u32 dpll, fp, fp2; + int factor, i; bool is_lvds = false, is_sdvo = false; for_each_connector_in_state(state, connector, connector_state, i) { @@ -8911,8 +8795,6 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, default: break; } - - num_connectors++; } /* Enable autotuning of the PLL clock (if permissible) */ @@ -8925,11 +8807,19 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, } else if (crtc_state->sdvo_tv_clock) factor = 20; + fp = i9xx_dpll_compute_fp(&crtc_state->dpll); + if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) - *fp |= FP_CB_TUNE; + fp |= FP_CB_TUNE; - if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) - *fp2 |= FP_CB_TUNE; + if (reduced_clock) { + fp2 = i9xx_dpll_compute_fp(reduced_clock); + + if (reduced_clock->m < factor * reduced_clock->n) + fp2 |= FP_CB_TUNE; + } else { + fp2 = fp; + } dpll = 0; @@ -8966,76 +8856,80 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, break; } - if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) + if (is_lvds && intel_panel_use_ssc(dev_priv)) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else dpll |= PLL_REF_INPUT_DREFCLK; - return dpll | DPLL_VCO_ENABLE; + dpll |= DPLL_VCO_ENABLE; + + crtc_state->dpll_hw_state.dpll = dpll; + crtc_state->dpll_hw_state.fp0 = fp; + crtc_state->dpll_hw_state.fp1 = fp2; } static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state) { struct drm_device *dev = crtc->base.dev; - intel_clock_t clock, reduced_clock; - u32 dpll = 0, fp = 0, fp2 = 0; - bool ok, has_reduced_clock = false; - bool is_lvds = false; + struct drm_i915_private *dev_priv = dev->dev_private; + intel_clock_t reduced_clock; + bool has_reduced_clock = false; struct intel_shared_dpll *pll; + const intel_limit_t *limit; + int refclk = 120000; memset(&crtc_state->dpll_hw_state, 0, sizeof(crtc_state->dpll_hw_state)); - is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS); + crtc->lowfreq_avail = false; + + /* CPU eDP is the only output that doesn't need a PCH PLL of its own. 
*/ + if (!crtc_state->has_pch_encoder) + return 0; + + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { + if (intel_panel_use_ssc(dev_priv)) { + DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", + dev_priv->vbt.lvds_ssc_freq); + refclk = dev_priv->vbt.lvds_ssc_freq; + } - WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), - "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); + if (intel_is_dual_link_lvds(dev)) { + if (refclk == 100000) + limit = &intel_limits_ironlake_dual_lvds_100m; + else + limit = &intel_limits_ironlake_dual_lvds; + } else { + if (refclk == 100000) + limit = &intel_limits_ironlake_single_lvds_100m; + else + limit = &intel_limits_ironlake_single_lvds; + } + } else { + limit = &intel_limits_ironlake_dac; + } - ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock, - &has_reduced_clock, &reduced_clock); - if (!ok && !crtc_state->clock_set) { + if (!crtc_state->clock_set && + !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, + refclk, NULL, &crtc_state->dpll)) { DRM_ERROR("Couldn't find PLL settings for mode!\n"); return -EINVAL; } - /* Compat-code for transition, will disappear. */ - if (!crtc_state->clock_set) { - crtc_state->dpll.n = clock.n; - crtc_state->dpll.m1 = clock.m1; - crtc_state->dpll.m2 = clock.m2; - crtc_state->dpll.p1 = clock.p1; - crtc_state->dpll.p2 = clock.p2; - } - - /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ - if (crtc_state->has_pch_encoder) { - fp = i9xx_dpll_compute_fp(&crtc_state->dpll); - if (has_reduced_clock) - fp2 = i9xx_dpll_compute_fp(&reduced_clock); - dpll = ironlake_compute_dpll(crtc, crtc_state, - &fp, &reduced_clock, - has_reduced_clock ? &fp2 : NULL); + ironlake_compute_dpll(crtc, crtc_state, + has_reduced_clock ? &reduced_clock : NULL); - crtc_state->dpll_hw_state.dpll = dpll; - crtc_state->dpll_hw_state.fp0 = fp; - if (has_reduced_clock) - crtc_state->dpll_hw_state.fp1 = fp2; - else - crtc_state->dpll_hw_state.fp1 = fp; - - pll = intel_get_shared_dpll(crtc, crtc_state); - if (pll == NULL) { - DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", - pipe_name(crtc->pipe)); - return -EINVAL; - } + pll = intel_get_shared_dpll(crtc, crtc_state, NULL); + if (pll == NULL) { + DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", + pipe_name(crtc->pipe)); + return -EINVAL; } - if (is_lvds && has_reduced_clock) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + has_reduced_clock) crtc->lowfreq_avail = true; - else - crtc->lowfreq_avail = false; return 0; } @@ -9337,7 +9231,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, return false; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; - pipe_config->shared_dpll = DPLL_ID_PRIVATE; + pipe_config->shared_dpll = NULL; ret = false; tmp = I915_READ(PIPECONF(crtc->pipe)); @@ -9366,6 +9260,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { struct intel_shared_dpll *pll; + enum intel_dpll_id pll_id; pipe_config->has_pch_encoder = true; @@ -9375,21 +9270,22 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, ironlake_get_fdi_m_n_config(crtc, pipe_config); - if (HAS_PCH_IBX(dev_priv->dev)) { - pipe_config->shared_dpll = - (enum intel_dpll_id) crtc->pipe; + if (HAS_PCH_IBX(dev_priv)) { + pll_id = (enum intel_dpll_id) crtc->pipe; } else { tmp = I915_READ(PCH_DPLL_SEL); if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) - pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B; + pll_id = DPLL_ID_PCH_PLL_B; else - pipe_config->shared_dpll = 
DPLL_ID_PCH_PLL_A; + pll_id = DPLL_ID_PCH_PLL_A; } - pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; + pipe_config->shared_dpll = + intel_get_shared_dpll_by_id(dev_priv, pll_id); + pll = pipe_config->shared_dpll; - WARN_ON(!pll->get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state)); + WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); tmp = pipe_config->dpll_hw_state.dpll; pipe_config->pixel_multiplier = @@ -9402,6 +9298,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, } intel_get_pipe_timings(crtc, pipe_config); + intel_get_pipe_src_size(crtc, pipe_config); ironlake_get_pfit_config(crtc, pipe_config); @@ -9709,8 +9606,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) val |= LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) + if (wait_for_us(I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE, 1)) DRM_ERROR("Switching to FCLK failed\n"); val = I915_READ(LCPLL_CTL); @@ -9744,8 +9641,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) val &= ~LCPLL_CD_SOURCE_FCLK; I915_WRITE(LCPLL_CTL, val); - if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + if (wait_for_us((I915_READ(LCPLL_CTL) & + LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) DRM_ERROR("Switching back to LCPLL failed\n"); mutex_lock(&dev_priv->rps.hw_lock); @@ -9822,72 +9719,193 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + enum intel_dpll_id id; + switch (port) { case PORT_A: pipe_config->ddi_pll_sel = SKL_DPLL0; - pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; + id = DPLL_ID_SKL_DPLL0; break; case PORT_B: pipe_config->ddi_pll_sel = SKL_DPLL1; - pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2; + id = DPLL_ID_SKL_DPLL1; break; case PORT_C: pipe_config->ddi_pll_sel = SKL_DPLL2; - pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3; + id = DPLL_ID_SKL_DPLL2; break; default: DRM_ERROR("Incorrect port type\n"); + return; } + + pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { - u32 temp, dpll_ctl1; + enum intel_dpll_id id; + u32 temp; temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); switch (pipe_config->ddi_pll_sel) { case SKL_DPLL0: - /* - * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part - * of the shared DPLL framework and thus needs to be read out - * separately - */ - dpll_ctl1 = I915_READ(DPLL_CTRL1); - pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f; + id = DPLL_ID_SKL_DPLL0; break; case SKL_DPLL1: - pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1; + id = DPLL_ID_SKL_DPLL1; break; case SKL_DPLL2: - pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2; + id = DPLL_ID_SKL_DPLL2; break; case SKL_DPLL3: - pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3; + id = DPLL_ID_SKL_DPLL3; break; + default: + MISSING_CASE(pipe_config->ddi_pll_sel); + return; } + + pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); } static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, struct intel_crtc_state *pipe_config) { + enum intel_dpll_id id; + pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); switch (pipe_config->ddi_pll_sel) { case PORT_CLK_SEL_WRPLL1: - pipe_config->shared_dpll = DPLL_ID_WRPLL1; + id = 
DPLL_ID_WRPLL1; break; case PORT_CLK_SEL_WRPLL2: - pipe_config->shared_dpll = DPLL_ID_WRPLL2; + id = DPLL_ID_WRPLL2; break; case PORT_CLK_SEL_SPLL: - pipe_config->shared_dpll = DPLL_ID_SPLL; + id = DPLL_ID_SPLL; + break; + case PORT_CLK_SEL_LCPLL_810: + id = DPLL_ID_LCPLL_810; + break; + case PORT_CLK_SEL_LCPLL_1350: + id = DPLL_ID_LCPLL_1350; + break; + case PORT_CLK_SEL_LCPLL_2700: + id = DPLL_ID_LCPLL_2700; + break; + default: + MISSING_CASE(pipe_config->ddi_pll_sel); + /* fall through */ + case PORT_CLK_SEL_NONE: + return; + } + + pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); +} + +static bool hsw_get_transcoder_state(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config, + unsigned long *power_domain_mask) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; + u32 tmp; + + pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; + + /* + * XXX: Do intel_display_power_get_if_enabled before reading this (for + * consistency and less surprising code; it's in always on power). + */ + tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); + if (tmp & TRANS_DDI_FUNC_ENABLE) { + enum pipe trans_edp_pipe; + switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { + default: + WARN(1, "unknown pipe linked to edp transcoder\n"); + case TRANS_DDI_EDP_INPUT_A_ONOFF: + case TRANS_DDI_EDP_INPUT_A_ON: + trans_edp_pipe = PIPE_A; + break; + case TRANS_DDI_EDP_INPUT_B_ONOFF: + trans_edp_pipe = PIPE_B; + break; + case TRANS_DDI_EDP_INPUT_C_ONOFF: + trans_edp_pipe = PIPE_C; + break; + } + + if (trans_edp_pipe == crtc->pipe) + pipe_config->cpu_transcoder = TRANSCODER_EDP; + } + + power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + return false; + *power_domain_mask |= BIT(power_domain); + + tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); + + return tmp & PIPECONF_ENABLE; +} + +static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, + struct intel_crtc_state *pipe_config, + unsigned long *power_domain_mask) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum intel_display_power_domain power_domain; + enum port port; + enum transcoder cpu_transcoder; + u32 tmp; + + pipe_config->has_dsi_encoder = false; + + for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { + if (port == PORT_A) + cpu_transcoder = TRANSCODER_DSI_A; + else + cpu_transcoder = TRANSCODER_DSI_C; + + power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); + if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + continue; + *power_domain_mask |= BIT(power_domain); + + /* + * The PLL needs to be enabled with a valid divider + * configuration, otherwise accessing DSI registers will hang + * the machine. See BSpec North Display Engine + * registers/MIPI[BXT]. We can break out here early, since we + * need the same DSI PLL to be enabled for both DSI ports. 
+ */ + if (!intel_dsi_pll_is_enabled(dev_priv)) + break; + + /* XXX: this works for video mode only */ + tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); + if (!(tmp & DPI_ENABLE)) + continue; + + tmp = I915_READ(MIPI_CTRL(port)); + if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) + continue; + + pipe_config->cpu_transcoder = cpu_transcoder; + pipe_config->has_dsi_encoder = true; break; } + + return pipe_config->has_dsi_encoder; } static void haswell_get_ddi_port_state(struct intel_crtc *crtc, @@ -9910,11 +9928,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, else haswell_get_ddi_pll(dev_priv, port, pipe_config); - if (pipe_config->shared_dpll >= 0) { - pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; - - WARN_ON(!pll->get_hw_state(dev_priv, pll, - &pipe_config->dpll_hw_state)); + pll = pipe_config->shared_dpll; + if (pll) { + WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, + &pipe_config->dpll_hw_state)); } /* @@ -9941,53 +9958,37 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; enum intel_display_power_domain power_domain; unsigned long power_domain_mask; - uint32_t tmp; - bool ret; + bool active; power_domain = POWER_DOMAIN_PIPE(crtc->pipe); if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; power_domain_mask = BIT(power_domain); - ret = false; + pipe_config->shared_dpll = NULL; - pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; - pipe_config->shared_dpll = DPLL_ID_PRIVATE; + active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); - tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); - if (tmp & TRANS_DDI_FUNC_ENABLE) { - enum pipe trans_edp_pipe; - switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { - default: - WARN(1, "unknown pipe linked to edp transcoder\n"); - case TRANS_DDI_EDP_INPUT_A_ONOFF: - case TRANS_DDI_EDP_INPUT_A_ON: - trans_edp_pipe = PIPE_A; - break; - case TRANS_DDI_EDP_INPUT_B_ONOFF: - trans_edp_pipe = PIPE_B; - break; - case TRANS_DDI_EDP_INPUT_C_ONOFF: - trans_edp_pipe = PIPE_C; - break; - } - - if (trans_edp_pipe == crtc->pipe) - pipe_config->cpu_transcoder = TRANSCODER_EDP; + if (IS_BROXTON(dev_priv)) { + bxt_get_dsi_transcoder_state(crtc, pipe_config, + &power_domain_mask); + WARN_ON(active && pipe_config->has_dsi_encoder); + if (pipe_config->has_dsi_encoder) + active = true; } - power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); - if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) + if (!active) goto out; - power_domain_mask |= BIT(power_domain); - tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); - if (!(tmp & PIPECONF_ENABLE)) - goto out; + if (!pipe_config->has_dsi_encoder) { + haswell_get_ddi_port_state(crtc, pipe_config); + intel_get_pipe_timings(crtc, pipe_config); + } - haswell_get_ddi_port_state(crtc, pipe_config); + intel_get_pipe_src_size(crtc, pipe_config); - intel_get_pipe_timings(crtc, pipe_config); + pipe_config->gamma_mode = + I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; if (INTEL_INFO(dev)->gen >= 9) { skl_init_scalers(dev, crtc, pipe_config); @@ -10011,20 +10012,19 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && (I915_READ(IPS_CTL) & IPS_ENABLE); - if (pipe_config->cpu_transcoder != TRANSCODER_EDP) { + if (pipe_config->cpu_transcoder != TRANSCODER_EDP && + !transcoder_is_dsi(pipe_config->cpu_transcoder)) { pipe_config->pixel_multiplier = 
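Editor's note: bxt_get_dsi_transcoder_state() iterates only the ports named in a mask and can abandon the whole scan once the shared DSI PLL is known to be off, since both ports depend on the same PLL. A sketch of that loop shape with hypothetical names; the real iterator is for_each_port_masked().

        #include <stdbool.h>
        #include <stdio.h>

        enum port { PORT_A, PORT_B, PORT_C, NUM_PORTS };

        static bool pll_enabled = false;
        static bool port_active(enum port p) { return p == PORT_C; }

        int main(void)
        {
                unsigned mask = (1u << PORT_A) | (1u << PORT_C);

                for (enum port p = PORT_A; p < NUM_PORTS; p++) {
                        if (!(mask & (1u << p)))
                                continue;       /* port not in the mask */
                        if (!pll_enabled)
                                break;          /* both DSI ports need the same PLL */
                        if (port_active(p))
                                printf("DSI transcoder found on port %d\n", p);
                }
                return 0;
        }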
I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; } else { pipe_config->pixel_multiplier = 1; } - ret = true; - out: for_each_power_domain(power_domain, power_domain_mask) intel_display_power_put(dev_priv, power_domain); - return ret; + return active; } static void i845_update_cursor(struct drm_crtc *crtc, u32 base, @@ -10217,21 +10217,6 @@ static bool cursor_size_ok(struct drm_device *dev, return true; } -static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, uint32_t start, uint32_t size) -{ - int end = (start + size > 256) ? 256 : start + size, i; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - for (i = start; i < end; i++) { - intel_crtc->lut_r[i] = red[i] >> 8; - intel_crtc->lut_g[i] = green[i] >> 8; - intel_crtc->lut_b[i] = blue[i] >> 8; - } - - intel_crtc_load_lut(crtc); -} - /* VESA 640x480x72Hz mode to set on the pipe */ static struct drm_display_mode load_detect_mode = { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, @@ -10719,19 +10704,18 @@ int intel_dotclock_calculate(int link_freq, static void ironlake_pch_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); /* read out port_clock from the DPLL */ i9xx_crtc_clock_get(crtc, pipe_config); /* - * This value does not include pixel_multiplier. - * We will check that port_clock and adjusted_mode.crtc_clock - * agree once we know their relationship in the encoder's - * get_config() function. + * In case there is an active pipe without active ports, + * we may need some idea for the dotclock anyway. + * Calculate one based on the FDI configuration. */ pipe_config->base.adjusted_mode.crtc_clock = - intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, + intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), &pipe_config->fdi_m_n); } @@ -10850,7 +10834,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) struct drm_plane *primary = crtc->base.primary; mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(work->old_fb, primary->state); + intel_unpin_fb_obj(work->old_fb, primary->state->rotation); drm_gem_object_unreference(&work->pending_flip_obj->base); if (work->flip_queued_req) @@ -11004,7 +10988,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11020,13 +11004,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_DISPLAY_FLIP | + intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); - intel_ring_emit(ring, 0); /* aux display base address, unused */ + intel_ring_emit(engine, fb->pitches[0]); + intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, 0); /* aux display base address, unused */ intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; @@ -11039,7 +11023,7 @@ static int intel_gen3_queue_flip(struct drm_device 
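Editor's note: ironlake_pch_clock_get() now derives a fallback dotclock from the FDI configuration even when no port is active. The underlying arithmetic is a link clock scaled by the programmed M/N pair; a sketch with loosely mirrored field names and arbitrary example values.

        #include <stdint.h>
        #include <stdio.h>

        struct link_m_n { uint32_t link_m, link_n; };

        static uint32_t dotclock_calculate(uint64_t link_freq_khz,
                                           const struct link_m_n *m_n)
        {
                if (!m_n->link_n)
                        return 0;               /* guard against divide-by-zero */
                return (uint32_t)(link_freq_khz * m_n->link_m / m_n->link_n);
        }

        int main(void)
        {
                /* arbitrary example values, not taken from real hardware state */
                struct link_m_n m_n = { .link_m = 5000, .link_n = 65536 };
                printf("%u kHz\n", dotclock_calculate(2700000, &m_n));
                return 0;
        }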
*dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); u32 flip_mask; int ret; @@ -11052,13 +11036,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev, flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; else flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | + intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); + intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, fb->pitches[0]); + intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, MI_NOOP); intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; @@ -11071,7 +11055,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11085,10 +11069,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev, * Display Registers (which do not change across a page-flip) * so we need only reprogram the base address. */ - intel_ring_emit(ring, MI_DISPLAY_FLIP | + intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(ring, fb->pitches[0]); - intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset | + intel_ring_emit(engine, fb->pitches[0]); + intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | obj->tiling_mode); /* XXX Enabling the panel-fitter across page-flip is so far @@ -11097,7 +11081,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(ring, pf | pipesrc); + intel_ring_emit(engine, pf | pipesrc); intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; @@ -11110,7 +11094,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t pf, pipesrc; @@ -11120,10 +11104,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev, if (ret) return ret; - intel_ring_emit(ring, MI_DISPLAY_FLIP | + intel_ring_emit(engine, MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); - intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); - intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); + intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); /* Contrary to the suggestions in the documentation, * "Enable Panel Fitter" does not seem to be required when page @@ -11133,7 +11117,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev, */ pf = 0; pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; - intel_ring_emit(ring, pf | pipesrc); + intel_ring_emit(engine, pf | pipesrc); intel_mark_page_flip_active(intel_crtc->unpin_work); 
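Editor's note: the gen2-gen6 queue_flip hunks are a mechanical ring-to-engine rename, but the emission pattern they share is worth seeing in isolation: reserve space, then write a fixed sequence of dwords (opcode, pitch, base, pad). A userspace stand-in; the opcode value is a placeholder, and intel_ring_emit() writes into real hardware rings.

        #include <stdint.h>
        #include <stdio.h>

        #define OP_DISPLAY_FLIP 0x14u   /* placeholder, not the real MI encoding */
        #define OP_NOOP         0x00u

        struct ring { uint32_t buf[64]; unsigned tail; };

        static void ring_emit(struct ring *r, uint32_t dw)
        {
                r->buf[r->tail++ % 64] = dw;
        }

        static void queue_flip(struct ring *r, uint32_t pitch, uint32_t gtt_offset)
        {
                ring_emit(r, OP_DISPLAY_FLIP);
                ring_emit(r, pitch);            /* framebuffer stride */
                ring_emit(r, gtt_offset);       /* new scanout base address */
                ring_emit(r, OP_NOOP);          /* pad to an even dword count */
        }

        int main(void)
        {
                struct ring r = { .tail = 0 };
                queue_flip(&r, 4096, 0x100000);
                printf("emitted %u dwords\n", r.tail);
                return 0;
        }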
return 0; @@ -11146,7 +11130,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, struct drm_i915_gem_request *req, uint32_t flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); uint32_t plane_bit = 0; int len, ret; @@ -11167,7 +11151,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev, } len = 4; - if (ring->id == RCS) { + if (engine->id == RCS) { len += 6; /* * On Gen 8, SRM is now taking an extra dword to accommodate @@ -11205,36 +11189,36 @@ static int intel_gen7_queue_flip(struct drm_device *dev, * for the RCS also doesn't appear to drop events. Setting the DERRMR * to zero does lead to lockups within MI_DISPLAY_FLIP. */ - if (ring->id == RCS) { - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit_reg(ring, DERRMR); - intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | - DERRMR_PIPEB_PRI_FLIP_DONE | - DERRMR_PIPEC_PRI_FLIP_DONE)); + if (engine->id == RCS) { + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1)); + intel_ring_emit_reg(engine, DERRMR); + intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE | + DERRMR_PIPEB_PRI_FLIP_DONE | + DERRMR_PIPEC_PRI_FLIP_DONE)); if (IS_GEN8(dev)) - intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 | + intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT); else - intel_ring_emit(ring, MI_STORE_REGISTER_MEM | + intel_ring_emit(engine, MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit_reg(ring, DERRMR); - intel_ring_emit(ring, ring->scratch.gtt_offset + 256); + intel_ring_emit_reg(engine, DERRMR); + intel_ring_emit(engine, engine->scratch.gtt_offset + 256); if (IS_GEN8(dev)) { - intel_ring_emit(ring, 0); - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, MI_NOOP); } } - intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); - intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); - intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); - intel_ring_emit(ring, (MI_NOOP)); + intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); + intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); + intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); + intel_ring_emit(engine, (MI_NOOP)); intel_mark_page_flip_active(intel_crtc->unpin_work); return 0; } -static bool use_mmio_flip(struct intel_engine_cs *ring, +static bool use_mmio_flip(struct intel_engine_cs *engine, struct drm_i915_gem_object *obj) { /* @@ -11245,10 +11229,10 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, * So using MMIO flips there would disrupt this mechanism. 
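Editor's note: the gen7 path keeps its DERRMR workaround through the rename: on the render engine, flip-done error reports are masked with MI_LOAD_REGISTER_IMM and DERRMR is stored to a scratch page with MI_STORE_REGISTER_MEM before MI_DISPLAY_FLIP, since unmasked flip-done events can hang the RCS. A purely symbolic sketch, deliberately without real opcode encodings.

        enum mi_op { LRI, SRM, FLIP };          /* stand-ins for MI_* opcodes */

        static void emit(enum mi_op op, unsigned arg) { (void)op; (void)arg; }

        static void rcs_flip_with_derrmr(unsigned derrmr_mask, unsigned scratch)
        {
                emit(LRI, derrmr_mask);  /* MI_LOAD_REGISTER_IMM: mask DERRMR */
                emit(SRM, scratch);      /* MI_STORE_REGISTER_MEM: DERRMR to scratch */
                emit(FLIP, 0);           /* MI_DISPLAY_FLIP is now safe on RCS */
        }

        int main(void) { rcs_flip_with_derrmr(~0u, 0x100); return 0; }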
*/ - if (ring == NULL) + if (engine == NULL) return true; - if (INTEL_INFO(ring->dev)->gen < 5) + if (INTEL_INFO(engine->dev)->gen < 5) return false; if (i915.use_mmio_flip < 0) @@ -11262,7 +11246,7 @@ static bool use_mmio_flip(struct intel_engine_cs *ring, false)) return true; else - return ring != i915_gem_request_get_ring(obj->last_write_req); + return engine != i915_gem_request_get_engine(obj->last_write_req); } static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, @@ -11508,7 +11492,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, struct drm_plane *primary = crtc->primary; enum pipe pipe = intel_crtc->pipe; struct intel_unpin_work *work; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; bool mmio_flip; struct drm_i915_gem_request *request = NULL; int ret; @@ -11595,21 +11579,21 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { - ring = &dev_priv->ring[BCS]; + engine = &dev_priv->engine[BCS]; if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) /* vlv: DISPLAY_FLIP fails to change tiling */ - ring = NULL; + engine = NULL; } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { - ring = &dev_priv->ring[BCS]; + engine = &dev_priv->engine[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { - ring = i915_gem_request_get_ring(obj->last_write_req); - if (ring == NULL || ring->id != RCS) - ring = &dev_priv->ring[BCS]; + engine = i915_gem_request_get_engine(obj->last_write_req); + if (engine == NULL || engine->id != RCS) + engine = &dev_priv->engine[BCS]; } else { - ring = &dev_priv->ring[RCS]; + engine = &dev_priv->engine[RCS]; } - mmio_flip = use_mmio_flip(ring, obj); + mmio_flip = use_mmio_flip(engine, obj); /* When using CS flips, we want to emit semaphores between rings. * However, when using mmio flips we will create a task to do the @@ -11617,13 +11601,12 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, * into the display plane and skip any waits. 
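Editor's note: use_mmio_flip() condenses several constraints into one decision. Restated below as a stand-alone function with reduced types; the module-parameter and gen checks mirror the hunk above, while the remaining cases (atomic updates, framebuffer modifiers) are summarized in the final line as a hedged paraphrase.

        #include <stdbool.h>
        #include <stddef.h>

        struct engine { int id; int gen; };

        static int use_mmio_flip_param = -1;    /* <0: auto, 0: never, >0: always */

        static bool use_mmio_flip(const struct engine *engine,
                                  const struct engine *last_writer)
        {
                if (engine == NULL)
                        return true;            /* nothing to synchronize against */
                if (engine->gen < 5)
                        return false;           /* no MMIO flips before gen5 */
                if (use_mmio_flip_param >= 0)
                        return use_mmio_flip_param > 0;
                return engine != last_writer;   /* avoid cross-engine semaphores */
        }

        int main(void)
        {
                struct engine rcs = { .id = 0, .gen = 7 }, bcs = { .id = 2, .gen = 7 };
                return use_mmio_flip(&bcs, &rcs) ? 0 : 1;
        }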
*/ if (!mmio_flip) { - ret = i915_gem_object_sync(obj, ring, &request); + ret = i915_gem_object_sync(obj, engine, &request); if (ret) goto cleanup_pending; } - ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, - crtc->primary->state); + ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); if (ret) goto cleanup_pending; @@ -11640,7 +11623,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, obj->last_write_req); } else { if (!request) { - request = i915_gem_request_alloc(ring, NULL); + request = i915_gem_request_alloc(engine, NULL); if (IS_ERR(request)) { ret = PTR_ERR(request); goto cleanup_unpin; @@ -11673,7 +11656,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_unpin: - intel_unpin_fb_obj(fb, crtc->primary->state); + intel_unpin_fb_obj(fb, crtc->primary->state->rotation); cleanup_pending: if (!IS_ERR_OR_NULL(request)) i915_gem_request_cancel(request); @@ -11786,6 +11769,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct drm_plane *plane = plane_state->plane; struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = to_i915(dev); struct intel_plane_state *old_plane_state = to_intel_plane_state(plane->state); int idx = intel_crtc->base.base.id, ret; @@ -11834,42 +11818,43 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, plane->base.id, was_visible, visible, turn_off, turn_on, mode_changed); - if (turn_on || turn_off) { - pipe_config->wm_changed = true; + if (turn_on) { + pipe_config->update_wm_pre = true; + + /* must disable cxsr around plane enable/disable */ + if (plane->type != DRM_PLANE_TYPE_CURSOR) + pipe_config->disable_cxsr = true; + } else if (turn_off) { + pipe_config->update_wm_post = true; /* must disable cxsr around plane enable/disable */ if (plane->type != DRM_PLANE_TYPE_CURSOR) pipe_config->disable_cxsr = true; } else if (intel_wm_need_update(plane, plane_state)) { - pipe_config->wm_changed = true; + /* FIXME bollocks */ + pipe_config->update_wm_pre = true; + pipe_config->update_wm_post = true; } - if (visible || was_visible) - intel_crtc->atomic.fb_bits |= - to_intel_plane(plane)->frontbuffer_bit; + /* Pre-gen9 platforms need two-step watermark updates */ + if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) && + INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks) + to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true; - switch (plane->type) { - case DRM_PLANE_TYPE_PRIMARY: - intel_crtc->atomic.post_enable_primary = turn_on; - intel_crtc->atomic.update_fbc = true; + if (visible || was_visible) + pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit; - break; - case DRM_PLANE_TYPE_CURSOR: - break; - case DRM_PLANE_TYPE_OVERLAY: - /* - * WaCxSRDisabledForSpriteScaling:ivb - * - * cstate->update_wm was already set above, so this flag will - * take effect when we commit and program watermarks. - */ - if (IS_IVYBRIDGE(dev) && - needs_scaling(to_intel_plane_state(plane_state)) && - !needs_scaling(old_plane_state)) - pipe_config->disable_lp_wm = true; + /* + * WaCxSRDisabledForSpriteScaling:ivb + * + * cstate->update_wm was already set above, so this flag will + * take effect when we commit and program watermarks. 
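Editor's note: the turn_on/turn_off split above is the heart of the two-step watermark scheme: enabling a plane must raise watermarks before the plane is on, disabling one may only lower them after it is off, and CxSR is blocked around either edge for non-cursor planes. A reduced sketch; the field names are illustrative, not the exact intel_crtc_state layout.

        #include <stdbool.h>

        struct crtc_wm_state {
                bool update_wm_pre, update_wm_post, disable_cxsr;
        };

        static void plan_wm(struct crtc_wm_state *s, bool turn_on, bool turn_off,
                            bool is_cursor)
        {
                if (turn_on) {
                        s->update_wm_pre = true;        /* raise before enabling */
                        if (!is_cursor)
                                s->disable_cxsr = true;
                } else if (turn_off) {
                        s->update_wm_post = true;       /* lower after disabling */
                        if (!is_cursor)
                                s->disable_cxsr = true;
                }
        }

        int main(void)
        {
                struct crtc_wm_state s = { false, false, false };
                plan_wm(&s, true, false, false);
                return (s.update_wm_pre && s.disable_cxsr) ? 0 : 1;
        }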
+ */ + if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) && + needs_scaling(to_intel_plane_state(plane_state)) && + !needs_scaling(old_plane_state)) + pipe_config->disable_lp_wm = true; - break; - } return 0; } @@ -11941,22 +11926,49 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } if (mode_changed && !crtc_state->active) - pipe_config->wm_changed = true; + pipe_config->update_wm_post = true; if (mode_changed && crtc_state->enable && dev_priv->display.crtc_compute_clock && - !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) { + !WARN_ON(pipe_config->shared_dpll)) { ret = dev_priv->display.crtc_compute_clock(intel_crtc, pipe_config); if (ret) return ret; } + if (crtc_state->color_mgmt_changed) { + ret = intel_color_check(crtc, crtc_state); + if (ret) + return ret; + } + ret = 0; if (dev_priv->display.compute_pipe_wm) { - ret = dev_priv->display.compute_pipe_wm(intel_crtc, state); - if (ret) + ret = dev_priv->display.compute_pipe_wm(pipe_config); + if (ret) { + DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); + return ret; + } + } + + if (dev_priv->display.compute_intermediate_wm && + !to_intel_atomic_state(state)->skip_intermediate_wm) { + if (WARN_ON(!dev_priv->display.compute_pipe_wm)) + return 0; + + /* + * Calculate 'intermediate' watermarks that satisfy both the + * old state and the new state. We can program these + * immediately. + */ + ret = dev_priv->display.compute_intermediate_wm(crtc->dev, + intel_crtc, + pipe_config); + if (ret) { + DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); return ret; + } } if (INTEL_INFO(dev)->gen >= 9) { @@ -11973,7 +11985,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs intel_helper_funcs = { .mode_set_base_atomic = intel_pipe_set_base_atomic, - .load_lut = intel_crtc_load_lut, .atomic_begin = intel_begin_crtc_commit, .atomic_flush = intel_finish_crtc_commit, .atomic_check = intel_crtc_atomic_check, @@ -12090,7 +12101,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, context, pipe_config, pipe_name(crtc->pipe)); - DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder)); + DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n", pipe_config->pipe_bpp, pipe_config->dither); DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", @@ -12166,7 +12177,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, pipe_config->dpll_hw_state.cfgcr1, pipe_config->dpll_hw_state.cfgcr2); } else if (HAS_DDI(dev)) { - DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", + DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n", pipe_config->ddi_pll_sel, pipe_config->dpll_hw_state.wrpll, pipe_config->dpll_hw_state.spll); @@ -12269,7 +12280,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) struct drm_crtc_state tmp_state; struct intel_crtc_scaler_state scaler_state; struct intel_dpll_hw_state dpll_hw_state; - enum intel_dpll_id shared_dpll; + struct intel_shared_dpll *shared_dpll; uint32_t ddi_pll_sel; bool force_thru; @@ -12539,6 +12550,15 @@ intel_pipe_config_compare(struct drm_device *dev, ret = false; \ } +#define PIPE_CONF_CHECK_P(name) \ + if (current_config->name != pipe_config->name) { \ + INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ + "(expected %p, found 
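Editor's note: the new compute_intermediate_wm hook computes values that satisfy both the old and the new configuration, so they can be programmed immediately, before the vblank. Its core invariant reduces to taking the more conservative of the two per watermark level; a one-line sketch of that idea (the real hook is per-platform code).

        static unsigned intermediate_wm(unsigned old_wm, unsigned new_wm)
        {
                /* conservative: must not underflow either configuration */
                return old_wm > new_wm ? old_wm : new_wm;
        }

        int main(void) { return intermediate_wm(8, 5) == 8 ? 0 : 1; }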
%p)\n", \ + current_config->name, \ + pipe_config->name); \ + ret = false; \ + } + #define PIPE_CONF_CHECK_M_N(name) \ if (!intel_compare_link_m_n(¤t_config->name, \ &pipe_config->name,\ @@ -12559,6 +12579,11 @@ intel_pipe_config_compare(struct drm_device *dev, ret = false; \ } +/* This is required for BDW+ where there is only one set of registers for + * switching between high and low RR. + * This macro can be used whenever a comparison has to be made between one + * hw state and multiple sw state variables. + */ #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ if (!intel_compare_link_m_n(¤t_config->name, \ &pipe_config->name, adjust) && \ @@ -12586,22 +12611,6 @@ intel_pipe_config_compare(struct drm_device *dev, ret = false; \ } -/* This is required for BDW+ where there is only one set of registers for - * switching between high and low RR. - * This macro can be used whenever a comparison has to be made between one - * hw state and multiple sw state variables. - */ -#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ - if ((current_config->name != pipe_config->name) && \ - (current_config->alt_name != pipe_config->name)) { \ - INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ - "(expected %i or %i, found %i)\n", \ - current_config->name, \ - current_config->alt_name, \ - pipe_config->name); \ - ret = false; \ - } - #define PIPE_CONF_CHECK_FLAGS(name, mask) \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ @@ -12682,7 +12691,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(gmch_pfit.control); /* pfit ratios are autocomputed by the hw on gen4+ */ if (INTEL_INFO(dev)->gen < 4) - PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); + PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); if (!adjust) { @@ -12706,7 +12715,7 @@ intel_pipe_config_compare(struct drm_device *dev, PIPE_CONF_CHECK_X(ddi_pll_sel); - PIPE_CONF_CHECK_I(shared_dpll); + PIPE_CONF_CHECK_P(shared_dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll); PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); PIPE_CONF_CHECK_X(dpll_hw_state.fp0); @@ -12725,7 +12734,7 @@ intel_pipe_config_compare(struct drm_device *dev, #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I -#undef PIPE_CONF_CHECK_I_ALT +#undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_CLOCK_FUZZY #undef PIPE_CONF_QUIRK @@ -12734,48 +12743,61 @@ intel_pipe_config_compare(struct drm_device *dev, return ret; } -static void check_wm_state(struct drm_device *dev) +static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, + const struct intel_crtc_state *pipe_config) { + if (pipe_config->has_pch_encoder) { + int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), + &pipe_config->fdi_m_n); + int dotclock = pipe_config->base.adjusted_mode.crtc_clock; + + /* + * FDI already provided one idea for the dotclock. + * Yell if the encoder disagrees. 
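Editor's note: PIPE_CONF_CHECK_P exists because shared_dpll changed from an integer id to a struct pointer, so the old PIPE_CONF_CHECK_I would now be comparing and printing pointers with an integer format. A stand-alone rendition of the macro pattern with stand-in types:

        #include <stdio.h>

        #define CHECK_P(cur, found, name)                                      \
                do {                                                           \
                        if ((cur)->name != (found)->name)                      \
                                fprintf(stderr, "mismatch in " #name           \
                                        " (expected %p, found %p)\n",          \
                                        (void *)(cur)->name,                   \
                                        (void *)(found)->name);                \
                } while (0)

        struct pll  { int id; };
        struct conf { struct pll *shared_dpll; };

        int main(void)
        {
                struct pll a = { 0 }, b = { 1 };
                struct conf cur_cfg = { &a }, hw_cfg = { &b };
                CHECK_P(&cur_cfg, &hw_cfg, shared_dpll);
                return 0;
        }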
+ */ + WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), + "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", + fdi_dotclock, dotclock); + } +} + +static void verify_wm_state(struct drm_crtc *crtc, + struct drm_crtc_state *new_state) +{ + struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct skl_ddb_allocation hw_ddb, *sw_ddb; - struct intel_crtc *intel_crtc; + struct skl_ddb_entry *hw_entry, *sw_entry; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + const enum pipe pipe = intel_crtc->pipe; int plane; - if (INTEL_INFO(dev)->gen < 9) + if (INTEL_INFO(dev)->gen < 9 || !new_state->active) return; skl_ddb_get_hw_state(dev_priv, &hw_ddb); sw_ddb = &dev_priv->wm.skl_hw.ddb; - for_each_intel_crtc(dev, intel_crtc) { - struct skl_ddb_entry *hw_entry, *sw_entry; - const enum pipe pipe = intel_crtc->pipe; + /* planes */ + for_each_plane(dev_priv, pipe, plane) { + hw_entry = &hw_ddb.plane[pipe][plane]; + sw_entry = &sw_ddb->plane[pipe][plane]; - if (!intel_crtc->active) + if (skl_ddb_entry_equal(hw_entry, sw_entry)) continue; - /* planes */ - for_each_plane(dev_priv, pipe, plane) { - hw_entry = &hw_ddb.plane[pipe][plane]; - sw_entry = &sw_ddb->plane[pipe][plane]; - - if (skl_ddb_entry_equal(hw_entry, sw_entry)) - continue; - - DRM_ERROR("mismatch in DDB state pipe %c plane %d " - "(expected (%u,%u), found (%u,%u))\n", - pipe_name(pipe), plane + 1, - sw_entry->start, sw_entry->end, - hw_entry->start, hw_entry->end); - } - - /* cursor */ - hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; - sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; + DRM_ERROR("mismatch in DDB state pipe %c plane %d " + "(expected (%u,%u), found (%u,%u))\n", + pipe_name(pipe), plane + 1, + sw_entry->start, sw_entry->end, + hw_entry->start, hw_entry->end); + } - if (skl_ddb_entry_equal(hw_entry, sw_entry)) - continue; + /* cursor */ + hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; + sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; + if (!skl_ddb_entry_equal(hw_entry, sw_entry)) { DRM_ERROR("mismatch in DDB state pipe %c cursor " "(expected (%u,%u), found (%u,%u))\n", pipe_name(pipe), @@ -12785,20 +12807,18 @@ static void check_wm_state(struct drm_device *dev) } static void -check_connector_state(struct drm_device *dev, - struct drm_atomic_state *old_state) +verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc) { - struct drm_connector_state *old_conn_state; struct drm_connector *connector; - int i; - for_each_connector_in_state(old_state, connector, old_conn_state, i) { + drm_for_each_connector(connector, dev) { struct drm_encoder *encoder = connector->encoder; struct drm_connector_state *state = connector->state; - /* This also checks the encoder/connector hw state with the - * ->get_hw_state callbacks. 
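Editor's note: verify_wm_state() now checks a single CRTC's DDB slices instead of walking every pipe. Each plane owns a start/end slice of the display buffer, so verification is per-entry equality of hardware against software state; a sketch with a struct shaped loosely like skl_ddb_entry.

        #include <stdbool.h>
        #include <stdint.h>

        struct ddb_entry { uint16_t start, end; };      /* [start, end) block range */

        static bool ddb_entry_equal(const struct ddb_entry *a,
                                    const struct ddb_entry *b)
        {
                return a->start == b->start && a->end == b->end;
        }

        int main(void)
        {
                struct ddb_entry hw = { 0, 160 }, sw = { 0, 160 };
                return ddb_entry_equal(&hw, &sw) ? 0 : 1;
        }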
*/ - intel_connector_check_state(to_intel_connector(connector)); + if (state->crtc != crtc) + continue; + + intel_connector_verify_state(to_intel_connector(connector)); I915_STATE_WARN(state->best_encoder != encoder, "connector's atomic encoder doesn't match legacy encoder\n"); @@ -12806,7 +12826,7 @@ check_connector_state(struct drm_device *dev, } static void -check_encoder_state(struct drm_device *dev) +verify_encoder_state(struct drm_device *dev) { struct intel_encoder *encoder; struct intel_connector *connector; @@ -12846,149 +12866,186 @@ check_encoder_state(struct drm_device *dev) } static void -check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state) +verify_crtc_state(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state) { + struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_encoder *encoder; - struct drm_crtc_state *old_crtc_state; - struct drm_crtc *crtc; - int i; - - for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config, *sw_config; - bool active; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *pipe_config, *sw_config; + struct drm_atomic_state *old_state; + bool active; - if (!needs_modeset(crtc->state) && - !to_intel_crtc_state(crtc->state)->update_pipe) - continue; + old_state = old_crtc_state->state; + __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state); + pipe_config = to_intel_crtc_state(old_crtc_state); + memset(pipe_config, 0, sizeof(*pipe_config)); + pipe_config->base.crtc = crtc; + pipe_config->base.state = old_state; - __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state); - pipe_config = to_intel_crtc_state(old_crtc_state); - memset(pipe_config, 0, sizeof(*pipe_config)); - pipe_config->base.crtc = crtc; - pipe_config->base.state = old_state; + DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); - DRM_DEBUG_KMS("[CRTC:%d]\n", - crtc->base.id); + active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); - active = dev_priv->display.get_pipe_config(intel_crtc, - pipe_config); + /* hw state is inconsistent with the pipe quirk */ + if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || + (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) + active = new_crtc_state->active; - /* hw state is inconsistent with the pipe quirk */ - if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || - (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) - active = crtc->state->active; + I915_STATE_WARN(new_crtc_state->active != active, + "crtc active state doesn't match with hw state " + "(expected %i, found %i)\n", new_crtc_state->active, active); - I915_STATE_WARN(crtc->state->active != active, - "crtc active state doesn't match with hw state " - "(expected %i, found %i)\n", crtc->state->active, active); + I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, + "transitional active state does not match atomic hw state " + "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); - I915_STATE_WARN(intel_crtc->active != crtc->state->active, - "transitional active state does not match atomic hw state " - "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active); + for_each_encoder_on_crtc(dev, crtc, encoder) { + enum pipe pipe; - for_each_encoder_on_crtc(dev, crtc, encoder) { - enum pipe pipe; + active = 
encoder->get_hw_state(encoder, &pipe); + I915_STATE_WARN(active != new_crtc_state->active, + "[ENCODER:%i] active %i with crtc active %i\n", + encoder->base.base.id, active, new_crtc_state->active); - active = encoder->get_hw_state(encoder, &pipe); - I915_STATE_WARN(active != crtc->state->active, - "[ENCODER:%i] active %i with crtc active %i\n", - encoder->base.base.id, active, crtc->state->active); + I915_STATE_WARN(active && intel_crtc->pipe != pipe, + "Encoder connected to wrong pipe %c\n", + pipe_name(pipe)); - I915_STATE_WARN(active && intel_crtc->pipe != pipe, - "Encoder connected to wrong pipe %c\n", - pipe_name(pipe)); + if (active) + encoder->get_config(encoder, pipe_config); + } - if (active) - encoder->get_config(encoder, pipe_config); - } + if (!new_crtc_state->active) + return; - if (!crtc->state->active) - continue; + intel_pipe_config_sanity_check(dev_priv, pipe_config); - sw_config = to_intel_crtc_state(crtc->state); - if (!intel_pipe_config_compare(dev, sw_config, - pipe_config, false)) { - I915_STATE_WARN(1, "pipe state doesn't match!\n"); - intel_dump_pipe_config(intel_crtc, pipe_config, - "[hw state]"); - intel_dump_pipe_config(intel_crtc, sw_config, - "[sw state]"); - } + sw_config = to_intel_crtc_state(crtc->state); + if (!intel_pipe_config_compare(dev, sw_config, + pipe_config, false)) { + I915_STATE_WARN(1, "pipe state doesn't match!\n"); + intel_dump_pipe_config(intel_crtc, pipe_config, + "[hw state]"); + intel_dump_pipe_config(intel_crtc, sw_config, + "[sw state]"); } } static void -check_shared_dpll_state(struct drm_device *dev) +verify_single_dpll_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct drm_crtc *crtc, + struct drm_crtc_state *new_state) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *crtc; struct intel_dpll_hw_state dpll_hw_state; - int i; + unsigned crtc_mask; + bool active; - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - int enabled_crtcs = 0, active_crtcs = 0; - bool active; + memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); - memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); + DRM_DEBUG_KMS("%s\n", pll->name); - DRM_DEBUG_KMS("%s\n", pll->name); + active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state); - active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); - - I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask), - "more active pll users than references: %i vs %i\n", - pll->active, hweight32(pll->config.crtc_mask)); - I915_STATE_WARN(pll->active && !pll->on, + if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) { + I915_STATE_WARN(!pll->on && pll->active_mask, "pll in active use but not on in sw tracking\n"); - I915_STATE_WARN(pll->on && !pll->active, - "pll in on but not on in use in sw tracking\n"); + I915_STATE_WARN(pll->on && !pll->active_mask, + "pll is on but not used by any active crtc\n"); I915_STATE_WARN(pll->on != active, "pll on state mismatch (expected %i, found %i)\n", pll->on, active); + } - for_each_intel_crtc(dev, crtc) { - if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll) - enabled_crtcs++; - if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) - active_crtcs++; - } - I915_STATE_WARN(pll->active != active_crtcs, - "pll active crtcs mismatch (expected %i, found %i)\n", - pll->active, active_crtcs); - I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs, - "pll enabled crtcs mismatch (expected %i, found %i)\n", - hweight32(pll->config.crtc_mask), 
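Editor's note: verify_single_dpll_state() replaces the old active counter with per-CRTC bitmasks. Its invariants, reduced to asserts over stand-in types (the INTEL_DPLL_ALWAYS_ON exception visible in the hunk above is omitted here for brevity):

        #include <assert.h>

        struct shared_dpll { unsigned active_mask, crtc_mask; int on; };

        static void verify_pll(const struct shared_dpll *pll)
        {
                /* no CRTC may be active on the PLL without also referencing it */
                assert(!(pll->active_mask & ~pll->crtc_mask));
                /* the PLL is on exactly when it has active users */
                assert(!pll->active_mask == !pll->on);
        }

        int main(void)
        {
                struct shared_dpll pll = { .active_mask = 0x1, .crtc_mask = 0x3, .on = 1 };
                verify_pll(&pll);
                return 0;
        }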
enabled_crtcs); + if (!crtc) { + I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask, + "more active pll users than references: %x vs %x\n", + pll->active_mask, pll->config.crtc_mask); - I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state, - sizeof(dpll_hw_state)), - "pll hw state mismatch\n"); + return; } + + crtc_mask = 1 << drm_crtc_index(crtc); + + if (new_state->active) + I915_STATE_WARN(!(pll->active_mask & crtc_mask), + "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", + pipe_name(drm_crtc_index(crtc)), pll->active_mask); + else + I915_STATE_WARN(pll->active_mask & crtc_mask, + "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", + pipe_name(drm_crtc_index(crtc)), pll->active_mask); + + I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask), + "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", + crtc_mask, pll->config.crtc_mask); + + I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, + &dpll_hw_state, + sizeof(dpll_hw_state)), + "pll hw state mismatch\n"); } static void -intel_modeset_check_state(struct drm_device *dev, - struct drm_atomic_state *old_state) +verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state) { - check_wm_state(dev); - check_connector_state(dev, old_state); - check_encoder_state(dev); - check_crtc_state(dev, old_state); - check_shared_dpll_state(dev); + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); + struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); + + if (new_state->shared_dpll) + verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state); + + if (old_state->shared_dpll && + old_state->shared_dpll != new_state->shared_dpll) { + unsigned crtc_mask = 1 << drm_crtc_index(crtc); + struct intel_shared_dpll *pll = old_state->shared_dpll; + + I915_STATE_WARN(pll->active_mask & crtc_mask, + "pll active mismatch (didn't expect pipe %c in active mask)\n", + pipe_name(drm_crtc_index(crtc))); + I915_STATE_WARN(pll->config.crtc_mask & crtc_mask, + "pll enabled crtcs mismatch (found %x in enabled mask)\n", + pipe_name(drm_crtc_index(crtc))); + } } -void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, - int dotclock) +static void +intel_modeset_verify_crtc(struct drm_crtc *crtc, + struct drm_crtc_state *old_state, + struct drm_crtc_state *new_state) { - /* - * FDI already provided one idea for the dotclock. - * Yell if the encoder disagrees. 
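Editor's note: intel_pipe_config_sanity_check() keeps the FDI-versus-encoder dotclock WARN, which relies on a fuzzy comparison to absorb the rounding inherent in the M/N recovery shown earlier. A sketch of such a check; the ~5% bound is this sketch's assumption, not a quoted kernel constant.

        #include <stdbool.h>

        static bool fuzzy_clock_check(int clock1, int clock2)
        {
                int diff = clock1 > clock2 ? clock1 - clock2 : clock2 - clock1;

                if (clock1 == 0 || clock2 == 0)
                        return clock1 == clock2;
                return diff * 20 < clock1;      /* |c1 - c2| below ~5% of c1 */
        }

        int main(void) { return fuzzy_clock_check(148500, 148350) ? 0 : 1; }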
- */ - WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock), - "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", - pipe_config->base.adjusted_mode.crtc_clock, dotclock); + if (!needs_modeset(new_state) && + !to_intel_crtc_state(new_state)->update_pipe) + return; + + verify_wm_state(crtc, new_state); + verify_connector_state(crtc->dev, crtc); + verify_crtc_state(crtc, old_state, new_state); + verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); +} + +static void +verify_disabled_dpll_state(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int i; + + for (i = 0; i < dev_priv->num_shared_dpll; i++) + verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); +} + +static void +intel_modeset_verify_disabled(struct drm_device *dev) +{ + verify_encoder_state(dev); + verify_connector_state(dev, NULL); + verify_disabled_dpll_state(dev); } static void update_scanline_offset(struct intel_crtc *crtc) @@ -13043,20 +13100,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state) for_each_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll; + struct intel_shared_dpll *old_dpll = + to_intel_crtc_state(crtc->state)->shared_dpll; if (!needs_modeset(crtc_state)) continue; - to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE; + to_intel_crtc_state(crtc_state)->shared_dpll = NULL; - if (old_dpll == DPLL_ID_PRIVATE) + if (!old_dpll) continue; if (!shared_dpll) shared_dpll = intel_atomic_get_shared_dpll_state(state); - shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe); + intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc); } } @@ -13268,9 +13326,6 @@ static int intel_atomic_check(struct drm_device *dev, struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); - memset(&to_intel_crtc(crtc)->atomic, 0, - sizeof(struct intel_crtc_atomic_commit)); - /* Catch I915_MODE_FLAG_INHERITED */ if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) crtc_state->mode_changed = true; @@ -13451,12 +13506,12 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) return true; /* wm changes, need vblank before final wm's */ - if (crtc_state->wm_changed) + if (crtc_state->update_wm_post) return true; /* * cxsr is re-enabled after vblank. - * This is already handled by crtc_state->wm_changed, + * This is already handled by crtc_state->update_wm_post, * but added for clarity. 
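Editor's note: intel_modeset_clear_plls() now drops a CRTC's claim on its old PLL through intel_shared_dpll_config_put() instead of clearing a bit by hand, with NULL taking over the sentinel role previously played by DPLL_ID_PRIVATE. The bookkeeping it wraps reduces to clearing one bit per CRTC; a sketch with reduced types:

        struct shared_dpll_config { unsigned crtc_mask; };

        /* Drop one CRTC's reference so the PLL can be reused or switched off. */
        static void shared_dpll_config_put(struct shared_dpll_config *cfg,
                                           int crtc_index)
        {
                cfg->crtc_mask &= ~(1u << crtc_index);
        }

        int main(void)
        {
                struct shared_dpll_config cfg = { .crtc_mask = 0x3 };
                shared_dpll_config_put(&cfg, 0);
                return cfg.crtc_mask == 0x2 ? 0 : 1;
        }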
*/ if (crtc_state->disable_cxsr) @@ -13487,8 +13542,9 @@ static int intel_atomic_commit(struct drm_device *dev, { struct intel_atomic_state *intel_state = to_intel_atomic_state(state); struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_crtc_state *crtc_state; + struct drm_crtc_state *old_crtc_state; struct drm_crtc *crtc; + struct intel_crtc_state *intel_cstate; int ret = 0, i; bool hw_check = intel_state->modeset; unsigned long put_domains[I915_MAX_PIPES] = {}; @@ -13501,7 +13557,8 @@ static int intel_atomic_commit(struct drm_device *dev, } drm_atomic_helper_swap_state(dev, state); - dev_priv->wm.config = to_intel_atomic_state(state)->wm_config; + dev_priv->wm.config = intel_state->wm_config; + intel_shared_dpll_commit(state); if (intel_state->modeset) { memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, @@ -13512,7 +13569,7 @@ static int intel_atomic_commit(struct drm_device *dev, intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); } - for_each_crtc_in_state(state, crtc, crtc_state, i) { + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); if (needs_modeset(crtc->state) || @@ -13527,10 +13584,10 @@ static int intel_atomic_commit(struct drm_device *dev, if (!needs_modeset(crtc->state)) continue; - intel_pre_plane_update(to_intel_crtc_state(crtc_state)); + intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); - if (crtc_state->active) { - intel_crtc_disable_planes(crtc, crtc_state->plane_mask); + if (old_crtc_state->active) { + intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); dev_priv->display.crtc_disable(crtc); intel_crtc->active = false; intel_fbc_disable(intel_crtc); @@ -13553,17 +13610,17 @@ static int intel_atomic_commit(struct drm_device *dev, intel_modeset_update_crtc_state(state); if (intel_state->modeset) { - intel_shared_dpll_commit(state); - drm_atomic_helper_update_legacy_modeset_state(state->dev, state); if (dev_priv->display.modeset_commit_cdclk && intel_state->dev_cdclk != dev_priv->cdclk_freq) dev_priv->display.modeset_commit_cdclk(state); + + intel_modeset_verify_disabled(dev); } /* Now enable the clocks, plane, pipe, and connectors that we set up. 
*/ - for_each_crtc_in_state(state, crtc, crtc_state, i) { + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); bool modeset = needs_modeset(crtc->state); struct intel_crtc_state *pipe_config = @@ -13576,14 +13633,15 @@ static int intel_atomic_commit(struct drm_device *dev, } if (!modeset) - intel_pre_plane_update(to_intel_crtc_state(crtc_state)); + intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); - if (crtc->state->active && intel_crtc->atomic.update_fbc) + if (crtc->state->active && + drm_atomic_get_existing_plane_state(state, crtc->primary)) intel_fbc_enable(intel_crtc); if (crtc->state->active && (crtc->state->planes_changed || update_pipe)) - drm_atomic_helper_commit_planes_on_crtc(crtc_state); + drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); if (pipe_config->base.active && needs_vblank_wait(pipe_config)) crtc_vblank_mask |= 1 << i; @@ -13594,11 +13652,27 @@ static int intel_atomic_commit(struct drm_device *dev, if (!state->legacy_cursor_update) intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); - for_each_crtc_in_state(state, crtc, crtc_state, i) { - intel_post_plane_update(to_intel_crtc(crtc)); + /* + * Now that the vblank has passed, we can go ahead and program the + * optimal watermarks on platforms that need two-step watermark + * programming. + * + * TODO: Move this (and other cleanup) to an async worker eventually. + */ + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + intel_cstate = to_intel_crtc_state(crtc->state); + + if (dev_priv->display.optimize_watermarks) + dev_priv->display.optimize_watermarks(intel_cstate); + } + + for_each_crtc_in_state(state, crtc, old_crtc_state, i) { + intel_post_plane_update(to_intel_crtc_state(old_crtc_state)); if (put_domains[i]) modeset_put_power_domains(dev_priv, put_domains[i]); + + intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); } if (intel_state->modeset) @@ -13608,9 +13682,6 @@ static int intel_atomic_commit(struct drm_device *dev, drm_atomic_helper_cleanup_planes(dev, state); mutex_unlock(&dev->struct_mutex); - if (hw_check) - intel_modeset_check_state(dev, state); - drm_atomic_state_free(state); /* As one of the primary mmio accessors, KMS has a high likelihood @@ -13670,116 +13741,15 @@ out: #undef for_each_intel_crtc_masked static const struct drm_crtc_funcs intel_crtc_funcs = { - .gamma_set = intel_crtc_gamma_set, + .gamma_set = drm_atomic_helper_legacy_gamma_set, .set_config = drm_atomic_helper_set_config, + .set_property = drm_atomic_helper_crtc_set_property, .destroy = intel_crtc_destroy, .page_flip = intel_crtc_page_flip, .atomic_duplicate_state = intel_crtc_duplicate_state, .atomic_destroy_state = intel_crtc_destroy_state, }; -static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state) -{ - uint32_t val; - - if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) - return false; - - val = I915_READ(PCH_DPLL(pll->id)); - hw_state->dpll = val; - hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); - hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); - - intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); - - return val & DPLL_VCO_ENABLE; -} - -static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); - I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); -} - -static void ibx_pch_dpll_enable(struct drm_i915_private 
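Editor's note: the commit path now runs a second watermark pass once the vblank has passed, moving each CRTC from the safe intermediate values to the optimal ones. A sketch of that post-vblank loop with stand-in types; the real hook is dev_priv->display.optimize_watermarks().

        struct crtc_state { int placeholder; };

        /* Stand-in for dev_priv->display.optimize_watermarks(). */
        static void optimize_watermarks(struct crtc_state *s) { (void)s; }

        static void post_vblank_wm_pass(struct crtc_state **states, int n)
        {
                /* the vblank has passed, so lowering watermarks is now safe */
                for (int i = 0; i < n; i++)
                        optimize_watermarks(states[i]);
        }

        int main(void)
        {
                struct crtc_state a = { 0 }, b = { 0 }, *all[] = { &a, &b };
                post_vblank_wm_pass(all, 2);
                return 0;
        }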
*dev_priv, - struct intel_shared_dpll *pll) -{ - /* PCH refclock must be enabled first */ - ibx_assert_pch_refclk_enabled(dev_priv); - - I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(PCH_DPLL(pll->id)); - udelay(150); - - /* The pixel multiplier can only be updated once the - * DPLL is enabled and the clocks are stable. - * - * So write it again. - */ - I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); - POSTING_READ(PCH_DPLL(pll->id)); - udelay(200); -} - -static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll) -{ - struct drm_device *dev = dev_priv->dev; - struct intel_crtc *crtc; - - /* Make sure no transcoder isn't still depending on us. */ - for_each_intel_crtc(dev, crtc) { - if (intel_crtc_to_shared_dpll(crtc) == pll) - assert_pch_transcoder_disabled(dev_priv, crtc->pipe); - } - - I915_WRITE(PCH_DPLL(pll->id), 0); - POSTING_READ(PCH_DPLL(pll->id)); - udelay(200); -} - -static char *ibx_pch_dpll_names[] = { - "PCH DPLL A", - "PCH DPLL B", -}; - -static void ibx_pch_dpll_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - dev_priv->num_shared_dpll = 2; - - for (i = 0; i < dev_priv->num_shared_dpll; i++) { - dev_priv->shared_dplls[i].id = i; - dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; - dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; - dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; - dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; - dev_priv->shared_dplls[i].get_hw_state = - ibx_pch_dpll_get_hw_state; - } -} - -static void intel_shared_dpll_init(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (HAS_DDI(dev)) - intel_ddi_pll_init(dev); - else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) - ibx_pch_dpll_init(dev); - else - dev_priv->num_shared_dpll = 0; - - BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); -} - /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @plane: drm plane to prepare for @@ -13853,7 +13823,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) DRM_DEBUG_KMS("failed to attach phys object\n"); } else { - ret = intel_pin_and_fence_fb_obj(plane, fb, new_state); + ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation); } if (ret == 0) { @@ -13897,7 +13867,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR || !INTEL_INFO(dev)->cursor_needs_physical)) - intel_unpin_fb_obj(old_state->fb, old_state); + intel_unpin_fb_obj(old_state->fb, old_state->rotation); /* prepare_fb aborted? 
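Editor's note: the ibx PCH DPLL callbacks deleted above are presumably relocated into the new shared-DPLL management code rather than dropped outright. The detail worth remembering from ibx_pch_dpll_enable() is that the pixel multiplier only latches once the DPLL is enabled and stable, hence the same register value is written twice with delays in between. A sketch of that shape; the MMIO helper and bit value are hypothetical.

        static void enable_pch_dpll(volatile unsigned *dpll_reg, unsigned val)
        {
                *dpll_reg = val;        /* enable the DPLL */
                /* wait ~150us for the clocks to stabilize (udelay in the kernel) */
                *dpll_reg = val;        /* rewrite so the pixel multiplier latches */
                /* wait ~200us before relying on the output */
        }

        int main(void)
        {
                unsigned fake_reg = 0;
                enable_pch_dpll(&fake_reg, 0x80000000u /* VCO-enable-like bit */);
                return fake_reg ? 0 : 1;
        }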
*/ if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) || @@ -13905,7 +13875,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane, i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit); i915_gem_request_assign(&old_intel_state->wait_req, NULL); - } int @@ -13980,6 +13949,11 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, if (modeset) return; + if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { + intel_color_set_csc(crtc->state); + intel_color_load_luts(crtc->state); + } + if (to_intel_crtc_state(crtc->state)->update_pipe) intel_update_pipe_config(intel_crtc, old_intel_state); else if (INTEL_INFO(dev)->gen >= 9) @@ -14023,20 +13997,19 @@ const struct drm_plane_funcs intel_plane_funcs = { static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, int pipe) { - struct intel_plane *primary; - struct intel_plane_state *state; + struct intel_plane *primary = NULL; + struct intel_plane_state *state = NULL; const uint32_t *intel_primary_formats; unsigned int num_formats; + int ret; primary = kzalloc(sizeof(*primary), GFP_KERNEL); - if (primary == NULL) - return NULL; + if (!primary) + goto fail; state = intel_create_plane_state(&primary->base); - if (!state) { - kfree(primary); - return NULL; - } + if (!state) + goto fail; primary->base.state = &state->base; primary->can_scale = false; @@ -14078,10 +14051,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, primary->disable_plane = i9xx_disable_primary_plane; } - drm_universal_plane_init(dev, &primary->base, 0, - &intel_plane_funcs, - intel_primary_formats, num_formats, - DRM_PLANE_TYPE_PRIMARY, NULL); + ret = drm_universal_plane_init(dev, &primary->base, 0, + &intel_plane_funcs, + intel_primary_formats, num_formats, + DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + goto fail; if (INTEL_INFO(dev)->gen >= 4) intel_create_rotation_property(dev, primary); @@ -14089,6 +14064,12 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev, drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); return &primary->base; + +fail: + kfree(state); + kfree(primary); + + return NULL; } void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane) @@ -14205,18 +14186,17 @@ intel_update_cursor_plane(struct drm_plane *plane, static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, int pipe) { - struct intel_plane *cursor; - struct intel_plane_state *state; + struct intel_plane *cursor = NULL; + struct intel_plane_state *state = NULL; + int ret; cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); - if (cursor == NULL) - return NULL; + if (!cursor) + goto fail; state = intel_create_plane_state(&cursor->base); - if (!state) { - kfree(cursor); - return NULL; - } + if (!state) + goto fail; cursor->base.state = &state->base; cursor->can_scale = false; @@ -14228,11 +14208,13 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, cursor->update_plane = intel_update_cursor_plane; cursor->disable_plane = intel_disable_cursor_plane; - drm_universal_plane_init(dev, &cursor->base, 0, - &intel_plane_funcs, - intel_cursor_formats, - ARRAY_SIZE(intel_cursor_formats), - DRM_PLANE_TYPE_CURSOR, NULL); + ret = drm_universal_plane_init(dev, &cursor->base, 0, + &intel_plane_funcs, + intel_cursor_formats, + ARRAY_SIZE(intel_cursor_formats), + DRM_PLANE_TYPE_CURSOR, NULL); + if (ret) + goto fail; if (INTEL_INFO(dev)->gen >= 4) { if (!dev->mode_config.rotation_property) @@ 
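Editor's note: intel_primary_plane_create() (and the cursor variant below it) are converted to the kernel's single-exit error style: initialize every pointer to NULL, jump to one fail label on any error, and rely on kfree(NULL) being a no-op so the unwind stays a single block. A userspace sketch of the same shape using free():

        #include <stdlib.h>

        struct plane_state { int placeholder; };
        struct plane       { struct plane_state *state; };

        static struct plane *plane_create(void)
        {
                struct plane *p = NULL;
                struct plane_state *s = NULL;

                p = calloc(1, sizeof(*p));
                if (!p)
                        goto fail;
                s = calloc(1, sizeof(*s));
                if (!s)
                        goto fail;

                p->state = s;           /* ownership transferred on success */
                return p;

        fail:
                free(s);                /* free(NULL) is a no-op, like kfree(NULL) */
                free(p);
                return NULL;
        }

        int main(void)
        {
                struct plane *p = plane_create();
                if (p)
                        free(p->state);
                free(p);
                return 0;
        }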
-14252,6 +14234,12 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); return &cursor->base; + +fail: + kfree(state); + kfree(cursor); + + return NULL; } static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, @@ -14277,7 +14265,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) struct intel_crtc_state *crtc_state = NULL; struct drm_plane *primary = NULL; struct drm_plane *cursor = NULL; - int i, ret; + int ret; intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); if (intel_crtc == NULL) @@ -14313,13 +14301,6 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) if (ret) goto fail; - drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); - for (i = 0; i < 256; i++) { - intel_crtc->lut_r[i] = i; - intel_crtc->lut_g[i] = i; - intel_crtc->lut_b[i] = i; - } - /* * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port * is hooked to pipe B. Hence we want plane A feeding pipe B. @@ -14344,6 +14325,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); + intel_color_init(&intel_crtc->base); + WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); return; @@ -14468,6 +14451,8 @@ static void intel_setup_outputs(struct drm_device *dev) intel_ddi_init(dev, PORT_A); intel_ddi_init(dev, PORT_B); intel_ddi_init(dev, PORT_C); + + intel_dsi_init(dev); } else if (HAS_DDI(dev)) { int found; @@ -14837,6 +14822,8 @@ static int intel_framebuffer_init(struct drm_device *dev, drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); intel_fb->obj = obj; + intel_fill_fb_info(dev_priv, &intel_fb->base); + ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); if (ret) { DRM_ERROR("framebuffer init failed %d\n", ret); @@ -14884,23 +14871,13 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { .atomic_state_clear = intel_atomic_state_clear, }; -/* Set up chip specific display functions */ -static void intel_init_display(struct drm_device *dev) +/** + * intel_init_display_hooks - initialize the display modesetting hooks + * @dev_priv: device private + */ +void intel_init_display_hooks(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - - if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) - dev_priv->display.find_dpll = g4x_find_best_dpll; - else if (IS_CHERRYVIEW(dev)) - dev_priv->display.find_dpll = chv_find_best_dpll; - else if (IS_VALLEYVIEW(dev)) - dev_priv->display.find_dpll = vlv_find_best_dpll; - else if (IS_PINEVIEW(dev)) - dev_priv->display.find_dpll = pnv_find_best_dpll; - else - dev_priv->display.find_dpll = i9xx_find_best_dpll; - - if (INTEL_INFO(dev)->gen >= 9) { + if (INTEL_INFO(dev_priv)->gen >= 9) { dev_priv->display.get_pipe_config = haswell_get_pipe_config; dev_priv->display.get_initial_plane_config = skylake_get_initial_plane_config; @@ -14908,7 +14885,7 @@ static void intel_init_display(struct drm_device *dev) haswell_crtc_compute_clock; dev_priv->display.crtc_enable = haswell_crtc_enable; dev_priv->display.crtc_disable = haswell_crtc_disable; - } else if (HAS_DDI(dev)) { + } else if (HAS_DDI(dev_priv)) { dev_priv->display.get_pipe_config = haswell_get_pipe_config; dev_priv->display.get_initial_plane_config = ironlake_get_initial_plane_config; @@ -14916,7 +14893,7 @@ static void intel_init_display(struct drm_device *dev) haswell_crtc_compute_clock; dev_priv->display.crtc_enable = haswell_crtc_enable; 
dev_priv->display.crtc_disable = haswell_crtc_disable; - } else if (HAS_PCH_SPLIT(dev)) { + } else if (HAS_PCH_SPLIT(dev_priv)) { dev_priv->display.get_pipe_config = ironlake_get_pipe_config; dev_priv->display.get_initial_plane_config = ironlake_get_initial_plane_config; @@ -14924,106 +14901,134 @@ static void intel_init_display(struct drm_device *dev) ironlake_crtc_compute_clock; dev_priv->display.crtc_enable = ironlake_crtc_enable; dev_priv->display.crtc_disable = ironlake_crtc_disable; - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_CHERRYVIEW(dev_priv)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; - dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; + dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; dev_priv->display.crtc_enable = valleyview_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; - } else { + } else if (IS_VALLEYVIEW(dev_priv)) { + dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_initial_plane_config = + i9xx_get_initial_plane_config; + dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; + dev_priv->display.crtc_enable = valleyview_crtc_enable; + dev_priv->display.crtc_disable = i9xx_crtc_disable; + } else if (IS_G4X(dev_priv)) { + dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_initial_plane_config = + i9xx_get_initial_plane_config; + dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; + dev_priv->display.crtc_enable = i9xx_crtc_enable; + dev_priv->display.crtc_disable = i9xx_crtc_disable; + } else if (IS_PINEVIEW(dev_priv)) { + dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_initial_plane_config = + i9xx_get_initial_plane_config; + dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; + dev_priv->display.crtc_enable = i9xx_crtc_enable; + dev_priv->display.crtc_disable = i9xx_crtc_disable; + } else if (!IS_GEN2(dev_priv)) { dev_priv->display.get_pipe_config = i9xx_get_pipe_config; dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config; dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; dev_priv->display.crtc_enable = i9xx_crtc_enable; dev_priv->display.crtc_disable = i9xx_crtc_disable; + } else { + dev_priv->display.get_pipe_config = i9xx_get_pipe_config; + dev_priv->display.get_initial_plane_config = + i9xx_get_initial_plane_config; + dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; + dev_priv->display.crtc_enable = i9xx_crtc_enable; + dev_priv->display.crtc_disable = i9xx_crtc_disable; } /* Returns the core display clock speed */ - if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) dev_priv->display.get_display_clock_speed = skylake_get_display_clock_speed; - else if (IS_BROXTON(dev)) + else if (IS_BROXTON(dev_priv)) dev_priv->display.get_display_clock_speed = broxton_get_display_clock_speed; - else if (IS_BROADWELL(dev)) + else if (IS_BROADWELL(dev_priv)) dev_priv->display.get_display_clock_speed = broadwell_get_display_clock_speed; - else if (IS_HASWELL(dev)) + else if (IS_HASWELL(dev_priv)) dev_priv->display.get_display_clock_speed = haswell_get_display_clock_speed; - else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) dev_priv->display.get_display_clock_speed = valleyview_get_display_clock_speed; - else if (IS_GEN5(dev)) + else if 
(IS_GEN5(dev_priv)) dev_priv->display.get_display_clock_speed = ilk_get_display_clock_speed; - else if (IS_I945G(dev) || IS_BROADWATER(dev) || - IS_GEN6(dev) || IS_IVYBRIDGE(dev)) + else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) || + IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) dev_priv->display.get_display_clock_speed = i945_get_display_clock_speed; - else if (IS_GM45(dev)) + else if (IS_GM45(dev_priv)) dev_priv->display.get_display_clock_speed = gm45_get_display_clock_speed; - else if (IS_CRESTLINE(dev)) + else if (IS_CRESTLINE(dev_priv)) dev_priv->display.get_display_clock_speed = i965gm_get_display_clock_speed; - else if (IS_PINEVIEW(dev)) + else if (IS_PINEVIEW(dev_priv)) dev_priv->display.get_display_clock_speed = pnv_get_display_clock_speed; - else if (IS_G33(dev) || IS_G4X(dev)) + else if (IS_G33(dev_priv) || IS_G4X(dev_priv)) dev_priv->display.get_display_clock_speed = g33_get_display_clock_speed; - else if (IS_I915G(dev)) + else if (IS_I915G(dev_priv)) dev_priv->display.get_display_clock_speed = i915_get_display_clock_speed; - else if (IS_I945GM(dev) || IS_845G(dev)) + else if (IS_I945GM(dev_priv) || IS_845G(dev_priv)) dev_priv->display.get_display_clock_speed = i9xx_misc_get_display_clock_speed; - else if (IS_I915GM(dev)) + else if (IS_I915GM(dev_priv)) dev_priv->display.get_display_clock_speed = i915gm_get_display_clock_speed; - else if (IS_I865G(dev)) + else if (IS_I865G(dev_priv)) dev_priv->display.get_display_clock_speed = i865_get_display_clock_speed; - else if (IS_I85X(dev)) + else if (IS_I85X(dev_priv)) dev_priv->display.get_display_clock_speed = i85x_get_display_clock_speed; else { /* 830 */ - WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n"); + WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n"); dev_priv->display.get_display_clock_speed = i830_get_display_clock_speed; } - if (IS_GEN5(dev)) { + if (IS_GEN5(dev_priv)) { dev_priv->display.fdi_link_train = ironlake_fdi_link_train; - } else if (IS_GEN6(dev)) { + } else if (IS_GEN6(dev_priv)) { dev_priv->display.fdi_link_train = gen6_fdi_link_train; - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev_priv)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { + } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; - if (IS_BROADWELL(dev)) { + if (IS_BROADWELL(dev_priv)) { dev_priv->display.modeset_commit_cdclk = broadwell_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = broadwell_modeset_calc_cdclk; } - } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { + } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { dev_priv->display.modeset_commit_cdclk = valleyview_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = valleyview_modeset_calc_cdclk; - } else if (IS_BROXTON(dev)) { + } else if (IS_BROXTON(dev_priv)) { dev_priv->display.modeset_commit_cdclk = broxton_modeset_commit_cdclk; dev_priv->display.modeset_calc_cdclk = broxton_modeset_calc_cdclk; } - switch (INTEL_INFO(dev)->gen) { + switch (INTEL_INFO(dev_priv)->gen) { case 2: dev_priv->display.queue_flip = intel_gen2_queue_flip; break; @@ -15050,8 +15055,6 @@ static void intel_init_display(struct drm_device *dev) /* Default just returns -ENODEV to indicate unsupported */ dev_priv->display.queue_flip = intel_default_queue_flip; } - - mutex_init(&dev_priv->pps_mutex); } /* @@ -15274,7 +15277,7 @@ static void 
sanitize_watermarks(struct drm_device *dev) int i; /* Only supported on platforms that use atomic watermark design */ - if (!dev_priv->display.program_watermarks) + if (!dev_priv->display.optimize_watermarks) return; /* @@ -15295,6 +15298,13 @@ retry: if (WARN_ON(IS_ERR(state))) goto fail; + /* + * Hardware readout is the only time we don't want to calculate + * intermediate watermarks (since we don't trust the current + * watermarks). + */ + to_intel_atomic_state(state)->skip_intermediate_wm = true; + ret = intel_atomic_check(dev, state); if (ret) { /* @@ -15317,7 +15327,8 @@ retry: for_each_crtc_in_state(state, crtc, cstate, i) { struct intel_crtc_state *cs = to_intel_crtc_state(cstate); - dev_priv->display.program_watermarks(cs); + cs->wm.need_postvbl_update = true; + dev_priv->display.optimize_watermarks(cs); } drm_atomic_state_free(state); @@ -15328,7 +15339,8 @@ fail: void intel_modeset_init(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; int sprite, ret; enum pipe pipe; struct intel_crtc *crtc; @@ -15370,9 +15382,6 @@ void intel_modeset_init(struct drm_device *dev) } } - intel_init_display(dev); - intel_init_audio(dev); - if (IS_GEN2(dev)) { dev->mode_config.max_width = 2048; dev->mode_config.max_height = 2048; @@ -15395,7 +15404,7 @@ void intel_modeset_init(struct drm_device *dev) dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; } - dev->mode_config.fb_base = dev_priv->gtt.mappable_base; + dev->mode_config.fb_base = ggtt->mappable_base; DRM_DEBUG_KMS("%d display pipe%s available.\n", INTEL_INFO(dev)->num_pipes, @@ -15412,6 +15421,7 @@ void intel_modeset_init(struct drm_device *dev) } intel_update_czclk(dev_priv); + intel_update_rawclk(dev_priv); intel_update_cdclk(dev); intel_shared_dpll_init(dev); @@ -15524,10 +15534,15 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder); + enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; /* Clear any frame start delays used for debugging left by the BIOS */ - I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + if (!transcoder_is_dsi(cpu_transcoder)) { + i915_reg_t reg = PIPECONF(cpu_transcoder); + + I915_WRITE(reg, + I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); + } /* restore vblank interrupts to correct state */ drm_crtc_vblank_reset(&crtc->base); @@ -15575,38 +15590,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) /* Adjust the state of the output pipe according to whether we * have active connectors/encoders. */ - if (!intel_crtc_has_encoders(crtc)) + if (crtc->active && !intel_crtc_has_encoders(crtc)) intel_crtc_disable_noatomic(&crtc->base); - if (crtc->active != crtc->base.state->active) { - struct intel_encoder *encoder; - - /* This can happen either due to bugs in the get_hw_state - * functions or because of calls to intel_crtc_disable_noatomic, - * or because the pipe is force-enabled due to the - * pipe A quirk. */ - DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", - crtc->base.base.id, - crtc->base.state->enable ? "enabled" : "disabled", - crtc->active ? 
"enabled" : "disabled"); - - WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0); - crtc->base.state->active = crtc->active; - crtc->base.enabled = crtc->active; - crtc->base.state->connector_mask = 0; - crtc->base.state->encoder_mask = 0; - - /* Because we only establish the connector -> encoder -> - * crtc links if something is active, this means the - * crtc is now deactivated. Break the links. connector - * -> encoder links are only establish when things are - * actually up, hence no need to break them. */ - WARN_ON(crtc->active); - - for_each_encoder_on_crtc(dev, &crtc->base, encoder) - encoder->base.crtc = NULL; - } - if (crtc->active || HAS_GMCH_DISPLAY(dev)) { /* * We start out with underrun reporting disabled to avoid races. @@ -15775,22 +15761,17 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - pll->on = pll->get_hw_state(dev_priv, pll, - &pll->config.hw_state); - pll->active = 0; + pll->on = pll->funcs.get_hw_state(dev_priv, pll, + &pll->config.hw_state); pll->config.crtc_mask = 0; for_each_intel_crtc(dev, crtc) { - if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) { - pll->active++; + if (crtc->active && crtc->config->shared_dpll == pll) pll->config.crtc_mask |= 1 << crtc->pipe; - } } + pll->active_mask = pll->config.crtc_mask; DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", pll->name, pll->config.crtc_mask, pll->on); - - if (pll->config.crtc_mask) - intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); } for_each_intel_encoder(dev, encoder) { @@ -15872,6 +15853,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); update_scanline_offset(crtc); } + + intel_pipe_config_sanity_check(dev_priv, crtc->config); } } @@ -15906,12 +15889,12 @@ intel_modeset_setup_hw_state(struct drm_device *dev) for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; - if (!pll->on || pll->active) + if (!pll->on || pll->active_mask) continue; DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); - pll->disable(dev_priv, pll); + pll->funcs.disable(dev_priv, pll); pll->on = false; } @@ -16020,9 +16003,8 @@ void intel_modeset_gem_init(struct drm_device *dev) continue; mutex_lock(&dev->struct_mutex); - ret = intel_pin_and_fence_fb_obj(c->primary, - c->primary->fb, - c->primary->state); + ret = intel_pin_and_fence_fb_obj(c->primary->fb, + c->primary->state->rotation); mutex_unlock(&dev->struct_mutex); if (ret) { DRM_ERROR("failed to pin boot fb on pipe %d\n", @@ -16231,8 +16213,9 @@ intel_display_capture_error_state(struct drm_device *dev) error->pipe[i].stat = I915_READ(PIPESTAT(i)); } + /* Note: this does not include DSI transcoders. */ error->num_transcoders = INTEL_INFO(dev)->num_pipes; - if (HAS_DDI(dev_priv->dev)) + if (HAS_DDI(dev_priv)) error->num_transcoders++; /* Account for eDP. 
*/ for (i = 0; i < error->num_transcoders; i++) { @@ -16303,7 +16286,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, } for (i = 0; i < error->num_transcoders; i++) { - err_printf(m, "CPU transcoder: %c\n", + err_printf(m, "CPU transcoder: %s\n", transcoder_name(error->transcoder[i].cpu_transcoder)); err_printf(m, " Power: %s\n", onoff(error->transcoder[i].power_domain_on)); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f069a82deb57..7523558190d1 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -129,6 +129,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe); +static void intel_dp_unset_edid(struct intel_dp *intel_dp); static unsigned int intel_dp_unused_lane_mask(int lane_count) { @@ -671,60 +672,55 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) return status; } -static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index) +static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); + + if (index) + return 0; /* * The clock divider is based off the hrawclk, and would like to run at - * 2MHz. So, take the hrawclk value and divide by 2 and use that + * 2MHz. So, take the hrawclk value and divide by 2000 and use that */ - return index ? 0 : DIV_ROUND_CLOSEST(intel_hrawclk(dev), 2); + return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); } static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); if (index) return 0; - if (intel_dig_port->port == PORT_A) { + /* + * The clock divider is based off the cdclk or PCH rawclk, and would + * like to run at 2MHz. So, take the cdclk or PCH rawclk value and + * divide by 2000 and use that + */ + if (intel_dig_port->port == PORT_A) return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); - - } else { - return DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2); - } + else + return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000); } static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - struct drm_device *dev = intel_dig_port->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - if (intel_dig_port->port == PORT_A) { - if (index) - return 0; - return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000); - } else if (HAS_PCH_LPT_H(dev_priv)) { + if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) { /* Workaround for non-ULT HSW */ switch (index) { case 0: return 63; case 1: return 72; default: return 0; } - } else { - return index ? 0 : DIV_ROUND_CLOSEST(intel_pch_rawclk(dev), 2); } -} -static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index) -{ - return index ? 
0 : 100; + return ilk_get_aux_clock_divider(intel_dp, index); } static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) @@ -737,10 +733,10 @@ static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index) return index ? 0 : 1; } -static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, - bool has_aux_irq, - int send_bytes, - uint32_t aux_clock_divider) +static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp, + bool has_aux_irq, + int send_bytes, + uint32_t aux_clock_divider) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = intel_dig_port->base.base.dev; @@ -1229,71 +1225,6 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) intel_connector_unregister(intel_connector); } -static void -skl_edp_set_pll_config(struct intel_crtc_state *pipe_config) -{ - u32 ctrl1; - - memset(&pipe_config->dpll_hw_state, 0, - sizeof(pipe_config->dpll_hw_state)); - - pipe_config->ddi_pll_sel = SKL_DPLL0; - pipe_config->dpll_hw_state.cfgcr1 = 0; - pipe_config->dpll_hw_state.cfgcr2 = 0; - - ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - switch (pipe_config->port_clock / 2) { - case 81000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, - SKL_DPLL0); - break; - case 135000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, - SKL_DPLL0); - break; - case 270000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, - SKL_DPLL0); - break; - case 162000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, - SKL_DPLL0); - break; - /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which - results in CDCLK change. Need to handle the change of CDCLK by - disabling pipes and re-enabling them */ - case 108000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, - SKL_DPLL0); - break; - case 216000: - ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, - SKL_DPLL0); - break; - - } - pipe_config->dpll_hw_state.ctrl1 = ctrl1; -} - -void -hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config) -{ - memset(&pipe_config->dpll_hw_state, 0, - sizeof(pipe_config->dpll_hw_state)); - - switch (pipe_config->port_clock / 2) { - case 81000: - pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810; - break; - case 135000: - pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350; - break; - case 270000: - pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700; - break; - } -} - static int intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) { @@ -1570,10 +1501,10 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Get bpp from vbt only for panels that dont have bpp in edid */ if (intel_connector->base.display_info.bpc == 0 && - (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) { + (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) { DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n", - dev_priv->vbt.edp_bpp); - bpp = dev_priv->vbt.edp_bpp; + dev_priv->vbt.edp.bpp); + bpp = dev_priv->vbt.edp.bpp; } /* @@ -1651,13 +1582,7 @@ found: &pipe_config->dp_m2_n2); } - if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && is_edp(intel_dp)) - skl_edp_set_pll_config(pipe_config); - else if (IS_BROXTON(dev)) - /* handled in ddi */; - else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - hsw_dp_set_ddi_pll_sel(pipe_config); - else + if (!HAS_DDI(dev)) intel_dp_set_clock(encoder, pipe_config); return true; @@ -1779,11 +1704,11 @@ static void wait_panel_status(struct intel_dp *intel_dp, I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - if 
(_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) { + if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, + 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) DRM_ERROR("Panel status timeout: status %08x control %08x\n", I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg)); - } DRM_DEBUG_KMS("Wait complete\n"); } @@ -2290,6 +2215,15 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp) POSTING_READ(DP_A); udelay(500); + /* + * [DevILK] Work around required when enabling DP PLL + * while a pipe is enabled going to FDI: + * 1. Wait for the start of vertical blank on the enabled pipe going to FDI + * 2. Program DP PLL enable + */ + if (IS_GEN5(dev_priv)) + intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe); + intel_dp->DP |= DP_PLL_ENABLE; I915_WRITE(DP_A, intel_dp->DP); @@ -2409,7 +2343,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; enum port port = dp_to_dig_port(intel_dp)->port; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); - int dotclock; tmp = I915_READ(intel_dp->output_reg); @@ -2459,16 +2392,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->port_clock = 270000; } - dotclock = intel_dotclock_calculate(pipe_config->port_clock, - &pipe_config->dp_m_n); - - if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A) - ironlake_check_encoder_dotclock(pipe_config, dotclock); + pipe_config->base.adjusted_mode.crtc_clock = + intel_dotclock_calculate(pipe_config->port_clock, + &pipe_config->dp_m_n); - pipe_config->base.adjusted_mode.crtc_clock = dotclock; - - if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp && - pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) { + if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp && + pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) { /* * This is a big fat ugly hack. * @@ -2483,8 +2412,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder, * load. */ DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n", - pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp); - dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; + pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp); + dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp; } } @@ -2710,7 +2639,6 @@ static void intel_enable_dp(struct intel_encoder *encoder) struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); uint32_t dp_reg = I915_READ(intel_dp->output_reg); - enum port port = dp_to_dig_port(intel_dp)->port; enum pipe pipe = crtc->pipe; if (WARN_ON(dp_reg & DP_PORT_EN)) @@ -2721,35 +2649,12 @@ static void intel_enable_dp(struct intel_encoder *encoder) if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) vlv_init_panel_power_sequencer(intel_dp); - /* - * We get an occasional spurious underrun between the port - * enable and vdd enable, when enabling port A eDP. - * - * FIXME: Not sure if this applies to (PCH) port D eDP as well - */ - if (port == PORT_A) - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); - intel_dp_enable_port(intel_dp); - if (port == PORT_A && IS_GEN5(dev_priv)) { - /* - * Underrun reporting for the other pipe was disabled in - * g4x_pre_enable_dp(). The eDP PLL and port have now been - * enabled, so it's now safe to re-enable underrun reporting. 
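Earlier in this hunk, wait_panel_status() changes the bare "5000, 10" arguments of _wait_for() into 5 * USEC_PER_SEC and 10 * USEC_PER_MSEC, so the timeout and poll interval carry explicit units. A userspace approximation of such a poll-with-timeout helper (assumed names; the kernel's _wait_for() is a macro with different plumbing):

/* Poll-with-timeout sketch, loosely modelled on _wait_for(). */
#define _POSIX_C_SOURCE 200809L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define USEC_PER_MSEC 1000L
#define USEC_PER_SEC  1000000L

static long now_usec(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / 1000;
}

/* Run cond() every interval_us until it returns true or timeout_us
 * expires. Returns 0 on success, -1 on timeout. */
static int wait_for_cond(bool (*cond)(void), long timeout_us, long interval_us)
{
	long deadline = now_usec() + timeout_us;

	while (!cond()) {
		if (now_usec() >= deadline)
			return -1;
		struct timespec ts = {
			.tv_sec = interval_us / USEC_PER_SEC,
			.tv_nsec = (interval_us % USEC_PER_SEC) * 1000,
		};
		nanosleep(&ts, NULL);
	}
	return 0;
}

static bool panel_ready(void) { return true; } /* stand-in for a status-register read */

int main(void)
{
	/* 5 s total, polling every 10 ms, as in wait_panel_status(). */
	if (wait_for_cond(panel_ready, 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC))
		fprintf(stderr, "panel status timeout\n");
	return 0;
}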
- */ - intel_wait_for_vblank_if_active(dev_priv->dev, !pipe); - intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, true); - intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, true); - } - edp_panel_vdd_on(intel_dp); edp_panel_on(intel_dp); edp_panel_vdd_off(intel_dp, true); - if (port == PORT_A) - intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); - pps_unlock(intel_dp); if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { @@ -2791,26 +2696,11 @@ static void vlv_enable_dp(struct intel_encoder *encoder) static void g4x_pre_enable_dp(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; - enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe; intel_dp_prepare(encoder); - if (port == PORT_A && IS_GEN5(dev_priv)) { - /* - * We get FIFO underruns on the other pipe when - * enabling the CPU eDP PLL, and when enabling CPU - * eDP port. We could potentially avoid the PLL - * underrun with a vblank wait just prior to enabling - * the PLL, but that doesn't appear to help the port - * enable case. Just sweep it all under the rug. - */ - intel_set_cpu_fifo_underrun_reporting(dev_priv, !pipe, false); - intel_set_pch_fifo_underrun_reporting(dev_priv, !pipe, false); - } - /* Only ilk+ has port A */ if (port == PORT_A) ironlake_edp_pll_on(intel_dp); @@ -3238,7 +3128,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp) if (IS_BROXTON(dev)) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; else if (INTEL_INFO(dev)->gen >= 9) { - if (dev_priv->edp_low_vswing && port == PORT_A) + if (dev_priv->vbt.edp.low_vswing && port == PORT_A) return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) @@ -3868,6 +3758,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) if (intel_dp->dpcd[DP_DPCD_REV] == 0) return false; /* DPCD not present */ + if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, + &intel_dp->sink_count, 1) < 0) + return false; + + /* + * Sink count can change between short pulse hpd hence + * a member variable in intel_dp will track any changes + * between short pulse interrupts. + */ + intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count); + + /* + * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that + * a dongle is present but no display. Unless we require to know + * if a dongle is present or not, we don't need to update + * downstream port information. So, an early return here saves + * time from performing other operations which are not required. 
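The DPCD change above caches a decoded SINK_COUNT in intel_dp and bails out early when it reads zero, so a later short pulse can compare against the cached value instead of redoing a full detect. A standalone sketch of that comparison, assuming the bit layout of DP_GET_SINK_COUNT() from drm_dp_helper.h (bit 7 carries SINK_COUNT bit 6, bits 5:0 the rest):

/* Sketch of the cached-sink-count check; illustrative struct, not i915's. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t dp_get_sink_count(uint8_t raw)
{
	return ((raw & 0x80) >> 1) | (raw & 0x3f);
}

struct dp_state {
	uint8_t sink_count; /* cached across short-pulse interrupts */
};

/* Returns true if a short pulse can be handled in place, false if the
 * caller should fall back to a full detect cycle. */
static bool short_pulse_ok(struct dp_state *dp, uint8_t raw_dpcd_sink_count)
{
	uint8_t old = dp->sink_count;

	dp->sink_count = dp_get_sink_count(raw_dpcd_sink_count);

	/* A changed (or zero) sink count means the downstream topology
	 * changed -- exactly the case intel_dp_short_pulse() punts on. */
	return dp->sink_count && dp->sink_count == old;
}

int main(void)
{
	struct dp_state dp = { .sink_count = 1 };

	printf("same count:  %s\n", short_pulse_ok(&dp, 0x01) ? "handled" : "full detect");
	printf("count now 0: %s\n", short_pulse_ok(&dp, 0x00) ? "handled" : "full detect");
	return 0;
}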
+ */ + if (!intel_dp->sink_count) + return false; + /* Check if the panel supports PSR */ memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd)); if (is_edp(intel_dp)) { @@ -3963,6 +3874,9 @@ intel_dp_probe_mst(struct intel_dp *intel_dp) { u8 buf[1]; + if (!i915.enable_dp_mst) + return false; + if (!intel_dp->can_mst) return false; @@ -4292,6 +4206,36 @@ go_again: return -EINVAL; } +static void +intel_dp_check_link_status(struct intel_dp *intel_dp) +{ + struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_device *dev = intel_dp_to_dev(intel_dp); + u8 link_status[DP_LINK_STATUS_SIZE]; + + WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + + if (!intel_dp_get_link_status(intel_dp, link_status)) { + DRM_ERROR("Failed to get link status\n"); + return; + } + + if (!intel_encoder->base.crtc) + return; + + if (!to_intel_crtc(intel_encoder->base.crtc)->active) + return; + + /* if link training is requested we should perform it always */ + if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) || + (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { + DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", + intel_encoder->base.name); + intel_dp_start_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); + } +} + /* * According to DP spec * 5.1.2: @@ -4299,16 +4243,19 @@ go_again: * 2. Configure link according to Receiver Capabilities * 3. Use Link Training from 2.5.3.3 and 3.5.1.3 * 4. Check link status on receipt of hot-plug interrupt + * + * intel_dp_short_pulse - handles short pulse interrupts + * when full detection is not required. + * Returns %true if short pulse is handled and full detection + * is NOT required and %false otherwise. */ -static void -intel_dp_check_link_status(struct intel_dp *intel_dp) +static bool +intel_dp_short_pulse(struct intel_dp *intel_dp) { struct drm_device *dev = intel_dp_to_dev(intel_dp); - struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base; u8 sink_irq_vector; - u8 link_status[DP_LINK_STATUS_SIZE]; - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + u8 old_sink_count = intel_dp->sink_count; + bool ret; /* * Clearing compliance test variables to allow capturing @@ -4318,20 +4265,17 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) intel_dp->compliance_test_type = 0; intel_dp->compliance_test_data = 0; - if (!intel_encoder->base.crtc) - return; - - if (!to_intel_crtc(intel_encoder->base.crtc)->active) - return; - - /* Try to read receiver status if the link appears to be up */ - if (!intel_dp_get_link_status(intel_dp, link_status)) { - return; - } + /* + * Now read the DPCD to see if it's actually running + * If the current value of sink count doesn't match with + * the value that was stored earlier or dpcd read failed + * we need to do full detection + */ + ret = intel_dp_get_dpcd(intel_dp); - /* Now read the DPCD to see if it's actually running */ - if (!intel_dp_get_dpcd(intel_dp)) { - return; + if ((old_sink_count != intel_dp->sink_count) || !ret) { + /* No need to proceed if we are going to do full detect */ + return false; } /* Try to read the source of the interrupt */ @@ -4348,14 +4292,11 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); } - /* if link training is requested we should perform it always */ - if ((intel_dp->compliance_test_type == DP_TEST_LINK_TRAINING) || - (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) { - DRM_DEBUG_KMS("%s: channel EQ not ok, 
retraining\n", - intel_encoder->base.name); - intel_dp_start_link_train(intel_dp); - intel_dp_stop_link_train(intel_dp); - } + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + intel_dp_check_link_status(intel_dp); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return true; } /* XXX this is probably wrong for multiple downstream ports */ @@ -4375,14 +4316,9 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp) /* If we're HPD-aware, SINK_COUNT changes dynamically */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) { - uint8_t reg; - if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT, - ®, 1) < 0) - return connector_status_unknown; - - return DP_GET_SINK_COUNT(reg) ? connector_status_connected - : connector_status_disconnected; + return intel_dp->sink_count ? + connector_status_connected : connector_status_disconnected; } /* If no HPD, poke DDC gently */ @@ -4591,6 +4527,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp) struct intel_connector *intel_connector = intel_dp->attached_connector; struct edid *edid; + intel_dp_unset_edid(intel_dp); edid = intel_dp_get_edid(intel_dp); intel_connector->detect_edid = edid; @@ -4611,9 +4548,10 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) intel_dp->has_audio = false; } -static enum drm_connector_status -intel_dp_detect(struct drm_connector *connector, bool force) +static void +intel_dp_long_pulse(struct intel_connector *intel_connector) { + struct drm_connector *connector = &intel_connector->base; struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct intel_encoder *intel_encoder = &intel_dig_port->base; @@ -4623,17 +4561,6 @@ intel_dp_detect(struct drm_connector *connector, bool force) bool ret; u8 sink_irq_vector; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", - connector->base.id, connector->name); - intel_dp_unset_edid(intel_dp); - - if (intel_dp->is_mst) { - /* MST devices are disconnected from a monitor POV */ - if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; - return connector_status_disconnected; - } - power_domain = intel_display_port_aux_power_domain(intel_encoder); intel_display_power_get(to_i915(dev), power_domain); @@ -4654,16 +4581,30 @@ intel_dp_detect(struct drm_connector *connector, bool force) goto out; } + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + intel_dp_probe_oui(intel_dp); ret = intel_dp_probe_mst(intel_dp); if (ret) { - /* if we are in MST mode then this connector - won't appear connected or have anything with EDID on it */ - if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + /* + * If we are in MST mode then this connector + * won't appear connected or have anything + * with EDID on it + */ status = connector_status_disconnected; goto out; + } else if (connector->status == connector_status_connected) { + /* + * If display was connected already and is still connected + * check links status, there has been known issues of + * link loss triggerring long pulse!!!! 
+ */ + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + intel_dp_check_link_status(intel_dp); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + goto out; } /* @@ -4676,9 +4617,8 @@ intel_dp_detect(struct drm_connector *connector, bool force) intel_dp_set_edid(intel_dp); - if (intel_encoder->type != INTEL_OUTPUT_EDP) - intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; status = connector_status_connected; + intel_dp->detect_done = true; /* Try to read the source of the interrupt */ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && @@ -4695,8 +4635,54 @@ intel_dp_detect(struct drm_connector *connector, bool force) } out: + if (status != connector_status_connected) { + intel_dp_unset_edid(intel_dp); + /* + * If we were in MST mode, and device is not there, + * get out of MST mode + */ + if (intel_dp->is_mst) { + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); + } + } + intel_display_power_put(to_i915(dev), power_domain); - return status; + return; +} + +static enum drm_connector_status +intel_dp_detect(struct drm_connector *connector, bool force) +{ + struct intel_dp *intel_dp = intel_attached_dp(connector); + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct intel_encoder *intel_encoder = &intel_dig_port->base; + struct intel_connector *intel_connector = to_intel_connector(connector); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", + connector->base.id, connector->name); + + if (intel_dp->is_mst) { + /* MST devices are disconnected from a monitor POV */ + intel_dp_unset_edid(intel_dp); + if (intel_encoder->type != INTEL_OUTPUT_EDP) + intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; + return connector_status_disconnected; + } + + /* If full detect is not performed yet, do a full detect */ + if (!intel_dp->detect_done) + intel_dp_long_pulse(intel_dp->attached_connector); + + intel_dp->detect_done = false; + + if (intel_connector->detect_edid) + return connector_status_connected; + else + return connector_status_disconnected; } static void @@ -5023,44 +5009,37 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) /* indicate that we need to restart link training */ intel_dp->train_set_valid = false; - if (!intel_digital_port_connected(dev_priv, intel_dig_port)) - goto mst_fail; + intel_dp_long_pulse(intel_dp->attached_connector); + if (intel_dp->is_mst) + ret = IRQ_HANDLED; + goto put_power; - if (!intel_dp_get_dpcd(intel_dp)) { - goto mst_fail; - } - - intel_dp_probe_oui(intel_dp); - - if (!intel_dp_probe_mst(intel_dp)) { - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - intel_dp_check_link_status(intel_dp); - drm_modeset_unlock(&dev->mode_config.connection_mutex); - goto mst_fail; - } } else { if (intel_dp->is_mst) { - if (intel_dp_check_mst_status(intel_dp) == -EINVAL) - goto mst_fail; + if (intel_dp_check_mst_status(intel_dp) == -EINVAL) { + /* + * If we were in MST mode, and device is not + * there, get out of MST mode + */ + DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", + intel_dp->is_mst, intel_dp->mst_mgr.mst_state); + intel_dp->is_mst = false; + drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, + intel_dp->is_mst); + goto put_power; + } } if (!intel_dp->is_mst) { - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - intel_dp_check_link_status(intel_dp); - drm_modeset_unlock(&dev->mode_config.connection_mutex); + if 
(!intel_dp_short_pulse(intel_dp)) { + intel_dp_long_pulse(intel_dp->attached_connector); + goto put_power; + } } } ret = IRQ_HANDLED; - goto put_power; -mst_fail: - /* if we were in MST mode, and device is not there get out of MST mode */ - if (intel_dp->is_mst) { - DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state); - intel_dp->is_mst = false; - drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); - } put_power: intel_display_power_put(dev_priv, power_domain); @@ -5071,14 +5050,6 @@ put_power: bool intel_dp_is_edp(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; - union child_device_config *p_child; - int i; - static const short port_mapping[] = { - [PORT_B] = DVO_PORT_DPB, - [PORT_C] = DVO_PORT_DPC, - [PORT_D] = DVO_PORT_DPD, - [PORT_E] = DVO_PORT_DPE, - }; /* * eDP not supported on g4x. so bail out early just @@ -5090,18 +5061,7 @@ bool intel_dp_is_edp(struct drm_device *dev, enum port port) if (port == PORT_A) return true; - if (!dev_priv->vbt.child_dev_num) - return false; - - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - p_child = dev_priv->vbt.child_dev + i; - - if (p_child->common.dvo_port == port_mapping[port] && - (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) == - (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) - return true; - } - return false; + return intel_bios_is_port_edp(dev_priv, port); } void @@ -5208,7 +5168,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); - vbt = dev_priv->vbt.edp_pps; + vbt = dev_priv->vbt.edp.pps; /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of * our hw here, which are all in 100usec. */ @@ -5259,7 +5219,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, { struct drm_i915_private *dev_priv = dev->dev_private; u32 pp_on, pp_off, pp_div, port_sel = 0; - int div = HAS_PCH_SPLIT(dev) ? 
intel_pch_rawclk(dev) : intel_hrawclk(dev); + int div = dev_priv->rawclk_freq / 1000; i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg; enum port port = dp_to_dig_port(intel_dp)->port; const struct edp_power_seq *seq = &intel_dp->pps_delays; @@ -5852,19 +5812,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, /* intel_dp vfuncs */ if (INTEL_INFO(dev)->gen >= 9) intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider; - else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) - intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; else if (HAS_PCH_SPLIT(dev)) intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider; else - intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; + intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider; if (INTEL_INFO(dev)->gen >= 9) intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl; else - intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; + intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl; if (HAS_DDI(dev)) intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index a2bd698fe2f7..94b4e833dadd 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -33,7 +33,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_device *dev = encoder->base.dev; struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; @@ -92,9 +91,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->dp_m_n.tu = slots; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - hsw_dp_set_ddi_pll_sel(pipe_config); - return true; } @@ -506,6 +502,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct intel_connector *intel_connector = to_intel_connector(connector); struct drm_device *dev = connector->dev; + intel_connector->unregister(intel_connector); + /* need to nuke the connector */ drm_modeset_lock_all(dev); if (connector->state->crtc) { @@ -519,11 +517,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr, WARN(ret, "Disabling mst crtc failed with %i\n", ret); } - drm_modeset_unlock_all(dev); - - intel_connector->unregister(intel_connector); - drm_modeset_lock_all(dev); intel_connector_remove_from_fbdev(intel_connector); drm_connector_cleanup(connector); drm_modeset_unlock_all(dev); diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c new file mode 100644 index 000000000000..0bde6a4259fd --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c @@ -0,0 +1,1794 @@ +/* + * Copyright © 2006-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in 
all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "intel_drv.h" + +struct intel_shared_dpll * +intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, + enum intel_dpll_id id) +{ + return &dev_priv->shared_dplls[id]; +} + +enum intel_dpll_id +intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + if (WARN_ON(pll < dev_priv->shared_dplls|| + pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll])) + return -1; + + return (enum intel_dpll_id) (pll - dev_priv->shared_dplls); +} + +void +intel_shared_dpll_config_get(struct intel_shared_dpll_config *config, + struct intel_shared_dpll *pll, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll); + + config[id].crtc_mask |= 1 << crtc->pipe; +} + +void +intel_shared_dpll_config_put(struct intel_shared_dpll_config *config, + struct intel_shared_dpll *pll, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum intel_dpll_id id = intel_get_shared_dpll_id(dev_priv, pll); + + config[id].crtc_mask &= ~(1 << crtc->pipe); +} + +/* For ILK+ */ +void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state) +{ + bool cur_state; + struct intel_dpll_hw_state hw_state; + + if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state))) + return; + + cur_state = pll->funcs.get_hw_state(dev_priv, pll, &hw_state); + I915_STATE_WARN(cur_state != state, + "%s assertion failure (expected %s, current %s)\n", + pll->name, onoff(state), onoff(cur_state)); +} + +void intel_prepare_shared_dpll(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_shared_dpll *pll = crtc->config->shared_dpll; + + if (WARN_ON(pll == NULL)) + return; + + mutex_lock(&dev_priv->dpll_lock); + WARN_ON(!pll->config.crtc_mask); + if (!pll->active_mask) { + DRM_DEBUG_DRIVER("setting up %s\n", pll->name); + WARN_ON(pll->on); + assert_shared_dpll_disabled(dev_priv, pll); + + pll->funcs.mode_set(dev_priv, pll); + } + mutex_unlock(&dev_priv->dpll_lock); +} + +/** + * intel_enable_shared_dpll - enable PCH PLL + * @dev_priv: i915 private structure + * @pipe: pipe PLL to enable + * + * The PCH PLL needs to be enabled before the PCH transcoder, since it + * drives the transcoder clock. 
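The intel_enable_shared_dpll()/intel_disable_shared_dpll() pair that follows tracks users in active_mask, one bit per CRTC, and only touches the hardware on the first and last reference. A minimal sketch of that bookkeeping, without the driver's locking and state warnings:

/* Sketch of bitmask reference counting for a shared PLL. */
#include <stdbool.h>
#include <stdio.h>

struct shared_pll {
	unsigned int active_mask; /* one bit per CRTC using the PLL */
	bool on;
};

static void pll_enable(struct shared_pll *pll, int crtc_index)
{
	unsigned int bit = 1u << crtc_index;

	if (pll->active_mask & bit)
		return; /* already counted */

	if (!pll->active_mask) {
		pll->on = true; /* first user: program and enable hardware */
		printf("enabling PLL\n");
	}
	pll->active_mask |= bit;
}

static void pll_disable(struct shared_pll *pll, int crtc_index)
{
	pll->active_mask &= ~(1u << crtc_index);
	if (!pll->active_mask) {
		pll->on = false; /* last user gone: power the PLL down */
		printf("disabling PLL\n");
	}
}

int main(void)
{
	struct shared_pll pll = { 0 };

	pll_enable(&pll, 0);
	pll_enable(&pll, 1); /* second CRTC shares the running PLL */
	pll_disable(&pll, 0);
	pll_disable(&pll, 1); /* last reference drops -> hardware off */
	return 0;
}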
+ */ +void intel_enable_shared_dpll(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_shared_dpll *pll = crtc->config->shared_dpll; + unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); + unsigned old_mask; + + if (WARN_ON(pll == NULL)) + return; + + mutex_lock(&dev_priv->dpll_lock); + old_mask = pll->active_mask; + + if (WARN_ON(!(pll->config.crtc_mask & crtc_mask)) || + WARN_ON(pll->active_mask & crtc_mask)) + goto out; + + pll->active_mask |= crtc_mask; + + DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n", + pll->name, pll->active_mask, pll->on, + crtc->base.base.id); + + if (old_mask) { + WARN_ON(!pll->on); + assert_shared_dpll_enabled(dev_priv, pll); + goto out; + } + WARN_ON(pll->on); + + DRM_DEBUG_KMS("enabling %s\n", pll->name); + pll->funcs.enable(dev_priv, pll); + pll->on = true; + +out: + mutex_unlock(&dev_priv->dpll_lock); +} + +void intel_disable_shared_dpll(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_shared_dpll *pll = crtc->config->shared_dpll; + unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); + + /* PCH only available on ILK+ */ + if (INTEL_INFO(dev)->gen < 5) + return; + + if (pll == NULL) + return; + + mutex_lock(&dev_priv->dpll_lock); + if (WARN_ON(!(pll->active_mask & crtc_mask))) + goto out; + + DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n", + pll->name, pll->active_mask, pll->on, + crtc->base.base.id); + + assert_shared_dpll_enabled(dev_priv, pll); + WARN_ON(!pll->on); + + pll->active_mask &= ~crtc_mask; + if (pll->active_mask) + goto out; + + DRM_DEBUG_KMS("disabling %s\n", pll->name); + pll->funcs.disable(dev_priv, pll); + pll->on = false; + +out: + mutex_unlock(&dev_priv->dpll_lock); +} + +static struct intel_shared_dpll * +intel_find_shared_dpll(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, + enum intel_dpll_id range_min, + enum intel_dpll_id range_max) +{ + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; + struct intel_shared_dpll *pll; + struct intel_shared_dpll_config *shared_dpll; + enum intel_dpll_id i; + + shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); + + for (i = range_min; i <= range_max; i++) { + pll = &dev_priv->shared_dplls[i]; + + /* Only want to check enabled timings first */ + if (shared_dpll[i].crtc_mask == 0) + continue; + + if (memcmp(&crtc_state->dpll_hw_state, + &shared_dpll[i].hw_state, + sizeof(crtc_state->dpll_hw_state)) == 0) { + DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", + crtc->base.base.id, pll->name, + shared_dpll[i].crtc_mask, + pll->active_mask); + return pll; + } + } + + /* Ok no matching timings, maybe there's a free one? 
*/ + for (i = range_min; i <= range_max; i++) { + pll = &dev_priv->shared_dplls[i]; + if (shared_dpll[i].crtc_mask == 0) { + DRM_DEBUG_KMS("CRTC:%d allocated %s\n", + crtc->base.base.id, pll->name); + return pll; + } + } + + return NULL; +} + +static void +intel_reference_shared_dpll(struct intel_shared_dpll *pll, + struct intel_crtc_state *crtc_state) +{ + struct intel_shared_dpll_config *shared_dpll; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + enum intel_dpll_id i = pll->id; + + shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); + + if (shared_dpll[i].crtc_mask == 0) + shared_dpll[i].hw_state = + crtc_state->dpll_hw_state; + + crtc_state->shared_dpll = pll; + DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, + pipe_name(crtc->pipe)); + + intel_shared_dpll_config_get(shared_dpll, pll, crtc); +} + +void intel_shared_dpll_commit(struct drm_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->dev); + struct intel_shared_dpll_config *shared_dpll; + struct intel_shared_dpll *pll; + enum intel_dpll_id i; + + if (!to_intel_atomic_state(state)->dpll_set) + return; + + shared_dpll = to_intel_atomic_state(state)->shared_dpll; + for (i = 0; i < dev_priv->num_shared_dpll; i++) { + pll = &dev_priv->shared_dplls[i]; + pll->config = shared_dpll[i]; + } +} + +static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + val = I915_READ(PCH_DPLL(pll->id)); + hw_state->dpll = val; + hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); + hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return val & DPLL_VCO_ENABLE; +} + +static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); + I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); +} + +static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) +{ + u32 val; + bool enabled; + + I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))); + + val = I915_READ(PCH_DREF_CONTROL); + enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | + DREF_SUPERSPREAD_SOURCE_MASK)); + I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); +} + +static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + /* PCH refclock must be enabled first */ + ibx_assert_pch_refclk_enabled(dev_priv); + + I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(PCH_DPLL(pll->id)); + udelay(150); + + /* The pixel multiplier can only be updated once the + * DPLL is enabled and the clocks are stable. + * + * So write it again. + */ + I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); + POSTING_READ(PCH_DPLL(pll->id)); + udelay(200); +} + +static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + struct drm_device *dev = dev_priv->dev; + struct intel_crtc *crtc; + + /* Make sure no transcoder isn't still depending on us. 
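intel_find_shared_dpll() just above makes two passes over the candidate range: first try to share a PLL whose programmed state matches byte-for-byte (the memcmp() on dpll_hw_state), then fall back to any free slot. A standalone sketch, with a hypothetical flat state struct standing in for intel_dpll_hw_state:

/* Two-pass shared-PLL allocation sketch; illustrative types. */
#include <stdio.h>
#include <string.h>

#define NUM_PLLS 3

struct pll_state { unsigned int div; };

struct pll_slot {
	unsigned int crtc_mask;  /* 0 means the slot is free */
	struct pll_state hw_state;
};

static int find_shared_pll(struct pll_slot *slots, const struct pll_state *want)
{
	int i;

	/* Pass 1: a PLL already running the timings we need can be shared. */
	for (i = 0; i < NUM_PLLS; i++)
		if (slots[i].crtc_mask &&
		    memcmp(&slots[i].hw_state, want, sizeof(*want)) == 0)
			return i;

	/* Pass 2: otherwise grab a free slot. */
	for (i = 0; i < NUM_PLLS; i++)
		if (!slots[i].crtc_mask)
			return i;

	return -1; /* no PLL available */
}

int main(void)
{
	struct pll_slot slots[NUM_PLLS] = {
		{ .crtc_mask = 0x1, .hw_state = { .div = 5 } },
	};
	struct pll_state want = { .div = 5 };

	printf("matching pll: %d\n", find_shared_pll(slots, &want)); /* 0: shared */
	want.div = 7;
	printf("free pll:     %d\n", find_shared_pll(slots, &want)); /* 1: free   */
	return 0;
}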
*/ + for_each_intel_crtc(dev, crtc) { + if (crtc->config->shared_dpll == pll) + assert_pch_transcoder_disabled(dev_priv, crtc->pipe); + } + + I915_WRITE(PCH_DPLL(pll->id), 0); + POSTING_READ(PCH_DPLL(pll->id)); + udelay(200); +} + +static struct intel_shared_dpll * +ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll; + enum intel_dpll_id i; + + if (HAS_PCH_IBX(dev_priv)) { + /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ + i = (enum intel_dpll_id) crtc->pipe; + pll = &dev_priv->shared_dplls[i]; + + DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", + crtc->base.base.id, pll->name); + } else { + pll = intel_find_shared_dpll(crtc, crtc_state, + DPLL_ID_PCH_PLL_A, + DPLL_ID_PCH_PLL_B); + } + + /* reference the pll */ + intel_reference_shared_dpll(pll, crtc_state); + + return pll; +} + +static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = { + .mode_set = ibx_pch_dpll_mode_set, + .enable = ibx_pch_dpll_enable, + .disable = ibx_pch_dpll_disable, + .get_hw_state = ibx_pch_dpll_get_hw_state, +}; + +static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll); + POSTING_READ(WRPLL_CTL(pll->id)); + udelay(20); +} + +static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + I915_WRITE(SPLL_CTL, pll->config.hw_state.spll); + POSTING_READ(SPLL_CTL); + udelay(20); +} + +static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + + val = I915_READ(WRPLL_CTL(pll->id)); + I915_WRITE(WRPLL_CTL(pll->id), val & ~WRPLL_PLL_ENABLE); + POSTING_READ(WRPLL_CTL(pll->id)); +} + +static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + + val = I915_READ(SPLL_CTL); + I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE); + POSTING_READ(SPLL_CTL); +} + +static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + val = I915_READ(WRPLL_CTL(pll->id)); + hw_state->wrpll = val; + + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return val & WRPLL_PLL_ENABLE; +} + +static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + val = I915_READ(SPLL_CTL); + hw_state->spll = val; + + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return val & SPLL_PLL_ENABLE; +} + +static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll) +{ + switch (pll->id) { + case DPLL_ID_WRPLL1: + return PORT_CLK_SEL_WRPLL1; + case DPLL_ID_WRPLL2: + return PORT_CLK_SEL_WRPLL2; + case DPLL_ID_SPLL: + return PORT_CLK_SEL_SPLL; + case DPLL_ID_LCPLL_810: + return PORT_CLK_SEL_LCPLL_810; + case DPLL_ID_LCPLL_1350: + return PORT_CLK_SEL_LCPLL_1350; + case DPLL_ID_LCPLL_2700: + return PORT_CLK_SEL_LCPLL_2700; + default: + return PORT_CLK_SEL_NONE; + } +} + +#define LC_FREQ 2700 +#define LC_FREQ_2K U64_C(LC_FREQ * 2000) + +#define P_MIN 2 +#define P_MAX 64 +#define P_INC 2 + +/* Constraints for PLL good 
behavior */ +#define REF_MIN 48 +#define REF_MAX 400 +#define VCO_MIN 2400 +#define VCO_MAX 4800 + +struct hsw_wrpll_rnp { + unsigned p, n2, r2; +}; + +static unsigned hsw_wrpll_get_budget_for_freq(int clock) +{ + unsigned budget; + + switch (clock) { + case 25175000: + case 25200000: + case 27000000: + case 27027000: + case 37762500: + case 37800000: + case 40500000: + case 40541000: + case 54000000: + case 54054000: + case 59341000: + case 59400000: + case 72000000: + case 74176000: + case 74250000: + case 81000000: + case 81081000: + case 89012000: + case 89100000: + case 108000000: + case 108108000: + case 111264000: + case 111375000: + case 148352000: + case 148500000: + case 162000000: + case 162162000: + case 222525000: + case 222750000: + case 296703000: + case 297000000: + budget = 0; + break; + case 233500000: + case 245250000: + case 247750000: + case 253250000: + case 298000000: + budget = 1500; + break; + case 169128000: + case 169500000: + case 179500000: + case 202000000: + budget = 2000; + break; + case 256250000: + case 262500000: + case 270000000: + case 272500000: + case 273750000: + case 280750000: + case 281250000: + case 286000000: + case 291750000: + budget = 4000; + break; + case 267250000: + case 268500000: + budget = 5000; + break; + default: + budget = 1000; + break; + } + + return budget; +} + +static void hsw_wrpll_update_rnp(uint64_t freq2k, unsigned budget, + unsigned r2, unsigned n2, unsigned p, + struct hsw_wrpll_rnp *best) +{ + uint64_t a, b, c, d, diff, diff_best; + + /* No best (r,n,p) yet */ + if (best->p == 0) { + best->p = p; + best->n2 = n2; + best->r2 = r2; + return; + } + + /* + * Output clock is (LC_FREQ_2K / 2000) * N / (P * R), which compares to + * freq2k. + * + * delta = 1e6 * + * abs(freq2k - (LC_FREQ_2K * n2/(p * r2))) / + * freq2k; + * + * and we would like delta <= budget. + * + * If the discrepancy is above the PPM-based budget, always prefer to + * improve upon the previous solution. However, if you're within the + * budget, try to maximize Ref * VCO, that is N / (P * R^2). + */ + a = freq2k * budget * p * r2; + b = freq2k * budget * best->p * best->r2; + diff = abs_diff(freq2k * p * r2, LC_FREQ_2K * n2); + diff_best = abs_diff(freq2k * best->p * best->r2, + LC_FREQ_2K * best->n2); + c = 1000000 * diff; + d = 1000000 * diff_best; + + if (a < c && b < d) { + /* If both are above the budget, pick the closer */ + if (best->p * best->r2 * diff < p * r2 * diff_best) { + best->p = p; + best->n2 = n2; + best->r2 = r2; + } + } else if (a >= c && b < d) { + /* If A is below the threshold but B is above it? Update. */ + best->p = p; + best->n2 = n2; + best->r2 = r2; + } else if (a >= c && b >= d) { + /* Both are below the limit, so pick the higher n2/(r2*r2) */ + if (n2 * best->r2 * best->r2 > best->n2 * r2 * r2) { + best->p = p; + best->n2 = n2; + best->r2 = r2; + } + } + /* Otherwise a < c && b >= d, do nothing */ +} + +static void +hsw_ddi_calculate_wrpll(int clock /* in Hz */, + unsigned *r2_out, unsigned *n2_out, unsigned *p_out) +{ + uint64_t freq2k; + unsigned p, n2, r2; + struct hsw_wrpll_rnp best = { 0, 0, 0 }; + unsigned budget; + + freq2k = clock / 100; + + budget = hsw_wrpll_get_budget_for_freq(clock); + + /* Special case handling for 540 pixel clock: bypass WR PLL entirely + * and directly pass the LC PLL to it. */ + if (freq2k == 5400000) { + *n2_out = 2; + *p_out = 1; + *r2_out = 2; + return; + } + + /* + * Ref = LC_FREQ / R, where Ref is the actual reference input seen by + * the WR PLL. 
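hsw_ddi_calculate_wrpll() below walks r2 and n2 within the reference and VCO limits and scores each (r2, n2, p) candidate against a per-frequency ppm budget. The sketch here keeps the same search space but simply minimizes absolute error, dropping the budget tie-breaking of hsw_wrpll_update_rnp():

/* Simplified WRPLL divider search: same bounds as the patch, but the
 * winner is just the candidate with the smallest absolute error. */
#include <stdint.h>
#include <stdio.h>

#define LC_FREQ 2700 /* MHz */
#define REF_MIN 48
#define REF_MAX 400
#define VCO_MIN 2400
#define VCO_MAX 4800

struct rnp { unsigned int r2, n2, p; };

static struct rnp calc_wrpll(uint64_t freq2k) /* target clock in 2 kHz units */
{
	struct rnp best = { 0, 0, 0 };
	uint64_t best_diff = UINT64_MAX;
	unsigned int r2, n2, p;

	for (r2 = LC_FREQ * 2 / REF_MAX + 1; r2 <= LC_FREQ * 2 / REF_MIN; r2++)
		for (n2 = VCO_MIN * r2 / LC_FREQ + 1; n2 <= VCO_MAX * r2 / LC_FREQ; n2++)
			for (p = 2; p <= 64; p += 2) {
				/* output = LC_FREQ_2K * n2 / (p * r2), in 2 kHz units */
				uint64_t out = (uint64_t)LC_FREQ * 2000 * n2 / (p * r2);
				uint64_t diff = out > freq2k ? out - freq2k : freq2k - out;

				if (diff < best_diff) {
					best_diff = diff;
					best = (struct rnp){ r2, n2, p };
				}
			}
	return best;
}

int main(void)
{
	struct rnp best = calc_wrpll(148500000 / 100); /* 148.5 MHz pixel clock */

	printf("r2=%u n2=%u p=%u\n", best.r2, best.n2, best.p);
	return 0;
}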
+ * + * We want R so that REF_MIN <= Ref <= REF_MAX. + * Injecting R2 = 2 * R gives: + * REF_MAX * r2 > LC_FREQ * 2 and + * REF_MIN * r2 < LC_FREQ * 2 + * + * Which means the desired boundaries for r2 are: + * LC_FREQ * 2 / REF_MAX < r2 < LC_FREQ * 2 / REF_MIN + * + */ + for (r2 = LC_FREQ * 2 / REF_MAX + 1; + r2 <= LC_FREQ * 2 / REF_MIN; + r2++) { + + /* + * VCO = N * Ref, that is: VCO = N * LC_FREQ / R + * + * Once again we want VCO_MIN <= VCO <= VCO_MAX. + * Injecting R2 = 2 * R and N2 = 2 * N, we get: + * VCO_MAX * r2 > n2 * LC_FREQ and + * VCO_MIN * r2 < n2 * LC_FREQ) + * + * Which means the desired boundaries for n2 are: + * VCO_MIN * r2 / LC_FREQ < n2 < VCO_MAX * r2 / LC_FREQ + */ + for (n2 = VCO_MIN * r2 / LC_FREQ + 1; + n2 <= VCO_MAX * r2 / LC_FREQ; + n2++) { + + for (p = P_MIN; p <= P_MAX; p += P_INC) + hsw_wrpll_update_rnp(freq2k, budget, + r2, n2, p, &best); + } + } + + *n2_out = best.n2; + *p_out = best.p; + *r2_out = best.r2; +} + +static struct intel_shared_dpll * +hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll; + int clock = crtc_state->port_clock; + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (encoder->type == INTEL_OUTPUT_HDMI) { + uint32_t val; + unsigned p, n2, r2; + + hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p); + + val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL | + WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) | + WRPLL_DIVIDER_POST(p); + + crtc_state->dpll_hw_state.wrpll = val; + + pll = intel_find_shared_dpll(crtc, crtc_state, + DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); + + } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + encoder->type == INTEL_OUTPUT_DP_MST || + encoder->type == INTEL_OUTPUT_EDP) { + enum intel_dpll_id pll_id; + + switch (clock / 2) { + case 81000: + pll_id = DPLL_ID_LCPLL_810; + break; + case 135000: + pll_id = DPLL_ID_LCPLL_1350; + break; + case 270000: + pll_id = DPLL_ID_LCPLL_2700; + break; + default: + DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock); + return NULL; + } + + pll = intel_get_shared_dpll_by_id(dev_priv, pll_id); + + } else if (encoder->type == INTEL_OUTPUT_ANALOG) { + if (WARN_ON(crtc_state->port_clock / 2 != 135000)) + return NULL; + + crtc_state->dpll_hw_state.spll = + SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC; + + pll = intel_find_shared_dpll(crtc, crtc_state, + DPLL_ID_SPLL, DPLL_ID_SPLL); + } else { + return NULL; + } + + if (!pll) + return NULL; + + crtc_state->ddi_pll_sel = hsw_pll_to_ddi_pll_sel(pll); + + intel_reference_shared_dpll(pll, crtc_state); + + return pll; +} + + +static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = { + .enable = hsw_ddi_wrpll_enable, + .disable = hsw_ddi_wrpll_disable, + .get_hw_state = hsw_ddi_wrpll_get_hw_state, +}; + +static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = { + .enable = hsw_ddi_spll_enable, + .disable = hsw_ddi_spll_disable, + .get_hw_state = hsw_ddi_spll_get_hw_state, +}; + +static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ +} + +static void hsw_ddi_lcpll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ +} + +static bool hsw_ddi_lcpll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + return true; +} + +static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = { + 
.enable = hsw_ddi_lcpll_enable, + .disable = hsw_ddi_lcpll_disable, + .get_hw_state = hsw_ddi_lcpll_get_hw_state, +}; + +struct skl_dpll_regs { + i915_reg_t ctl, cfgcr1, cfgcr2; +}; + +/* this array is indexed by the *shared* pll id */ +static const struct skl_dpll_regs skl_dpll_regs[4] = { + { + /* DPLL 0 */ + .ctl = LCPLL1_CTL, + /* DPLL 0 doesn't support HDMI mode */ + }, + { + /* DPLL 1 */ + .ctl = LCPLL2_CTL, + .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), + .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), + }, + { + /* DPLL 2 */ + .ctl = WRPLL_CTL(0), + .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), + .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), + }, + { + /* DPLL 3 */ + .ctl = WRPLL_CTL(1), + .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), + .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), + }, +}; + +static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t val; + + val = I915_READ(DPLL_CTRL1); + + val &= ~(DPLL_CTRL1_HDMI_MODE(pll->id) | DPLL_CTRL1_SSC(pll->id) | + DPLL_CTRL1_LINK_RATE_MASK(pll->id)); + val |= pll->config.hw_state.ctrl1 << (pll->id * 6); + + I915_WRITE(DPLL_CTRL1, val); + POSTING_READ(DPLL_CTRL1); +} + +static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + const struct skl_dpll_regs *regs = skl_dpll_regs; + + skl_ddi_pll_write_ctrl1(dev_priv, pll); + + I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1); + I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2); + POSTING_READ(regs[pll->id].cfgcr1); + POSTING_READ(regs[pll->id].cfgcr2); + + /* the enable bit is always bit 31 */ + I915_WRITE(regs[pll->id].ctl, + I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); + + if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5)) + DRM_ERROR("DPLL %d not locked\n", pll->id); +} + +static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + skl_ddi_pll_write_ctrl1(dev_priv, pll); +} + +static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + const struct skl_dpll_regs *regs = skl_dpll_regs; + + /* the enable bit is always bit 31 */ + I915_WRITE(regs[pll->id].ctl, + I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE); + POSTING_READ(regs[pll->id].ctl); +} + +static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ +} + +static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + const struct skl_dpll_regs *regs = skl_dpll_regs; + bool ret; + + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + ret = false; + + val = I915_READ(regs[pll->id].ctl); + if (!(val & LCPLL_PLL_ENABLE)) + goto out; + + val = I915_READ(DPLL_CTRL1); + hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; + + /* avoid reading back stale values if HDMI mode is not enabled */ + if (val & DPLL_CTRL1_HDMI_MODE(pll->id)) { + hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1); + hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2); + } + ret = true; + +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return ret; +} + +static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + uint32_t val; + const struct skl_dpll_regs *regs = skl_dpll_regs; + bool ret; + + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + ret = false; + + /* DPLL0 is always 
enabled since it drives CDCLK */ + val = I915_READ(regs[pll->id].ctl); + if (WARN_ON(!(val & LCPLL_PLL_ENABLE))) + goto out; + + val = I915_READ(DPLL_CTRL1); + hw_state->ctrl1 = (val >> (pll->id * 6)) & 0x3f; + + ret = true; + +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return ret; +} + +struct skl_wrpll_context { + uint64_t min_deviation; /* current minimal deviation */ + uint64_t central_freq; /* chosen central freq */ + uint64_t dco_freq; /* chosen dco freq */ + unsigned int p; /* chosen divider */ +}; + +static void skl_wrpll_context_init(struct skl_wrpll_context *ctx) +{ + memset(ctx, 0, sizeof(*ctx)); + + ctx->min_deviation = U64_MAX; +} + +/* DCO freq must be within +1%/-6% of the DCO central freq */ +#define SKL_DCO_MAX_PDEVIATION 100 +#define SKL_DCO_MAX_NDEVIATION 600 + +static void skl_wrpll_try_divider(struct skl_wrpll_context *ctx, + uint64_t central_freq, + uint64_t dco_freq, + unsigned int divider) +{ + uint64_t deviation; + + deviation = div64_u64(10000 * abs_diff(dco_freq, central_freq), + central_freq); + + /* positive deviation */ + if (dco_freq >= central_freq) { + if (deviation < SKL_DCO_MAX_PDEVIATION && + deviation < ctx->min_deviation) { + ctx->min_deviation = deviation; + ctx->central_freq = central_freq; + ctx->dco_freq = dco_freq; + ctx->p = divider; + } + /* negative deviation */ + } else if (deviation < SKL_DCO_MAX_NDEVIATION && + deviation < ctx->min_deviation) { + ctx->min_deviation = deviation; + ctx->central_freq = central_freq; + ctx->dco_freq = dco_freq; + ctx->p = divider; + } +} + +static void skl_wrpll_get_multipliers(unsigned int p, + unsigned int *p0 /* out */, + unsigned int *p1 /* out */, + unsigned int *p2 /* out */) +{ + /* even dividers */ + if (p % 2 == 0) { + unsigned int half = p / 2; + + if (half == 1 || half == 2 || half == 3 || half == 5) { + *p0 = 2; + *p1 = 1; + *p2 = half; + } else if (half % 2 == 0) { + *p0 = 2; + *p1 = half / 2; + *p2 = 2; + } else if (half % 3 == 0) { + *p0 = 3; + *p1 = half / 3; + *p2 = 2; + } else if (half % 7 == 0) { + *p0 = 7; + *p1 = half / 7; + *p2 = 2; + } + } else if (p == 3 || p == 9) { /* 3, 5, 7, 9, 15, 21, 35 */ + *p0 = 3; + *p1 = 1; + *p2 = p / 3; + } else if (p == 5 || p == 7) { + *p0 = p; + *p1 = 1; + *p2 = 1; + } else if (p == 15) { + *p0 = 3; + *p1 = 1; + *p2 = 5; + } else if (p == 21) { + *p0 = 7; + *p1 = 1; + *p2 = 3; + } else if (p == 35) { + *p0 = 7; + *p1 = 1; + *p2 = 5; + } +} + +struct skl_wrpll_params { + uint32_t dco_fraction; + uint32_t dco_integer; + uint32_t qdiv_ratio; + uint32_t qdiv_mode; + uint32_t kdiv; + uint32_t pdiv; + uint32_t central_freq; +}; + +static void skl_wrpll_params_populate(struct skl_wrpll_params *params, + uint64_t afe_clock, + uint64_t central_freq, + uint32_t p0, uint32_t p1, uint32_t p2) +{ + uint64_t dco_freq; + + switch (central_freq) { + case 9600000000ULL: + params->central_freq = 0; + break; + case 9000000000ULL: + params->central_freq = 1; + break; + case 8400000000ULL: + params->central_freq = 3; + } + + switch (p0) { + case 1: + params->pdiv = 0; + break; + case 2: + params->pdiv = 1; + break; + case 3: + params->pdiv = 2; + break; + case 7: + params->pdiv = 4; + break; + default: + WARN(1, "Incorrect PDiv\n"); + } + + switch (p2) { + case 5: + params->kdiv = 0; + break; + case 2: + params->kdiv = 1; + break; + case 3: + params->kdiv = 2; + break; + case 1: + params->kdiv = 3; + break; + default: + WARN(1, "Incorrect KDiv\n"); + } + + params->qdiv_ratio = p1; + params->qdiv_mode = (params->qdiv_ratio == 1) ? 
0 : 1;
+
+ dco_freq = p0 * p1 * p2 * afe_clock;
+
+ /*
+ * Intermediate values are in Hz.
+ * Divide by MHz to match bspec
+ */
+ params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_fraction =
+ div_u64((div_u64(dco_freq, 24) -
+ params->dco_integer * MHz(1)) * 0x8000, MHz(1));
+}
+
+static bool
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ struct skl_wrpll_params *wrpll_params)
+{
+ uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+ uint64_t dco_central_freq[3] = {8400000000ULL,
+ 9000000000ULL,
+ 9600000000ULL};
+ static const int even_dividers[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20,
+ 24, 28, 30, 32, 36, 40, 42, 44,
+ 48, 52, 54, 56, 60, 64, 66, 68,
+ 70, 72, 76, 78, 80, 84, 88, 90,
+ 92, 96, 98 };
+ static const int odd_dividers[] = { 3, 5, 7, 9, 15, 21, 35 };
+ static const struct {
+ const int *list;
+ int n_dividers;
+ } dividers[] = {
+ { even_dividers, ARRAY_SIZE(even_dividers) },
+ { odd_dividers, ARRAY_SIZE(odd_dividers) },
+ };
+ struct skl_wrpll_context ctx;
+ unsigned int dco, d, i;
+ unsigned int p0, p1, p2;
+
+ skl_wrpll_context_init(&ctx);
+
+ for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+ for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
+ for (i = 0; i < dividers[d].n_dividers; i++) {
+ unsigned int p = dividers[d].list[i];
+ uint64_t dco_freq = p * afe_clock;
+
+ skl_wrpll_try_divider(&ctx,
+ dco_central_freq[dco],
+ dco_freq,
+ p);
+ /*
+ * Skip the remaining dividers if we're sure to
+ * have found the definitive divider; we can't
+ * improve on a 0 deviation.
+ */
+ if (ctx.min_deviation == 0)
+ goto skip_remaining_dividers;
+ }
+ }
+
+skip_remaining_dividers:
+ /*
+ * If a solution is found with an even divider, prefer
+ * this one.
+ */
+ if (d == 0 && ctx.p)
+ break;
+ }
+
+ if (!ctx.p) {
+ DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
+ return false;
+ }
+
+ /*
+ * gcc incorrectly analyses that these can be used without being
+ * initialized. To be fair, it's hard to guess.
+ */
+ p0 = p1 = p2 = 0;
+ skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
+ p0, p1, p2);
+
+ return true;
+}
+
+static struct intel_shared_dpll *
+skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct intel_shared_dpll *pll;
+ uint32_t ctrl1, cfgcr1, cfgcr2;
+ int clock = crtc_state->port_clock;
+
+ /*
+ * See comment in intel_dpll_hw_state to understand why we always use 0
+ * as the DPLL id in this function.
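To make the arithmetic above concrete, here is one worked path through the search, with numbers chosen by the editor rather than taken from the patch: a 148.5 MHz HDMI pixel clock and divider p = 12.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t afe = 5ULL * 148500000;	/* AFE clock, 742.5 MHz */
	uint64_t dco = 12 * afe;		/* 8.91 GHz: within -6% of the
						 * 9.0 GHz central frequency */
	/* p = 12 is even, half = 6, 6 % 2 == 0: p0 = 2, p1 = 3, p2 = 2 */
	uint64_t dco_integer = dco / 24000000;
	uint64_t dco_fraction =
		(dco / 24 - dco_integer * 1000000) * 0x8000 / 1000000;

	/* prints "371 8192", i.e. DCO = (371 + 8192/2^15) * 24 MHz */
	printf("%llu %llu\n", (unsigned long long)dco_integer,
	       (unsigned long long)dco_fraction);
	return 0;
}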
+ */ + + ctrl1 = DPLL_CTRL1_OVERRIDE(0); + + if (encoder->type == INTEL_OUTPUT_HDMI) { + struct skl_wrpll_params wrpll_params = { 0, }; + + ctrl1 |= DPLL_CTRL1_HDMI_MODE(0); + + if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params)) + return NULL; + + cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE | + DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) | + wrpll_params.dco_integer; + + cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) | + DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) | + DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | + DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | + wrpll_params.central_freq; + } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + encoder->type == INTEL_OUTPUT_DP_MST || + encoder->type == INTEL_OUTPUT_EDP) { + switch (crtc_state->port_clock / 2) { + case 81000: + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0); + break; + case 135000: + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0); + break; + case 270000: + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0); + break; + /* eDP 1.4 rates */ + case 162000: + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); + break; + /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which + results in CDCLK change. Need to handle the change of CDCLK by + disabling pipes and re-enabling them */ + case 108000: + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); + break; + case 216000: + ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, 0); + break; + } + + cfgcr1 = cfgcr2 = 0; + } else { + return NULL; + } + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + crtc_state->dpll_hw_state.ctrl1 = ctrl1; + crtc_state->dpll_hw_state.cfgcr1 = cfgcr1; + crtc_state->dpll_hw_state.cfgcr2 = cfgcr2; + + if (encoder->type == INTEL_OUTPUT_EDP) + pll = intel_find_shared_dpll(crtc, crtc_state, + DPLL_ID_SKL_DPLL0, + DPLL_ID_SKL_DPLL0); + else + pll = intel_find_shared_dpll(crtc, crtc_state, + DPLL_ID_SKL_DPLL1, + DPLL_ID_SKL_DPLL3); + if (!pll) + return NULL; + + crtc_state->ddi_pll_sel = pll->id; + + intel_reference_shared_dpll(pll, crtc_state); + + return pll; +} + +static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = { + .enable = skl_ddi_pll_enable, + .disable = skl_ddi_pll_disable, + .get_hw_state = skl_ddi_pll_get_hw_state, +}; + +static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = { + .enable = skl_ddi_dpll0_enable, + .disable = skl_ddi_dpll0_disable, + .get_hw_state = skl_ddi_dpll0_get_hw_state, +}; + +static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + uint32_t temp; + enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ + + temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + /* + * Definition of each bit polarity has been changed + * after A1 stepping + */ + if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) + temp &= ~PORT_PLL_REF_SEL; + else + temp |= PORT_PLL_REF_SEL; + + /* Non-SSC reference */ + I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + + /* Disable 10 bit clock */ + temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); + temp &= ~PORT_PLL_10BIT_CLK_ENABLE; + I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); + + /* Write P1 & P2 */ + temp = I915_READ(BXT_PORT_PLL_EBB_0(port)); + temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK); + temp |= pll->config.hw_state.ebb0; + I915_WRITE(BXT_PORT_PLL_EBB_0(port), temp); + + /* Write M2 integer */ + temp = I915_READ(BXT_PORT_PLL(port, 0)); + temp &= ~PORT_PLL_M2_MASK; + temp |= pll->config.hw_state.pll0; + I915_WRITE(BXT_PORT_PLL(port, 
0), temp); + + /* Write N */ + temp = I915_READ(BXT_PORT_PLL(port, 1)); + temp &= ~PORT_PLL_N_MASK; + temp |= pll->config.hw_state.pll1; + I915_WRITE(BXT_PORT_PLL(port, 1), temp); + + /* Write M2 fraction */ + temp = I915_READ(BXT_PORT_PLL(port, 2)); + temp &= ~PORT_PLL_M2_FRAC_MASK; + temp |= pll->config.hw_state.pll2; + I915_WRITE(BXT_PORT_PLL(port, 2), temp); + + /* Write M2 fraction enable */ + temp = I915_READ(BXT_PORT_PLL(port, 3)); + temp &= ~PORT_PLL_M2_FRAC_ENABLE; + temp |= pll->config.hw_state.pll3; + I915_WRITE(BXT_PORT_PLL(port, 3), temp); + + /* Write coeff */ + temp = I915_READ(BXT_PORT_PLL(port, 6)); + temp &= ~PORT_PLL_PROP_COEFF_MASK; + temp &= ~PORT_PLL_INT_COEFF_MASK; + temp &= ~PORT_PLL_GAIN_CTL_MASK; + temp |= pll->config.hw_state.pll6; + I915_WRITE(BXT_PORT_PLL(port, 6), temp); + + /* Write calibration val */ + temp = I915_READ(BXT_PORT_PLL(port, 8)); + temp &= ~PORT_PLL_TARGET_CNT_MASK; + temp |= pll->config.hw_state.pll8; + I915_WRITE(BXT_PORT_PLL(port, 8), temp); + + temp = I915_READ(BXT_PORT_PLL(port, 9)); + temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK; + temp |= pll->config.hw_state.pll9; + I915_WRITE(BXT_PORT_PLL(port, 9), temp); + + temp = I915_READ(BXT_PORT_PLL(port, 10)); + temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H; + temp &= ~PORT_PLL_DCO_AMP_MASK; + temp |= pll->config.hw_state.pll10; + I915_WRITE(BXT_PORT_PLL(port, 10), temp); + + /* Recalibrate with new settings */ + temp = I915_READ(BXT_PORT_PLL_EBB_4(port)); + temp |= PORT_PLL_RECALIBRATE; + I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); + temp &= ~PORT_PLL_10BIT_CLK_ENABLE; + temp |= pll->config.hw_state.ebb4; + I915_WRITE(BXT_PORT_PLL_EBB_4(port), temp); + + /* Enable PLL */ + temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp |= PORT_PLL_ENABLE; + I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + POSTING_READ(BXT_PORT_PLL_ENABLE(port)); + + if (wait_for_atomic_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & + PORT_PLL_LOCK), 200)) + DRM_ERROR("PLL %d not locked\n", port); + + /* + * While we write to the group register to program all lanes at once we + * can read only lane registers and we pick lanes 0/1 for that. 
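If it helps to see the shape of the sequence above, every step is the same read-modify-write; a hypothetical helper (editor's sketch, not something the patch adds) reduces each register update to a clear/set pair:

static void bxt_pll_rmw(struct drm_i915_private *dev_priv, i915_reg_t reg,
			uint32_t clear, uint32_t set)
{
	uint32_t tmp = I915_READ(reg);

	tmp &= ~clear;
	tmp |= set;
	I915_WRITE(reg, tmp);
}

/* e.g. the M2 integer write above would read:
 * bxt_pll_rmw(dev_priv, BXT_PORT_PLL(port, 0), PORT_PLL_M2_MASK,
 *	       pll->config.hw_state.pll0); */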
+ */ + temp = I915_READ(BXT_PORT_PCS_DW12_LN01(port)); + temp &= ~LANE_STAGGER_MASK; + temp &= ~LANESTAGGER_STRAP_OVRD; + temp |= pll->config.hw_state.pcsdw12; + I915_WRITE(BXT_PORT_PCS_DW12_GRP(port), temp); +} + +static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ + uint32_t temp; + + temp = I915_READ(BXT_PORT_PLL_ENABLE(port)); + temp &= ~PORT_PLL_ENABLE; + I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp); + POSTING_READ(BXT_PORT_PLL_ENABLE(port)); +} + +static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */ + uint32_t val; + bool ret; + + if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS)) + return false; + + ret = false; + + val = I915_READ(BXT_PORT_PLL_ENABLE(port)); + if (!(val & PORT_PLL_ENABLE)) + goto out; + + hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port)); + hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK; + + hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(port)); + hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE; + + hw_state->pll0 = I915_READ(BXT_PORT_PLL(port, 0)); + hw_state->pll0 &= PORT_PLL_M2_MASK; + + hw_state->pll1 = I915_READ(BXT_PORT_PLL(port, 1)); + hw_state->pll1 &= PORT_PLL_N_MASK; + + hw_state->pll2 = I915_READ(BXT_PORT_PLL(port, 2)); + hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK; + + hw_state->pll3 = I915_READ(BXT_PORT_PLL(port, 3)); + hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE; + + hw_state->pll6 = I915_READ(BXT_PORT_PLL(port, 6)); + hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK | + PORT_PLL_INT_COEFF_MASK | + PORT_PLL_GAIN_CTL_MASK; + + hw_state->pll8 = I915_READ(BXT_PORT_PLL(port, 8)); + hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK; + + hw_state->pll9 = I915_READ(BXT_PORT_PLL(port, 9)); + hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK; + + hw_state->pll10 = I915_READ(BXT_PORT_PLL(port, 10)); + hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H | + PORT_PLL_DCO_AMP_MASK; + + /* + * While we write to the group register to program all lanes at once we + * can read only lane registers. We configure all lanes the same way, so + * here just read out lanes 0/1 and output a note if lanes 2/3 differ. 
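Note how the readback above masks each register down to exactly the bits the enable path programs; that is what lets a state checker compare saved and current state wholesale. A hypothetical verification step, sketched under that assumption (the hw_state struct is plain uint32_t fields, so memcmp is safe):

struct intel_dpll_hw_state cur = {};

if (pll->funcs.get_hw_state(dev_priv, pll, &cur) &&
    memcmp(&cur, &pll->config.hw_state, sizeof(cur)))
	DRM_ERROR("%s: programmed state differs from HW\n", pll->name);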
+ */ + hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(port)); + if (I915_READ(BXT_PORT_PCS_DW12_LN23(port)) != hw_state->pcsdw12) + DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n", + hw_state->pcsdw12, + I915_READ(BXT_PORT_PCS_DW12_LN23(port))); + hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD; + + ret = true; + +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); + + return ret; +} + +/* bxt clock parameters */ +struct bxt_clk_div { + int clock; + uint32_t p1; + uint32_t p2; + uint32_t m2_int; + uint32_t m2_frac; + bool m2_frac_en; + uint32_t n; +}; + +/* pre-calculated values for DP linkrates */ +static const struct bxt_clk_div bxt_dp_clk_val[] = { + {162000, 4, 2, 32, 1677722, 1, 1}, + {270000, 4, 1, 27, 0, 0, 1}, + {540000, 2, 1, 27, 0, 0, 1}, + {216000, 3, 2, 32, 1677722, 1, 1}, + {243000, 4, 1, 24, 1258291, 1, 1}, + {324000, 4, 1, 32, 1677722, 1, 1}, + {432000, 3, 1, 32, 1677722, 1, 1} +}; + +static struct intel_shared_dpll * +bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state, + struct intel_encoder *encoder) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_shared_dpll *pll; + enum intel_dpll_id i; + struct intel_digital_port *intel_dig_port; + struct bxt_clk_div clk_div = {0}; + int vco = 0; + uint32_t prop_coef, int_coef, gain_ctl, targ_cnt; + uint32_t lanestagger; + int clock = crtc_state->port_clock; + + if (encoder->type == INTEL_OUTPUT_HDMI) { + intel_clock_t best_clock; + + /* Calculate HDMI div */ + /* + * FIXME: tie the following calculation into + * i9xx_crtc_compute_clock + */ + if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) { + DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n", + clock, pipe_name(crtc->pipe)); + return NULL; + } + + clk_div.p1 = best_clock.p1; + clk_div.p2 = best_clock.p2; + WARN_ON(best_clock.m1 != 2); + clk_div.n = best_clock.n; + clk_div.m2_int = best_clock.m2 >> 22; + clk_div.m2_frac = best_clock.m2 & ((1 << 22) - 1); + clk_div.m2_frac_en = clk_div.m2_frac != 0; + + vco = best_clock.vco; + } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || + encoder->type == INTEL_OUTPUT_EDP) { + int i; + + clk_div = bxt_dp_clk_val[0]; + for (i = 0; i < ARRAY_SIZE(bxt_dp_clk_val); ++i) { + if (bxt_dp_clk_val[i].clock == clock) { + clk_div = bxt_dp_clk_val[i]; + break; + } + } + vco = clock * 10 / 2 * clk_div.p1 * clk_div.p2; + } + + if (vco >= 6200000 && vco <= 6700000) { + prop_coef = 4; + int_coef = 9; + gain_ctl = 3; + targ_cnt = 8; + } else if ((vco > 5400000 && vco < 6200000) || + (vco >= 4800000 && vco < 5400000)) { + prop_coef = 5; + int_coef = 11; + gain_ctl = 3; + targ_cnt = 9; + } else if (vco == 5400000) { + prop_coef = 3; + int_coef = 8; + gain_ctl = 1; + targ_cnt = 9; + } else { + DRM_ERROR("Invalid VCO\n"); + return NULL; + } + + memset(&crtc_state->dpll_hw_state, 0, + sizeof(crtc_state->dpll_hw_state)); + + if (clock > 270000) + lanestagger = 0x18; + else if (clock > 135000) + lanestagger = 0x0d; + else if (clock > 67000) + lanestagger = 0x07; + else if (clock > 33000) + lanestagger = 0x04; + else + lanestagger = 0x02; + + crtc_state->dpll_hw_state.ebb0 = + PORT_PLL_P1(clk_div.p1) | PORT_PLL_P2(clk_div.p2); + crtc_state->dpll_hw_state.pll0 = clk_div.m2_int; + crtc_state->dpll_hw_state.pll1 = PORT_PLL_N(clk_div.n); + crtc_state->dpll_hw_state.pll2 = clk_div.m2_frac; + + if (clk_div.m2_frac_en) + crtc_state->dpll_hw_state.pll3 = + PORT_PLL_M2_FRAC_ENABLE; + + crtc_state->dpll_hw_state.pll6 = + prop_coef | 
PORT_PLL_INT_COEFF(int_coef); + crtc_state->dpll_hw_state.pll6 |= + PORT_PLL_GAIN_CTL(gain_ctl); + + crtc_state->dpll_hw_state.pll8 = targ_cnt; + + crtc_state->dpll_hw_state.pll9 = 5 << PORT_PLL_LOCK_THRESHOLD_SHIFT; + + crtc_state->dpll_hw_state.pll10 = + PORT_PLL_DCO_AMP(PORT_PLL_DCO_AMP_DEFAULT) + | PORT_PLL_DCO_AMP_OVR_EN_H; + + crtc_state->dpll_hw_state.ebb4 = PORT_PLL_10BIT_CLK_ENABLE; + + crtc_state->dpll_hw_state.pcsdw12 = + LANESTAGGER_STRAP_OVRD | lanestagger; + + intel_dig_port = enc_to_dig_port(&encoder->base); + + /* 1:1 mapping between ports and PLLs */ + i = (enum intel_dpll_id) intel_dig_port->port; + pll = intel_get_shared_dpll_by_id(dev_priv, i); + + DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", + crtc->base.base.id, pll->name); + + intel_reference_shared_dpll(pll, crtc_state); + + /* shared DPLL id 0 is DPLL A */ + crtc_state->ddi_pll_sel = pll->id; + + return pll; +} + +static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = { + .enable = bxt_ddi_pll_enable, + .disable = bxt_ddi_pll_disable, + .get_hw_state = bxt_ddi_pll_get_hw_state, +}; + +static void intel_ddi_pll_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + uint32_t val = I915_READ(LCPLL_CTL); + + if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { + int cdclk_freq; + + cdclk_freq = dev_priv->display.get_display_clock_speed(dev); + dev_priv->skl_boot_cdclk = cdclk_freq; + if (skl_sanitize_cdclk(dev_priv)) + DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n"); + if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) + DRM_ERROR("LCPLL1 is disabled\n"); + } else if (IS_BROXTON(dev)) { + broxton_init_cdclk(dev); + broxton_ddi_phy_init(dev); + } else { + /* + * The LCPLL register should be turned on by the BIOS. For now + * let's just check its state and print errors in case + * something is wrong. Don't even try to turn it on. 
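Back in bxt_get_dpll() above, the VCO-range ladder that picks prop_coef/int_coef/gain_ctl/targ_cnt could equally be data; a table-driven sketch of the same ranges (editor's restructuring, values copied from the ladder, vco in kHz, rows tried in order):

struct bxt_pll_coefs {
	int vco_min, vco_max;	/* inclusive */
	uint32_t prop_coef, int_coef, gain_ctl, targ_cnt;
};

static const struct bxt_pll_coefs bxt_coefs[] = {
	{ 6200000, 6700000, 4,  9, 3, 8 },
	{ 5400001, 6199999, 5, 11, 3, 9 },
	{ 4800000, 5399999, 5, 11, 3, 9 },
	{ 5400000, 5400000, 3,  8, 1, 9 },	/* exact 5.4 GHz case */
};

As a cross-check: the 270000 kHz DP entry gives vco = 270000 * 10 / 2 * 4 * 1 = 5400000, which lands on the exact-5.4 GHz row.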
+ */
+
+ if (val & LCPLL_CD_SOURCE_FCLK)
+ DRM_ERROR("CDCLK source is not LCPLL\n");
+
+ if (val & LCPLL_PLL_DISABLE)
+ DRM_ERROR("LCPLL is disabled\n");
+ }
+}
+
+struct dpll_info {
+ const char *name;
+ const int id;
+ const struct intel_shared_dpll_funcs *funcs;
+ uint32_t flags;
+};
+
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder);
+};
+
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", DPLL_ID_PCH_PLL_A, &ibx_pch_dpll_funcs, 0 },
+ { "PCH DPLL B", DPLL_ID_PCH_PLL_B, &ibx_pch_dpll_funcs, 0 },
+ { NULL, -1, NULL, 0 },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dpll = ibx_get_dpll,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", DPLL_ID_WRPLL1, &hsw_ddi_wrpll_funcs, 0 },
+ { "WRPLL 2", DPLL_ID_WRPLL2, &hsw_ddi_wrpll_funcs, 0 },
+ { "SPLL", DPLL_ID_SPLL, &hsw_ddi_spll_funcs, 0 },
+ { "LCPLL 810", DPLL_ID_LCPLL_810, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", DPLL_ID_LCPLL_1350, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", DPLL_ID_LCPLL_2700, &hsw_ddi_lcpll_funcs, INTEL_DPLL_ALWAYS_ON },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dpll = hsw_get_dpll,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dpll = skl_get_dpll,
+};
+
+static const struct dpll_info bxt_plls[] = {
+ { "PORT PLL A", DPLL_ID_SKL_DPLL0, &bxt_ddi_pll_funcs, 0 },
+ { "PORT PLL B", DPLL_ID_SKL_DPLL1, &bxt_ddi_pll_funcs, 0 },
+ { "PORT PLL C", DPLL_ID_SKL_DPLL2, &bxt_ddi_pll_funcs, 0 },
+ { NULL, -1, NULL, },
+};
+
+static const struct intel_dpll_mgr bxt_pll_mgr = {
+ .dpll_info = bxt_plls,
+ .get_dpll = bxt_get_dpll,
+};
+
+void intel_shared_dpll_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ const struct intel_dpll_mgr *dpll_mgr = NULL;
+ const struct dpll_info *dpll_info;
+ int i;
+
+ if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
+ dpll_mgr = &skl_pll_mgr;
+ else if (IS_BROXTON(dev))
+ dpll_mgr = &bxt_pll_mgr;
+ else if (HAS_DDI(dev))
+ dpll_mgr = &hsw_pll_mgr;
+ else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
+ dpll_mgr = &pch_pll_mgr;
+
+ if (!dpll_mgr) {
+ dev_priv->num_shared_dpll = 0;
+ return;
+ }
+
+ dpll_info = dpll_mgr->dpll_info;
+
+ for (i = 0; dpll_info[i].id >= 0; i++) {
+ WARN_ON(i != dpll_info[i].id);
+
+ dev_priv->shared_dplls[i].id = dpll_info[i].id;
+ dev_priv->shared_dplls[i].name = dpll_info[i].name;
+ dev_priv->shared_dplls[i].funcs = *dpll_info[i].funcs;
+ dev_priv->shared_dplls[i].flags = dpll_info[i].flags;
+ }
+
+ dev_priv->dpll_mgr = dpll_mgr;
+ dev_priv->num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll_lock);
+
+ BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+
+ /* FIXME: Move this to a more suitable place */
+ if (HAS_DDI(dev))
+ intel_ddi_pll_init(dev);
+}
+
+struct intel_shared_dpll *
+intel_get_shared_dpll(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv =
to_i915(crtc->base.dev); + const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr; + + if (WARN_ON(!dpll_mgr)) + return NULL; + + return dpll_mgr->get_dpll(crtc, crtc_state, encoder); +} diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h new file mode 100644 index 000000000000..89c5ada1a315 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h @@ -0,0 +1,164 @@ +/* + * Copyright © 2012-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + */ + +#ifndef _INTEL_DPLL_MGR_H_ +#define _INTEL_DPLL_MGR_H_ + +/*FIXME: Move this to a more appropriate place. */ +#define abs_diff(a, b) ({ \ + typeof(a) __a = (a); \ + typeof(b) __b = (b); \ + (void) (&__a == &__b); \ + __a > __b ? (__a - __b) : (__b - __a); }) + +struct drm_i915_private; +struct intel_crtc; +struct intel_crtc_state; +struct intel_encoder; + +struct intel_shared_dpll; +struct intel_dpll_mgr; + +enum intel_dpll_id { + DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ + /* real shared dpll ids must be >= 0 */ + DPLL_ID_PCH_PLL_A = 0, + DPLL_ID_PCH_PLL_B = 1, + /* hsw/bdw */ + DPLL_ID_WRPLL1 = 0, + DPLL_ID_WRPLL2 = 1, + DPLL_ID_SPLL = 2, + DPLL_ID_LCPLL_810 = 3, + DPLL_ID_LCPLL_1350 = 4, + DPLL_ID_LCPLL_2700 = 5, + + /* skl */ + DPLL_ID_SKL_DPLL0 = 0, + DPLL_ID_SKL_DPLL1 = 1, + DPLL_ID_SKL_DPLL2 = 2, + DPLL_ID_SKL_DPLL3 = 3, +}; +#define I915_NUM_PLLS 6 + +/** Inform the state checker that the DPLL is kept enabled even if not + * in use by any crtc. + */ +#define INTEL_DPLL_ALWAYS_ON (1 << 0) + +struct intel_dpll_hw_state { + /* i9xx, pch plls */ + uint32_t dpll; + uint32_t dpll_md; + uint32_t fp0; + uint32_t fp1; + + /* hsw, bdw */ + uint32_t wrpll; + uint32_t spll; + + /* skl */ + /* + * DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in + * lower part of ctrl1 and they get shifted into position when writing + * the register. This allows us to easily compare the state to share + * the DPLL. + */ + uint32_t ctrl1; + /* HDMI only, 0 when used for DP */ + uint32_t cfgcr1, cfgcr2; + + /* bxt */ + uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, + pcsdw12; +}; + +struct intel_shared_dpll_config { + unsigned crtc_mask; /* mask of CRTCs sharing this PLL */ + struct intel_dpll_hw_state hw_state; +}; + +struct intel_shared_dpll_funcs { + /* The mode_set hook is optional and should be used together with the + * intel_prepare_shared_dpll function. 
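A note on the abs_diff() macro introduced above: the seemingly dead `(void) (&__a == &__b);` is the usual kernel type-check idiom; comparing the two addresses makes the compiler warn when the arguments have different types, and the void cast discards the result. Usage fragment (editor's example):

uint64_t dco = 8910000000ULL, central = 9000000000ULL;
uint64_t delta = abs_diff(dco, central);	/* 90000000 */

/* abs_diff(dco, 42) would warn about comparing distinct pointer
 * types, since 42 is an int */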
*/ + void (*mode_set)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); + void (*enable)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); + void (*disable)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); + bool (*get_hw_state)(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state); +}; + +struct intel_shared_dpll { + struct intel_shared_dpll_config config; + + unsigned active_mask; /* mask of active CRTCs (i.e. DPMS on) */ + bool on; /* is the PLL actually active? Disabled during modeset */ + const char *name; + /* should match the index in the dev_priv->shared_dplls array */ + enum intel_dpll_id id; + + struct intel_shared_dpll_funcs funcs; + + uint32_t flags; +}; + +#define SKL_DPLL0 0 +#define SKL_DPLL1 1 +#define SKL_DPLL2 2 +#define SKL_DPLL3 3 + +/* shared dpll functions */ +struct intel_shared_dpll * +intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv, + enum intel_dpll_id id); +enum intel_dpll_id +intel_get_shared_dpll_id(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll); +void +intel_shared_dpll_config_get(struct intel_shared_dpll_config *config, + struct intel_shared_dpll *pll, + struct intel_crtc *crtc); +void +intel_shared_dpll_config_put(struct intel_shared_dpll_config *config, + struct intel_shared_dpll *pll, + struct intel_crtc *crtc); +void assert_shared_dpll(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + bool state); +#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) +#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) +struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, + struct intel_crtc_state *state, + struct intel_encoder *encoder); +void intel_prepare_shared_dpll(struct intel_crtc *crtc); +void intel_enable_shared_dpll(struct intel_crtc *crtc); +void intel_disable_shared_dpll(struct intel_crtc *crtc); +void intel_shared_dpll_commit(struct drm_atomic_state *state); +void intel_shared_dpll_init(struct drm_device *dev); + + +#endif /* _INTEL_DPLL_MGR_H_ */ diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4c027d69fac9..e0fcfa1683cc 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -44,9 +44,13 @@ * contexts. Note that it's important that we check the condition again after * having timed out, since the timeout could be due to preemption or similar and * we've never had a chance to check the condition before the timeout. + * + * TODO: When modesetting has fully transitioned to atomic, the below + * drm_can_sleep() can be removed and in_atomic()/!in_atomic() asserts + * added. 
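Open-coding the pattern this comment describes may make the recheck clearer; a sketch in kernel context (editor's illustration only; the real macro, which follows below, takes COND as an expression rather than a callback):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static inline int poll_cond(bool (*cond)(void), unsigned long timeout_us)
{
	unsigned long end = jiffies + usecs_to_jiffies(timeout_us) + 1;

	while (!cond()) {
		if (time_after(jiffies, end)) {
			/* we may have slept right across the deadline,
			 * so sample the condition one final time */
			if (!cond())
				return -ETIMEDOUT;
			break;
		}
		usleep_range(10, 20);
	}
	return 0;
}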
*/ -#define _wait_for(COND, MS, W) ({ \ - unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \ +#define _wait_for(COND, US, W) ({ \ + unsigned long timeout__ = jiffies + usecs_to_jiffies(US) + 1; \ int ret__ = 0; \ while (!(COND)) { \ if (time_after(jiffies, timeout__)) { \ @@ -55,7 +59,7 @@ break; \ } \ if ((W) && drm_can_sleep()) { \ - usleep_range((W)*1000, (W)*2000); \ + usleep_range((W), (W)*2); \ } else { \ cpu_relax(); \ } \ @@ -63,10 +67,40 @@ ret__; \ }) -#define wait_for(COND, MS) _wait_for(COND, MS, 1) -#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) -#define wait_for_atomic_us(COND, US) _wait_for((COND), \ - DIV_ROUND_UP((US), 1000), 0) +#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) +#define wait_for_us(COND, US) _wait_for((COND), (US), 1) + +/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ +#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) +# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic()) +#else +# define _WAIT_FOR_ATOMIC_CHECK do { } while (0) +#endif + +#define _wait_for_atomic(COND, US) ({ \ + unsigned long end__; \ + int ret__ = 0; \ + _WAIT_FOR_ATOMIC_CHECK; \ + BUILD_BUG_ON((US) > 50000); \ + end__ = (local_clock() >> 10) + (US) + 1; \ + while (!(COND)) { \ + if (time_after((unsigned long)(local_clock() >> 10), end__)) { \ + /* Unlike the regular wait_for(), this atomic variant \ + * cannot be preempted (and we'll just ignore the issue\ + * of irq interruptions) and so we know that no time \ + * has passed since the last check of COND and can \ + * immediately report the timeout. \ + */ \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + cpu_relax(); \ + } \ + ret__; \ +}) + +#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000) +#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US)) #define KHz(x) (1000 * (x)) #define MHz(x) KHz(1000 * (x)) @@ -118,6 +152,7 @@ enum intel_output_type { struct intel_framebuffer { struct drm_framebuffer base; struct drm_i915_gem_object *obj; + struct intel_rotation_info rot_info; }; struct intel_fbdev { @@ -260,6 +295,12 @@ struct intel_atomic_state { struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; struct intel_wm_config wm_config; + + /* + * Current watermarks can't be trusted during hardware readout, so + * don't bother calculating intermediate watermarks. + */ + bool skip_intermediate_wm; }; struct intel_plane_state { @@ -349,6 +390,7 @@ struct intel_crtc_scaler_state { struct intel_pipe_wm { struct intel_wm_level wm[5]; + struct intel_wm_level raw_wm[5]; uint32_t linetime; bool fbc_wm_enabled; bool pipe_enabled; @@ -376,9 +418,10 @@ struct intel_crtc_state { #define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */ unsigned long quirks; + unsigned fb_bits; /* framebuffers to flip */ bool update_pipe; /* can a fast modeset be performed? */ bool disable_cxsr; - bool wm_changed; /* watermarks are updated */ + bool update_wm_pre, update_wm_post; /* watermarks are updated */ bool fb_changed; /* fb on any of the planes is changed */ /* Pipe source size (ie. panel fitter input size) @@ -394,7 +437,8 @@ struct intel_crtc_state { bool has_infoframe; /* CPU Transcoder for the pipe. Currently this can only differ from the - * pipe on Haswell (where we have a special eDP transcoder). */ + * pipe on Haswell and later (where we have a special eDP transcoder) + * and Broxton (where we have special DSI transcoders). 
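One subtlety in the new _wait_for_atomic() above deserves a note: local_clock() returns nanoseconds, and `>> 10` divides by 1024 rather than 1000, so the derived "microsecond" clock undershoots by about 2.4% and timeouts run correspondingly long. With the BUILD_BUG_ON capping waits at 50000 us, the worst-case slack stays small (editor's arithmetic):

	50,000,000 ns >> 10 = 48,828 units  (vs. 50,000 exact)
	requested 50,000 us -> ~50,000 * 1.024 = ~51,200 us of real time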
*/ enum transcoder cpu_transcoder; /* @@ -441,8 +485,8 @@ struct intel_crtc_state { * haswell. */ struct dpll dpll; - /* Selected dpll when shared or DPLL_ID_PRIVATE. */ - enum intel_dpll_id shared_dpll; + /* Selected dpll when shared or NULL. */ + struct intel_shared_dpll *shared_dpll; /* * - PORT_CLK_SEL for DDI ports on HSW/BDW. @@ -510,14 +554,33 @@ struct intel_crtc_state { struct { /* - * optimal watermarks, programmed post-vblank when this state - * is committed + * Optimal watermarks, programmed post-vblank when this state + * is committed. */ union { struct intel_pipe_wm ilk; struct skl_pipe_wm skl; } optimal; + + /* + * Intermediate watermarks; these can be programmed immediately + * since they satisfy both the current configuration we're + * switching away from and the new configuration we're switching + * to. + */ + struct intel_pipe_wm intermediate; + + /* + * Platforms with two-step watermark programming will need to + * update watermark programming post-vblank to switch from the + * safe intermediate watermarks to the optimal final + * watermarks. + */ + bool need_postvbl_update; } wm; + + /* Gamma mode programmed on the pipe */ + uint32_t gamma_mode; }; struct vlv_wm_state { @@ -537,23 +600,6 @@ struct intel_mmio_flip { unsigned int rotation; }; -/* - * Tracking of operations that need to be performed at the beginning/end of an - * atomic commit, outside the atomic section where interrupts are disabled. - * These are generally operations that grab mutexes or might otherwise sleep - * and thus can't be run with interrupts disabled. - */ -struct intel_crtc_atomic_commit { - /* Sleepable operations to perform before commit */ - - /* Sleepable operations to perform after commit */ - unsigned fb_bits; - bool post_enable_primary; - - /* Sleepable operations to perform before and after commit */ - bool update_fbc; -}; - struct intel_crtc { struct drm_crtc base; enum pipe pipe; @@ -600,6 +646,7 @@ struct intel_crtc { struct intel_pipe_wm ilk; struct skl_pipe_wm skl; } active; + /* allow CxSR on this pipe */ bool cxsr_allowed; } wm; @@ -613,8 +660,6 @@ struct intel_crtc { int scanline_start; } debug; - struct intel_crtc_atomic_commit atomic; - /* scalers available on this crtc */ int num_scalers; @@ -751,7 +796,9 @@ struct intel_dp { uint32_t DP; int link_rate; uint8_t lane_count; + uint8_t sink_count; bool has_audio; + bool detect_done; enum hdmi_force_audio force_audio; bool limited_color_range; bool color_range_auto; @@ -1007,7 +1054,6 @@ void hsw_fdi_link_train(struct drm_crtc *crtc); void intel_ddi_init(struct drm_device *dev, enum port port); enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder); bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe); -void intel_ddi_pll_init(struct drm_device *dev); void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc); void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder); @@ -1051,17 +1097,19 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv, uint64_t fb_modifier, uint32_t pixel_format); /* intel_audio.c */ -void intel_init_audio(struct drm_device *dev); +void intel_init_audio_hooks(struct drm_i915_private *dev_priv); void intel_audio_codec_enable(struct intel_encoder *encoder); void intel_audio_codec_disable(struct intel_encoder *encoder); void i915_audio_component_init(struct drm_i915_private *dev_priv); void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); /* intel_display.c */ +int 
vlv_get_cck_clock(struct drm_i915_private *dev_priv, + const char *name, u32 reg, int ref_freq); extern const struct drm_plane_funcs intel_plane_funcs; +void intel_init_display_hooks(struct drm_i915_private *dev_priv); +unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); bool intel_has_pending_fb_unpin(struct drm_device *dev); -int intel_pch_rawclk(struct drm_device *dev); -int intel_hrawclk(struct drm_device *dev); void intel_mark_busy(struct drm_device *dev); void intel_mark_idle(struct drm_device *dev); void intel_crtc_restore_mode(struct drm_crtc *crtc); @@ -1106,9 +1154,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, void intel_release_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); -int intel_pin_and_fence_fb_obj(struct drm_plane *plane, - struct drm_framebuffer *fb, - const struct drm_plane_state *plane_state); +int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, + unsigned int rotation); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, @@ -1144,19 +1191,13 @@ intel_rotation_90_or_270(unsigned int rotation) void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane); -/* shared dpll functions */ -struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); -void assert_shared_dpll(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - bool state); -#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true) -#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false) -struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, - struct intel_crtc_state *state); +void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, + enum pipe pipe); int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, const struct dpll *dpll); void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe); +int lpt_get_iclkip(struct drm_i915_private *dev_priv); /* modesetting asserts */ void assert_panel_unlocked(struct drm_i915_private *dev_priv, @@ -1165,6 +1206,9 @@ void assert_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pll_enabled(d, p) assert_pll(d, p, true) #define assert_pll_disabled(d, p) assert_pll(d, p, false) +void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state); +#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) +#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true) @@ -1172,11 +1216,10 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) -u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv, - int *x, int *y, - uint64_t fb_modifier, - unsigned int cpp, - unsigned int pitch); +u32 intel_compute_tile_offset(int *x, int *y, + const struct drm_framebuffer *fb, int plane, + unsigned int pitch, + unsigned int rotation); void intel_prepare_reset(struct drm_device *dev); void intel_finish_reset(struct drm_device *dev); void hsw_enable_pc8(struct drm_i915_private *dev_priv); @@ -1196,9 +1239,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, struct 
intel_crtc_state *pipe_config); void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); -void -ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, - int dotclock); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, intel_clock_t *best_clock); int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); @@ -1226,7 +1266,7 @@ u32 skl_plane_ctl_rotation(unsigned int rotation); /* intel_csr.c */ void intel_csr_ucode_init(struct drm_i915_private *); -bool intel_csr_load_program(struct drm_i915_private *); +void intel_csr_load_program(struct drm_i915_private *); void intel_csr_ucode_fini(struct drm_i915_private *); /* intel_dp.c */ @@ -1266,7 +1306,6 @@ void intel_edp_drrs_invalidate(struct drm_device *dev, void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits); bool intel_digital_port_connected(struct drm_i915_private *dev_priv, struct intel_digital_port *port); -void hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config); void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, @@ -1541,6 +1580,7 @@ void intel_suspend_hw(struct drm_device *dev); int ilk_wm_max_level(const struct drm_device *dev); void intel_update_watermarks(struct drm_crtc *crtc); void intel_init_pm(struct drm_device *dev); +void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_device *dev); void intel_gpu_ips_init(struct drm_i915_private *dev_priv); void intel_gpu_ips_teardown(void); @@ -1565,6 +1605,7 @@ void skl_wm_get_hw_state(struct drm_device *dev); void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */); uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); +bool ilk_disable_lp_wm(struct drm_device *dev); int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); /* intel_sdvo.c */ @@ -1606,6 +1647,18 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } + +static inline struct intel_plane_state * +intel_atomic_get_existing_plane_state(struct drm_atomic_state *state, + struct intel_plane *plane) +{ + struct drm_plane_state *plane_state; + + plane_state = drm_atomic_get_existing_plane_state(state, &plane->base); + + return to_intel_plane_state(plane_state); +} + int intel_atomic_setup_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); @@ -1617,4 +1670,10 @@ void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; +/* intel_color.c */ +void intel_color_init(struct drm_crtc *crtc); +int intel_color_check(struct drm_crtc *crtc, struct drm_crtc_state *state); +void intel_color_set_csc(struct drm_crtc_state *crtc_state); +void intel_color_load_luts(struct drm_crtc_state *crtc_state); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index 01b8e9f4c272..9ff6435e7d38 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -46,6 +46,24 @@ static const struct { }, }; +enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) +{ + /* It just so happens the VBT matches register contents. 
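Since this helper and the pixel_format_to_reg() added later in this file are inverses over the four supported formats, a quick self-check is cheap; a hypothetical debug-only round-trip test (editor's sketch, not part of the patch):

static void check_pixel_format_roundtrip(void)
{
	static const u32 regs[] = {
		VID_MODE_FORMAT_RGB888, VID_MODE_FORMAT_RGB666,
		VID_MODE_FORMAT_RGB666_PACKED, VID_MODE_FORMAT_RGB565,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(regs); i++)
		WARN_ON(pixel_format_to_reg(
			pixel_format_from_register_bits(regs[i])) != regs[i]);
}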
*/ + switch (fmt) { + case VID_MODE_FORMAT_RGB888: + return MIPI_DSI_FMT_RGB888; + case VID_MODE_FORMAT_RGB666: + return MIPI_DSI_FMT_RGB666; + case VID_MODE_FORMAT_RGB666_PACKED: + return MIPI_DSI_FMT_RGB666_PACKED; + case VID_MODE_FORMAT_RGB565: + return MIPI_DSI_FMT_RGB565; + default: + MISSING_CASE(fmt); + return MIPI_DSI_FMT_RGB666; + } +} + static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { struct drm_encoder *encoder = &intel_dsi->base.base; @@ -268,6 +286,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi) static bool intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, base); struct intel_connector *intel_connector = intel_dsi->attached_connector; @@ -284,6 +303,14 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder, /* DSI uses short packets for sync events, so clear mode flags for DSI */ adjusted_mode->flags = 0; + if (IS_BROXTON(dev_priv)) { + /* Dual link goes to DSI transcoder A. */ + if (intel_dsi->ports == BIT(PORT_C)) + pipe_config->cpu_transcoder = TRANSCODER_DSI_C; + else + pipe_config->cpu_transcoder = TRANSCODER_DSI_A; + } + return true; } @@ -403,7 +430,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder) temp &= ~LANE_CONFIGURATION_MASK; temp &= ~DUAL_LINK_MODE_MASK; - if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) { + if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { temp |= (intel_dsi->dual_link - 1) << DUAL_LINK_MODE_SHIFT; temp |= intel_crtc->pipe ? @@ -478,7 +505,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder) DRM_DEBUG_KMS("\n"); + /* + * The BIOS may leave the PLL in a wonky state where it doesn't + * lock. It needs to be fully powered down to fix it. + */ + intel_disable_dsi_pll(encoder); intel_enable_dsi_pll(encoder); + intel_dsi_prepare(encoder); /* Panel Enable over CRC PMIC */ @@ -667,7 +700,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, struct drm_device *dev = encoder->base.dev; enum intel_display_power_domain power_domain; enum port port; - bool ret; + bool active = false; DRM_DEBUG_KMS("\n"); @@ -675,48 +708,130 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) return false; - ret = false; + /* + * On Broxton the PLL needs to be enabled with a valid divider + * configuration, otherwise accessing DSI registers will hang the + * machine. See BSpec North Display Engine registers/MIPI[BXT]. + */ + if (IS_BROXTON(dev_priv) && !intel_dsi_pll_is_enabled(dev_priv)) + goto out_put_power; /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { i915_reg_t ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port); - u32 dpi_enabled, func; - - func = I915_READ(MIPI_DSI_FUNC_PRG(port)); - dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE; + bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE; /* Due to some hardware limitations on BYT, MIPI Port C DPI * Enable bit does not get set. 
To check whether DSI Port C * was enabled in BIOS, check the Pipe B enable bit */ if (IS_VALLEYVIEW(dev) && port == PORT_C) - dpi_enabled = I915_READ(PIPECONF(PIPE_B)) & - PIPECONF_ENABLE; + enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE; - if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) { - if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) { - *pipe = port == PORT_A ? PIPE_A : PIPE_B; - ret = true; + /* Try command mode if video mode not enabled */ + if (!enabled) { + u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port)); + enabled = tmp & CMD_MODE_DATA_WIDTH_MASK; + } + + if (!enabled) + continue; + + if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) + continue; + + if (IS_BROXTON(dev_priv)) { + u32 tmp = I915_READ(MIPI_CTRL(port)); + tmp &= BXT_PIPE_SELECT_MASK; + tmp >>= BXT_PIPE_SELECT_SHIFT; - goto out; - } + if (WARN_ON(tmp > PIPE_C)) + continue; + + *pipe = tmp; + } else { + *pipe = port == PORT_A ? PIPE_A : PIPE_B; } + + active = true; + break; } -out: + +out_put_power: intel_display_power_put(dev_priv, power_domain); - return ret; + return active; } +static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) +{ + struct drm_device *dev = encoder->base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_display_mode *adjusted_mode = + &pipe_config->base.adjusted_mode; + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); + unsigned int bpp, fmt; + enum port port; + u16 vfp, vsync, vbp; + + /* + * Atleast one port is active as encoder->get_config called only if + * encoder->get_hw_state() returns true. + */ + for_each_dsi_port(port, intel_dsi->ports) { + if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE) + break; + } + + fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK; + pipe_config->pipe_bpp = + mipi_dsi_pixel_format_to_bpp( + pixel_format_from_register_bits(fmt)); + bpp = pipe_config->pipe_bpp; + + /* In terms of pixels */ + adjusted_mode->crtc_hdisplay = + I915_READ(BXT_MIPI_TRANS_HACTIVE(port)); + adjusted_mode->crtc_vdisplay = + I915_READ(BXT_MIPI_TRANS_VACTIVE(port)); + adjusted_mode->crtc_vtotal = + I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); + + /* + * TODO: Retrieve hfp, hsync and hbp. 
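For reference, the vertical reconstruction below follows standard modeline arithmetic; with a hypothetical 1080-line panel reporting vfp = 4, vsync = 5 and a vtotal register of 1125 (editor's numbers, not from the patch):

	crtc_vsync_start  = vfp + vdisplay	 = 4 + 1080 = 1084
	crtc_vsync_end	  = vsync + vsync_start	 = 5 + 1084 = 1089
	crtc_vblank_start = vdisplay		 = 1080
	crtc_vblank_end	  = vtotal		 = 1125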
Adjust them for dual link and + * calculate hsync_start, hsync_end, htotal and hblank_end + */ + + /* vertical values are in terms of lines */ + vfp = I915_READ(MIPI_VFP_COUNT(port)); + vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); + vbp = I915_READ(MIPI_VBP_COUNT(port)); + + adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; + + adjusted_mode->crtc_vsync_start = + vfp + adjusted_mode->crtc_vdisplay; + adjusted_mode->crtc_vsync_end = + vsync + adjusted_mode->crtc_vsync_start; + adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; + adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; +} + + static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { + struct drm_device *dev = encoder->base.dev; u32 pclk; DRM_DEBUG_KMS("\n"); pipe_config->has_dsi_encoder = true; + if (IS_BROXTON(dev)) + bxt_dsi_get_pipe_config(encoder, pipe_config); + /* * DPLL_MD is not used in case of DSI, reading will get some default value * set dpll_md = 0 @@ -787,7 +902,7 @@ static void set_dsi_timings(struct drm_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format); + unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); unsigned int lane_count = intel_dsi->lane_count; u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; @@ -849,6 +964,23 @@ static void set_dsi_timings(struct drm_encoder *encoder, } } +static u32 pixel_format_to_reg(enum mipi_dsi_pixel_format fmt) +{ + switch (fmt) { + case MIPI_DSI_FMT_RGB888: + return VID_MODE_FORMAT_RGB888; + case MIPI_DSI_FMT_RGB666: + return VID_MODE_FORMAT_RGB666; + case MIPI_DSI_FMT_RGB666_PACKED: + return VID_MODE_FORMAT_RGB666_PACKED; + case MIPI_DSI_FMT_RGB565: + return VID_MODE_FORMAT_RGB565; + default: + MISSING_CASE(fmt); + return VID_MODE_FORMAT_RGB666; + } +} + static void intel_dsi_prepare(struct intel_encoder *intel_encoder) { struct drm_encoder *encoder = &intel_encoder->base; @@ -858,7 +990,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; enum port port; - unsigned int bpp = dsi_pixel_format_bpp(intel_dsi->pixel_format); + unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 val, tmp; u16 mode_hdisplay; @@ -917,9 +1049,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder) val |= CMD_MODE_DATA_WIDTH_8_BIT; /* XXX */ } else { val |= intel_dsi->channel << VID_MODE_CHANNEL_NUMBER_SHIFT; - - /* XXX: cross-check bpp vs. pixel format? 
*/ - val |= intel_dsi->pixel_format; + val |= pixel_format_to_reg(intel_dsi->pixel_format); } tmp = 0; @@ -1121,11 +1251,13 @@ void intel_dsi_init(struct drm_device *dev) DRM_DEBUG_KMS("\n"); /* There is no detection method for MIPI so rely on VBT */ - if (!dev_priv->vbt.has_mipi) + if (!intel_bios_is_dsi_present(dev_priv, &port)) return; if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { dev_priv->mipi_mmio_base = VLV_MIPI_BASE; + } else if (IS_BROXTON(dev)) { + dev_priv->mipi_mmio_base = BXT_MIPI_BASE; } else { DRM_ERROR("Unsupported Mipi device to reg base"); return; @@ -1161,17 +1293,21 @@ void intel_dsi_init(struct drm_device *dev) intel_connector->get_hw_state = intel_connector_get_hw_state; intel_connector->unregister = intel_connector_unregister; - /* Pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI port C */ - if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIA) { - intel_encoder->crtc_mask = (1 << PIPE_A); - intel_dsi->ports = (1 << PORT_A); - } else if (dev_priv->vbt.dsi.port == DVO_PORT_MIPIC) { - intel_encoder->crtc_mask = (1 << PIPE_B); - intel_dsi->ports = (1 << PORT_C); - } + /* + * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI + * port C. BXT isn't limited like this. + */ + if (IS_BROXTON(dev_priv)) + intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + else if (port == PORT_A) + intel_encoder->crtc_mask = BIT(PIPE_A); + else + intel_encoder->crtc_mask = BIT(PIPE_B); if (dev_priv->vbt.dsi.config->dual_link) - intel_dsi->ports = ((1 << PORT_A) | (1 << PORT_C)); + intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); + else + intel_dsi->ports = BIT(port); /* Create a DSI host (and a device) for each port. */ for_each_dsi_port(port, intel_dsi->ports) { diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h index 92f39227b361..dabde19ee8aa 100644 --- a/drivers/gpu/drm/i915/intel_dsi.h +++ b/drivers/gpu/drm/i915/intel_dsi.h @@ -34,8 +34,6 @@ #define DSI_DUAL_LINK_FRONT_BACK 1 #define DSI_DUAL_LINK_PIXEL_ALT 2 -int dsi_pixel_format_bpp(int pixel_format); - struct intel_dsi_host; struct intel_dsi { @@ -64,8 +62,12 @@ struct intel_dsi { /* number of DSI lanes */ unsigned int lane_count; - /* video mode pixel format for MIPI_DSI_FUNC_PRG register */ - u32 pixel_format; + /* + * video mode pixel format + * + * XXX: consolidate on .format in struct mipi_dsi_device. 
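The for_each_dsi_port() change below drops the open-coded loop in favour of the generic port-mask iterator, which presumably expands to the same thing with BIT(port) in place of (1 << port). Usage stays as before (editor's example):

enum port port;

for_each_dsi_port(port, BIT(PORT_A) | BIT(PORT_C)) {
	/* visits PORT_A, then PORT_C; ports outside the mask are skipped */
}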
+ */ + enum mipi_dsi_pixel_format pixel_format; /* video mode format for MIPI_VIDEO_MODE_FORMAT register */ u32 video_mode_format; @@ -117,15 +119,14 @@ static inline struct intel_dsi_host *to_intel_dsi_host(struct mipi_dsi_host *h) return container_of(h, struct intel_dsi_host, base); } -#define for_each_dsi_port(__port, __ports_mask) \ - for ((__port) = PORT_A; (__port) < I915_MAX_PORTS; (__port)++) \ - for_each_if ((__ports_mask) & (1 << (__port))) +#define for_each_dsi_port(__port, __ports_mask) for_each_port_masked(__port, __ports_mask) static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder) { return container_of(encoder, struct intel_dsi, base.base); } +bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); extern void intel_enable_dsi_pll(struct intel_encoder *encoder); extern void intel_disable_dsi_pll(struct intel_encoder *encoder); extern u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp); @@ -133,5 +134,6 @@ extern void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); +enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt); #endif /* _INTEL_DSI_H */ diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c index 7f145b4fec6a..e498f1c3221e 100644 --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c @@ -58,50 +58,41 @@ static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel) #define NS_KHZ_RATIO 1000000 -#define GPI0_NC_0_HV_DDI0_HPD 0x4130 -#define GPIO_NC_0_HV_DDI0_PAD 0x4138 -#define GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120 -#define GPIO_NC_1_HV_DDI0_DDC_SDA_PAD 0x4128 -#define GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110 -#define GPIO_NC_2_HV_DDI0_DDC_SCL_PAD 0x4118 -#define GPIO_NC_3_PANEL0_VDDEN 0x4140 -#define GPIO_NC_3_PANEL0_VDDEN_PAD 0x4148 -#define GPIO_NC_4_PANEL0_BLKEN 0x4150 -#define GPIO_NC_4_PANEL0_BLKEN_PAD 0x4158 -#define GPIO_NC_5_PANEL0_BLKCTL 0x4160 -#define GPIO_NC_5_PANEL0_BLKCTL_PAD 0x4168 -#define GPIO_NC_6_PCONF0 0x4180 -#define GPIO_NC_6_PAD 0x4188 -#define GPIO_NC_7_PCONF0 0x4190 -#define GPIO_NC_7_PAD 0x4198 -#define GPIO_NC_8_PCONF0 0x4170 -#define GPIO_NC_8_PAD 0x4178 -#define GPIO_NC_9_PCONF0 0x4100 -#define GPIO_NC_9_PAD 0x4108 -#define GPIO_NC_10_PCONF0 0x40E0 -#define GPIO_NC_10_PAD 0x40E8 -#define GPIO_NC_11_PCONF0 0x40F0 -#define GPIO_NC_11_PAD 0x40F8 - -struct gpio_table { - u16 function_reg; - u16 pad_reg; - u8 init; +/* base offsets for gpio pads */ +#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130 +#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120 +#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110 +#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140 +#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150 +#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160 +#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180 +#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190 +#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170 +#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100 +#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0 +#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0 + +#define VLV_GPIO_PCONF0(base_offset) (base_offset) +#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8) + +struct gpio_map { + u16 base_offset; + bool init; }; -static struct gpio_table gtable[] = { - { GPI0_NC_0_HV_DDI0_HPD, GPIO_NC_0_HV_DDI0_PAD, 0 }, - { GPIO_NC_1_HV_DDI0_DDC_SDA, GPIO_NC_1_HV_DDI0_DDC_SDA_PAD, 0 }, - { GPIO_NC_2_HV_DDI0_DDC_SCL, GPIO_NC_2_HV_DDI0_DDC_SCL_PAD, 0 }, - { GPIO_NC_3_PANEL0_VDDEN, 
GPIO_NC_3_PANEL0_VDDEN_PAD, 0 }, - { GPIO_NC_4_PANEL0_BLKEN, GPIO_NC_4_PANEL0_BLKEN_PAD, 0 }, - { GPIO_NC_5_PANEL0_BLKCTL, GPIO_NC_5_PANEL0_BLKCTL_PAD, 0 }, - { GPIO_NC_6_PCONF0, GPIO_NC_6_PAD, 0 }, - { GPIO_NC_7_PCONF0, GPIO_NC_7_PAD, 0 }, - { GPIO_NC_8_PCONF0, GPIO_NC_8_PAD, 0 }, - { GPIO_NC_9_PCONF0, GPIO_NC_9_PAD, 0 }, - { GPIO_NC_10_PCONF0, GPIO_NC_10_PAD, 0}, - { GPIO_NC_11_PCONF0, GPIO_NC_11_PAD, 0} +static struct gpio_map vlv_gpio_table[] = { + { VLV_GPIO_NC_0_HV_DDI0_HPD }, + { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA }, + { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL }, + { VLV_GPIO_NC_3_PANEL0_VDDEN }, + { VLV_GPIO_NC_4_PANEL0_BKLTEN }, + { VLV_GPIO_NC_5_PANEL0_BKLTCTL }, + { VLV_GPIO_NC_6_HV_DDI1_HPD }, + { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA }, + { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL }, + { VLV_GPIO_NC_9_PANEL1_VDDEN }, + { VLV_GPIO_NC_10_PANEL1_BKLTEN }, + { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, }; static inline enum port intel_dsi_seq_port_to_port(u8 port) @@ -196,56 +187,76 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) return data; } -static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) +static void vlv_exec_gpio(struct drm_i915_private *dev_priv, + u8 gpio_source, u8 gpio_index, bool value) { - u8 gpio, action; - u16 function, pad; - u32 val; - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->vbt.dsi.seq_version >= 3) - data++; - - gpio = *data++; + struct gpio_map *map; + u16 pconf0, padval; + u32 tmp; + u8 port; - /* pull up/down */ - action = *data++ & 1; - - if (gpio >= ARRAY_SIZE(gtable)) { - DRM_DEBUG_KMS("unknown gpio %u\n", gpio); - goto out; + if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) { + DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index); + return; } - if (!IS_VALLEYVIEW(dev_priv)) { - DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); - goto out; - } + map = &vlv_gpio_table[gpio_index]; if (dev_priv->vbt.dsi.seq_version >= 3) { DRM_DEBUG_KMS("GPIO element v3 not supported\n"); - goto out; + return; + } else { + if (gpio_source == 0) { + port = IOSF_PORT_GPIO_NC; + } else if (gpio_source == 1) { + port = IOSF_PORT_GPIO_SC; + } else { + DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); + return; + } } - function = gtable[gpio].function_reg; - pad = gtable[gpio].pad_reg; + pconf0 = VLV_GPIO_PCONF0(map->base_offset); + padval = VLV_GPIO_PAD_VAL(map->base_offset); mutex_lock(&dev_priv->sb_lock); - if (!gtable[gpio].init) { - /* program the function */ + if (!map->init) { /* FIXME: remove constant below */ - vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, function, - 0x2000CC00); - gtable[gpio].init = 1; + vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00); + map->init = true; } - val = 0x4 | action; + tmp = 0x4 | value; + vlv_iosf_sb_write(dev_priv, port, padval, tmp); + mutex_unlock(&dev_priv->sb_lock); +} + +static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) +{ + struct drm_device *dev = intel_dsi->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + u8 gpio_source, gpio_index; + bool value; + + if (dev_priv->vbt.dsi.seq_version >= 3) + data++; + + gpio_index = *data++; + + /* gpio source in sequence v2 only */ + if (dev_priv->vbt.dsi.seq_version == 2) + gpio_source = (*data >> 1) & 3; + else + gpio_source = 0; /* pull up/down */ - vlv_iosf_sb_write(dev_priv, IOSF_PORT_GPIO_NC, pad, val); - mutex_unlock(&dev_priv->sb_lock); + value = *data++ & 1; + + if (IS_VALLEYVIEW(dev_priv)) + 
vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); + else + DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); -out: return data; } @@ -420,7 +431,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; struct vbt_panel *vbt_panel; - u32 bits_per_pixel = 24; + u32 bpp; u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui; u32 ui_num, ui_den; u32 prepare_cnt, exit_zero_cnt, clk_zero_cnt, trail_cnt; @@ -436,12 +447,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1; intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; intel_dsi->lane_count = mipi_config->lane_cnt + 1; - intel_dsi->pixel_format = mipi_config->videomode_color_format << 7; + intel_dsi->pixel_format = + pixel_format_from_register_bits( + mipi_config->videomode_color_format << 7); + bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); + intel_dsi->dual_link = mipi_config->dual_link; intel_dsi->pixel_overlap = mipi_config->pixel_overlap; - - bits_per_pixel = dsi_pixel_format_bpp(intel_dsi->pixel_format); - intel_dsi->operation_mode = mipi_config->is_cmd_mode; intel_dsi->video_mode_format = mipi_config->video_transfer_mode; intel_dsi->escape_clk_div = mipi_config->byte_clk_sel; @@ -475,8 +487,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) */ if (intel_dsi->video_mode_format == VIDEO_MODE_BURST) { if (mipi_config->target_burst_mode_freq) { - computed_ddr = - (pclk * bits_per_pixel) / intel_dsi->lane_count; + computed_ddr = (pclk * bpp) / intel_dsi->lane_count; if (mipi_config->target_burst_mode_freq < computed_ddr) { @@ -499,7 +510,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->burst_mode_ratio = burst_mode_ratio; intel_dsi->pclk = pclk; - bitrate = (pclk * bits_per_pixel) / intel_dsi->lane_count; + bitrate = (pclk * bpp) / intel_dsi->lane_count; switch (intel_dsi->escape_clk_div) { case 0: diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c index 70883c54cb0a..7ad59d13dd4c 100644 --- a/drivers/gpu/drm/i915/intel_dsi_pll.c +++ b/drivers/gpu/drm/i915/intel_dsi_pll.c @@ -30,33 +30,12 @@ #include "i915_drv.h" #include "intel_dsi.h" -int dsi_pixel_format_bpp(int pixel_format) -{ - int bpp; - - switch (pixel_format) { - default: - case VID_MODE_FORMAT_RGB888: - case VID_MODE_FORMAT_RGB666_LOOSE: - bpp = 24; - break; - case VID_MODE_FORMAT_RGB666: - bpp = 18; - break; - case VID_MODE_FORMAT_RGB565: - bpp = 16; - break; - } - - return bpp; -} - struct dsi_mnp { u32 dsi_pll_ctrl; u32 dsi_pll_div; }; -static const u32 lfsr_converts[] = { +static const u16 lfsr_converts[] = { 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */ 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */ 106, 53, 282, 397, 454, 227, 113, 56, 284, 142, /* 81 - 90 */ @@ -64,10 +43,11 @@ static const u32 lfsr_converts[] = { }; /* Get DSI clock from pixel clock */ -static u32 dsi_clk_from_pclk(u32 pclk, int pixel_format, int lane_count) +static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt, + int lane_count) { u32 dsi_clk_khz; - u32 bpp = dsi_pixel_format_bpp(pixel_format); + u32 bpp = mipi_dsi_pixel_format_to_bpp(fmt); /* DSI data rate = pixel clock * bits per pixel / lane count pixel clock is converted from KHz to Hz */ @@ -212,6 +192,36 @@ static void 
vlv_disable_dsi_pll(struct intel_encoder *encoder)
 	mutex_unlock(&dev_priv->sb_lock);
 }
 
+static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+	bool enabled;
+	u32 val;
+	u32 mask;
+
+	mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
+	val = I915_READ(BXT_DSI_PLL_ENABLE);
+	enabled = (val & mask) == mask;
+
+	if (!enabled)
+		return false;
+
+	/*
+	 * Both dividers must be programmed with valid values even if only one
+	 * of the PLLs is used, see BSpec/Broxton Clocks. Check this here for
+	 * paranoia, since BIOS is known to misconfigure PLLs in this way at
+	 * times, and since accessing DSI registers with invalid dividers
+	 * causes a system hang.
+	 */
+	val = I915_READ(BXT_DSI_PLL_CTL);
+	if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
+		DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
+				 val);
+		enabled = false;
+	}
+
+	return enabled;
+}
+
 static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
@@ -232,9 +242,9 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
 		DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
 }
 
-static void assert_bpp_mismatch(int pixel_format, int pipe_bpp)
+static void assert_bpp_mismatch(enum mipi_dsi_pixel_format fmt, int pipe_bpp)
 {
-	int bpp = dsi_pixel_format_bpp(pixel_format);
+	int bpp = mipi_dsi_pixel_format_to_bpp(fmt);
 
 	WARN(bpp != pipe_bpp,
 	     "bpp match assertion failure (expected %d, current %d)\n",
@@ -248,7 +258,7 @@ static u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp)
 	u32 dsi_clock, pclk;
 	u32 pll_ctl, pll_div;
 	u32 m = 0, p = 0, n;
-	int refclk = 25000;
+	int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
 	int i;
 
 	DRM_DEBUG_KMS("\n");
 
@@ -362,35 +372,57 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 /* Program BXT Mipi clocks and dividers */
 static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port)
 {
-	u32 tmp;
-	u32 divider;
-	u32 dsi_rate;
-	u32 pll_ratio;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+	u32 dsi_rate = 0;
+	u32 pll_ratio = 0;
+	u32 rx_div;
+	u32 tx_div;
+	u32 rx_div_upper;
+	u32 rx_div_lower;
+	u32 mipi_8by3_divider;
 
 	/* Clear old configurations */
 	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
 	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
-	tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
 
 	/* Get the current DSI rate (actual) */
 	pll_ratio = I915_READ(BXT_DSI_PLL_CTL) & BXT_DSI_PLL_RATIO_MASK;
 	dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
 
-	/* Max possible output of clock is 39.5 MHz, program value -1 */
-	divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1;
-	tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider);
+	/*
+	 * tx clock should be <= 20MHz and, per bspec, the div value
+	 * must be decremented by 1
+	 */
+	tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
+	/*
+	 * rx clock should be <= 150MHz and, per bspec, the div value
+	 * must be decremented by 1
+	 */
+	rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;
 
 	/*
-	 * Tx escape clock must be as close to 20MHz possible, but should
-	 * not exceed it. Hence select divide by 2
+	 * The rx divider value needs to be written to two different
+	 * bit fields in the register, hence the value is split into
+	 * an upper and a lower part below
 	 */
-	tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port);
+	rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
+	rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
+
+	/* As per bspec, program the 8/3X clock divider to the value below */
+	if (dev_priv->vbt.dsi.config->is_cmd_mode)
+		mipi_8by3_divider = 0x2;
+	else
+		mipi_8by3_divider = 0x3;
 
-	tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port);
+	tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
+	tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
+	tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
+	tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
 
 	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 }
@@ -452,14 +484,6 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
 
 	DRM_DEBUG_KMS("\n");
 
-	val = I915_READ(BXT_DSI_PLL_ENABLE);
-
-	if (val & BXT_DSI_PLL_DO_ENABLE) {
-		WARN(1, "DSI PLL already enabled. Disabling it.\n");
-		val &= ~BXT_DSI_PLL_DO_ENABLE;
-		I915_WRITE(BXT_DSI_PLL_ENABLE, val);
-	}
-
 	/* Configure PLL values */
 	if (!bxt_configure_dsi_pll(encoder)) {
 		DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n");
@@ -484,6 +508,16 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder)
 	DRM_DEBUG_KMS("DSI PLL locked\n");
 }
 
+bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
+{
+	if (IS_BROXTON(dev_priv))
+		return bxt_dsi_pll_is_enabled(dev_priv);
+
+	MISSING_CASE(INTEL_DEVID(dev_priv));
+
+	return false;
+}
+
 void intel_enable_dsi_pll(struct intel_encoder *encoder)
 {
 	struct drm_device *dev = encoder->base.dev;
@@ -513,9 +547,9 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
 	/* Clear old configurations */
 	tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
 	tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port));
-	tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port));
-	tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+	tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+	tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
 	I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
 
 	I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
 }
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 0f0492f4a357..d5a7cfec589b 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 				      int size,
 				      int fb_cpp)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int compression_threshold = 1;
 	int ret;
 	u64 end;
@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
 	 * underruns, even if that range is not reserved by the BIOS. */
 	if (IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+		end = ggtt->stolen_size - 8 * 1024 * 1024;
 	else
-		end = dev_priv->gtt.stolen_usable_size;
 
 	/* HACK: This code depends on what we will do in *_enable_fbc. If that
 	 * code changes, this code needs to change as well.
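The escape-clock programming added to bxt_dsi_program_clocks() above reduces to simple integer arithmetic. A minimal, self-contained sketch (not part of the patch; the 0x3 and 0xc masks are assumed from the RX_DIVIDER_BIT_1_2/RX_DIVIDER_BIT_3_4 usage, i.e. the 4-bit rx divider splits into bits [1:0] and [3:2]):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		/* example: a 900 MHz DSI rate, expressed in kHz as in the patch */
		unsigned int dsi_rate = 900000;

		/* tx escape clock <= 20 MHz, rx <= 150 MHz; program value - 1 */
		unsigned int tx_div = DIV_ROUND_UP(dsi_rate, 20000) - 1;
		unsigned int rx_div = DIV_ROUND_UP(dsi_rate, 150000) - 1;

		/* the rx divider is written as two separate register fields */
		unsigned int rx_div_lower = rx_div & 0x3;
		unsigned int rx_div_upper = (rx_div & 0xc) >> 2;

		/* prints: tx_div=44 rx_div=5 (lower=1 upper=1) */
		printf("tx_div=%u rx_div=%u (lower=%u upper=%u)\n",
		       tx_div, rx_div, rx_div_lower, rx_div_upper);
		return 0;
	}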
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 97a91e631915..79ac202f3870 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, struct drm_framebuffer *fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_mode_fb_cmd2 mode_cmd = {}; struct drm_i915_gem_object *obj = NULL; int size, ret; @@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, /* If the FB is too big, just don't use it since fbdev is not very * important and we should probably use that space with FBC or other * features. */ - if (size * 2 < dev_priv->gtt.stolen_usable_size) + if (size * 2 < ggtt->stolen_usable_size) obj = i915_gem_object_create_stolen(dev, size); if (obj == NULL) obj = i915_gem_alloc_object(dev, size); @@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper, container_of(helper, struct intel_fbdev, helper); struct intel_framebuffer *intel_fb = ifbdev->fb; struct drm_device *dev = helper->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct fb_info *info; struct drm_framebuffer *fb; struct drm_i915_gem_object *obj; @@ -220,7 +222,7 @@ static int intelfb_create(struct drm_fb_helper *helper, * This also validates that any existing fb inherited from the * BIOS is suitable for own access. */ - ret = intel_pin_and_fence_fb_obj(NULL, &ifbdev->fb->base, NULL); + ret = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0)); if (ret) goto out_unlock; @@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper, /* setup aperture base/size for vesafb takeover */ info->apertures->ranges[0].base = dev->mode_config.fb_base; - info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; + info->apertures->ranges[0].size = ggtt->mappable_end; info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); info->fix.smem_len = size; info->screen_base = - ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), + ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), size); if (!info->screen_base) { DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); @@ -379,6 +381,7 @@ retry: struct drm_connector *connector; struct drm_encoder *encoder; struct drm_fb_helper_crtc *new_crtc; + struct intel_crtc *intel_crtc; fb_conn = fb_helper->connector_info[i]; connector = fb_conn->connector; @@ -420,6 +423,13 @@ retry: num_connectors_enabled++; + intel_crtc = to_intel_crtc(connector->state->crtc); + for (j = 0; j < 256; j++) { + intel_crtc->lut_r[j] = j; + intel_crtc->lut_g[j] = j; + intel_crtc->lut_b[j] = j; + } + new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc); /* diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c index bda526660e20..9be839a242f9 100644 --- a/drivers/gpu/drm/i915/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c @@ -212,7 +212,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); POSTING_READ(SERR_INT); - DRM_ERROR("pch fifo underrun on pch transcoder %c\n", + DRM_ERROR("pch fifo underrun on pch transcoder %s\n", transcoder_name(pch_transcoder)); } @@ -235,7 +235,7 @@ static void 
cpt_set_fifo_underrun_reporting(struct drm_device *dev, if (old && I915_READ(SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { - DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n", + DRM_ERROR("uncleared pch fifo underrun on pch transcoder %s\n", transcoder_name(pch_transcoder)); } } @@ -333,7 +333,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv, old = !intel_crtc->pch_fifo_underrun_disabled; intel_crtc->pch_fifo_underrun_disabled = !enable; - if (HAS_PCH_IBX(dev_priv->dev)) + if (HAS_PCH_IBX(dev_priv)) ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, enable); else @@ -363,7 +363,7 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, return; /* GMCH can't disable fifo underruns, filter them. */ - if (HAS_GMCH_DISPLAY(dev_priv->dev) && + if (HAS_GMCH_DISPLAY(dev_priv) && to_intel_crtc(crtc)->cpu_fifo_underrun_disabled) return; @@ -386,7 +386,7 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv, { if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, false)) - DRM_ERROR("PCH transcoder %c FIFO underrun\n", + DRM_ERROR("PCH transcoder %s FIFO underrun\n", transcoder_name(pch_transcoder)); } diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c index 82a3c03fbc0e..876e5da44c4e 100644 --- a/drivers/gpu/drm/i915/intel_guc_loader.c +++ b/drivers/gpu/drm/i915/intel_guc_loader.c @@ -59,7 +59,7 @@ * */ -#define I915_SKL_GUC_UCODE "i915/skl_guc_ver4.bin" +#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" MODULE_FIRMWARE(I915_SKL_GUC_UCODE); /* User-friendly representation of an enum */ @@ -81,14 +81,14 @@ const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) { - struct intel_engine_cs *ring; - int i, irqs; + struct intel_engine_cs *engine; + int irqs; /* tell all command streamers NOT to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_ring(ring, dev_priv, i) - I915_WRITE(RING_MODE_GEN7(ring), irqs); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MODE_GEN7(engine), irqs); /* route all GT interrupts to the host */ I915_WRITE(GUC_BCS_RCS_IER, 0); @@ -98,14 +98,14 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv) static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv) { - struct intel_engine_cs *ring; - int i, irqs; + struct intel_engine_cs *engine; + int irqs; /* tell all command streamers to forward interrupts and vblank to GuC */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_ring(ring, dev_priv, i) - I915_WRITE(RING_MODE_GEN7(ring), irqs); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MODE_GEN7(engine), irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. 
*/
 	irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
@@ -353,6 +353,24 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
+static int i915_reset_guc(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	u32 guc_status;
+
+	ret = intel_guc_reset(dev_priv);
+	if (ret) {
+		DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+		return ret;
+	}
+
+	guc_status = I915_READ(GUC_STATUS);
+	WARN(!(guc_status & GS_MIA_IN_RESET),
+	     "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
+
+	return ret;
+}
+
 /**
  * intel_guc_ucode_load() - load GuC uCode into the device
  * @dev: drm device
 *
@@ -369,7 +387,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
-	int err = 0;
+	int retries, err = 0;
 
 	if (!i915.enable_guc_submission)
 		return 0;
@@ -417,9 +435,33 @@ int intel_guc_ucode_load(struct drm_device *dev)
 	if (err)
 		goto fail;
 
-	err = guc_ucode_xfer(dev_priv);
-	if (err)
-		goto fail;
+	/*
+	 * WaEnableuKernelHeaderValidFix:skl,bxt
+	 * For BXT, this applies only up to B0, but the WA below is needed
+	 * for later steppings as well, so it is extended to cover them.
+	 */
+	/* WaEnableGuCBootHashCheckNotSet:skl,bxt */
+	for (retries = 3; ; ) {
+		/*
+		 * Always reset the GuC just before (re)loading, so
+		 * that the state and timing are fairly predictable
+		 */
+		err = i915_reset_guc(dev_priv);
+		if (err) {
+			DRM_ERROR("GuC reset failed, err %d\n", err);
+			goto fail;
+		}
+
+		err = guc_ucode_xfer(dev_priv);
+		if (!err)
+			break;
+
+		if (--retries == 0)
+			goto fail;
+
+		DRM_INFO("GuC fw load failed, err %d; will reset and "
			 "retry %d more time(s)\n", err, retries);
+	}
 
 	guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
 
@@ -440,6 +482,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
 	return 0;
 
 fail:
+	DRM_ERROR("GuC firmware load failed, err %d\n", err);
 	if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
 		guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
 
@@ -595,8 +638,8 @@ void intel_guc_ucode_init(struct drm_device *dev)
 		fw_path = NULL;
 	} else if (IS_SKYLAKE(dev)) {
 		fw_path = I915_SKL_GUC_UCODE;
-		guc_fw->guc_fw_major_wanted = 4;
-		guc_fw->guc_fw_minor_wanted = 3;
+		guc_fw->guc_fw_major_wanted = 6;
+		guc_fw->guc_fw_minor_wanted = 1;
 	} else {
 		i915.enable_guc_submission = false;
 		fw_path = "";	/* unknown device */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a0d8daed2470..b199ede08f72 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -638,7 +638,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
 		reg = HSW_TVIDEO_DIP_GCP(crtc->config->cpu_transcoder);
 	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
 		reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
-	else if (HAS_PCH_SPLIT(dev_priv->dev))
+	else if (HAS_PCH_SPLIT(dev_priv))
 		reg = TVIDEO_DIP_GCP(crtc->pipe);
 	else
 		return false;
@@ -952,9 +952,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
 	if (pipe_config->pixel_multiplier)
 		dotclock /= pipe_config->pixel_multiplier;
 
-	if (HAS_PCH_SPLIT(dev_priv->dev))
-		ironlake_check_encoder_dotclock(pipe_config, dotclock);
-
 	pipe_config->base.adjusted_mode.crtc_clock = dotclock;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 52fbe530fc9e..81de23098be7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -124,7 +124,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
u32 val; /* When using bit bashing for I2C, this bit needs to be set to 1 */ - if (!IS_PINEVIEW(dev_priv->dev)) + if (!IS_PINEVIEW(dev_priv)) return; val = I915_READ(DSPCLK_GATE_D); @@ -264,7 +264,7 @@ gmbus_wait_hw_status(struct drm_i915_private *dev_priv, u32 gmbus2 = 0; DEFINE_WAIT(wait); - if (!HAS_GMBUS_IRQ(dev_priv->dev)) + if (!HAS_GMBUS_IRQ(dev_priv)) gmbus4_irq_en = 0; /* Important: The hw handles only the first bit, so set only one! Since @@ -300,7 +300,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv) #define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0) - if (!HAS_GMBUS_IRQ(dev_priv->dev)) + if (!HAS_GMBUS_IRQ(dev_priv)) return wait_for(C, 10); /* Important: The hw handles only the first bit, so set only one! */ @@ -571,15 +571,14 @@ clear_err: goto out; timeout: - DRM_INFO("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", - bus->adapter.name, bus->reg0 & 0xff); + DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n", + bus->adapter.name, bus->reg0 & 0xff); I915_WRITE(GMBUS0, 0); /* * Hardware may not support GMBUS over these pins? Try GPIO bitbanging * instead. Use EAGAIN to have i2c core retry. */ - bus->force_bit = 1; ret = -EAGAIN; out: @@ -597,10 +596,15 @@ gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS); mutex_lock(&dev_priv->gmbus_mutex); - if (bus->force_bit) + if (bus->force_bit) { ret = i2c_bit_algo.master_xfer(adapter, msgs, num); - else + if (ret < 0) + bus->force_bit &= ~GMBUS_FORCE_BIT_RETRY; + } else { ret = do_gmbus_xfer(adapter, msgs, num); + if (ret == -EAGAIN) + bus->force_bit |= GMBUS_FORCE_BIT_RETRY; + } mutex_unlock(&dev_priv->gmbus_mutex); intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); @@ -718,11 +722,16 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); + struct drm_i915_private *dev_priv = bus->dev_priv; + + mutex_lock(&dev_priv->gmbus_mutex); bus->force_bit += force_bit ? 1 : -1; DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n", force_bit ? "en" : "dis", adapter->name, bus->force_bit); + + mutex_unlock(&dev_priv->gmbus_mutex); } void intel_teardown_gmbus(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 6a978ce80244..5e08ea5aa6d1 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -131,6 +131,7 @@ * preemption, but just sampling the new tail pointer). 
* */ +#include <linux/interrupt.h> #include <drm/drmP.h> #include <drm/i915_drm.h> @@ -228,9 +229,6 @@ enum { static int intel_lr_context_pin(struct intel_context *ctx, struct intel_engine_cs *engine); -static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, - struct drm_i915_gem_object *default_ctx_obj); - /** * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists @@ -266,20 +264,23 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists } static void -logical_ring_init_platform_invariants(struct intel_engine_cs *ring) +logical_ring_init_platform_invariants(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; + + if (IS_GEN8(dev) || IS_GEN9(dev)) + engine->idle_lite_restore_wa = ~0; - ring->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || + engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && - (ring->id == VCS || ring->id == VCS2); + (engine->id == VCS || engine->id == VCS2); - ring->ctx_desc_template = GEN8_CTX_VALID; - ring->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << + engine->ctx_desc_template = GEN8_CTX_VALID; + engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << GEN8_CTX_ADDRESSING_MODE_SHIFT; if (IS_GEN8(dev)) - ring->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; - ring->ctx_desc_template |= GEN8_CTX_PRIVILEGE; + engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; + engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; /* TODO: WaDisableLiteRestore when we start using semaphore * signalling between Command Streamers */ @@ -287,8 +288,8 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring) /* WaEnableForceRestoreInCtxtDescForVCS:skl */ /* WaEnableForceRestoreInCtxtDescForVCS:bxt */ - if (ring->disable_lite_restore_wa) - ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; + if (engine->disable_lite_restore_wa) + engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; } /** @@ -311,24 +312,24 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *ring) */ static void intel_lr_context_descriptor_update(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { uint64_t lrca, desc; - lrca = ctx->engine[ring->id].lrc_vma->node.start + + lrca = ctx->engine[engine->id].lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE; - desc = ring->ctx_desc_template; /* bits 0-11 */ + desc = engine->ctx_desc_template; /* bits 0-11 */ desc |= lrca; /* bits 12-31 */ desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ - ctx->engine[ring->id].lrc_desc = desc; + ctx->engine[engine->id].lrc_desc = desc; } uint64_t intel_lr_context_descriptor(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { - return ctx->engine[ring->id].lrc_desc; + return ctx->engine[engine->id].lrc_desc; } /** @@ -348,98 +349,103 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx, * Return: 20-bits globally unique context ID. 
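 *
 * For example: bits 32-51 of the descriptor built in
 * intel_lr_context_descriptor_update() hold (lrca >> PAGE_SHIFT), so
 * the ID returned here is simply the page frame number of the
 * context's LRCA.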
*/ u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { - return intel_lr_context_descriptor(ctx, ring) >> GEN8_CTX_ID_SHIFT; + return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT; } static void execlists_elsp_write(struct drm_i915_gem_request *rq0, struct drm_i915_gem_request *rq1) { - struct intel_engine_cs *ring = rq0->ring; - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = rq0->engine; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint64_t desc[2]; if (rq1) { - desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->ring); + desc[1] = intel_lr_context_descriptor(rq1->ctx, rq1->engine); rq1->elsp_submitted++; } else { desc[1] = 0; } - desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->ring); + desc[0] = intel_lr_context_descriptor(rq0->ctx, rq0->engine); rq0->elsp_submitted++; /* You must always write both descriptors in the order below. */ - spin_lock(&dev_priv->uncore.lock); - intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL); - I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[1])); - I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[1])); + I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[1])); + I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[1])); - I915_WRITE_FW(RING_ELSP(ring), upper_32_bits(desc[0])); + I915_WRITE_FW(RING_ELSP(engine), upper_32_bits(desc[0])); /* The context is automatically loaded after the following */ - I915_WRITE_FW(RING_ELSP(ring), lower_32_bits(desc[0])); + I915_WRITE_FW(RING_ELSP(engine), lower_32_bits(desc[0])); /* ELSP is a wo register, use another nearby reg for posting */ - POSTING_READ_FW(RING_EXECLIST_STATUS_LO(ring)); - intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL); - spin_unlock(&dev_priv->uncore.lock); + POSTING_READ_FW(RING_EXECLIST_STATUS_LO(engine)); } -static int execlists_update_context(struct drm_i915_gem_request *rq) +static void +execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state) { - struct intel_engine_cs *ring = rq->ring; + ASSIGN_CTX_PDP(ppgtt, reg_state, 3); + ASSIGN_CTX_PDP(ppgtt, reg_state, 2); + ASSIGN_CTX_PDP(ppgtt, reg_state, 1); + ASSIGN_CTX_PDP(ppgtt, reg_state, 0); +} + +static void execlists_update_context(struct drm_i915_gem_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; - uint32_t *reg_state = rq->ctx->engine[ring->id].lrc_reg_state; + uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state; reg_state[CTX_RING_TAIL+1] = rq->tail; - if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { - /* True 32b PPGTT with dynamic page allocation: update PDP - * registers and point the unallocated PDPs to scratch page. - * PML4 is allocated during ppgtt init, so this is not needed - * in 48-bit mode. - */ - ASSIGN_CTX_PDP(ppgtt, reg_state, 3); - ASSIGN_CTX_PDP(ppgtt, reg_state, 2); - ASSIGN_CTX_PDP(ppgtt, reg_state, 1); - ASSIGN_CTX_PDP(ppgtt, reg_state, 0); - } - - return 0; + /* True 32b PPGTT with dynamic page allocation: update PDP + * registers and point the unallocated PDPs to scratch page. + * PML4 is allocated during ppgtt init, so this is not needed + * in 48-bit mode. 
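+	 * (execlists_update_context_pdps() above writes PDP3..PDP0 into
+	 * the context image via ASSIGN_CTX_PDP(), with unallocated PDPs
+	 * pointing at the scratch page.)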
+ */ + if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) + execlists_update_context_pdps(ppgtt, reg_state); } static void execlists_submit_requests(struct drm_i915_gem_request *rq0, struct drm_i915_gem_request *rq1) { + struct drm_i915_private *dev_priv = rq0->i915; + unsigned int fw_domains = rq0->engine->fw_domains; + execlists_update_context(rq0); if (rq1) execlists_update_context(rq1); + spin_lock_irq(&dev_priv->uncore.lock); + intel_uncore_forcewake_get__locked(dev_priv, fw_domains); + execlists_elsp_write(rq0, rq1); + + intel_uncore_forcewake_put__locked(dev_priv, fw_domains); + spin_unlock_irq(&dev_priv->uncore.lock); } -static void execlists_context_unqueue(struct intel_engine_cs *ring) +static void execlists_context_unqueue(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; - struct drm_i915_gem_request *cursor = NULL, *tmp = NULL; + struct drm_i915_gem_request *cursor, *tmp; - assert_spin_locked(&ring->execlist_lock); + assert_spin_locked(&engine->execlist_lock); /* * If irqs are not active generate a warning as batches that finish * without the irqs may get lost and a GPU Hang may occur. */ - WARN_ON(!intel_irqs_enabled(ring->dev->dev_private)); - - if (list_empty(&ring->execlist_queue)) - return; + WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); /* Try to read in pairs */ - list_for_each_entry_safe(cursor, tmp, &ring->execlist_queue, + list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, execlist_link) { if (!req0) { req0 = cursor; @@ -448,172 +454,179 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring) * will update tail past first request's workload */ cursor->elsp_submitted = req0->elsp_submitted; list_move_tail(&req0->execlist_link, - &ring->execlist_retired_req_list); + &engine->execlist_retired_req_list); req0 = cursor; } else { req1 = cursor; + WARN_ON(req1->elsp_submitted); break; } } - if (IS_GEN8(ring->dev) || IS_GEN9(ring->dev)) { + if (unlikely(!req0)) + return; + + if (req0->elsp_submitted & engine->idle_lite_restore_wa) { /* - * WaIdleLiteRestore: make sure we never cause a lite - * restore with HEAD==TAIL + * WaIdleLiteRestore: make sure we never cause a lite restore + * with HEAD==TAIL. + * + * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL as we + * resubmit the request. See gen8_emit_request() for where we + * prepare the padding after the end of the request. */ - if (req0->elsp_submitted) { - /* - * Apply the wa NOOPS to prevent ring:HEAD == req:TAIL - * as we resubmit the request. See gen8_emit_request() - * for where we prepare the padding after the end of the - * request. 
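+	 * Advancing req0->tail by 8 bytes below is what skips over that
+	 * padding, i.e. two dwords of MI_NOOP.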
- */ - struct intel_ringbuffer *ringbuf; + struct intel_ringbuffer *ringbuf; - ringbuf = req0->ctx->engine[ring->id].ringbuf; - req0->tail += 8; - req0->tail &= ringbuf->size - 1; - } + ringbuf = req0->ctx->engine[engine->id].ringbuf; + req0->tail += 8; + req0->tail &= ringbuf->size - 1; } - WARN_ON(req1 && req1->elsp_submitted); - execlists_submit_requests(req0, req1); } -static bool execlists_check_remove_request(struct intel_engine_cs *ring, - u32 request_id) +static unsigned int +execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) { struct drm_i915_gem_request *head_req; - assert_spin_locked(&ring->execlist_lock); + assert_spin_locked(&engine->execlist_lock); - head_req = list_first_entry_or_null(&ring->execlist_queue, + head_req = list_first_entry_or_null(&engine->execlist_queue, struct drm_i915_gem_request, execlist_link); - if (head_req != NULL) { - if (intel_execlists_ctx_id(head_req->ctx, ring) == request_id) { - WARN(head_req->elsp_submitted == 0, - "Never submitted head request\n"); + if (!head_req) + return 0; - if (--head_req->elsp_submitted <= 0) { - list_move_tail(&head_req->execlist_link, - &ring->execlist_retired_req_list); - return true; - } - } - } + if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id)) + return 0; + + WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); - return false; + if (--head_req->elsp_submitted > 0) + return 0; + + list_move_tail(&head_req->execlist_link, + &engine->execlist_retired_req_list); + + return 1; } -static void get_context_status(struct intel_engine_cs *ring, - u8 read_pointer, - u32 *status, u32 *context_id) +static u32 +get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, + u32 *context_id) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; + u32 status; - if (WARN_ON(read_pointer >= GEN8_CSB_ENTRIES)) - return; + read_pointer %= GEN8_CSB_ENTRIES; + + status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine, read_pointer)); + + if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) + return 0; + + *context_id = I915_READ_FW(RING_CONTEXT_STATUS_BUF_HI(engine, + read_pointer)); - *status = I915_READ(RING_CONTEXT_STATUS_BUF_LO(ring, read_pointer)); - *context_id = I915_READ(RING_CONTEXT_STATUS_BUF_HI(ring, read_pointer)); + return status; } /** * intel_lrc_irq_handler() - handle Context Switch interrupts - * @ring: Engine Command Streamer to handle. + * @engine: Engine Command Streamer to handle. * * Check the unread Context Status Buffers and manage the submission of new * contexts to the ELSP accordingly. 
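 *
 * (With this patch the unread CSB entries are first drained into a
 * local csb[] array under forcewake, and only then processed under
 * the engine's execlist_lock.)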
*/ -void intel_lrc_irq_handler(struct intel_engine_cs *ring) +static void intel_lrc_irq_handler(unsigned long data) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct intel_engine_cs *engine = (struct intel_engine_cs *)data; + struct drm_i915_private *dev_priv = engine->dev->dev_private; u32 status_pointer; - u8 read_pointer; - u8 write_pointer; - u32 status = 0; - u32 status_id; - u32 submit_contexts = 0; + unsigned int read_pointer, write_pointer; + u32 csb[GEN8_CSB_ENTRIES][2]; + unsigned int csb_read = 0, i; + unsigned int submit_contexts = 0; - status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring)); + intel_uncore_forcewake_get(dev_priv, engine->fw_domains); - read_pointer = ring->next_context_status_buffer; + status_pointer = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine)); + + read_pointer = engine->next_context_status_buffer; write_pointer = GEN8_CSB_WRITE_PTR(status_pointer); if (read_pointer > write_pointer) write_pointer += GEN8_CSB_ENTRIES; - spin_lock(&ring->execlist_lock); - while (read_pointer < write_pointer) { + if (WARN_ON_ONCE(csb_read == GEN8_CSB_ENTRIES)) + break; + csb[csb_read][0] = get_context_status(engine, ++read_pointer, + &csb[csb_read][1]); + csb_read++; + } - get_context_status(ring, ++read_pointer % GEN8_CSB_ENTRIES, - &status, &status_id); + engine->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; - if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) - continue; + /* Update the read pointer to the old write pointer. Manual ringbuffer + * management ftw </sarcasm> */ + I915_WRITE_FW(RING_CONTEXT_STATUS_PTR(engine), + _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, + engine->next_context_status_buffer << 8)); + + intel_uncore_forcewake_put(dev_priv, engine->fw_domains); + + spin_lock(&engine->execlist_lock); - if (status & GEN8_CTX_STATUS_PREEMPTED) { - if (status & GEN8_CTX_STATUS_LITE_RESTORE) { - if (execlists_check_remove_request(ring, status_id)) + for (i = 0; i < csb_read; i++) { + if (unlikely(csb[i][0] & GEN8_CTX_STATUS_PREEMPTED)) { + if (csb[i][0] & GEN8_CTX_STATUS_LITE_RESTORE) { + if (execlists_check_remove_request(engine, csb[i][1])) WARN(1, "Lite Restored request removed from queue\n"); } else WARN(1, "Preemption without Lite Restore\n"); } - if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) || - (status & GEN8_CTX_STATUS_ELEMENT_SWITCH)) { - if (execlists_check_remove_request(ring, status_id)) - submit_contexts++; - } + if (csb[i][0] & (GEN8_CTX_STATUS_ACTIVE_IDLE | + GEN8_CTX_STATUS_ELEMENT_SWITCH)) + submit_contexts += + execlists_check_remove_request(engine, csb[i][1]); } - if (ring->disable_lite_restore_wa) { - /* Prevent a ctx to preempt itself */ - if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) && - (submit_contexts != 0)) - execlists_context_unqueue(ring); - } else if (submit_contexts != 0) { - execlists_context_unqueue(ring); + if (submit_contexts) { + if (!engine->disable_lite_restore_wa || + (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE)) + execlists_context_unqueue(engine); } - spin_unlock(&ring->execlist_lock); + spin_unlock(&engine->execlist_lock); if (unlikely(submit_contexts > 2)) DRM_ERROR("More than two context complete events?\n"); - - ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES; - - /* Update the read pointer to the old write pointer. 
Manual ringbuffer - * management ftw </sarcasm> */ - I915_WRITE(RING_CONTEXT_STATUS_PTR(ring), - _MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, - ring->next_context_status_buffer << 8)); } -static int execlists_context_queue(struct drm_i915_gem_request *request) +static void execlists_context_queue(struct drm_i915_gem_request *request) { - struct intel_engine_cs *ring = request->ring; + struct intel_engine_cs *engine = request->engine; struct drm_i915_gem_request *cursor; int num_elements = 0; if (request->ctx != request->i915->kernel_context) - intel_lr_context_pin(request->ctx, ring); + intel_lr_context_pin(request->ctx, engine); i915_gem_request_reference(request); - spin_lock_irq(&ring->execlist_lock); + spin_lock_bh(&engine->execlist_lock); - list_for_each_entry(cursor, &ring->execlist_queue, execlist_link) + list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) if (++num_elements > 2) break; if (num_elements > 2) { struct drm_i915_gem_request *tail_req; - tail_req = list_last_entry(&ring->execlist_queue, + tail_req = list_last_entry(&engine->execlist_queue, struct drm_i915_gem_request, execlist_link); @@ -621,41 +634,39 @@ static int execlists_context_queue(struct drm_i915_gem_request *request) WARN(tail_req->elsp_submitted != 0, "More than 2 already-submitted reqs queued\n"); list_move_tail(&tail_req->execlist_link, - &ring->execlist_retired_req_list); + &engine->execlist_retired_req_list); } } - list_add_tail(&request->execlist_link, &ring->execlist_queue); + list_add_tail(&request->execlist_link, &engine->execlist_queue); if (num_elements == 0) - execlists_context_unqueue(ring); + execlists_context_unqueue(engine); - spin_unlock_irq(&ring->execlist_lock); - - return 0; + spin_unlock_bh(&engine->execlist_lock); } static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; uint32_t flush_domains; int ret; flush_domains = 0; - if (ring->gpu_caches_dirty) + if (engine->gpu_caches_dirty) flush_domains = I915_GEM_GPU_DOMAINS; - ret = ring->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); + ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); if (ret) return ret; - ring->gpu_caches_dirty = false; + engine->gpu_caches_dirty = false; return 0; } static int execlists_move_to_gpu(struct drm_i915_gem_request *req, struct list_head *vmas) { - const unsigned other_rings = ~intel_ring_flag(req->ring); + const unsigned other_rings = ~intel_engine_flag(req->engine); struct i915_vma *vma; uint32_t flush_domains = 0; bool flush_chipset = false; @@ -665,7 +676,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req, struct drm_i915_gem_object *obj = vma->obj; if (obj->active & other_rings) { - ret = i915_gem_object_sync(obj, req->ring, &req); + ret = i915_gem_object_sync(obj, req->engine, &req); if (ret) return ret; } @@ -689,7 +700,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request { int ret = 0; - request->ringbuf = request->ctx->engine[request->ring->id].ringbuf; + request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; if (i915.enable_guc_submission) { /* @@ -705,7 +716,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request } if (request->ctx != request->i915->kernel_context) - ret = intel_lr_context_pin(request->ctx, request->ring); + ret = intel_lr_context_pin(request->ctx, request->engine); return ret; } @@ -714,7 +725,7 @@ static int 
logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 				int bytes)
 {
 	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	struct intel_engine_cs *ring = req->ring;
+	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 	unsigned space;
 	int ret;
@@ -725,7 +736,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 	/* The whole point of reserving space is to not wait! */
 	WARN_ON(ringbuf->reserved_in_use);
 
-	list_for_each_entry(target, &ring->request_list, list) {
+	list_for_each_entry(target, &engine->request_list, list) {
 		/*
 		 * The request queue is per-engine, so can contain requests
 		 * from multiple ringbuffers. Here, we must ignore any that
@@ -741,7 +752,7 @@ static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
 			break;
 	}
 
-	if (WARN_ON(&target->list == &ring->request_list))
+	if (WARN_ON(&target->list == &engine->request_list))
 		return -ENOSPC;
 
 	ret = i915_wait_request(target);
@@ -766,7 +777,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct drm_i915_private *dev_priv = request->i915;
-	struct intel_engine_cs *engine = request->ring;
+	struct intel_engine_cs *engine = request->engine;
 
 	intel_logical_ring_advance(ringbuf);
 	request->tail = ringbuf->tail;
@@ -781,7 +792,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	intel_logical_ring_emit(ringbuf, MI_NOOP);
 	intel_logical_ring_advance(ringbuf);
 
-	if (intel_ring_stopped(engine))
+	if (intel_engine_stopped(engine))
 		return 0;
 
 	if (engine->last_context != request->ctx) {
@@ -841,11 +852,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
 	if (unlikely(total_bytes > remain_usable)) {
 		/*
 		 * The base request will fit but the reserved space
-		 * falls off the end. So only need to to wait for the
-		 * reserved size after flushing out the remainder.
+		 * falls off the end. So we don't need an immediate wrap;
+		 * we only need to wait until the reserved size is
+		 * available from the start of the ringbuffer.
 		 */
 		wait_bytes = remain_actual + ringbuf->reserved_size;
-		need_wrap = true;
 	} else if (total_bytes > ringbuf->space) {
 		/* No wrapping required, just waiting.
*/ wait_bytes = total_bytes; @@ -883,7 +894,7 @@ int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) int ret; WARN_ON(req == NULL); - dev_priv = req->ring->dev->dev_private; + dev_priv = req->i915; ret = i915_gem_check_wedge(&dev_priv->gpu_error, dev_priv->mm.interruptible); @@ -935,9 +946,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, struct list_head *vmas) { struct drm_device *dev = params->dev; - struct intel_engine_cs *ring = params->ring; + struct intel_engine_cs *engine = params->engine; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ringbuffer *ringbuf = params->ctx->engine[ring->id].ringbuf; + struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; u64 exec_start; int instp_mode; u32 instp_mask; @@ -949,7 +960,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, case I915_EXEC_CONSTANTS_REL_GENERAL: case I915_EXEC_CONSTANTS_ABSOLUTE: case I915_EXEC_CONSTANTS_REL_SURFACE: - if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { + if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) { DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); return -EINVAL; } @@ -978,7 +989,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, if (ret) return ret; - if (ring == &dev_priv->ring[RCS] && + if (engine == &dev_priv->engine[RCS] && instp_mode != dev_priv->relative_constants_mode) { ret = intel_logical_ring_begin(params->request, 4); if (ret) @@ -996,7 +1007,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, exec_start = params->batch_obj_vm_offset + args->batch_start_offset; - ret = ring->emit_bb_start(params->request, exec_start, params->dispatch_flags); + ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags); if (ret) return ret; @@ -1008,104 +1019,105 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, return 0; } -void intel_execlists_retire_requests(struct intel_engine_cs *ring) +void intel_execlists_retire_requests(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req, *tmp; struct list_head retired_list; - WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); - if (list_empty(&ring->execlist_retired_req_list)) + WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); + if (list_empty(&engine->execlist_retired_req_list)) return; INIT_LIST_HEAD(&retired_list); - spin_lock_irq(&ring->execlist_lock); - list_replace_init(&ring->execlist_retired_req_list, &retired_list); - spin_unlock_irq(&ring->execlist_lock); + spin_lock_bh(&engine->execlist_lock); + list_replace_init(&engine->execlist_retired_req_list, &retired_list); + spin_unlock_bh(&engine->execlist_lock); list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { struct intel_context *ctx = req->ctx; struct drm_i915_gem_object *ctx_obj = - ctx->engine[ring->id].state; + ctx->engine[engine->id].state; if (ctx_obj && (ctx != req->i915->kernel_context)) - intel_lr_context_unpin(ctx, ring); + intel_lr_context_unpin(ctx, engine); list_del(&req->execlist_link); i915_gem_request_unreference(req); } } -void intel_logical_ring_stop(struct intel_engine_cs *ring) +void intel_logical_ring_stop(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; int ret; - if (!intel_ring_initialized(ring)) + if (!intel_engine_initialized(engine)) return; - ret = intel_ring_idle(ring); - if (ret && 
!i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) + ret = intel_engine_idle(engine); + if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error)) DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", - ring->name, ret); + engine->name, ret); /* TODO: Is this correct with Execlists enabled? */ - I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { - DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); + I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); + if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + DRM_ERROR("%s :timed out trying to stop ring\n", engine->name); return; } - I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); + I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } int logical_ring_flush_all_caches(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; - if (!ring->gpu_caches_dirty) + if (!engine->gpu_caches_dirty) return 0; - ret = ring->emit_flush(req, 0, I915_GEM_GPU_DOMAINS); + ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS); if (ret) return ret; - ring->gpu_caches_dirty = false; + engine->gpu_caches_dirty = false; return 0; } static int intel_lr_context_do_pin(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state; - struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf; - struct page *lrc_state_page; - uint32_t *lrc_reg_state; + struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state; + struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf; + void *vaddr; + u32 *lrc_reg_state; int ret; - WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex)); + WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, PIN_OFFSET_BIAS | GUC_WOPCM_TOP); if (ret) return ret; - lrc_state_page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - if (WARN_ON(!lrc_state_page)) { - ret = -ENODEV; + vaddr = i915_gem_object_pin_map(ctx_obj); + if (IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); goto unpin_ctx_obj; } - ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf); + lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; + + ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); if (ret) - goto unpin_ctx_obj; + goto unpin_map; - ctx->engine[ring->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); - intel_lr_context_descriptor_update(ctx, ring); - lrc_reg_state = kmap(lrc_state_page); + ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); + intel_lr_context_descriptor_update(ctx, engine); lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; - ctx->engine[ring->id].lrc_reg_state = lrc_reg_state; + ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; ctx_obj->dirty = true; /* Invalidate GuC TLB. 
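 * (presumably needed because the context image and ringbuffer were
 * just (re)pinned into the GGTT, so GuC-side translations may be stale)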
*/ @@ -1114,6 +1126,8 @@ static int intel_lr_context_do_pin(struct intel_context *ctx, return ret; +unpin_map: + i915_gem_object_unpin_map(ctx_obj); unpin_ctx_obj: i915_gem_object_ggtt_unpin(ctx_obj); @@ -1146,7 +1160,7 @@ void intel_lr_context_unpin(struct intel_context *ctx, WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); if (--ctx->engine[engine->id].pin_count == 0) { - kunmap(kmap_to_page(ctx->engine[engine->id].lrc_reg_state)); + i915_gem_object_unpin_map(ctx_obj); intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf); i915_gem_object_ggtt_unpin(ctx_obj); ctx->engine[engine->id].lrc_vma = NULL; @@ -1160,16 +1174,16 @@ void intel_lr_context_unpin(struct intel_context *ctx, static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) { int ret, i; - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct intel_ringbuffer *ringbuf = req->ringbuf; - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct i915_workarounds *w = &dev_priv->workarounds; if (w->count == 0) return 0; - ring->gpu_caches_dirty = true; + engine->gpu_caches_dirty = true; ret = logical_ring_flush_all_caches(req); if (ret) return ret; @@ -1187,7 +1201,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) intel_logical_ring_advance(ringbuf); - ring->gpu_caches_dirty = true; + engine->gpu_caches_dirty = true; ret = logical_ring_flush_all_caches(req); if (ret) return ret; @@ -1223,7 +1237,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) * This WA is also required for Gen9 so extracting as a function avoids * code duplication. */ -static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, +static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, uint32_t *const batch, uint32_t index) { @@ -1235,13 +1249,13 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, * this batch updates GEN8_L3SQCREG4 with default value we need to * set this bit here to retain the WA during flush. */ - if (IS_SKL_REVID(ring->dev, 0, SKL_REVID_E0)) + if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0)) l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS; wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); - wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); + wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); wa_ctx_emit(batch, index, 0); wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); @@ -1259,7 +1273,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *ring, wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT)); wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4); - wa_ctx_emit(batch, index, ring->scratch.gtt_offset + 256); + wa_ctx_emit(batch, index, engine->scratch.gtt_offset + 256); wa_ctx_emit(batch, index, 0); return index; @@ -1312,7 +1326,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx, * Return: non-zero if we exceed the PAGE_SIZE limit. 
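The indirect-context workaround batch in the hunks above is built dword-by-dword into a single backing page via wa_ctx_emit(), and the builders fail once that page is exhausted. A standalone sketch of that contract, assuming invented names and treating the cacheline padding loosely:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define WA_PAGE_SIZE    4096u
        #define WA_MAX_DWORDS   (WA_PAGE_SIZE / sizeof(uint32_t))
        #define MI_NOOP         0u

        /* Append one dword; returns -1 once the single page is full,
         * mirroring the "non-zero if we exceed the PAGE_SIZE limit"
         * contract of the kernel's wa_ctx helpers. */
        static int wa_emit(uint32_t *batch, uint32_t *index, uint32_t dword)
        {
                if (*index >= WA_MAX_DWORDS)
                        return -1;
                batch[(*index)++] = dword;
                return 0;
        }

        int main(void)
        {
                uint32_t batch[WA_MAX_DWORDS];
                uint32_t index = 0;

                memset(batch, 0, sizeof(batch));
                wa_emit(batch, &index, 0x12345678);     /* stand-in for a real command */
                while (index % 16)                      /* pad to a 16-dword cacheline */
                        wa_emit(batch, &index, MI_NOOP);
                printf("batch ends at dword %u\n", index);
                return 0;
        }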
*/ -static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, +static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, uint32_t *const batch, uint32_t *offset) @@ -1324,8 +1338,8 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ - if (IS_BROADWELL(ring->dev)) { - int rc = gen8_emit_flush_coherentl3_wa(ring, batch, index); + if (IS_BROADWELL(engine->dev)) { + int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); if (rc < 0) return rc; index = rc; @@ -1333,7 +1347,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, /* WaClearSlmSpaceAtContextSwitch:bdw,chv */ /* Actual scratch location is at 128 bytes offset */ - scratch_addr = ring->scratch.gtt_offset + 2*CACHELINE_BYTES; + scratch_addr = engine->scratch.gtt_offset + 2*CACHELINE_BYTES; wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6)); wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 | @@ -1375,7 +1389,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *ring, * This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding * to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant. */ -static int gen8_init_perctx_bb(struct intel_engine_cs *ring, +static int gen8_init_perctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, uint32_t *const batch, uint32_t *offset) @@ -1390,13 +1404,13 @@ static int gen8_init_perctx_bb(struct intel_engine_cs *ring, return wa_ctx_end(wa_ctx, *offset = index, 1); } -static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, +static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, uint32_t *const batch, uint32_t *offset) { int ret; - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaDisableCtxRestoreArbitration:skl,bxt */ @@ -1405,7 +1419,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ - ret = gen8_emit_flush_coherentl3_wa(ring, batch, index); + ret = gen8_emit_flush_coherentl3_wa(engine, batch, index); if (ret < 0) return ret; index = ret; @@ -1417,12 +1431,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *ring, return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS); } -static int gen9_init_perctx_bb(struct intel_engine_cs *ring, +static int gen9_init_perctx_bb(struct intel_engine_cs *engine, struct i915_wa_ctx_bb *wa_ctx, uint32_t *const batch, uint32_t *offset) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ @@ -1435,6 +1449,25 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring, wa_ctx_emit(batch, index, MI_NOOP); } + /* WaClearTdlStateAckDirtyBits:bxt */ + if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { + wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); + + wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); + wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); + + wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1); + wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); + + wa_ctx_emit_reg(batch, index, 
GEN9_STATE_ACK_SLICE2); + wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS)); + + wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2); + /* dummy write to CS, mask bits are 0 to ensure the register is not modified */ + wa_ctx_emit(batch, index, 0x0); + wa_ctx_emit(batch, index, MI_NOOP); + } + /* WaDisableCtxRestoreArbitration:skl,bxt */ if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || IS_BXT_REVID(dev, 0, BXT_REVID_A1)) @@ -1445,60 +1478,61 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *ring, return wa_ctx_end(wa_ctx, *offset = index, 1); } -static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *ring, u32 size) +static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size) { int ret; - ring->wa_ctx.obj = i915_gem_alloc_object(ring->dev, PAGE_ALIGN(size)); - if (!ring->wa_ctx.obj) { + engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, + PAGE_ALIGN(size)); + if (!engine->wa_ctx.obj) { DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); return -ENOMEM; } - ret = i915_gem_obj_ggtt_pin(ring->wa_ctx.obj, PAGE_SIZE, 0); + ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); if (ret) { DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n", ret); - drm_gem_object_unreference(&ring->wa_ctx.obj->base); + drm_gem_object_unreference(&engine->wa_ctx.obj->base); return ret; } return 0; } -static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *ring) +static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine) { - if (ring->wa_ctx.obj) { - i915_gem_object_ggtt_unpin(ring->wa_ctx.obj); - drm_gem_object_unreference(&ring->wa_ctx.obj->base); - ring->wa_ctx.obj = NULL; + if (engine->wa_ctx.obj) { + i915_gem_object_ggtt_unpin(engine->wa_ctx.obj); + drm_gem_object_unreference(&engine->wa_ctx.obj->base); + engine->wa_ctx.obj = NULL; } } -static int intel_init_workaround_bb(struct intel_engine_cs *ring) +static int intel_init_workaround_bb(struct intel_engine_cs *engine) { int ret; uint32_t *batch; uint32_t offset; struct page *page; - struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; + struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; - WARN_ON(ring->id != RCS); + WARN_ON(engine->id != RCS); /* update this when WA for higher Gen are added */ - if (INTEL_INFO(ring->dev)->gen > 9) { + if (INTEL_INFO(engine->dev)->gen > 9) { DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", - INTEL_INFO(ring->dev)->gen); + INTEL_INFO(engine->dev)->gen); return 0; } /* some WA perform writes to scratch page, ensure it is valid */ - if (ring->scratch.obj == NULL) { - DRM_ERROR("scratch page not allocated for %s\n", ring->name); + if (engine->scratch.obj == NULL) { + DRM_ERROR("scratch page not allocated for %s\n", engine->name); return -EINVAL; } - ret = lrc_setup_wa_ctx_obj(ring, PAGE_SIZE); + ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE); if (ret) { DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret); return ret; @@ -1508,29 +1542,29 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring) batch = kmap_atomic(page); offset = 0; - if (INTEL_INFO(ring->dev)->gen == 8) { - ret = gen8_init_indirectctx_bb(ring, + if (INTEL_INFO(engine->dev)->gen == 8) { + ret = gen8_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, &offset); if (ret) goto out; - ret = gen8_init_perctx_bb(ring, + ret = gen8_init_perctx_bb(engine, &wa_ctx->per_ctx, batch, &offset); if (ret) goto out; - } else if (INTEL_INFO(ring->dev)->gen == 9) { - ret = gen9_init_indirectctx_bb(ring, + } else if (INTEL_INFO(engine->dev)->gen == 9) { + ret = 
gen9_init_indirectctx_bb(engine, &wa_ctx->indirect_ctx, batch, &offset); if (ret) goto out; - ret = gen9_init_perctx_bb(ring, + ret = gen9_init_perctx_bb(engine, &wa_ctx->per_ctx, batch, &offset); @@ -1541,27 +1575,36 @@ static int intel_init_workaround_bb(struct intel_engine_cs *ring) out: kunmap_atomic(batch); if (ret) - lrc_destroy_wa_ctx_obj(ring); + lrc_destroy_wa_ctx_obj(engine); return ret; } -static int gen8_init_common_ring(struct intel_engine_cs *ring) +static void lrc_init_hws(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_i915_private *dev_priv = engine->dev->dev_private; + + I915_WRITE(RING_HWS_PGA(engine->mmio_base), + (u32)engine->status_page.gfx_addr); + POSTING_READ(RING_HWS_PGA(engine->mmio_base)); +} + +static int gen8_init_common_ring(struct intel_engine_cs *engine) +{ + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - u8 next_context_status_buffer_hw; + unsigned int next_context_status_buffer_hw; - lrc_setup_hardware_status_page(ring, - dev_priv->kernel_context->engine[ring->id].state); + lrc_init_hws(engine); - I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); - I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff); + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff); - I915_WRITE(RING_MODE_GEN7(ring), + I915_WRITE(RING_MODE_GEN7(engine), _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) | _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)); - POSTING_READ(RING_MODE_GEN7(ring)); + POSTING_READ(RING_MODE_GEN7(engine)); /* * Instead of resetting the Context Status Buffer (CSB) read pointer to @@ -1576,7 +1619,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) * BXT | ? | ? 
| */ next_context_status_buffer_hw = - GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(ring))); + GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))); /* * When the CSB registers are reset (also after power-up / gpu reset), @@ -1586,21 +1629,21 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring) if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK) next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1); - ring->next_context_status_buffer = next_context_status_buffer_hw; - DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name); + engine->next_context_status_buffer = next_context_status_buffer_hw; + DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name); - memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); + intel_engine_init_hangcheck(engine); return 0; } -static int gen8_init_render_ring(struct intel_engine_cs *ring) +static int gen8_init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; int ret; - ret = gen8_init_common_ring(ring); + ret = gen8_init_common_ring(engine); if (ret) return ret; @@ -1614,24 +1657,24 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); - return init_workarounds_ring(ring); + return init_workarounds_ring(engine); } -static int gen9_init_render_ring(struct intel_engine_cs *ring) +static int gen9_init_render_ring(struct intel_engine_cs *engine) { int ret; - ret = gen8_init_common_ring(ring); + ret = gen8_init_common_ring(engine); if (ret) return ret; - return init_workarounds_ring(ring); + return init_workarounds_ring(engine); } static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) { struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt; - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; struct intel_ringbuffer *ringbuf = req->ringbuf; const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; int i, ret; @@ -1644,9 +1687,11 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req) for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_UDW(ring, i)); + intel_logical_ring_emit_reg(ringbuf, + GEN8_RING_PDP_UDW(engine, i)); intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr)); - intel_logical_ring_emit_reg(ringbuf, GEN8_RING_PDP_LDW(ring, i)); + intel_logical_ring_emit_reg(ringbuf, + GEN8_RING_PDP_LDW(engine, i)); intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr)); } @@ -1670,7 +1715,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, * not idle). 
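intel_logical_ring_emit_pdps() above loads all four legacy PDP register pairs (upper then lower 32 bits, highest index first) under a single MI_LOAD_REGISTER_IMM. A minimal userspace model of the emitted dword stream; the LRI encoding and PDP register offsets below are assumed for illustration only:

        #include <stdint.h>
        #include <stdio.h>

        #define GEN8_LEGACY_PDPES       4
        #define MI_LOAD_REGISTER_IMM(n) (0x22000000u | (2 * (n) - 1)) /* illustrative */

        int main(void)
        {
                /* Hypothetical page-directory bus addresses; the driver
                 * gets these from i915_page_dir_dma_addr(). */
                uint64_t pd_daddr[GEN8_LEGACY_PDPES] = {
                        0x100000, 0x200000, 0x300000, 0x400000,
                };
                uint32_t stream[2 + 4 * GEN8_LEGACY_PDPES];
                unsigned int n = 0;

                stream[n++] = MI_LOAD_REGISTER_IMM(2 * GEN8_LEGACY_PDPES);
                for (int i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
                        stream[n++] = 0x270 + i * 8 + 4;        /* PDP_UDW(i), offset assumed */
                        stream[n++] = (uint32_t)(pd_daddr[i] >> 32);
                        stream[n++] = 0x270 + i * 8;            /* PDP_LDW(i), offset assumed */
                        stream[n++] = (uint32_t)pd_daddr[i];
                }
                stream[n++] = MI_NOOP;

                printf("emitted %u dwords\n", n);
                return 0;
        }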
PML4 is allocated during ppgtt init so this is * not needed in 48-bit.*/ if (req->ctx->ppgtt && - (intel_ring_flag(req->ring) & req->ctx->ppgtt->pd_dirty_rings)) { + (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { if (!USES_FULL_48BIT_PPGTT(req->i915) && !intel_vgpu_active(req->i915->dev)) { ret = intel_logical_ring_emit_pdps(req); @@ -1678,7 +1723,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return ret; } - req->ctx->ppgtt->pd_dirty_rings &= ~intel_ring_flag(req->ring); + req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); } ret = intel_logical_ring_begin(req, 4); @@ -1698,9 +1743,9 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req, return 0; } -static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) +static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1708,25 +1753,26 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask)); - POSTING_READ(RING_IMR(ring->mmio_base)); + if (engine->irq_refcount++ == 0) { + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | engine->irq_keep_mask)); + POSTING_READ(RING_IMR(engine->mmio_base)); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; } -static void gen8_logical_ring_put_irq(struct intel_engine_cs *ring) +static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - I915_WRITE_IMR(ring, ~ring->irq_keep_mask); - POSTING_READ(RING_IMR(ring->mmio_base)); + if (--engine->irq_refcount == 0) { + I915_WRITE_IMR(engine, ~engine->irq_keep_mask); + POSTING_READ(RING_IMR(engine->mmio_base)); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1736,8 +1782,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 unused) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = ringbuf->engine; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t cmd; int ret; @@ -1757,7 +1803,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, if (invalidate_domains & I915_GEM_GPU_DOMAINS) { cmd |= MI_INVALIDATE_TLB; - if (ring == &dev_priv->ring[VCS]) + if (engine == &dev_priv->engine[VCS]) cmd |= MI_INVALIDATE_BSD; } @@ -1777,8 +1823,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, u32 flush_domains) { struct intel_ringbuffer *ringbuf = request->ringbuf; - struct intel_engine_cs *ring = ringbuf->ring; - u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_engine_cs *engine = ringbuf->engine; + u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; bool vf_flush_wa = false; u32 flags = 0; int ret; @@ -1806,7 +1852,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL * pipe control. 
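The get_irq/put_irq pair above is plain reference counting: the IMR register is only rewritten on the 0->1 and 1->0 transitions. A toy model of that pattern, with locking elided (the driver holds dev_priv->irq_lock around both paths):

        #include <stdio.h>

        struct engine_irq {
                unsigned int refcount;
                unsigned int imr;               /* stand-in for the IMR register */
                unsigned int enable_mask, keep_mask;
        };

        static void irq_get(struct engine_irq *e)
        {
                if (e->refcount++ == 0)
                        e->imr = ~(e->enable_mask | e->keep_mask); /* unmask user irq */
        }

        static void irq_put(struct engine_irq *e)
        {
                if (--e->refcount == 0)
                        e->imr = ~e->keep_mask; /* keep only context-switch irqs */
        }

        int main(void)
        {
                struct engine_irq e = { 0, ~0u, 1u << 0, 1u << 8 };

                irq_get(&e);
                irq_get(&e);    /* no register write: refcount 1 -> 2 */
                irq_put(&e);
                irq_put(&e);    /* masks the user interrupt again */
                printf("imr=%#x refcount=%u\n", e.imr, e.refcount);
                return 0;
        }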
*/ - if (IS_GEN9(ring->dev)) + if (IS_GEN9(engine->dev)) vf_flush_wa = true; } @@ -1834,19 +1880,18 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request, return 0; } -static u32 gen8_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) +static u32 gen8_get_seqno(struct intel_engine_cs *engine) { - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + return intel_read_status_page(engine, I915_GEM_HWS_INDEX); } -static void gen8_set_seqno(struct intel_engine_cs *ring, u32 seqno) +static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno) { - intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); } -static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) +static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) { - /* * On BXT A steppings there is a HW coherency issue whereby the * MI_STORE_DATA_IMM storing the completed request's seqno @@ -1857,19 +1902,15 @@ static u32 bxt_a_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) * bxt_a_set_seqno(), where we also do a clflush after the write. So * this clflush in practice becomes an invalidate operation. */ - - if (!lazy_coherency) - intel_flush_status_page(ring, I915_GEM_HWS_INDEX); - - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + intel_flush_status_page(engine, I915_GEM_HWS_INDEX); } -static void bxt_a_set_seqno(struct intel_engine_cs *ring, u32 seqno) +static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno) { - intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); /* See bxt_a_get_seqno() explaining the reason for the clflush. */ - intel_flush_status_page(ring, I915_GEM_HWS_INDEX); + intel_flush_status_page(engine, I915_GEM_HWS_INDEX); } /* @@ -1899,7 +1940,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request) intel_logical_ring_emit(ringbuf, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); intel_logical_ring_emit(ringbuf, - hws_seqno_address(request->ring) | + hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); @@ -1913,23 +1954,29 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request) struct intel_ringbuffer *ringbuf = request->ringbuf; int ret; - ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); + ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); if (ret) return ret; + /* We're using qword write, seqno should be aligned to 8 bytes. */ + BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1); + /* w/a for post sync ops following a GPGPU operation we * need a prior CS_STALL, which is emitted by the flush * following the batch. */ - intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(5)); + intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6)); intel_logical_ring_emit(ringbuf, (PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE)); - intel_logical_ring_emit(ringbuf, hws_seqno_address(request->ring)); + intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); + /* We're thrashing one dword of HWS. 
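The bxt_a change above replaces the lazy_coherency flag threaded through every get_seqno() implementation with an optional irq_seqno_barrier hook that callers run before an ordinary status-page read. A simplified model; the compiler/CPU barrier below is only a stand-in for what intel_flush_status_page() does:

        #include <stdint.h>
        #include <stdio.h>

        struct engine {
                volatile uint32_t hws_seqno;            /* status page slot */
                void (*irq_seqno_barrier)(struct engine *e);
        };

        static void bxt_a_barrier(struct engine *e)
        {
                /* stand-in for intel_flush_status_page(): force the CPU
                 * view of the status page to be refreshed before reading */
                __sync_synchronize();
        }

        static uint32_t read_seqno(struct engine *e, int coherent)
        {
                if (coherent && e->irq_seqno_barrier)
                        e->irq_seqno_barrier(e);
                return e->hws_seqno;
        }

        int main(void)
        {
                struct engine e = { 42, bxt_a_barrier };

                printf("seqno=%u\n", read_seqno(&e, 1));
                return 0;
        }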
*/ + intel_logical_ring_emit(ringbuf, 0); intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); + intel_logical_ring_emit(ringbuf, MI_NOOP); return intel_logical_ring_advance_and_submit(request); } @@ -1938,19 +1985,19 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req) struct render_state so; int ret; - ret = i915_gem_render_state_prepare(req->ring, &so); + ret = i915_gem_render_state_prepare(req->engine, &so); if (ret) return ret; if (so.rodata == NULL) return 0; - ret = req->ring->emit_bb_start(req, so.ggtt_offset, + ret = req->engine->emit_bb_start(req, so.ggtt_offset, I915_DISPATCH_SECURE); if (ret) goto out; - ret = req->ring->emit_bb_start(req, + ret = req->engine->emit_bb_start(req, (so.ggtt_offset + so.aux_batch_offset), I915_DISPATCH_SECURE); if (ret) @@ -1988,146 +2035,197 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req) * @ring: Engine Command Streamer. * */ -void intel_logical_ring_cleanup(struct intel_engine_cs *ring) +void intel_logical_ring_cleanup(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv; - if (!intel_ring_initialized(ring)) + if (!intel_engine_initialized(engine)) return; - dev_priv = ring->dev->dev_private; + /* + * Tasklet cannot be active at this point due intel_mark_active/idle + * so this is just for documentation. + */ + if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) + tasklet_kill(&engine->irq_tasklet); + + dev_priv = engine->dev->dev_private; - if (ring->buffer) { - intel_logical_ring_stop(ring); - WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); + if (engine->buffer) { + intel_logical_ring_stop(engine); + WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); } - if (ring->cleanup) - ring->cleanup(ring); + if (engine->cleanup) + engine->cleanup(engine); - i915_cmd_parser_fini_ring(ring); - i915_gem_batch_pool_fini(&ring->batch_pool); + i915_cmd_parser_fini_ring(engine); + i915_gem_batch_pool_fini(&engine->batch_pool); - if (ring->status_page.obj) { - kunmap(sg_page(ring->status_page.obj->pages->sgl)); - ring->status_page.obj = NULL; + if (engine->status_page.obj) { + i915_gem_object_unpin_map(engine->status_page.obj); + engine->status_page.obj = NULL; } - ring->disable_lite_restore_wa = false; - ring->ctx_desc_template = 0; + engine->idle_lite_restore_wa = 0; + engine->disable_lite_restore_wa = false; + engine->ctx_desc_template = 0; - lrc_destroy_wa_ctx_obj(ring); - ring->dev = NULL; + lrc_destroy_wa_ctx_obj(engine); + engine->dev = NULL; } static void logical_ring_default_vfuncs(struct drm_device *dev, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { /* Default vfuncs which can be overriden by each engine. 
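The new tasklet check in intel_logical_ring_cleanup() documents an invariant rather than enforcing one: mark_active/idle tracking should already guarantee the irq tasklet is unscheduled by this point, so the kill is defensive. A toy model of that pattern, with invented names:

        #include <stdbool.h>
        #include <stdio.h>

        struct engine {
                bool tasklet_scheduled;
        };

        static void tasklet_kill_model(struct engine *e)
        {
                e->tasklet_scheduled = false;
        }

        static void engine_cleanup(struct engine *e)
        {
                if (e->tasklet_scheduled) {     /* mirrors WARN_ON(test_bit(...)) */
                        fprintf(stderr, "tasklet still scheduled at cleanup!\n");
                        tasklet_kill_model(e);  /* "just for documentation" */
                }
                /* ... stop the ring, release the status page, etc. ... */
        }

        int main(void)
        {
                struct engine e = { false };

                engine_cleanup(&e);
                return 0;
        }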
*/ - ring->init_hw = gen8_init_common_ring; - ring->emit_request = gen8_emit_request; - ring->emit_flush = gen8_emit_flush; - ring->irq_get = gen8_logical_ring_get_irq; - ring->irq_put = gen8_logical_ring_put_irq; - ring->emit_bb_start = gen8_emit_bb_start; + engine->init_hw = gen8_init_common_ring; + engine->emit_request = gen8_emit_request; + engine->emit_flush = gen8_emit_flush; + engine->irq_get = gen8_logical_ring_get_irq; + engine->irq_put = gen8_logical_ring_put_irq; + engine->emit_bb_start = gen8_emit_bb_start; + engine->get_seqno = gen8_get_seqno; + engine->set_seqno = gen8_set_seqno; if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ring->get_seqno = bxt_a_get_seqno; - ring->set_seqno = bxt_a_set_seqno; - } else { - ring->get_seqno = gen8_get_seqno; - ring->set_seqno = gen8_set_seqno; + engine->irq_seqno_barrier = bxt_a_seqno_barrier; + engine->set_seqno = bxt_a_set_seqno; } } static inline void -logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift) +logical_ring_default_irqs(struct intel_engine_cs *engine, unsigned shift) +{ + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; + engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; +} + +static int +lrc_setup_hws(struct intel_engine_cs *engine, + struct drm_i915_gem_object *dctx_obj) { - ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; - ring->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; + void *hws; + + /* The HWSP is part of the default context object in LRC mode. */ + engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(dctx_obj) + + LRC_PPHWSP_PN * PAGE_SIZE; + hws = i915_gem_object_pin_map(dctx_obj); + if (IS_ERR(hws)) + return PTR_ERR(hws); + engine->status_page.page_addr = hws + LRC_PPHWSP_PN * PAGE_SIZE; + engine->status_page.obj = dctx_obj; + + return 0; } static int -logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring) +logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) { - struct intel_context *dctx = to_i915(dev)->kernel_context; + struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_context *dctx = dev_priv->kernel_context; + enum forcewake_domains fw_domains; int ret; /* Intentionally left blank. 
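lrc_setup_hws() above derives both views of the hardware status page from the default context object: a GGTT address for the hardware and a CPU pointer from the pin_map() mapping, each offset by LRC_PPHWSP_PN pages. A sketch of just that arithmetic with placeholder values; the page index and addresses are assumptions:

        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SIZE       4096u
        #define LRC_PPHWSP_PN   1u      /* assumed page index of the PPHWSP */

        int main(void)
        {
                /* Hypothetical GGTT offset and CPU mapping of the default
                 * context object; in the driver these come from
                 * i915_gem_obj_ggtt_offset() and i915_gem_object_pin_map(). */
                uint64_t dctx_ggtt = 0x00100000;
                uint8_t  dctx_map[4 * PAGE_SIZE];

                uint64_t gfx_addr  = dctx_ggtt + LRC_PPHWSP_PN * PAGE_SIZE;
                uint8_t *page_addr = dctx_map + LRC_PPHWSP_PN * PAGE_SIZE;

                printf("HWS gfx_addr=%#llx cpu=%p\n",
                       (unsigned long long)gfx_addr, (void *)page_addr);
                return 0;
        }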
*/ - ring->buffer = NULL; + engine->buffer = NULL; + + engine->dev = dev; + INIT_LIST_HEAD(&engine->active_list); + INIT_LIST_HEAD(&engine->request_list); + i915_gem_batch_pool_init(dev, &engine->batch_pool); + init_waitqueue_head(&engine->irq_queue); + + INIT_LIST_HEAD(&engine->buffers); + INIT_LIST_HEAD(&engine->execlist_queue); + INIT_LIST_HEAD(&engine->execlist_retired_req_list); + spin_lock_init(&engine->execlist_lock); + + tasklet_init(&engine->irq_tasklet, + intel_lrc_irq_handler, (unsigned long)engine); - ring->dev = dev; - INIT_LIST_HEAD(&ring->active_list); - INIT_LIST_HEAD(&ring->request_list); - i915_gem_batch_pool_init(dev, &ring->batch_pool); - init_waitqueue_head(&ring->irq_queue); + logical_ring_init_platform_invariants(engine); - INIT_LIST_HEAD(&ring->buffers); - INIT_LIST_HEAD(&ring->execlist_queue); - INIT_LIST_HEAD(&ring->execlist_retired_req_list); - spin_lock_init(&ring->execlist_lock); + fw_domains = intel_uncore_forcewake_for_reg(dev_priv, + RING_ELSP(engine), + FW_REG_WRITE); - logical_ring_init_platform_invariants(ring); + fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + RING_CONTEXT_STATUS_PTR(engine), + FW_REG_READ | FW_REG_WRITE); - ret = i915_cmd_parser_init_ring(ring); + fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, + RING_CONTEXT_STATUS_BUF_BASE(engine), + FW_REG_READ); + + engine->fw_domains = fw_domains; + + ret = i915_cmd_parser_init_ring(engine); if (ret) goto error; - ret = intel_lr_context_deferred_alloc(dctx, ring); + ret = intel_lr_context_deferred_alloc(dctx, engine); if (ret) goto error; /* As this is the default context, always pin it */ - ret = intel_lr_context_do_pin(dctx, ring); + ret = intel_lr_context_do_pin(dctx, engine); if (ret) { DRM_ERROR( "Failed to pin and map ringbuffer %s: %d\n", - ring->name, ret); + engine->name, ret); + goto error; + } + + /* And setup the hardware status page. */ + ret = lrc_setup_hws(engine, dctx->engine[engine->id].state); + if (ret) { + DRM_ERROR("Failed to set up hws %s: %d\n", engine->name, ret); goto error; } return 0; error: - intel_logical_ring_cleanup(ring); + intel_logical_ring_cleanup(engine); return ret; } static int logical_render_ring_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; - ring->name = "render ring"; - ring->id = RCS; - ring->exec_id = I915_EXEC_RENDER; - ring->guc_id = GUC_RENDER_ENGINE; - ring->mmio_base = RENDER_RING_BASE; + engine->name = "render ring"; + engine->id = RCS; + engine->exec_id = I915_EXEC_RENDER; + engine->guc_id = GUC_RENDER_ENGINE; + engine->mmio_base = RENDER_RING_BASE; - logical_ring_default_irqs(ring, GEN8_RCS_IRQ_SHIFT); + logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT); if (HAS_L3_DPF(dev)) - ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; + engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; - logical_ring_default_vfuncs(dev, ring); + logical_ring_default_vfuncs(dev, engine); /* Override some for render ring. 
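logical_ring_init() now precomputes engine->fw_domains by OR-ing together the forcewake domains covering the ELSP, CSB pointer and CSB buffer registers, so the submission path need not look them up per request. A toy model; the domain bits, offsets and lookup below are invented (the real code consults per-platform range tables via intel_uncore_forcewake_for_reg()):

        #include <stdio.h>

        enum fw_domain { FW_RENDER = 1 << 0, FW_BLITTER = 1 << 1, FW_MEDIA = 1 << 2 };

        static unsigned int forcewake_for_reg(unsigned int mmio_offset)
        {
                /* crude stand-in for the per-platform range tables */
                return mmio_offset < 0x8000 ? FW_RENDER : FW_MEDIA;
        }

        int main(void)
        {
                unsigned int elsp = 0x2230, csb_ptr = 0x23a0, csb_buf = 0x2370;
                unsigned int fw = 0;

                fw |= forcewake_for_reg(elsp);          /* written at submission */
                fw |= forcewake_for_reg(csb_ptr);       /* read and written */
                fw |= forcewake_for_reg(csb_buf);       /* read in the irq tasklet */

                printf("engine->fw_domains = %#x\n", fw);
                return 0;
        }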
*/ if (INTEL_INFO(dev)->gen >= 9) - ring->init_hw = gen9_init_render_ring; + engine->init_hw = gen9_init_render_ring; else - ring->init_hw = gen8_init_render_ring; - ring->init_context = gen8_init_rcs_context; - ring->cleanup = intel_fini_pipe_control; - ring->emit_flush = gen8_emit_flush_render; - ring->emit_request = gen8_emit_request_render; + engine->init_hw = gen8_init_render_ring; + engine->init_context = gen8_init_rcs_context; + engine->cleanup = intel_fini_pipe_control; + engine->emit_flush = gen8_emit_flush_render; + engine->emit_request = gen8_emit_request_render; - ring->dev = dev; + engine->dev = dev; - ret = intel_init_pipe_control(ring); + ret = intel_init_pipe_control(engine); if (ret) return ret; - ret = intel_init_workaround_bb(ring); + ret = intel_init_workaround_bb(engine); if (ret) { /* * We continue even if we fail to initialize WA batch @@ -2138,9 +2236,9 @@ static int logical_render_ring_init(struct drm_device *dev) ret); } - ret = logical_ring_init(dev, ring); + ret = logical_ring_init(dev, engine); if (ret) { - lrc_destroy_wa_ctx_obj(ring); + lrc_destroy_wa_ctx_obj(engine); } return ret; @@ -2149,69 +2247,69 @@ static int logical_render_ring_init(struct drm_device *dev) static int logical_bsd_ring_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[VCS]; + struct intel_engine_cs *engine = &dev_priv->engine[VCS]; - ring->name = "bsd ring"; - ring->id = VCS; - ring->exec_id = I915_EXEC_BSD; - ring->guc_id = GUC_VIDEO_ENGINE; - ring->mmio_base = GEN6_BSD_RING_BASE; + engine->name = "bsd ring"; + engine->id = VCS; + engine->exec_id = I915_EXEC_BSD; + engine->guc_id = GUC_VIDEO_ENGINE; + engine->mmio_base = GEN6_BSD_RING_BASE; - logical_ring_default_irqs(ring, GEN8_VCS1_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, ring); + logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, engine); - return logical_ring_init(dev, ring); + return logical_ring_init(dev, engine); } static int logical_bsd2_ring_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; + struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; - ring->name = "bsd2 ring"; - ring->id = VCS2; - ring->exec_id = I915_EXEC_BSD; - ring->guc_id = GUC_VIDEO_ENGINE2; - ring->mmio_base = GEN8_BSD2_RING_BASE; + engine->name = "bsd2 ring"; + engine->id = VCS2; + engine->exec_id = I915_EXEC_BSD; + engine->guc_id = GUC_VIDEO_ENGINE2; + engine->mmio_base = GEN8_BSD2_RING_BASE; - logical_ring_default_irqs(ring, GEN8_VCS2_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, ring); + logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, engine); - return logical_ring_init(dev, ring); + return logical_ring_init(dev, engine); } static int logical_blt_ring_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[BCS]; + struct intel_engine_cs *engine = &dev_priv->engine[BCS]; - ring->name = "blitter ring"; - ring->id = BCS; - ring->exec_id = I915_EXEC_BLT; - ring->guc_id = GUC_BLITTER_ENGINE; - ring->mmio_base = BLT_RING_BASE; + engine->name = "blitter ring"; + engine->id = BCS; + engine->exec_id = I915_EXEC_BLT; + engine->guc_id = GUC_BLITTER_ENGINE; + engine->mmio_base = BLT_RING_BASE; - logical_ring_default_irqs(ring, GEN8_BCS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, ring); + logical_ring_default_irqs(engine, 
GEN8_BCS_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, engine); - return logical_ring_init(dev, ring); + return logical_ring_init(dev, engine); } static int logical_vebox_ring_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[VECS]; + struct intel_engine_cs *engine = &dev_priv->engine[VECS]; - ring->name = "video enhancement ring"; - ring->id = VECS; - ring->exec_id = I915_EXEC_VEBOX; - ring->guc_id = GUC_VIDEOENHANCE_ENGINE; - ring->mmio_base = VEBOX_RING_BASE; + engine->name = "video enhancement ring"; + engine->id = VECS; + engine->exec_id = I915_EXEC_VEBOX; + engine->guc_id = GUC_VIDEOENHANCE_ENGINE; + engine->mmio_base = VEBOX_RING_BASE; - logical_ring_default_irqs(ring, GEN8_VECS_IRQ_SHIFT); - logical_ring_default_vfuncs(dev, ring); + logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); + logical_ring_default_vfuncs(dev, engine); - return logical_ring_init(dev, ring); + return logical_ring_init(dev, engine); } /** @@ -2219,7 +2317,7 @@ static int logical_vebox_ring_init(struct drm_device *dev) * @dev: DRM device. * * This function inits the engines for an Execlists submission style (the equivalent in the - * legacy ringbuffer submission world would be i915_gem_init_rings). It does it only for + * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for * those engines that are present in the hardware. * * Return: non-zero if the initialization failed. @@ -2260,13 +2358,13 @@ int intel_logical_rings_init(struct drm_device *dev) return 0; cleanup_vebox_ring: - intel_logical_ring_cleanup(&dev_priv->ring[VECS]); + intel_logical_ring_cleanup(&dev_priv->engine[VECS]); cleanup_blt_ring: - intel_logical_ring_cleanup(&dev_priv->ring[BCS]); + intel_logical_ring_cleanup(&dev_priv->engine[BCS]); cleanup_bsd_ring: - intel_logical_ring_cleanup(&dev_priv->ring[VCS]); + intel_logical_ring_cleanup(&dev_priv->engine[VCS]); cleanup_render_ring: - intel_logical_ring_cleanup(&dev_priv->ring[RCS]); + intel_logical_ring_cleanup(&dev_priv->engine[RCS]); return ret; } @@ -2314,13 +2412,13 @@ make_rpcs(struct drm_device *dev) return rpcs; } -static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring) +static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) { u32 indirect_ctx_offset; - switch (INTEL_INFO(ring->dev)->gen) { + switch (INTEL_INFO(engine->dev)->gen) { default: - MISSING_CASE(INTEL_INFO(ring->dev)->gen); + MISSING_CASE(INTEL_INFO(engine->dev)->gen); /* fall through */ case 9: indirect_ctx_offset = @@ -2336,14 +2434,16 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *ring) } static int -populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj, - struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf) +populate_lr_context(struct intel_context *ctx, + struct drm_i915_gem_object *ctx_obj, + struct intel_engine_cs *engine, + struct intel_ringbuffer *ringbuf) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; - struct page *page; - uint32_t *reg_state; + void *vaddr; + u32 *reg_state; int ret; if (!ppgtt) @@ -2355,18 +2455,17 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o return ret; } - ret = i915_gem_object_get_pages(ctx_obj); - if (ret) { - DRM_DEBUG_DRIVER("Could not get object pages\n"); + vaddr = i915_gem_object_pin_map(ctx_obj); + if 
(IS_ERR(vaddr)) { + ret = PTR_ERR(vaddr); + DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret); return ret; } - - i915_gem_object_pin_pages(ctx_obj); + ctx_obj->dirty = true; /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ - page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - reg_state = kmap_atomic(page); + reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM * commands followed by (reg, value) pairs. The values we are setting here are @@ -2374,33 +2473,47 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o * recreate this batchbuffer with new values (including all the missing * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */ reg_state[CTX_LRI_HEADER_0] = - MI_LOAD_REGISTER_IMM(ring->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; - ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(ring), + MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED; + ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL, + RING_CONTEXT_CONTROL(engine), _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | (HAS_RESOURCE_STREAMER(dev) ? CTX_CTRL_RS_CTX_ENABLE : 0))); - ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), + 0); + ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base), + 0); /* Ring buffer start address is not known until the buffer is pinned. * It is written to the context image in execlists_update_context() */ - ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, RING_START(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, RING_CTL(ring->mmio_base), + ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START, + RING_START(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL, + RING_CTL(engine->mmio_base), ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); - ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, RING_BBADDR_UDW(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, RING_BBADDR(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, RING_BBSTATE(ring->mmio_base), + ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U, + RING_BBADDR_UDW(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L, + RING_BBADDR(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_BB_STATE, + RING_BBSTATE(engine->mmio_base), RING_BB_PPGTT); - ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, RING_SBBSTATE(ring->mmio_base), 0); - if (ring->id == RCS) { - ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(ring->mmio_base), 0); - ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, RING_INDIRECT_CTX_OFFSET(ring->mmio_base), 0); - if (ring->wa_ctx.obj) { - struct i915_ctx_workarounds *wa_ctx = &ring->wa_ctx; + ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U, + RING_SBBADDR_UDW(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L, + RING_SBBADDR(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE, + 
RING_SBBSTATE(engine->mmio_base), 0); + if (engine->id == RCS) { + ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR, + RING_BB_PER_CTX_PTR(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX, + RING_INDIRECT_CTX(engine->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET, + RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0); + if (engine->wa_ctx.obj) { + struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; uint32_t ggtt_offset = i915_gem_obj_ggtt_offset(wa_ctx->obj); reg_state[CTX_RCS_INDIRECT_CTX+1] = @@ -2408,7 +2521,7 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS); reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] = - intel_lr_indirect_ctx_offset(ring) << 6; + intel_lr_indirect_ctx_offset(engine) << 6; reg_state[CTX_BB_PER_CTX_PTR+1] = (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) | @@ -2416,16 +2529,25 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o } } reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; - ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(ring->mmio_base), 0); + ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP, + RING_CTX_TIMESTAMP(engine->mmio_base), 0); /* PDP values well be assigned later if needed */ - ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(ring, 3), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(ring, 3), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(ring, 2), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(ring, 2), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(ring, 1), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(ring, 1), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(ring, 0), 0); - ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(ring, 0), 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), + 0); + ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), + 0); if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) { /* 64b PPGTT (48bit canonical) @@ -2439,20 +2561,16 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o * With dynamic page allocation, PDPs may not be allocated at * this point. 
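execlists_update_context_pdps() replaces the four open-coded ASSIGN_CTX_PDP() calls below: each PDP slot takes the page directory's DMA address if one exists, else the scratch page. A minimal model of that fallback; the addresses and (reg, value)-pair layout are invented for illustration:

        #include <stdint.h>
        #include <stdio.h>

        #define GEN8_LEGACY_PDPES 4

        int main(void)
        {
                uint64_t scratch = 0xdead000;
                uint64_t pd_addr[GEN8_LEGACY_PDPES] = { 0x100000, 0, 0x300000, 0 };
                uint64_t reg_state[2 * GEN8_LEGACY_PDPES];      /* UDW/LDW pairs */

                for (int i = 0; i < GEN8_LEGACY_PDPES; i++) {
                        /* unallocated PDP -> point at the scratch page */
                        uint64_t daddr = pd_addr[i] ? pd_addr[i] : scratch;

                        reg_state[2 * i]     = daddr >> 32;        /* PDPn_UDW value */
                        reg_state[2 * i + 1] = daddr & 0xffffffff; /* PDPn_LDW value */
                }

                for (int i = 0; i < GEN8_LEGACY_PDPES; i++)
                        printf("PDP%d = %#llx\n", i,
                               (unsigned long long)((reg_state[2 * i] << 32) |
                                                    reg_state[2 * i + 1]));
                return 0;
        }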
Point the unallocated PDPs to the scratch page */ - ASSIGN_CTX_PDP(ppgtt, reg_state, 3); - ASSIGN_CTX_PDP(ppgtt, reg_state, 2); - ASSIGN_CTX_PDP(ppgtt, reg_state, 1); - ASSIGN_CTX_PDP(ppgtt, reg_state, 0); + execlists_update_context_pdps(ppgtt, reg_state); } - if (ring->id == RCS) { + if (engine->id == RCS) { reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, make_rpcs(dev)); } - kunmap_atomic(reg_state); - i915_gem_object_unpin_pages(ctx_obj); + i915_gem_object_unpin_map(ctx_obj); return 0; } @@ -2469,7 +2587,7 @@ void intel_lr_context_free(struct intel_context *ctx) { int i; - for (i = I915_NUM_RINGS; --i >= 0; ) { + for (i = I915_NUM_ENGINES; --i >= 0; ) { struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf; struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; @@ -2479,6 +2597,7 @@ void intel_lr_context_free(struct intel_context *ctx) if (ctx == ctx->i915->kernel_context) { intel_unpin_ringbuffer_obj(ringbuf); i915_gem_object_ggtt_unpin(ctx_obj); + i915_gem_object_unpin_map(ctx_obj); } WARN_ON(ctx->engine[i].pin_count); @@ -2501,15 +2620,15 @@ void intel_lr_context_free(struct intel_context *ctx) * in LRC mode, but does not include the "shared data page" used with * GuC submission. The caller should account for this if using the GuC. */ -uint32_t intel_lr_context_size(struct intel_engine_cs *ring) +uint32_t intel_lr_context_size(struct intel_engine_cs *engine) { int ret = 0; - WARN_ON(INTEL_INFO(ring->dev)->gen < 8); + WARN_ON(INTEL_INFO(engine->dev)->gen < 8); - switch (ring->id) { + switch (engine->id) { case RCS: - if (INTEL_INFO(ring->dev)->gen >= 9) + if (INTEL_INFO(engine->dev)->gen >= 9) ret = GEN9_LR_CONTEXT_RENDER_SIZE; else ret = GEN8_LR_CONTEXT_RENDER_SIZE; @@ -2525,24 +2644,6 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *ring) return ret; } -static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, - struct drm_i915_gem_object *default_ctx_obj) -{ - struct drm_i915_private *dev_priv = ring->dev->dev_private; - struct page *page; - - /* The HWSP is part of the default context object in LRC mode. */ - ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj) - + LRC_PPHWSP_PN * PAGE_SIZE; - page = i915_gem_object_get_page(default_ctx_obj, LRC_PPHWSP_PN); - ring->status_page.page_addr = kmap(page); - ring->status_page.obj = default_ctx_obj; - - I915_WRITE(RING_HWS_PGA(ring->mmio_base), - (u32)ring->status_page.gfx_addr); - POSTING_READ(RING_HWS_PGA(ring->mmio_base)); -} - /** * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context * @ctx: LR context to create. 
@@ -2558,18 +2659,18 @@ static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring, */ int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_gem_object *ctx_obj; uint32_t context_size; struct intel_ringbuffer *ringbuf; int ret; WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); - WARN_ON(ctx->engine[ring->id].state); + WARN_ON(ctx->engine[engine->id].state); - context_size = round_up(intel_lr_context_size(ring), 4096); + context_size = round_up(intel_lr_context_size(engine), 4096); /* One extra page as the sharing data between driver and GuC */ context_size += PAGE_SIZE * LRC_PPHWSP_PN; @@ -2580,32 +2681,32 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx, return -ENOMEM; } - ringbuf = intel_engine_create_ringbuffer(ring, 4 * PAGE_SIZE); + ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); if (IS_ERR(ringbuf)) { ret = PTR_ERR(ringbuf); goto error_deref_obj; } - ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); + ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf); if (ret) { DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); goto error_ringbuf; } - ctx->engine[ring->id].ringbuf = ringbuf; - ctx->engine[ring->id].state = ctx_obj; + ctx->engine[engine->id].ringbuf = ringbuf; + ctx->engine[engine->id].state = ctx_obj; - if (ctx != ctx->i915->kernel_context && ring->init_context) { + if (ctx != ctx->i915->kernel_context && engine->init_context) { struct drm_i915_gem_request *req; - req = i915_gem_request_alloc(ring, ctx); + req = i915_gem_request_alloc(engine, ctx); if (IS_ERR(req)) { ret = PTR_ERR(req); DRM_ERROR("ring create req: %d\n", ret); goto error_ringbuf; } - ret = ring->init_context(req); + ret = engine->init_context(req); if (ret) { DRM_ERROR("ring init context: %d\n", ret); @@ -2620,40 +2721,38 @@ error_ringbuf: intel_ringbuffer_free(ringbuf); error_deref_obj: drm_gem_object_unreference(&ctx_obj->base); - ctx->engine[ring->id].ringbuf = NULL; - ctx->engine[ring->id].state = NULL; + ctx->engine[engine->id].ringbuf = NULL; + ctx->engine[engine->id].state = NULL; return ret; } -void intel_lr_context_reset(struct drm_device *dev, - struct intel_context *ctx) +void intel_lr_context_reset(struct drm_i915_private *dev_priv, + struct intel_context *ctx) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; - int i; + struct intel_engine_cs *engine; - for_each_ring(ring, dev_priv, i) { + for_each_engine(engine, dev_priv) { struct drm_i915_gem_object *ctx_obj = - ctx->engine[ring->id].state; + ctx->engine[engine->id].state; struct intel_ringbuffer *ringbuf = - ctx->engine[ring->id].ringbuf; + ctx->engine[engine->id].ringbuf; + void *vaddr; uint32_t *reg_state; - struct page *page; if (!ctx_obj) continue; - if (i915_gem_object_get_pages(ctx_obj)) { - WARN(1, "Failed get_pages for context obj\n"); + vaddr = i915_gem_object_pin_map(ctx_obj); + if (WARN_ON(IS_ERR(vaddr))) continue; - } - page = i915_gem_object_get_dirty_page(ctx_obj, LRC_STATE_PN); - reg_state = kmap_atomic(page); + + reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; + ctx_obj->dirty = true; reg_state[CTX_RING_HEAD+1] = 0; reg_state[CTX_RING_TAIL+1] = 0; - kunmap_atomic(reg_state); + i915_gem_object_unpin_map(ctx_obj); ringbuf->head = 0; ringbuf->tail = 0; diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h index e6cda3e225d0..9affda2c650c 100644 
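The intel_lr_context_reset() rework above takes dev_priv directly and iterates with for_each_engine(), pin-mapping each context image to zero CTX_RING_HEAD/CTX_RING_TAIL alongside the software ringbuffer pointers. A toy model of that reset walk; the dword indices and engine count are assumed:

        #include <stdint.h>
        #include <stdio.h>

        #define CTX_RING_HEAD   4       /* assumed dword indices into the image */
        #define CTX_RING_TAIL   6

        struct engine_ctx {
                uint32_t reg_state[64]; /* stand-in for the pinned ctx page */
                uint32_t ring_head, ring_tail;
        };

        static void lr_context_reset(struct engine_ctx *ce)
        {
                ce->reg_state[CTX_RING_HEAD + 1] = 0;   /* (reg, value) pairs */
                ce->reg_state[CTX_RING_TAIL + 1] = 0;
                ce->ring_head = ce->ring_tail = 0;
        }

        int main(void)
        {
                struct engine_ctx engines[4] = { { .ring_tail = 0x80 } };

                for (int i = 0; i < 4; i++)     /* mirrors for_each_engine() */
                        lr_context_reset(&engines[i]);
                printf("tail=%u\n", engines[0].ring_tail);
                return 0;
        }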
--- a/drivers/gpu/drm/i915/intel_lrc.h +++ b/drivers/gpu/drm/i915/intel_lrc.h @@ -34,6 +34,7 @@ #define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) +#define RING_CONTEXT_STATUS_BUF_BASE(ring) _MMIO((ring)->mmio_base + 0x370) #define RING_CONTEXT_STATUS_BUF_LO(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8) #define RING_CONTEXT_STATUS_BUF_HI(ring, i) _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4) #define RING_CONTEXT_STATUS_PTR(ring) _MMIO((ring)->mmio_base + 0x3a0) @@ -57,8 +58,8 @@ /* Logical Rings */ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); -void intel_logical_ring_stop(struct intel_engine_cs *ring); -void intel_logical_ring_cleanup(struct intel_engine_cs *ring); +void intel_logical_ring_stop(struct intel_engine_cs *engine); +void intel_logical_ring_cleanup(struct intel_engine_cs *engine); int intel_logical_rings_init(struct drm_device *dev); int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); @@ -98,18 +99,21 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf, #define LRC_STATE_PN (LRC_PPHWSP_PN + 1) void intel_lr_context_free(struct intel_context *ctx); -uint32_t intel_lr_context_size(struct intel_engine_cs *ring); +uint32_t intel_lr_context_size(struct intel_engine_cs *engine); int intel_lr_context_deferred_alloc(struct intel_context *ctx, - struct intel_engine_cs *ring); + struct intel_engine_cs *engine); void intel_lr_context_unpin(struct intel_context *ctx, struct intel_engine_cs *engine); -void intel_lr_context_reset(struct drm_device *dev, - struct intel_context *ctx); + +struct drm_i915_private; + +void intel_lr_context_reset(struct drm_i915_private *dev_priv, + struct intel_context *ctx); uint64_t intel_lr_context_descriptor(struct intel_context *ctx, - struct intel_engine_cs *ring); + struct intel_engine_cs *engine); u32 intel_execlists_ctx_id(struct intel_context *ctx, - struct intel_engine_cs *ring); + struct intel_engine_cs *engine); /* Execlists */ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); @@ -118,7 +122,6 @@ int intel_execlists_submission(struct i915_execbuffer_params *params, struct drm_i915_gem_execbuffer2 *args, struct list_head *vmas); -void intel_lrc_irq_handler(struct intel_engine_cs *ring); -void intel_execlists_retire_requests(struct intel_engine_cs *ring); +void intel_execlists_retire_requests(struct intel_engine_cs *engine); #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 30a8403a8f4f..66e832beeb37 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -109,7 +109,6 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); u32 tmp, flags = 0; - int dotclock; tmp = I915_READ(lvds_encoder->reg); if (tmp & LVDS_HSYNC_POLARITY) @@ -130,12 +129,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder, pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE; } - dotclock = pipe_config->port_clock; - - if (HAS_PCH_SPLIT(dev_priv->dev)) - ironlake_check_encoder_dotclock(pipe_config, dotclock); - - pipe_config->base.adjusted_mode.crtc_clock = dotclock; + pipe_config->base.adjusted_mode.crtc_clock = 
pipe_config->port_clock; } static void intel_pre_enable_lvds(struct intel_encoder *encoder) @@ -151,7 +145,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder) if (HAS_PCH_SPLIT(dev)) { assert_fdi_rx_pll_disabled(dev_priv, pipe); assert_shared_dpll_disabled(dev_priv, - intel_crtc_to_shared_dpll(crtc)); + crtc->config->shared_dpll); } else { assert_pll_disabled(dev_priv, pipe); } @@ -478,11 +472,8 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val, * and as part of the cleanup in the hw state restore we also redisable * the vga plane. */ - if (!HAS_PCH_SPLIT(dev)) { - drm_modeset_lock_all(dev); + if (!HAS_PCH_SPLIT(dev)) intel_display_resume(dev); - drm_modeset_unlock_all(dev); - } dev_priv->modeset_restore = MODESET_DONE; @@ -781,57 +772,6 @@ static const struct dmi_system_id intel_no_lvds[] = { { } /* terminating entry */ }; -/* - * Enumerate the child dev array parsed from VBT to check whether - * the LVDS is present. - * If it is present, return 1. - * If it is not present, return false. - * If no child dev is parsed from VBT, it assumes that the LVDS is present. - */ -static bool lvds_is_present_in_vbt(struct drm_device *dev, - u8 *i2c_pin) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - - if (!dev_priv->vbt.child_dev_num) - return true; - - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - union child_device_config *uchild = dev_priv->vbt.child_dev + i; - struct old_child_dev_config *child = &uchild->old; - - /* If the device type is not LFP, continue. - * We have to check both the new identifiers as well as the - * old for compatibility with some BIOSes. - */ - if (child->device_type != DEVICE_TYPE_INT_LFP && - child->device_type != DEVICE_TYPE_LFP) - continue; - - if (intel_gmbus_is_valid_pin(dev_priv, child->i2c_pin)) - *i2c_pin = child->i2c_pin; - - /* However, we cannot trust the BIOS writers to populate - * the VBT correctly. Since LVDS requires additional - * information from AIM blocks, a non-zero addin offset is - * a good indicator that the LVDS is actually present. - */ - if (child->addin_offset) - return true; - - /* But even then some BIOS writers perform some black magic - * and instantiate the device without reference to any - * additional data. Trust that if the VBT was written into - * the OpRegion then they have validated the LVDS's existence. 
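The lvds_is_present_in_vbt() logic removed below survives centralized as intel_bios_is_lvds_present(): trust an empty child-device list, otherwise require an LFP-typed child with a non-zero AIM addin offset, or fall back to trusting an OpRegion-validated VBT. A compact model of that heuristic; the type codes are illustrative:

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define DEVICE_TYPE_INT_LFP     0x1022  /* illustrative values only */
        #define DEVICE_TYPE_LFP         0x1e

        struct child_dev {
                uint16_t device_type;
                uint8_t  i2c_pin;
                uint16_t addin_offset;
        };

        static bool lvds_present(const struct child_dev *devs, int n,
                                 bool opregion_vbt, uint8_t *pin)
        {
                if (n == 0)             /* no child devs parsed: assume present */
                        return true;

                for (int i = 0; i < n; i++) {
                        if (devs[i].device_type != DEVICE_TYPE_INT_LFP &&
                            devs[i].device_type != DEVICE_TYPE_LFP)
                                continue;
                        *pin = devs[i].i2c_pin;
                        /* non-zero AIM offset, or a VBT from the OpRegion,
                         * is taken as proof the panel really exists */
                        if (devs[i].addin_offset || opregion_vbt)
                                return true;
                }
                return false;
        }

        int main(void)
        {
                struct child_dev devs[] = { { DEVICE_TYPE_LFP, 3, 0x40 } };
                uint8_t pin = 0;

                printf("lvds=%d pin=%u\n",
                       lvds_present(devs, 1, false, &pin), pin);
                return 0;
        }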
- */ - if (dev_priv->opregion.vbt) - return true; - } - - return false; -} - static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) { DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); @@ -981,14 +921,14 @@ void intel_lvds_init(struct drm_device *dev) if (HAS_PCH_SPLIT(dev)) { if ((lvds & LVDS_DETECTED) == 0) return; - if (dev_priv->vbt.edp_support) { + if (dev_priv->vbt.edp.support) { DRM_DEBUG_KMS("disable LVDS for eDP support\n"); return; } } pin = GMBUS_PIN_PANEL; - if (!lvds_is_present_in_vbt(dev, &pin)) { + if (!intel_bios_is_lvds_present(dev_priv, &pin)) { if ((lvds & LVDS_PORT_EN) == 0) { DRM_DEBUG_KMS("LVDS is not present in VBT\n"); return; diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index fed7bea19cc9..7c7ac0aa192a 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c @@ -159,7 +159,7 @@ static bool get_mocs_settings(struct drm_device *dev, return result; } -static i915_reg_t mocs_register(enum intel_ring_id ring, int index) +static i915_reg_t mocs_register(enum intel_engine_id ring, int index) { switch (ring) { case RCS: @@ -191,7 +191,7 @@ static i915_reg_t mocs_register(enum intel_ring_id ring, int index) */ static int emit_mocs_control_table(struct drm_i915_gem_request *req, const struct drm_i915_mocs_table *table, - enum intel_ring_id ring) + enum intel_engine_id ring) { struct intel_ringbuffer *ringbuf = req->ringbuf; unsigned int index; @@ -322,14 +322,14 @@ int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req) struct drm_i915_mocs_table t; int ret; - if (get_mocs_settings(req->ring->dev, &t)) { + if (get_mocs_settings(req->engine->dev, &t)) { struct drm_i915_private *dev_priv = req->i915; - struct intel_engine_cs *ring; - enum intel_ring_id ring_id; + struct intel_engine_cs *engine; + enum intel_engine_id id; /* Program the control registers */ - for_each_ring(ring, dev_priv, ring_id) { - ret = emit_mocs_control_table(req, &t, ring_id); + for_each_engine_id(engine, dev_priv, id) { + ret = emit_mocs_control_table(req, &t, id); if (ret) return ret; } diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index c15718b4862a..2ae0314f2cf6 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c @@ -246,7 +246,6 @@ struct opregion_asle_ext { #define MAX_DSLP 1500 -#ifdef CONFIG_ACPI static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -905,9 +904,6 @@ static void swsci_setup(struct drm_device *dev) opregion->swsci_gbda_sub_functions, opregion->swsci_sbcb_sub_functions); } -#else /* CONFIG_ACPI */ -static inline void swsci_setup(struct drm_device *dev) {} -#endif /* CONFIG_ACPI */ static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) { @@ -950,9 +946,7 @@ int intel_opregion_setup(struct drm_device *dev) return -ENOTSUPP; } -#ifdef CONFIG_ACPI INIT_WORK(&opregion->asle_work, asle_work); -#endif base = memremap(asls, OPREGION_SIZE, MEMREMAP_WB); if (!base) @@ -1024,3 +1018,31 @@ err_out: memunmap(base); return err; } + +int +intel_opregion_get_panel_type(struct drm_device *dev) +{ + u32 panel_details; + int ret; + + ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); + if (ret) { + DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", + ret); + return ret; + } + + ret = (panel_details >> 8) & 0xff; + if (ret > 0x10) { + DRM_DEBUG_KMS("Invalid OpRegion panel type 
0x%x\n", ret); + return -EINVAL; + } + + /* fall back to VBT panel type? */ + if (ret == 0x0) { + DRM_DEBUG_KMS("No panel type in OpRegion\n"); + return -ENODEV; + } + + return ret - 1; +} diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 9168413fe204..6694e9230cd5 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -190,13 +190,14 @@ struct intel_overlay { static struct overlay_registers __iomem * intel_overlay_map_regs(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = overlay->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(overlay->dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_wc(dev_priv->gtt.mappable, + regs = io_mapping_map_wc(ggtt->mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); return regs; @@ -233,14 +234,14 @@ static int intel_overlay_on(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; int ret; WARN_ON(overlay->active); WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); - req = i915_gem_request_alloc(ring, NULL); + req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) return PTR_ERR(req); @@ -252,11 +253,11 @@ static int intel_overlay_on(struct intel_overlay *overlay) overlay->active = true; - intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON); - intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE); - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON); + intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE); + intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return intel_overlay_do_wait_request(overlay, req, NULL); } @@ -267,7 +268,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; u32 tmp; @@ -283,7 +284,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay, if (tmp & (1 << 17)) DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp); - req = i915_gem_request_alloc(ring, NULL); + req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) return PTR_ERR(req); @@ -293,9 +294,9 @@ static int intel_overlay_continue(struct intel_overlay *overlay, return ret; } - intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(ring, flip_addr); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(engine, flip_addr); + intel_ring_advance(engine); WARN_ON(overlay->last_flip_req); i915_gem_request_assign(&overlay->last_flip_req, req); @@ -336,7 +337,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct 
intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_request *req; u32 flip_addr = overlay->flip_addr; int ret; @@ -349,7 +350,7 @@ static int intel_overlay_off(struct intel_overlay *overlay) * of the hw. Do it in both cases */ flip_addr |= OFC_UPDATE; - req = i915_gem_request_alloc(ring, NULL); + req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) return PTR_ERR(req); @@ -360,22 +361,23 @@ static int intel_overlay_off(struct intel_overlay *overlay) } /* wait for overlay to go idle */ - intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); - intel_ring_emit(ring, flip_addr); - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE); + intel_ring_emit(engine, flip_addr); + intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); /* turn overlay off */ if (IS_I830(dev)) { /* Workaround: Don't disable the overlay fully, since otherwise * it dies on the next OVERLAY_ON cmd. */ - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(engine, MI_NOOP); + intel_ring_emit(engine, MI_NOOP); } else { - intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); - intel_ring_emit(ring, flip_addr); - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF); + intel_ring_emit(engine, flip_addr); + intel_ring_emit(engine, + MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); } - intel_ring_advance(ring); + intel_ring_advance(engine); return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail); } @@ -408,7 +410,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) { struct drm_device *dev = overlay->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; int ret; WARN_ON(!mutex_is_locked(&dev->struct_mutex)); @@ -423,7 +425,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) /* synchronous slowpath */ struct drm_i915_gem_request *req; - req = i915_gem_request_alloc(ring, NULL); + req = i915_gem_request_alloc(engine, NULL); if (IS_ERR(req)) return PTR_ERR(req); @@ -433,9 +435,10 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) return ret; } - intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, + MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); ret = intel_overlay_do_wait_request(overlay, req, intel_overlay_release_old_vid_tail); @@ -1479,7 +1482,8 @@ struct intel_overlay_error_state { static struct overlay_registers __iomem * intel_overlay_map_regs_atomic(struct intel_overlay *overlay) { - struct drm_i915_private *dev_priv = overlay->dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(overlay->dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct overlay_registers __iomem *regs; if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) @@ -1488,7 +1492,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay) regs = (struct overlay_registers __iomem *) overlay->reg_bo->phys_handle->vaddr; else - regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, + regs = 
io_mapping_map_atomic_wc(ggtt->mappable, i915_gem_obj_ggtt_offset(overlay->reg_bo)); return regs; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 21ee6477bf98..8c8996fcbaf5 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -1240,7 +1240,7 @@ static void intel_backlight_device_unregister(struct intel_connector *connector) */ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { - return KHz(19200) / pwm_freq_hz; + return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz); } /* @@ -1251,16 +1251,14 @@ static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - u32 mul, clock; + u32 mul; if (I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY) mul = 128; else mul = 16; - clock = MHz(24); - - return clock / (pwm_freq_hz * mul); + return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul); } /* @@ -1283,7 +1281,7 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) else clock = MHz(24); /* LPT:LP */ - return clock / (pwm_freq_hz * mul); + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); } /* @@ -1292,10 +1290,9 @@ static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) */ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { - struct drm_device *dev = connector->base.dev; - int clock = MHz(intel_pch_rawclk(dev)); + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - return clock / (pwm_freq_hz * 128); + return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128); } /* @@ -1308,16 +1305,15 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) */ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); int clock; - if (IS_PINEVIEW(dev)) - clock = MHz(intel_hrawclk(dev)); + if (IS_PINEVIEW(dev_priv)) + clock = KHz(dev_priv->rawclk_freq); else - clock = 1000 * dev_priv->cdclk_freq; + clock = KHz(dev_priv->cdclk_freq); - return clock / (pwm_freq_hz * 32); + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32); } /* @@ -1332,11 +1328,11 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) int clock; if (IS_G4X(dev_priv)) - clock = MHz(intel_hrawclk(dev)); + clock = KHz(dev_priv->rawclk_freq); else - clock = 1000 * dev_priv->cdclk_freq; + clock = KHz(dev_priv->cdclk_freq); - return clock / (pwm_freq_hz * 128); + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128); } /* @@ -1346,19 +1342,21 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) */ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) { - struct drm_device *dev = connector->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int clock; + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + int mul, clock; if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) { - if (IS_CHERRYVIEW(dev)) - return KHz(19200) / (pwm_freq_hz * 16); + if (IS_CHERRYVIEW(dev_priv)) + clock = KHz(19200); else - return MHz(25) / (pwm_freq_hz * 16); + clock = MHz(25); + mul = 16; } else { - clock = intel_hrawclk(dev); - return MHz(clock) / (pwm_freq_hz * 128); + clock = 
KHz(dev_priv->rawclk_freq); + mul = 128; } + + return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul); } static u32 get_backlight_max_vbt(struct intel_connector *connector) @@ -1745,7 +1743,7 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) panel->backlight.get = pch_get_backlight; panel->backlight.hz_to_pwm = pch_hz_to_pwm; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - if (dev_priv->vbt.has_mipi) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { panel->backlight.setup = pwm_setup_backlight; panel->backlight.enable = pwm_enable_backlight; panel->backlight.disable = pwm_disable_backlight; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 347d4df49a9b..b8e395ad05ac 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -487,20 +487,6 @@ static const struct intel_watermark_params g4x_cursor_wm_info = { .guard_size = 2, .cacheline_size = G4X_FIFO_LINE_SIZE, }; -static const struct intel_watermark_params valleyview_wm_info = { - .fifo_size = VALLEYVIEW_FIFO_SIZE, - .max_wm = VALLEYVIEW_MAX_WM, - .default_wm = VALLEYVIEW_MAX_WM, - .guard_size = 2, - .cacheline_size = G4X_FIFO_LINE_SIZE, -}; -static const struct intel_watermark_params valleyview_cursor_wm_info = { - .fifo_size = I965_CURSOR_FIFO, - .max_wm = VALLEYVIEW_CURSOR_MAX_WM, - .default_wm = I965_CURSOR_DFT_WM, - .guard_size = 2, - .cacheline_size = G4X_FIFO_LINE_SIZE, -}; static const struct intel_watermark_params i965_cursor_wm_info = { .fifo_size = I965_CURSOR_FIFO, .max_wm = I965_CURSOR_MAX_WM, @@ -2010,11 +1996,18 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, cur_latency *= 5; } - result->pri_val = ilk_compute_pri_wm(cstate, pristate, - pri_latency, level); - result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); - result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); - result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); + if (pristate) { + result->pri_val = ilk_compute_pri_wm(cstate, pristate, + pri_latency, level); + result->fbc_val = ilk_compute_fbc_wm(cstate, pristate, result->pri_val); + } + + if (sprstate) + result->spr_val = ilk_compute_spr_wm(cstate, sprstate, spr_latency); + + if (curstate) + result->cur_val = ilk_compute_cur_wm(cstate, curstate, cur_latency); + result->enable = true; } @@ -2278,96 +2271,167 @@ static void skl_setup_wm_latency(struct drm_device *dev) intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); } +static bool ilk_validate_pipe_wm(struct drm_device *dev, + struct intel_pipe_wm *pipe_wm) +{ + /* LP0 watermark maximums depend on this pipe alone */ + const struct intel_wm_config config = { + .num_pipes_active = 1, + .sprites_enabled = pipe_wm->sprites_enabled, + .sprites_scaled = pipe_wm->sprites_scaled, + }; + struct ilk_wm_maximums max; + + /* LP0 watermarks always use 1/2 DDB partitioning */ + ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); + + /* At least LP0 must be valid */ + if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) { + DRM_DEBUG_KMS("LP0 watermark invalid\n"); + return false; + } + + return true; +} + /* Compute new watermarks for the pipe */ -static int ilk_compute_pipe_wm(struct intel_crtc *intel_crtc, - struct drm_atomic_state *state) +static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate) { + struct drm_atomic_state *state = cstate->base.state; + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); struct intel_pipe_wm *pipe_wm; - 
struct drm_device *dev = intel_crtc->base.dev; + struct drm_device *dev = state->dev; const struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc_state *cstate = NULL; struct intel_plane *intel_plane; - struct drm_plane_state *ps; struct intel_plane_state *pristate = NULL; struct intel_plane_state *sprstate = NULL; struct intel_plane_state *curstate = NULL; - int level, max_level = ilk_wm_max_level(dev); - /* LP0 watermark maximums depend on this pipe alone */ - struct intel_wm_config config = { - .num_pipes_active = 1, - }; + int level, max_level = ilk_wm_max_level(dev), usable_level; struct ilk_wm_maximums max; - cstate = intel_atomic_get_crtc_state(state, intel_crtc); - if (IS_ERR(cstate)) - return PTR_ERR(cstate); - pipe_wm = &cstate->wm.optimal.ilk; - memset(pipe_wm, 0, sizeof(*pipe_wm)); for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { - ps = drm_atomic_get_plane_state(state, - &intel_plane->base); - if (IS_ERR(ps)) - return PTR_ERR(ps); + struct intel_plane_state *ps; + + ps = intel_atomic_get_existing_plane_state(state, + intel_plane); + if (!ps) + continue; if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY) - pristate = to_intel_plane_state(ps); + pristate = ps; else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) - sprstate = to_intel_plane_state(ps); + sprstate = ps; else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) - curstate = to_intel_plane_state(ps); + curstate = ps; } - config.sprites_enabled = sprstate->visible; - config.sprites_scaled = sprstate->visible && - (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 || - drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); - pipe_wm->pipe_enabled = cstate->base.active; - pipe_wm->sprites_enabled = config.sprites_enabled; - pipe_wm->sprites_scaled = config.sprites_scaled; + if (sprstate) { + pipe_wm->sprites_enabled = sprstate->visible; + pipe_wm->sprites_scaled = sprstate->visible && + (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 || + drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); + } + + usable_level = max_level; /* ILK/SNB: LP2+ watermarks only w/o sprites */ - if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible) - max_level = 1; + if (INTEL_INFO(dev)->gen <= 6 && pipe_wm->sprites_enabled) + usable_level = 1; /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ - if (config.sprites_scaled) - max_level = 0; + if (pipe_wm->sprites_scaled) + usable_level = 0; ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, - pristate, sprstate, curstate, &pipe_wm->wm[0]); + pristate, sprstate, curstate, &pipe_wm->raw_wm[0]); + + memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm)); + pipe_wm->wm[0] = pipe_wm->raw_wm[0]; if (IS_HASWELL(dev) || IS_BROADWELL(dev)) pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); - /* LP0 watermarks always use 1/2 DDB partitioning */ - ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max); - - /* At least LP0 must be valid */ - if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0])) + if (!ilk_validate_pipe_wm(dev, pipe_wm)) return -EINVAL; ilk_compute_wm_reg_maximums(dev, 1, &max); for (level = 1; level <= max_level; level++) { - struct intel_wm_level wm = {}; + struct intel_wm_level *wm = &pipe_wm->raw_wm[level]; ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, - pristate, sprstate, curstate, &wm); + pristate, sprstate, curstate, wm); /* * Disable any watermark level that exceeds the * register maximums since such watermarks are * always invalid. 
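 */
/*
 * Editorial note, not part of the patch: the "*/" closing the comment
 * above was restored here, and the following is a re-lined copy of the
 * logic this hunk adds, for readability of the flattened diff. Raw,
 * unvalidated values now live in pipe_wm->raw_wm[], while pipe_wm->wm[]
 * (zeroed by the memset above) only receives validated levels:
 *
 *	if (level > usable_level)
 *		continue;                       (stays disabled)
 *	if (ilk_validate_wm_level(level, &max, wm))
 *		pipe_wm->wm[level] = *wm;       (level fits the registers)
 *	else
 *		usable_level = level;           (disable this and above)
 */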
*/ - if (!ilk_validate_wm_level(level, &max, &wm)) - break; + if (level > usable_level) + continue; + + if (ilk_validate_wm_level(level, &max, wm)) + pipe_wm->wm[level] = *wm; + else + usable_level = level; + } + + return 0; +} + +/* + * Build a set of 'intermediate' watermark values that satisfy both the old + * state and the new state. These can be programmed to the hardware + * immediately. + */ +static int ilk_compute_intermediate_wm(struct drm_device *dev, + struct intel_crtc *intel_crtc, + struct intel_crtc_state *newstate) +{ + struct intel_pipe_wm *a = &newstate->wm.intermediate; + struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; + int level, max_level = ilk_wm_max_level(dev); + + /* + * Start with the final, target watermarks, then combine with the + * currently active watermarks to get values that are safe both before + * and after the vblank. + */ + *a = newstate->wm.optimal.ilk; + a->pipe_enabled |= b->pipe_enabled; + a->sprites_enabled |= b->sprites_enabled; + a->sprites_scaled |= b->sprites_scaled; + + for (level = 0; level <= max_level; level++) { + struct intel_wm_level *a_wm = &a->wm[level]; + const struct intel_wm_level *b_wm = &b->wm[level]; - pipe_wm->wm[level] = wm; + a_wm->enable &= b_wm->enable; + a_wm->pri_val = max(a_wm->pri_val, b_wm->pri_val); + a_wm->spr_val = max(a_wm->spr_val, b_wm->spr_val); + a_wm->cur_val = max(a_wm->cur_val, b_wm->cur_val); + a_wm->fbc_val = max(a_wm->fbc_val, b_wm->fbc_val); } + /* + * We need to make sure that these merged watermark values are + * actually a valid configuration themselves. If they're not, + * there's no safe way to transition from the old state to + * the new state, so we need to fail the atomic transaction. + */ + if (!ilk_validate_pipe_wm(dev, a)) + return -EINVAL; + + /* + * If our intermediate WM are identical to the final WM, then we can + * omit the post-vblank programming; only update if it's different. 
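+ * (Editorial illustration, not part of the patch: with the two-stage
+ * scheme added here, the commit path programs the merged intermediate
+ * values first and revisits the optimal ones after the vblank, via the
+ * hooks installed below:
+ *
+ *	ilk_initial_watermarks(cstate);     pre-vblank, wm.intermediate
+ *	... vblank passes ...
+ *	ilk_optimize_watermarks(cstate);    post-vblank, wm.optimal;
+ *	                                    a no-op when this flag was
+ *	                                    cleared here.)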
+ */ + if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) + newstate->wm.need_postvbl_update = false; + return 0; } @@ -2383,9 +2447,7 @@ static void ilk_merge_wm_level(struct drm_device *dev, ret_wm->enable = true; for_each_intel_crtc(dev, intel_crtc) { - const struct intel_crtc_state *cstate = - to_intel_crtc_state(intel_crtc->base.state); - const struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; + const struct intel_pipe_wm *active = &intel_crtc->wm.active.ilk; const struct intel_wm_level *wm = &active->wm[level]; if (!active->pipe_enabled) @@ -2421,7 +2483,7 @@ static void ilk_wm_merge(struct drm_device *dev, /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */ if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) && config->num_pipes_active > 1) - return; + last_enabled_level = 0; /* ILK: FBC WM must be disabled always */ merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6; @@ -2533,15 +2595,14 @@ static void ilk_compute_wm_results(struct drm_device *dev, /* LP0 register values */ for_each_intel_crtc(dev, intel_crtc) { - const struct intel_crtc_state *cstate = - to_intel_crtc_state(intel_crtc->base.state); enum pipe pipe = intel_crtc->pipe; - const struct intel_wm_level *r = &cstate->wm.optimal.ilk.wm[0]; + const struct intel_wm_level *r = + &intel_crtc->wm.active.ilk.wm[0]; if (WARN_ON(!r->enable)) continue; - results->wm_linetime[pipe] = cstate->wm.optimal.ilk.linetime; + results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime; results->wm_pipe[pipe] = (r->pri_val << WM0_PIPE_PLANE_SHIFT) | @@ -2748,7 +2809,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, dev_priv->wm.hw = *results; } -static bool ilk_disable_lp_wm(struct drm_device *dev) +bool ilk_disable_lp_wm(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -2876,25 +2937,28 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, const struct drm_plane_state *pstate, int y) { - struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); + struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); struct drm_framebuffer *fb = pstate->fb; + uint32_t width = 0, height = 0; + + width = drm_rect_width(&intel_pstate->src) >> 16; + height = drm_rect_height(&intel_pstate->src) >> 16; + + if (intel_rotation_90_or_270(pstate->rotation)) + swap(width, height); /* for planar format */ if (fb->pixel_format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ - return intel_crtc->config->pipe_src_w * - intel_crtc->config->pipe_src_h * + return width * height * drm_format_plane_cpp(fb->pixel_format, 0); else /* uv-plane data rate */ - return (intel_crtc->config->pipe_src_w/2) * - (intel_crtc->config->pipe_src_h/2) * + return (width / 2) * (height / 2) * drm_format_plane_cpp(fb->pixel_format, 1); } /* for packed formats */ - return intel_crtc->config->pipe_src_w * - intel_crtc->config->pipe_src_h * - drm_format_plane_cpp(fb->pixel_format, 0); + return width * height * drm_format_plane_cpp(fb->pixel_format, 0); } /* @@ -2973,8 +3037,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, struct drm_framebuffer *fb = plane->state->fb; int id = skl_wm_plane_id(intel_plane); - if (fb == NULL) + if (!to_intel_plane_state(plane->state)->visible) continue; + if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -3000,7 +3065,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, uint16_t plane_blocks, y_plane_blocks = 0; int id = skl_wm_plane_id(intel_plane); - if (pstate->fb == NULL) + if (!to_intel_plane_state(pstate)->visible) 
continue; if (plane->type == DRM_PLANE_TYPE_CURSOR) continue; @@ -3123,26 +3188,36 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, { struct drm_plane *plane = &intel_plane->base; struct drm_framebuffer *fb = plane->state->fb; + struct intel_plane_state *intel_pstate = + to_intel_plane_state(plane->state); uint32_t latency = dev_priv->wm.skl_latency[level]; uint32_t method1, method2; uint32_t plane_bytes_per_line, plane_blocks_per_line; uint32_t res_blocks, res_lines; uint32_t selected_result; uint8_t cpp; + uint32_t width = 0, height = 0; - if (latency == 0 || !cstate->base.active || !fb) + if (latency == 0 || !cstate->base.active || !intel_pstate->visible) return false; + width = drm_rect_width(&intel_pstate->src) >> 16; + height = drm_rect_height(&intel_pstate->src) >> 16; + + if (intel_rotation_90_or_270(plane->state->rotation)) + swap(width, height); + cpp = drm_format_plane_cpp(fb->pixel_format, 0); method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), cpp, latency); method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, - cstate->pipe_src_w, - cpp, fb->modifier[0], + width, + cpp, + fb->modifier[0], latency); - plane_bytes_per_line = cstate->pipe_src_w * cpp; + plane_bytes_per_line = width * cpp; plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || @@ -3643,11 +3718,9 @@ static void ilk_compute_wm_config(struct drm_device *dev, } } -static void ilk_program_watermarks(struct intel_crtc_state *cstate) +static void ilk_program_watermarks(struct drm_i915_private *dev_priv) { - struct drm_crtc *crtc = cstate->base.crtc; - struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_device *dev = dev_priv->dev; struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; struct ilk_wm_maximums max; struct intel_wm_config config = {}; @@ -3678,28 +3751,28 @@ static void ilk_program_watermarks(struct intel_crtc_state *cstate) ilk_write_wm_values(dev_priv, &results); } -static void ilk_update_wm(struct drm_crtc *crtc) +static void ilk_initial_watermarks(struct intel_crtc_state *cstate) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); - - WARN_ON(cstate->base.active != intel_crtc->active); + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - /* - * IVB workaround: must disable low power watermarks for at least - * one frame before enabling scaling. LP watermarks can be re-enabled - * when scaling is disabled. 
- * - * WaCxSRDisabledForSpriteScaling:ivb - */ - if (cstate->disable_lp_wm) { - ilk_disable_lp_wm(crtc->dev); - intel_wait_for_vblank(crtc->dev, intel_crtc->pipe); - } + mutex_lock(&dev_priv->wm.wm_mutex); + intel_crtc->wm.active.ilk = cstate->wm.intermediate; + ilk_program_watermarks(dev_priv); + mutex_unlock(&dev_priv->wm.wm_mutex); +} - intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; +static void ilk_optimize_watermarks(struct intel_crtc_state *cstate) +{ + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); - ilk_program_watermarks(cstate); + mutex_lock(&dev_priv->wm.wm_mutex); + if (cstate->wm.need_postvbl_update) { + intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; + ilk_program_watermarks(dev_priv); + } + mutex_unlock(&dev_priv->wm.wm_mutex); } static void skl_pipe_wm_active_state(uint32_t val, @@ -4229,7 +4302,7 @@ static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) * the hw runs at the minimal clock before selecting the desired * frequency, if the down threshold expires in that window we will not * receive a down interrupt. */ - if (IS_GEN9(dev_priv->dev)) { + if (IS_GEN9(dev_priv)) { limits = (dev_priv->rps.max_freq_softlimit) << 23; if (val <= dev_priv->rps.min_freq_softlimit) limits |= (dev_priv->rps.min_freq_softlimit) << 14; @@ -4571,7 +4644,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode) static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; bool enable_rc6 = true; unsigned long rc6_ctx_base; @@ -4585,9 +4659,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) * for this check. 
*/ rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; - if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) && - (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base + - dev_priv->gtt.stolen_reserved_size))) { + if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && + (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + + ggtt->stolen_reserved_size))) { DRM_DEBUG_KMS("RC6 Base address not as expected.\n"); enable_rc6 = false; } @@ -4748,7 +4822,7 @@ static void gen9_enable_rps(struct drm_device *dev) * Up/Down EI & threshold registers, as well as the RP_CONTROL, * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -4756,9 +4830,8 @@ static void gen9_enable_rps(struct drm_device *dev) static void gen9_enable_rc6(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; uint32_t rc6_mask = 0; - int unused; /* 1a: Software RC state - RC0 */ I915_WRITE(GEN6_RC_STATE, 0); @@ -4779,8 +4852,8 @@ static void gen9_enable_rc6(struct drm_device *dev) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_ring(ring, dev_priv, unused) - I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); if (HAS_GUC_UCODE(dev)) I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); @@ -4826,9 +4899,8 @@ static void gen9_enable_rc6(struct drm_device *dev) static void gen8_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; uint32_t rc6_mask = 0; - int unused; /* 1a: Software RC state - RC0 */ I915_WRITE(GEN6_RC_STATE, 0); @@ -4847,8 +4919,8 @@ static void gen8_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_ring(ring, dev_priv, unused) - I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); if (IS_BROADWELL(dev)) I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ @@ -4908,11 +4980,11 @@ static void gen8_enable_rps(struct drm_device *dev) static void gen6_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; u32 gtfifodbg; int rc6_mode; - int i, ret; + int ret; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -4944,8 +5016,8 @@ static void gen6_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - for_each_ring(ring, dev_priv, i) - I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); @@ -5230,9 +5302,9 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv) static void cherryview_setup_pctx(struct drm_device *dev) 
{ - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; unsigned long pctx_paddr, paddr; - struct i915_gtt *gtt = &dev_priv->gtt; u32 pcbr; int pctx_size = 32*1024; @@ -5240,7 +5312,7 @@ static void cherryview_setup_pctx(struct drm_device *dev) if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); paddr = (dev_priv->mm.stolen_base + - (gtt->stolen_size - pctx_size)); + (ggtt->stolen_size - pctx_size)); pctx_paddr = (paddr & (~4095)); I915_WRITE(VLV_PCBR, pctx_paddr); @@ -5308,6 +5380,17 @@ static void valleyview_cleanup_pctx(struct drm_device *dev) dev_priv->vlv_pctx = NULL; } +static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) +{ + dev_priv->rps.gpll_ref_freq = + vlv_get_cck_clock(dev_priv, "GPLL ref", + CCK_GPLL_CLOCK_CONTROL, + dev_priv->czclk_freq); + + DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", + dev_priv->rps.gpll_ref_freq); +} + static void valleyview_init_gt_powersave(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5315,6 +5398,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) valleyview_setup_pctx(dev); + vlv_init_gpll_ref_freq(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); @@ -5372,6 +5457,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) cherryview_setup_pctx(dev); + vlv_init_gpll_ref_freq(dev_priv); + mutex_lock(&dev_priv->rps.hw_lock); mutex_lock(&dev_priv->sb_lock); @@ -5436,9 +5523,8 @@ static void valleyview_cleanup_gt_powersave(struct drm_device *dev) static void cherryview_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0, pcbr; - int i; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5463,8 +5549,8 @@ static void cherryview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_ring(ring, dev_priv, i) - I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC_SLEEP, 0); /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ @@ -5523,10 +5609,10 @@ static void cherryview_enable_rps(struct drm_device *dev) dev_priv->rps.cur_freq); DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", - intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), - dev_priv->rps.efficient_freq); + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), + dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); + valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5534,9 +5620,8 @@ static void cherryview_enable_rps(struct drm_device *dev) static void valleyview_enable_rps(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; u32 gtfifodbg, val, rc6_mode = 0; - int i; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); @@ -5574,8 +5659,8 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - for_each_ring(ring, dev_priv, i) - 
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10); + for_each_engine(engine, dev_priv) + I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); @@ -5613,10 +5698,10 @@ static void valleyview_enable_rps(struct drm_device *dev) dev_priv->rps.cur_freq); DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n", - intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq), - dev_priv->rps.efficient_freq); + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), + dev_priv->rps.idle_freq); - valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq); + valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -5951,17 +6036,16 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower); bool i915_gpu_busy(void) { struct drm_i915_private *dev_priv; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; bool ret = false; - int i; spin_lock_irq(&mchdev_lock); if (!i915_mch_dev) goto out_unlock; dev_priv = i915_mch_dev; - for_each_ring(ring, dev_priv, i) - ret |= !list_empty(&ring->request_list); + for_each_engine(engine, dev_priv) + ret |= !list_empty(&engine->request_list); out_unlock: spin_unlock_irq(&mchdev_lock); @@ -6798,23 +6882,10 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) gen6_check_mch_setup(dev); } -static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) -{ - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); - - /* - * Disable trickle feed and enable pnd deadline calculation - */ - I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); - I915_WRITE(CBR1_VLV, 0); -} - static void valleyview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - vlv_init_display_clock_gating(dev_priv); - /* WaDisableEarlyCull:vlv */ I915_WRITE(_3D_CHICKEN3, _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL)); @@ -6897,8 +6968,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - vlv_init_display_clock_gating(dev_priv); - /* WaVSRefCountFullforceMissDisable:chv */ /* WaDSRefCountFullforceMissDisable:chv */ I915_WRITE(GEN7_FF_THREAD_MODE, @@ -7038,8 +7107,7 @@ void intel_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->display.init_clock_gating) - dev_priv->display.init_clock_gating(dev); + dev_priv->display.init_clock_gating(dev); } void intel_suspend_hw(struct drm_device *dev) @@ -7048,6 +7116,60 @@ void intel_suspend_hw(struct drm_device *dev) lpt_suspend_hw(dev); } +static void nop_init_clock_gating(struct drm_device *dev) +{ + DRM_DEBUG_KMS("No clock gating settings or workarounds applied.\n"); +} + +/** + * intel_init_clock_gating_hooks - setup the clock gating hooks + * @dev_priv: device private + * + * Setup the hooks that configure which clocks of a given platform can be + * gated and also apply various GT and display specific workarounds for these + * platforms. Note that some GT specific workarounds are applied separately + * when GPU contexts or batchbuffers start their execution. 
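+ *
+ * Editorial sketch, not part of the patch: together with
+ * nop_init_clock_gating() above, every platform now ends up with a
+ * non-NULL hook, which is what lets intel_init_clock_gating() drop its
+ * NULL check earlier in this patch and unconditionally call
+ *
+ *	dev_priv->display.init_clock_gating(dev);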
+ */ +void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) +{ + if (IS_SKYLAKE(dev_priv)) + dev_priv->display.init_clock_gating = nop_init_clock_gating; + else if (IS_KABYLAKE(dev_priv)) + dev_priv->display.init_clock_gating = nop_init_clock_gating; + else if (IS_BROXTON(dev_priv)) + dev_priv->display.init_clock_gating = bxt_init_clock_gating; + else if (IS_BROADWELL(dev_priv)) + dev_priv->display.init_clock_gating = broadwell_init_clock_gating; + else if (IS_CHERRYVIEW(dev_priv)) + dev_priv->display.init_clock_gating = cherryview_init_clock_gating; + else if (IS_HASWELL(dev_priv)) + dev_priv->display.init_clock_gating = haswell_init_clock_gating; + else if (IS_IVYBRIDGE(dev_priv)) + dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; + else if (IS_VALLEYVIEW(dev_priv)) + dev_priv->display.init_clock_gating = valleyview_init_clock_gating; + else if (IS_GEN6(dev_priv)) + dev_priv->display.init_clock_gating = gen6_init_clock_gating; + else if (IS_GEN5(dev_priv)) + dev_priv->display.init_clock_gating = ironlake_init_clock_gating; + else if (IS_G4X(dev_priv)) + dev_priv->display.init_clock_gating = g4x_init_clock_gating; + else if (IS_CRESTLINE(dev_priv)) + dev_priv->display.init_clock_gating = crestline_init_clock_gating; + else if (IS_BROADWATER(dev_priv)) + dev_priv->display.init_clock_gating = broadwater_init_clock_gating; + else if (IS_GEN3(dev_priv)) + dev_priv->display.init_clock_gating = gen3_init_clock_gating; + else if (IS_I85X(dev_priv) || IS_I865G(dev_priv)) + dev_priv->display.init_clock_gating = i85x_init_clock_gating; + else if (IS_GEN2(dev_priv)) + dev_priv->display.init_clock_gating = i830_init_clock_gating; + else { + MISSING_CASE(INTEL_DEVID(dev_priv)); + dev_priv->display.init_clock_gating = nop_init_clock_gating; + } +} + /* Set up chip specific power management-related functions */ void intel_init_pm(struct drm_device *dev) { @@ -7064,10 +7186,6 @@ void intel_init_pm(struct drm_device *dev) /* For FIFO watermark updates */ if (INTEL_INFO(dev)->gen >= 9) { skl_setup_wm_latency(dev); - - if (IS_BROXTON(dev)) - dev_priv->display.init_clock_gating = - bxt_init_clock_gating; dev_priv->display.update_wm = skl_update_wm; } else if (HAS_PCH_SPLIT(dev)) { ilk_setup_wm_latency(dev); @@ -7076,36 +7194,23 @@ void intel_init_pm(struct drm_device *dev) dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) || (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { - dev_priv->display.update_wm = ilk_update_wm; dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm; - dev_priv->display.program_watermarks = ilk_program_watermarks; + dev_priv->display.compute_intermediate_wm = + ilk_compute_intermediate_wm; + dev_priv->display.initial_watermarks = + ilk_initial_watermarks; + dev_priv->display.optimize_watermarks = + ilk_optimize_watermarks; } else { DRM_DEBUG_KMS("Failed to read display plane latency. 
" "Disable CxSR\n"); } - - if (IS_GEN5(dev)) - dev_priv->display.init_clock_gating = ironlake_init_clock_gating; - else if (IS_GEN6(dev)) - dev_priv->display.init_clock_gating = gen6_init_clock_gating; - else if (IS_IVYBRIDGE(dev)) - dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; - else if (IS_HASWELL(dev)) - dev_priv->display.init_clock_gating = haswell_init_clock_gating; - else if (INTEL_INFO(dev)->gen == 8) - dev_priv->display.init_clock_gating = broadwell_init_clock_gating; } else if (IS_CHERRYVIEW(dev)) { vlv_setup_wm_latency(dev); - dev_priv->display.update_wm = vlv_update_wm; - dev_priv->display.init_clock_gating = - cherryview_init_clock_gating; } else if (IS_VALLEYVIEW(dev)) { vlv_setup_wm_latency(dev); - dev_priv->display.update_wm = vlv_update_wm; - dev_priv->display.init_clock_gating = - valleyview_init_clock_gating; } else if (IS_PINEVIEW(dev)) { if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, @@ -7121,20 +7226,13 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = NULL; } else dev_priv->display.update_wm = pineview_update_wm; - dev_priv->display.init_clock_gating = gen3_init_clock_gating; } else if (IS_G4X(dev)) { dev_priv->display.update_wm = g4x_update_wm; - dev_priv->display.init_clock_gating = g4x_init_clock_gating; } else if (IS_GEN4(dev)) { dev_priv->display.update_wm = i965_update_wm; - if (IS_CRESTLINE(dev)) - dev_priv->display.init_clock_gating = crestline_init_clock_gating; - else if (IS_BROADWATER(dev)) - dev_priv->display.init_clock_gating = broadwater_init_clock_gating; } else if (IS_GEN3(dev)) { dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; - dev_priv->display.init_clock_gating = gen3_init_clock_gating; } else if (IS_GEN2(dev)) { if (INTEL_INFO(dev)->num_pipes == 1) { dev_priv->display.update_wm = i845_update_wm; @@ -7143,11 +7241,6 @@ void intel_init_pm(struct drm_device *dev) dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i830_get_fifo_size; } - - if (IS_I85X(dev) || IS_I865G(dev)) - dev_priv->display.init_clock_gating = i85x_init_clock_gating; - else - dev_priv->display.init_clock_gating = i830_init_clock_gating; } else { DRM_ERROR("unexpected fall-through in intel_init_pm\n"); } @@ -7201,78 +7294,43 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val return 0; } -static int vlv_gpu_freq_div(unsigned int czclk_freq) -{ - switch (czclk_freq) { - case 200: - return 10; - case 267: - return 12; - case 320: - case 333: - return 16; - case 400: - return 20; - default: - return -1; - } -} - static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) { - int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - - div = vlv_gpu_freq_div(czclk_freq); - if (div < 0) - return div; - - return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div); + /* + * N = val - 0xb7 + * Slow = Fast = GPLL ref * N + */ + return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * (val - 0xb7), 1000); } static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) { - int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - - mul = vlv_gpu_freq_div(czclk_freq); - if (mul < 0) - return mul; - - return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6; + return DIV_ROUND_CLOSEST(1000 * val, dev_priv->rps.gpll_ref_freq) + 0xb7; } static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) { - int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - 
- div = vlv_gpu_freq_div(czclk_freq); - if (div < 0) - return div; - div /= 2; - - return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2; + /* + * N = val / 2 + * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 + */ + return DIV_ROUND_CLOSEST(dev_priv->rps.gpll_ref_freq * val, 2 * 2 * 1000); } static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) { - int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); - - mul = vlv_gpu_freq_div(czclk_freq); - if (mul < 0) - return mul; - mul /= 2; - /* CHV needs even values */ - return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2; + return DIV_ROUND_CLOSEST(2 * 1000 * val, dev_priv->rps.gpll_ref_freq) * 2; } int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) { - if (IS_GEN9(dev_priv->dev)) + if (IS_GEN9(dev_priv)) return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER); - else if (IS_CHERRYVIEW(dev_priv->dev)) + else if (IS_CHERRYVIEW(dev_priv)) return chv_gpu_freq(dev_priv, val); - else if (IS_VALLEYVIEW(dev_priv->dev)) + else if (IS_VALLEYVIEW(dev_priv)) return byt_gpu_freq(dev_priv, val); else return val * GT_FREQUENCY_MULTIPLIER; @@ -7280,12 +7338,12 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) { - if (IS_GEN9(dev_priv->dev)) + if (IS_GEN9(dev_priv)) return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, GT_FREQUENCY_MULTIPLIER); - else if (IS_CHERRYVIEW(dev_priv->dev)) + else if (IS_CHERRYVIEW(dev_priv)) return chv_freq_opcode(dev_priv, val); - else if (IS_VALLEYVIEW(dev_priv->dev)) + else if (IS_VALLEYVIEW(dev_priv)) return byt_freq_opcode(dev_priv, val); else return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); @@ -7302,7 +7360,7 @@ static void __intel_rps_boost_work(struct work_struct *work) struct drm_i915_gem_request *req = boost->req; if (!i915_gem_request_completed(req, true)) - gen6_rps_boost(to_i915(req->ring->dev), NULL, + gen6_rps_boost(to_i915(req->engine->dev), NULL, req->emitted_jiffies); i915_gem_request_unreference__unlocked(req); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index 0b42ada338c8..c3abae4bc596 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -507,7 +507,8 @@ static void hsw_psr_disable(struct intel_dp *intel_dp) /* Wait till PSR is idle */ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & - EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10)) + EDP_PSR_STATUS_STATE_MASK) == 0, + 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) DRM_ERROR("Timed out waiting for PSR Idle State\n"); dev_priv->psr.active = false; @@ -562,7 +563,7 @@ static void intel_psr_work(struct work_struct *work) * PSR might take some time to get fully disabled * and be ready for re-enable. 
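 * (Editorial aside, not part of the patch: the hsw_psr_disable() hunk
 * above now writes the same kind of poll in explicit units,
 *
 *	_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
 *		   EDP_PSR_STATUS_STATE_MASK) == 0,
 *		  2 * USEC_PER_SEC, 10 * USEC_PER_MSEC)
 *
 * that is, a 2 s timeout polled every 10 ms in place of the bare
 * "2000, 10".)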
*/ - if (HAS_DDI(dev_priv->dev)) { + if (HAS_DDI(dev_priv)) { if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) & EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); @@ -780,8 +781,7 @@ void intel_psr_init(struct drm_device *dev) /* Per platform default */ if (i915.enable_psr == -1) { - if (IS_HASWELL(dev) || IS_BROADWELL(dev) || - IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) i915.enable_psr = 1; else i915.enable_psr = 0; diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 45ce45a5e122..19ebe7796e7f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -59,19 +59,19 @@ int intel_ring_space(struct intel_ringbuffer *ringbuf) return ringbuf->space; } -bool intel_ring_stopped(struct intel_engine_cs *ring) +bool intel_engine_stopped(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; - return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); + struct drm_i915_private *dev_priv = engine->dev->dev_private; + return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine); } -static void __intel_ring_advance(struct intel_engine_cs *ring) +static void __intel_ring_advance(struct intel_engine_cs *engine) { - struct intel_ringbuffer *ringbuf = ring->buffer; + struct intel_ringbuffer *ringbuf = engine->buffer; ringbuf->tail &= ringbuf->size - 1; - if (intel_ring_stopped(ring)) + if (intel_engine_stopped(engine)) return; - ring->write_tail(ring, ringbuf->tail); + engine->write_tail(engine, ringbuf->tail); } static int @@ -79,7 +79,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; u32 cmd; int ret; @@ -94,9 +94,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, cmd); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return 0; } @@ -106,8 +106,8 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *ring = req->ring; - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = req->engine; + struct drm_device *dev = engine->dev; u32 cmd; int ret; @@ -153,9 +153,9 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, cmd); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return 0; } @@ -200,34 +200,34 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, static int intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; - u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_engine_cs *engine = req->engine; + u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); - intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); 
/* address */ - intel_ring_emit(ring, 0); /* low dword */ - intel_ring_emit(ring, 0); /* high dword */ - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ + intel_ring_emit(engine, 0); /* low dword */ + intel_ring_emit(engine, 0); /* high dword */ + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); - intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); - intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE); + intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ + intel_ring_emit(engine, 0); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return 0; } @@ -236,9 +236,9 @@ static int gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; u32 flags = 0; - u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* Force SNB workarounds for PIPE_CONTROL flushes */ @@ -276,11 +276,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(ring, flags); - intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(engine, flags); + intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(engine, 0); + intel_ring_advance(engine); return 0; } @@ -288,19 +288,19 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, static int gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 4); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(engine, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, 0); + intel_ring_advance(engine); return 0; } @@ -309,9 +309,9 @@ static int gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; u32 flags = 0; - u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* @@ -360,11 +360,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); - intel_ring_emit(ring, flags); - intel_ring_emit(ring, scratch_addr); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4)); + intel_ring_emit(engine, flags); + intel_ring_emit(engine, 
scratch_addr); + intel_ring_emit(engine, 0); + intel_ring_advance(engine); return 0; } @@ -373,20 +373,20 @@ static int gen8_emit_pipe_control(struct drm_i915_gem_request *req, u32 flags, u32 scratch_addr) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 6); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); - intel_ring_emit(ring, flags); - intel_ring_emit(ring, scratch_addr); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_emit(ring, 0); - intel_ring_advance(ring); + intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6)); + intel_ring_emit(engine, flags); + intel_ring_emit(engine, scratch_addr); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, 0); + intel_ring_advance(engine); return 0; } @@ -396,7 +396,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { u32 flags = 0; - u32 scratch_addr = req->ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; flags |= PIPE_CONTROL_CS_STALL; @@ -429,51 +429,51 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, return gen8_emit_pipe_control(req, flags, scratch_addr); } -static void ring_write_tail(struct intel_engine_cs *ring, +static void ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; - I915_WRITE_TAIL(ring, value); + struct drm_i915_private *dev_priv = engine->dev->dev_private; + I915_WRITE_TAIL(engine, value); } -u64 intel_ring_get_active_head(struct intel_engine_cs *ring) +u64 intel_ring_get_active_head(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; u64 acthd; - if (INTEL_INFO(ring->dev)->gen >= 8) - acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), - RING_ACTHD_UDW(ring->mmio_base)); - else if (INTEL_INFO(ring->dev)->gen >= 4) - acthd = I915_READ(RING_ACTHD(ring->mmio_base)); + if (INTEL_INFO(engine->dev)->gen >= 8) + acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), + RING_ACTHD_UDW(engine->mmio_base)); + else if (INTEL_INFO(engine->dev)->gen >= 4) + acthd = I915_READ(RING_ACTHD(engine->mmio_base)); else acthd = I915_READ(ACTHD); return acthd; } -static void ring_setup_phys_status_page(struct intel_engine_cs *ring) +static void ring_setup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; u32 addr; addr = dev_priv->status_page_dmah->busaddr; - if (INTEL_INFO(ring->dev)->gen >= 4) + if (INTEL_INFO(engine->dev)->gen >= 4) addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; I915_WRITE(HWS_PGA, addr); } -static void intel_ring_setup_status_page(struct intel_engine_cs *ring) +static void intel_ring_setup_status_page(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_device *dev = engine->dev; + struct drm_i915_private *dev_priv = engine->dev->dev_private; i915_reg_t mmio; /* The ring status page addresses are no longer next to the rest of * the ring registers as of gen7. 
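 * (Editorial note: hence the per-engine switch below on gen7, the
 * mmio-base-derived
 *
 *	mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
 *
 * on gen6, and plain RING_HWS_PGA(engine->mmio_base) otherwise.)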
*/ if (IS_GEN7(dev)) { - switch (ring->id) { + switch (engine->id) { case RCS: mmio = RENDER_HWS_PGA_GEN7; break; @@ -492,14 +492,14 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring) mmio = VEBOX_HWS_PGA_GEN7; break; } - } else if (IS_GEN6(ring->dev)) { - mmio = RING_HWS_PGA_GEN6(ring->mmio_base); + } else if (IS_GEN6(engine->dev)) { + mmio = RING_HWS_PGA_GEN6(engine->mmio_base); } else { /* XXX: gen8 returns to sanity */ - mmio = RING_HWS_PGA(ring->mmio_base); + mmio = RING_HWS_PGA(engine->mmio_base); } - I915_WRITE(mmio, (u32)ring->status_page.gfx_addr); + I915_WRITE(mmio, (u32)engine->status_page.gfx_addr); POSTING_READ(mmio); /* @@ -510,10 +510,10 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring) * invalidating the TLB? */ if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { - i915_reg_t reg = RING_INSTPM(ring->mmio_base); + i915_reg_t reg = RING_INSTPM(engine->mmio_base); /* ring should be idle before issuing a sync flush*/ - WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); + WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0); I915_WRITE(reg, _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | @@ -521,117 +521,125 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *ring) if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 1000)) DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", - ring->name); + engine->name); } } -static bool stop_ring(struct intel_engine_cs *ring) +static bool stop_ring(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct drm_i915_private *dev_priv = to_i915(engine->dev); - if (!IS_GEN2(ring->dev)) { - I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); - if (wait_for((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { - DRM_ERROR("%s : timed out trying to stop ring\n", ring->name); + if (!IS_GEN2(engine->dev)) { + I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); + if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { + DRM_ERROR("%s : timed out trying to stop ring\n", + engine->name); /* Sometimes we observe that the idle flag is not * set even though the ring is empty. So double * check before giving up. 
*/ - if (I915_READ_HEAD(ring) != I915_READ_TAIL(ring)) + if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine)) return false; } } - I915_WRITE_CTL(ring, 0); - I915_WRITE_HEAD(ring, 0); - ring->write_tail(ring, 0); + I915_WRITE_CTL(engine, 0); + I915_WRITE_HEAD(engine, 0); + engine->write_tail(engine, 0); - if (!IS_GEN2(ring->dev)) { - (void)I915_READ_CTL(ring); - I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); + if (!IS_GEN2(engine->dev)) { + (void)I915_READ_CTL(engine); + I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); } - return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; + return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0; } -static int init_ring_common(struct intel_engine_cs *ring) +void intel_engine_init_hangcheck(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); +} + +static int init_ring_common(struct intel_engine_cs *engine) +{ + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_ringbuffer *ringbuf = ring->buffer; + struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_object *obj = ringbuf->obj; int ret = 0; intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - if (!stop_ring(ring)) { + if (!stop_ring(engine)) { /* G45 ring initialization often fails to reset head to zero */ DRM_DEBUG_KMS("%s head not reset to zero " "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); + engine->name, + I915_READ_CTL(engine), + I915_READ_HEAD(engine), + I915_READ_TAIL(engine), + I915_READ_START(engine)); - if (!stop_ring(ring)) { + if (!stop_ring(engine)) { DRM_ERROR("failed to set %s head to zero " "ctl %08x head %08x tail %08x start %08x\n", - ring->name, - I915_READ_CTL(ring), - I915_READ_HEAD(ring), - I915_READ_TAIL(ring), - I915_READ_START(ring)); + engine->name, + I915_READ_CTL(engine), + I915_READ_HEAD(engine), + I915_READ_TAIL(engine), + I915_READ_START(engine)); ret = -EIO; goto out; } } if (I915_NEED_GFX_HWS(dev)) - intel_ring_setup_status_page(ring); + intel_ring_setup_status_page(engine); else - ring_setup_phys_status_page(ring); + ring_setup_phys_status_page(engine); /* Enforce ordering by reading HEAD register back */ - I915_READ_HEAD(ring); + I915_READ_HEAD(engine); /* Initialize the ring. This must happen _after_ we've cleared the ring * registers with the above sequence (the readback of the HEAD registers * also enforces ordering), otherwise the hw might lose the new ring * register values. 
*/ - I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj)); + I915_WRITE_START(engine, i915_gem_obj_ggtt_offset(obj)); /* WaClearRingBufHeadRegAtInit:ctg,elk */ - if (I915_READ_HEAD(ring)) + if (I915_READ_HEAD(engine)) DRM_DEBUG("%s initialization failed [head=%08x], fudging\n", - ring->name, I915_READ_HEAD(ring)); - I915_WRITE_HEAD(ring, 0); - (void)I915_READ_HEAD(ring); + engine->name, I915_READ_HEAD(engine)); + I915_WRITE_HEAD(engine, 0); + (void)I915_READ_HEAD(engine); - I915_WRITE_CTL(ring, + I915_WRITE_CTL(engine, ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); /* If the head is still not zero, the ring is dead */ - if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 && - I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) && - (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) { + if (wait_for((I915_READ_CTL(engine) & RING_VALID) != 0 && + I915_READ_START(engine) == i915_gem_obj_ggtt_offset(obj) && + (I915_READ_HEAD(engine) & HEAD_ADDR) == 0, 50)) { DRM_ERROR("%s initialization failed " "ctl %08x (valid? %d) head %08x tail %08x start %08x [expected %08lx]\n", - ring->name, - I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID, - I915_READ_HEAD(ring), I915_READ_TAIL(ring), - I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj)); + engine->name, + I915_READ_CTL(engine), + I915_READ_CTL(engine) & RING_VALID, + I915_READ_HEAD(engine), I915_READ_TAIL(engine), + I915_READ_START(engine), + (unsigned long)i915_gem_obj_ggtt_offset(obj)); ret = -EIO; goto out; } ringbuf->last_retired_head = -1; - ringbuf->head = I915_READ_HEAD(ring); - ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; + ringbuf->head = I915_READ_HEAD(engine); + ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR; intel_ring_update_space(ringbuf); - memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); + intel_engine_init_hangcheck(engine); out: intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); @@ -640,59 +648,60 @@ out: } void -intel_fini_pipe_control(struct intel_engine_cs *ring) +intel_fini_pipe_control(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; - if (ring->scratch.obj == NULL) + if (engine->scratch.obj == NULL) return; if (INTEL_INFO(dev)->gen >= 5) { - kunmap(sg_page(ring->scratch.obj->pages->sgl)); - i915_gem_object_ggtt_unpin(ring->scratch.obj); + kunmap(sg_page(engine->scratch.obj->pages->sgl)); + i915_gem_object_ggtt_unpin(engine->scratch.obj); } - drm_gem_object_unreference(&ring->scratch.obj->base); - ring->scratch.obj = NULL; + drm_gem_object_unreference(&engine->scratch.obj->base); + engine->scratch.obj = NULL; } int -intel_init_pipe_control(struct intel_engine_cs *ring) +intel_init_pipe_control(struct intel_engine_cs *engine) { int ret; - WARN_ON(ring->scratch.obj); + WARN_ON(engine->scratch.obj); - ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); - if (ring->scratch.obj == NULL) { + engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); + if (engine->scratch.obj == NULL) { DRM_ERROR("Failed to allocate seqno page\n"); ret = -ENOMEM; goto err; } - ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); + ret = i915_gem_object_set_cache_level(engine->scratch.obj, + I915_CACHE_LLC); if (ret) goto err_unref; - ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0); + ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0); if (ret) goto err_unref; - ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); - ring->scratch.cpu_page = 
kmap(sg_page(ring->scratch.obj->pages->sgl)); - if (ring->scratch.cpu_page == NULL) { + engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj); + engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl)); + if (engine->scratch.cpu_page == NULL) { ret = -ENOMEM; goto err_unpin; } DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", - ring->name, ring->scratch.gtt_offset); + engine->name, engine->scratch.gtt_offset); return 0; err_unpin: - i915_gem_object_ggtt_unpin(ring->scratch.obj); + i915_gem_object_ggtt_unpin(engine->scratch.obj); err_unref: - drm_gem_object_unreference(&ring->scratch.obj->base); + drm_gem_object_unreference(&engine->scratch.obj->base); err: return ret; } @@ -700,15 +709,15 @@ err: static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) { int ret, i; - struct intel_engine_cs *ring = req->ring; - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = req->engine; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct i915_workarounds *w = &dev_priv->workarounds; if (w->count == 0) return 0; - ring->gpu_caches_dirty = true; + engine->gpu_caches_dirty = true; ret = intel_ring_flush_all_caches(req); if (ret) return ret; @@ -717,16 +726,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count)); + intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count)); for (i = 0; i < w->count; i++) { - intel_ring_emit_reg(ring, w->reg[i].addr); - intel_ring_emit(ring, w->reg[i].value); + intel_ring_emit_reg(engine, w->reg[i].addr); + intel_ring_emit(engine, w->reg[i].value); } - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(ring); + intel_ring_advance(engine); - ring->gpu_caches_dirty = true; + engine->gpu_caches_dirty = true; ret = intel_ring_flush_all_caches(req); if (ret) return ret; @@ -789,25 +798,26 @@ static int wa_add(struct drm_i915_private *dev_priv, #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) -static int wa_ring_whitelist_reg(struct intel_engine_cs *ring, i915_reg_t reg) +static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, + i915_reg_t reg) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; struct i915_workarounds *wa = &dev_priv->workarounds; - const uint32_t index = wa->hw_whitelist_count[ring->id]; + const uint32_t index = wa->hw_whitelist_count[engine->id]; if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS)) return -EINVAL; - WA_WRITE(RING_FORCE_TO_NONPRIV(ring->mmio_base, index), + WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index), i915_mmio_reg_offset(reg)); - wa->hw_whitelist_count[ring->id]++; + wa->hw_whitelist_count[engine->id]++; return 0; } -static int gen8_init_workarounds(struct intel_engine_cs *ring) +static int gen8_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); @@ -857,13 +867,13 @@ static int gen8_init_workarounds(struct intel_engine_cs *ring) return 0; } -static int bdw_init_workarounds(struct intel_engine_cs *ring) +static int bdw_init_workarounds(struct intel_engine_cs *engine) { int ret; - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - 
ret = gen8_init_workarounds(ring); + ret = gen8_init_workarounds(engine); if (ret) return ret; @@ -886,13 +896,13 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring) return 0; } -static int chv_init_workarounds(struct intel_engine_cs *ring) +static int chv_init_workarounds(struct intel_engine_cs *engine) { int ret; - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - ret = gen8_init_workarounds(ring); + ret = gen8_init_workarounds(engine); if (ret) return ret; @@ -905,9 +915,9 @@ static int chv_init_workarounds(struct intel_engine_cs *ring) return 0; } -static int gen9_init_workarounds(struct intel_engine_cs *ring) +static int gen9_init_workarounds(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; uint32_t tmp; int ret; @@ -920,8 +930,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ECOCHK_DIS_TLB); + /* WaClearFlowControlGpgpuContextSave:skl,bxt */ /* WaDisablePartialInstShootdown:skl,bxt */ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, + FLOW_CONTROL_ENABLE | PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* Syncing dependencies between camera and graphics:skl,bxt */ @@ -968,7 +980,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) /* WaForceContextSaveRestoreNonCoherent:skl,bxt */ tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT; - if (IS_SKL_REVID(dev, SKL_REVID_F0, SKL_REVID_F0) || + if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) || IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER)) tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE; WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp); @@ -986,21 +998,21 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring) GEN8_LQSC_FLUSH_COHERENT_LINES)); /* WaEnablePreemptionGranularityControlByUMD:skl,bxt */ - ret= wa_ring_whitelist_reg(ring, GEN8_CS_CHICKEN1); + ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1); if (ret) return ret; /* WaAllowUMDToModifyHDCChicken1:skl,bxt */ - ret = wa_ring_whitelist_reg(ring, GEN8_HDC_CHICKEN1); + ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1); if (ret) return ret; return 0; } -static int skl_tune_iz_hashing(struct intel_engine_cs *ring) +static int skl_tune_iz_hashing(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; u8 vals[3] = { 0, 0, 0 }; unsigned int i; @@ -1040,13 +1052,13 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *ring) return 0; } -static int skl_init_workarounds(struct intel_engine_cs *ring) +static int skl_init_workarounds(struct intel_engine_cs *engine) { int ret; - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - ret = gen9_init_workarounds(ring); + ret = gen9_init_workarounds(engine); if (ret) return ret; @@ -1085,7 +1097,8 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) WA_SET_BIT_MASKED(HIZ_CHICKEN, BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); - if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) { + /* This is tied to WaForceContextSaveRestoreNonCoherent */ + if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) { /* *Use Force Non-Coherent whenever executing a 3D context. 
This * is a workaround for a possible hang in the unlikely event @@ -1113,20 +1126,20 @@ static int skl_init_workarounds(struct intel_engine_cs *ring) GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); /* WaDisableLSQCROPERFforOCL:skl */ - ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); + ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) return ret; - return skl_tune_iz_hashing(ring); + return skl_tune_iz_hashing(engine); } -static int bxt_init_workarounds(struct intel_engine_cs *ring) +static int bxt_init_workarounds(struct intel_engine_cs *engine) { int ret; - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - ret = gen9_init_workarounds(ring); + ret = gen9_init_workarounds(engine); if (ret) return ret; @@ -1157,11 +1170,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring) /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ /* WaDisableLSQCROPERFforOCL:bxt */ if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { - ret = wa_ring_whitelist_reg(ring, GEN9_CS_DEBUG_MODE1); + ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); if (ret) return ret; - ret = wa_ring_whitelist_reg(ring, GEN8_L3SQCREG4); + ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); if (ret) return ret; } @@ -1169,36 +1182,36 @@ static int bxt_init_workarounds(struct intel_engine_cs *ring) return 0; } -int init_workarounds_ring(struct intel_engine_cs *ring) +int init_workarounds_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - WARN_ON(ring->id != RCS); + WARN_ON(engine->id != RCS); dev_priv->workarounds.count = 0; dev_priv->workarounds.hw_whitelist_count[RCS] = 0; if (IS_BROADWELL(dev)) - return bdw_init_workarounds(ring); + return bdw_init_workarounds(engine); if (IS_CHERRYVIEW(dev)) - return chv_init_workarounds(ring); + return chv_init_workarounds(engine); if (IS_SKYLAKE(dev)) - return skl_init_workarounds(ring); + return skl_init_workarounds(engine); if (IS_BROXTON(dev)) - return bxt_init_workarounds(ring); + return bxt_init_workarounds(engine); return 0; } -static int init_render_ring(struct intel_engine_cs *ring) +static int init_render_ring(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int ret = init_ring_common(ring); + int ret = init_ring_common(engine); if (ret) return ret; @@ -1241,14 +1254,14 @@ static int init_render_ring(struct intel_engine_cs *ring) I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); if (HAS_L3_DPF(dev)) - I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); - return init_workarounds_ring(ring); + return init_workarounds_ring(engine); } -static void render_ring_cleanup(struct intel_engine_cs *ring) +static void render_ring_cleanup(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->semaphore_obj) { @@ -1257,18 +1270,19 @@ static void render_ring_cleanup(struct intel_engine_cs *ring) dev_priv->semaphore_obj = NULL; } - intel_fini_pipe_control(ring); + intel_fini_pipe_control(engine); } static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { #define MBOX_UPDATE_DWORDS 8 - struct intel_engine_cs *signaller = signaller_req->ring; + 
struct intel_engine_cs *signaller = signaller_req->engine; struct drm_device *dev = signaller->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *waiter; - int i, ret, num_rings; + enum intel_engine_id id; + int ret, num_rings; num_rings = hweight32(INTEL_INFO(dev)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; @@ -1278,9 +1292,9 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req, if (ret) return ret; - for_each_ring(waiter, dev_priv, i) { + for_each_engine_id(waiter, dev_priv, id) { u32 seqno; - u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; + u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; @@ -1305,11 +1319,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { #define MBOX_UPDATE_DWORDS 6 - struct intel_engine_cs *signaller = signaller_req->ring; + struct intel_engine_cs *signaller = signaller_req->engine; struct drm_device *dev = signaller->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *waiter; - int i, ret, num_rings; + enum intel_engine_id id; + int ret, num_rings; num_rings = hweight32(INTEL_INFO(dev)->ring_mask); num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; @@ -1319,9 +1334,9 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, if (ret) return ret; - for_each_ring(waiter, dev_priv, i) { + for_each_engine_id(waiter, dev_priv, id) { u32 seqno; - u64 gtt_offset = signaller->semaphore.signal_ggtt[i]; + u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) continue; @@ -1343,11 +1358,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req, static int gen6_signal(struct drm_i915_gem_request *signaller_req, unsigned int num_dwords) { - struct intel_engine_cs *signaller = signaller_req->ring; + struct intel_engine_cs *signaller = signaller_req->engine; struct drm_device *dev = signaller->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *useless; - int i, ret, num_rings; + enum intel_engine_id id; + int ret, num_rings; #define MBOX_UPDATE_DWORDS 3 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); @@ -1358,8 +1374,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, if (ret) return ret; - for_each_ring(useless, dev_priv, i) { - i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[i]; + for_each_engine_id(useless, dev_priv, id) { + i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; if (i915_mmio_reg_valid(mbox_reg)) { u32 seqno = i915_gem_request_get_seqno(signaller_req); @@ -1388,22 +1404,23 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req, static int gen6_add_request(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; - if (ring->semaphore.signal) - ret = ring->semaphore.signal(req, 4); + if (engine->semaphore.signal) + ret = engine->semaphore.signal(req, 4); else ret = intel_ring_begin(req, 4); if (ret) return ret; - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, i915_gem_request_get_seqno(req)); - intel_ring_emit(ring, MI_USER_INTERRUPT); - __intel_ring_advance(ring); + intel_ring_emit(engine, MI_STORE_DWORD_INDEX); + intel_ring_emit(engine, + I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + 
intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, MI_USER_INTERRUPT); + __intel_ring_advance(engine); return 0; } @@ -1428,7 +1445,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req, struct intel_engine_cs *signaller, u32 seqno) { - struct intel_engine_cs *waiter = waiter_req->ring; + struct intel_engine_cs *waiter = waiter_req->engine; struct drm_i915_private *dev_priv = waiter->dev->dev_private; int ret; @@ -1454,7 +1471,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req, struct intel_engine_cs *signaller, u32 seqno) { - struct intel_engine_cs *waiter = waiter_req->ring; + struct intel_engine_cs *waiter = waiter_req->engine; u32 dw1 = MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER; @@ -1502,8 +1519,8 @@ do { \ static int pc_render_add_request(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; - u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; + struct intel_engine_cs *engine = req->engine; + u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; int ret; /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently @@ -1518,78 +1535,87 @@ pc_render_add_request(struct drm_i915_gem_request *req) if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | + intel_ring_emit(engine, + GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); - intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, i915_gem_request_get_seqno(req)); - intel_ring_emit(ring, 0); - PIPE_CONTROL_FLUSH(ring, scratch_addr); + intel_ring_emit(engine, + engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, 0); + PIPE_CONTROL_FLUSH(engine, scratch_addr); scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */ - PIPE_CONTROL_FLUSH(ring, scratch_addr); + PIPE_CONTROL_FLUSH(engine, scratch_addr); scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(ring, scratch_addr); + PIPE_CONTROL_FLUSH(engine, scratch_addr); scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(ring, scratch_addr); + PIPE_CONTROL_FLUSH(engine, scratch_addr); scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(ring, scratch_addr); + PIPE_CONTROL_FLUSH(engine, scratch_addr); scratch_addr += 2 * CACHELINE_BYTES; - PIPE_CONTROL_FLUSH(ring, scratch_addr); + PIPE_CONTROL_FLUSH(engine, scratch_addr); - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | + intel_ring_emit(engine, + GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_WRITE_FLUSH | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | PIPE_CONTROL_NOTIFY); - intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, i915_gem_request_get_seqno(req)); - intel_ring_emit(ring, 0); - __intel_ring_advance(ring); + intel_ring_emit(engine, + engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); + intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, 0); + __intel_ring_advance(engine); return 0; } -static u32 -gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) +static void +gen6_seqno_barrier(struct intel_engine_cs *engine) { /* Workaround to force correct ordering between irq and seqno writes on * ivb (and maybe also on snb) by reading from a CS register (like - * ACTHD) before reading the status page. 
*/ - if (!lazy_coherency) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; - POSTING_READ(RING_ACTHD(ring->mmio_base)); - } - - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + * ACTHD) before reading the status page. + * + * Note that this effectively stalls the read by the time it takes to + * do a memory transaction, which more or less ensures that the write + * from the GPU has sufficient time to invalidate the CPU cacheline. + * Alternatively we could delay the interrupt from the CS ring to give + * the write time to land, but that would incur a delay after every + * batch i.e. much more frequent than a delay when waiting for the + * interrupt (with the same net latency). + */ + struct drm_i915_private *dev_priv = engine->dev->dev_private; + POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); } static u32 -ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) +ring_get_seqno(struct intel_engine_cs *engine) { - return intel_read_status_page(ring, I915_GEM_HWS_INDEX); + return intel_read_status_page(engine, I915_GEM_HWS_INDEX); } static void -ring_set_seqno(struct intel_engine_cs *ring, u32 seqno) +ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) { - intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); + intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno); } static u32 -pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency) +pc_render_get_seqno(struct intel_engine_cs *engine) { - return ring->scratch.cpu_page[0]; + return engine->scratch.cpu_page[0]; } static void -pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno) +pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) { - ring->scratch.cpu_page[0] = seqno; + engine->scratch.cpu_page[0] = seqno; } static bool -gen5_ring_get_irq(struct intel_engine_cs *ring) +gen5_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1597,30 +1623,30 @@ gen5_ring_get_irq(struct intel_engine_cs *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) - gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); + if (engine->irq_refcount++ == 0) + gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); return true; } static void -gen5_ring_put_irq(struct intel_engine_cs *ring) +gen5_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) - gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); + if (--engine->irq_refcount == 0) + gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } static bool -i9xx_ring_get_irq(struct intel_engine_cs *ring) +i9xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1628,8 +1654,8 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~ring->irq_enable_mask; + if (engine->irq_refcount++ == 0) { + dev_priv->irq_mask &= ~engine->irq_enable_mask; I915_WRITE(IMR, 
dev_priv->irq_mask); POSTING_READ(IMR); } @@ -1639,15 +1665,15 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring) } static void -i9xx_ring_put_irq(struct intel_engine_cs *ring) +i9xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - dev_priv->irq_mask |= ring->irq_enable_mask; + if (--engine->irq_refcount == 0) { + dev_priv->irq_mask |= engine->irq_enable_mask; I915_WRITE(IMR, dev_priv->irq_mask); POSTING_READ(IMR); } @@ -1655,9 +1681,9 @@ i9xx_ring_put_irq(struct intel_engine_cs *ring) } static bool -i8xx_ring_get_irq(struct intel_engine_cs *ring) +i8xx_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1665,8 +1691,8 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - dev_priv->irq_mask &= ~ring->irq_enable_mask; + if (engine->irq_refcount++ == 0) { + dev_priv->irq_mask &= ~engine->irq_enable_mask; I915_WRITE16(IMR, dev_priv->irq_mask); POSTING_READ16(IMR); } @@ -1676,15 +1702,15 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring) } static void -i8xx_ring_put_irq(struct intel_engine_cs *ring) +i8xx_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - dev_priv->irq_mask |= ring->irq_enable_mask; + if (--engine->irq_refcount == 0) { + dev_priv->irq_mask |= engine->irq_enable_mask; I915_WRITE16(IMR, dev_priv->irq_mask); POSTING_READ16(IMR); } @@ -1696,42 +1722,43 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 invalidate_domains, u32 flush_domains) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, MI_FLUSH); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_FLUSH); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return 0; } static int i9xx_add_request(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 4); if (ret) return ret; - intel_ring_emit(ring, MI_STORE_DWORD_INDEX); - intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); - intel_ring_emit(ring, i915_gem_request_get_seqno(req)); - intel_ring_emit(ring, MI_USER_INTERRUPT); - __intel_ring_advance(ring); + intel_ring_emit(engine, MI_STORE_DWORD_INDEX); + intel_ring_emit(engine, + I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); + intel_ring_emit(engine, i915_gem_request_get_seqno(req)); + intel_ring_emit(engine, MI_USER_INTERRUPT); + __intel_ring_advance(engine); return 0; } static bool -gen6_ring_get_irq(struct intel_engine_cs *ring) +gen6_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1739,14 +1766,14 @@ gen6_ring_get_irq(struct intel_engine_cs *ring) 
return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && ring->id == RCS) - I915_WRITE_IMR(ring, - ~(ring->irq_enable_mask | + if (engine->irq_refcount++ == 0) { + if (HAS_L3_DPF(dev) && engine->id == RCS) + I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | GT_PARITY_ERROR(dev))); else - I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - gen5_enable_gt_irq(dev_priv, ring->irq_enable_mask); + I915_WRITE_IMR(engine, ~engine->irq_enable_mask); + gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1754,27 +1781,27 @@ gen6_ring_get_irq(struct intel_engine_cs *ring) } static void -gen6_ring_put_irq(struct intel_engine_cs *ring) +gen6_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && ring->id == RCS) - I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); + if (--engine->irq_refcount == 0) { + if (HAS_L3_DPF(dev) && engine->id == RCS) + I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); else - I915_WRITE_IMR(ring, ~0); - gen5_disable_gt_irq(dev_priv, ring->irq_enable_mask); + I915_WRITE_IMR(engine, ~0); + gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } static bool -hsw_vebox_get_irq(struct intel_engine_cs *ring) +hsw_vebox_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1782,9 +1809,9 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - I915_WRITE_IMR(ring, ~ring->irq_enable_mask); - gen6_enable_pm_irq(dev_priv, ring->irq_enable_mask); + if (engine->irq_refcount++ == 0) { + I915_WRITE_IMR(engine, ~engine->irq_enable_mask); + gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1792,24 +1819,24 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring) } static void -hsw_vebox_put_irq(struct intel_engine_cs *ring) +hsw_vebox_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - I915_WRITE_IMR(ring, ~0); - gen6_disable_pm_irq(dev_priv, ring->irq_enable_mask); + if (--engine->irq_refcount == 0) { + I915_WRITE_IMR(engine, ~0); + gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } static bool -gen8_ring_get_irq(struct intel_engine_cs *ring) +gen8_ring_get_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; @@ -1817,15 +1844,15 @@ gen8_ring_get_irq(struct intel_engine_cs *ring) return false; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (ring->irq_refcount++ == 0) { - if (HAS_L3_DPF(dev) && ring->id == RCS) { - I915_WRITE_IMR(ring, - ~(ring->irq_enable_mask | + if (engine->irq_refcount++ == 0) { + if (HAS_L3_DPF(dev) && engine->id == RCS) { + 
I915_WRITE_IMR(engine, + ~(engine->irq_enable_mask | GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); } else { - I915_WRITE_IMR(ring, ~ring->irq_enable_mask); + I915_WRITE_IMR(engine, ~engine->irq_enable_mask); } - POSTING_READ(RING_IMR(ring->mmio_base)); + POSTING_READ(RING_IMR(engine->mmio_base)); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); @@ -1833,21 +1860,21 @@ gen8_ring_get_irq(struct intel_engine_cs *ring) } static void -gen8_ring_put_irq(struct intel_engine_cs *ring) +gen8_ring_put_irq(struct intel_engine_cs *engine) { - struct drm_device *dev = ring->dev; + struct drm_device *dev = engine->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&dev_priv->irq_lock, flags); - if (--ring->irq_refcount == 0) { - if (HAS_L3_DPF(dev) && ring->id == RCS) { - I915_WRITE_IMR(ring, + if (--engine->irq_refcount == 0) { + if (HAS_L3_DPF(dev) && engine->id == RCS) { + I915_WRITE_IMR(engine, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); } else { - I915_WRITE_IMR(ring, ~0); + I915_WRITE_IMR(engine, ~0); } - POSTING_READ(RING_IMR(ring->mmio_base)); + POSTING_READ(RING_IMR(engine->mmio_base)); } spin_unlock_irqrestore(&dev_priv->irq_lock, flags); } @@ -1857,20 +1884,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 length, unsigned dispatch_flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, + intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); - intel_ring_emit(ring, offset); - intel_ring_advance(ring); + intel_ring_emit(engine, offset); + intel_ring_advance(engine); return 0; } @@ -1884,8 +1911,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_engine_cs *ring = req->ring; - u32 cs_offset = ring->scratch.gtt_offset; + struct intel_engine_cs *engine = req->engine; + u32 cs_offset = engine->scratch.gtt_offset; int ret; ret = intel_ring_begin(req, 6); @@ -1893,13 +1920,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, return ret; /* Evict the invalid PTE TLBs */ - intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); - intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); - intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ - intel_ring_emit(ring, cs_offset); - intel_ring_emit(ring, 0xdeadbeef); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); + intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */ + intel_ring_emit(engine, cs_offset); + intel_ring_emit(engine, 0xdeadbeef); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) { if (len > I830_BATCH_LIMIT) @@ -1913,16 +1940,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, * stable batch scratch bo area (so that the CS never * stumbles over its tlb invalidation bug) ... 
*/ - intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); - intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); - intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096); - intel_ring_emit(ring, cs_offset); - intel_ring_emit(ring, 4096); - intel_ring_emit(ring, offset); - - intel_ring_emit(ring, MI_FLUSH); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); + intel_ring_emit(engine, + BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); + intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096); + intel_ring_emit(engine, cs_offset); + intel_ring_emit(engine, 4096); + intel_ring_emit(engine, offset); + + intel_ring_emit(engine, MI_FLUSH); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); /* ... and execute it. */ offset = cs_offset; @@ -1932,10 +1960,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req, if (ret) return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? - 0 : MI_BATCH_NON_SECURE)); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); + intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? + 0 : MI_BATCH_NON_SECURE)); + intel_ring_advance(engine); return 0; } @@ -1945,55 +1973,55 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT); - intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ? - 0 : MI_BATCH_NON_SECURE)); - intel_ring_advance(ring); + intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT); + intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ? 
+ 0 : MI_BATCH_NON_SECURE)); + intel_ring_advance(engine); return 0; } -static void cleanup_phys_status_page(struct intel_engine_cs *ring) +static void cleanup_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = to_i915(ring->dev); + struct drm_i915_private *dev_priv = to_i915(engine->dev); if (!dev_priv->status_page_dmah) return; - drm_pci_free(ring->dev, dev_priv->status_page_dmah); - ring->status_page.page_addr = NULL; + drm_pci_free(engine->dev, dev_priv->status_page_dmah); + engine->status_page.page_addr = NULL; } -static void cleanup_status_page(struct intel_engine_cs *ring) +static void cleanup_status_page(struct intel_engine_cs *engine) { struct drm_i915_gem_object *obj; - obj = ring->status_page.obj; + obj = engine->status_page.obj; if (obj == NULL) return; kunmap(sg_page(obj->pages->sgl)); i915_gem_object_ggtt_unpin(obj); drm_gem_object_unreference(&obj->base); - ring->status_page.obj = NULL; + engine->status_page.obj = NULL; } -static int init_status_page(struct intel_engine_cs *ring) +static int init_status_page(struct intel_engine_cs *engine) { - struct drm_i915_gem_object *obj = ring->status_page.obj; + struct drm_i915_gem_object *obj = engine->status_page.obj; if (obj == NULL) { unsigned flags; int ret; - obj = i915_gem_alloc_object(ring->dev, 4096); + obj = i915_gem_alloc_object(engine->dev, 4096); if (obj == NULL) { DRM_ERROR("Failed to allocate status page\n"); return -ENOMEM; @@ -2004,7 +2032,7 @@ static int init_status_page(struct intel_engine_cs *ring) goto err_unref; flags = 0; - if (!HAS_LLC(ring->dev)) + if (!HAS_LLC(engine->dev)) /* On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. * Though this restriction is not documented for @@ -2023,32 +2051,32 @@ err_unref: return ret; } - ring->status_page.obj = obj; + engine->status_page.obj = obj; } - ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); - ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); - memset(ring->status_page.page_addr, 0, PAGE_SIZE); + engine->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj); + engine->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); + memset(engine->status_page.page_addr, 0, PAGE_SIZE); DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", - ring->name, ring->status_page.gfx_addr); + engine->name, engine->status_page.gfx_addr); return 0; } -static int init_phys_status_page(struct intel_engine_cs *ring) +static int init_phys_status_page(struct intel_engine_cs *engine) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; if (!dev_priv->status_page_dmah) { dev_priv->status_page_dmah = - drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE); + drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); if (!dev_priv->status_page_dmah) return -ENOMEM; } - ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; - memset(ring->status_page.page_addr, 0, PAGE_SIZE); + engine->status_page.page_addr = dev_priv->status_page_dmah->vaddr; + memset(engine->status_page.page_addr, 0, PAGE_SIZE); return 0; } @@ -2056,39 +2084,18 @@ static int init_phys_status_page(struct intel_engine_cs *ring) void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) { if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) - vunmap(ringbuf->virtual_start); + i915_gem_object_unpin_map(ringbuf->obj); else iounmap(ringbuf->virtual_start); - ringbuf->virtual_start = NULL; ringbuf->vma = NULL; i915_gem_object_ggtt_unpin(ringbuf->obj); } 
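For reference, the mapping helper deleted just below reduces to a simple pattern: gather every backing page of the object from its scatterlist into a temporary array, vmap() that array into one contiguous kernel virtual range, then free the array once the mapping exists. i915_gem_object_pin_map() now centralizes this logic for the LLC path. The sketch restates the removed vmap_obj() under a hypothetical name; it is illustrative, not new API:

static void *map_whole_object(struct drm_i915_gem_object *obj)
{
	struct sg_page_iter sg_iter;
	struct page **pages;
	void *addr;
	int i = 0;

	/* one struct page pointer per backing page of the object */
	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
	if (pages == NULL)
		return NULL;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
		pages[i++] = sg_page_iter_page(&sg_iter);

	/* one contiguous kernel VA range over all pages; NULL on failure */
	addr = vmap(pages, i, 0, PAGE_KERNEL);
	drm_free_large(pages);	/* the array is only needed to build the map */

	return addr;
}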
-static u32 *vmap_obj(struct drm_i915_gem_object *obj) -{ - struct sg_page_iter sg_iter; - struct page **pages; - void *addr; - int i; - - pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages)); - if (pages == NULL) - return NULL; - - i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) - pages[i++] = sg_page_iter_page(&sg_iter); - - addr = vmap(pages, i, 0, PAGE_KERNEL); - drm_free_large(pages); - - return addr; -} - int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, struct intel_ringbuffer *ringbuf) { struct drm_i915_private *dev_priv = to_i915(dev); + struct i915_ggtt *ggtt = &dev_priv->ggtt; struct drm_i915_gem_object *obj = ringbuf->obj; int ret; @@ -2098,15 +2105,13 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, return ret; ret = i915_gem_object_set_to_cpu_domain(obj, true); - if (ret) { - i915_gem_object_ggtt_unpin(obj); - return ret; - } + if (ret) + goto err_unpin; - ringbuf->virtual_start = vmap_obj(obj); + ringbuf->virtual_start = i915_gem_object_pin_map(obj); if (ringbuf->virtual_start == NULL) { - i915_gem_object_ggtt_unpin(obj); - return -ENOMEM; + ret = -ENOMEM; + goto err_unpin; } } else { ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE); @@ -2114,25 +2119,26 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, return ret; ret = i915_gem_object_set_to_gtt_domain(obj, true); - if (ret) { - i915_gem_object_ggtt_unpin(obj); - return ret; - } + if (ret) + goto err_unpin; /* Access through the GTT requires the device to be awake. */ assert_rpm_wakelock_held(dev_priv); - ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base + + ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), ringbuf->size); if (ringbuf->virtual_start == NULL) { - i915_gem_object_ggtt_unpin(obj); - return -EINVAL; + ret = -ENOMEM; + goto err_unpin; } } ringbuf->vma = i915_gem_obj_to_ggtt(obj); - return 0; + +err_unpin: + i915_gem_object_ggtt_unpin(obj); + return ret; } static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) @@ -2175,7 +2181,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size) return ERR_PTR(-ENOMEM); } - ring->ring = engine; + ring->engine = engine; list_add(&ring->link, &engine->buffers); ring->size = size; @@ -2211,37 +2217,38 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring) } static int intel_init_ring_buffer(struct drm_device *dev, - struct intel_engine_cs *ring) + struct intel_engine_cs *engine) { struct intel_ringbuffer *ringbuf; int ret; - WARN_ON(ring->buffer); + WARN_ON(engine->buffer); - ring->dev = dev; - INIT_LIST_HEAD(&ring->active_list); - INIT_LIST_HEAD(&ring->request_list); - INIT_LIST_HEAD(&ring->execlist_queue); - INIT_LIST_HEAD(&ring->buffers); - i915_gem_batch_pool_init(dev, &ring->batch_pool); - memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); + engine->dev = dev; + INIT_LIST_HEAD(&engine->active_list); + INIT_LIST_HEAD(&engine->request_list); + INIT_LIST_HEAD(&engine->execlist_queue); + INIT_LIST_HEAD(&engine->buffers); + i915_gem_batch_pool_init(dev, &engine->batch_pool); + memset(engine->semaphore.sync_seqno, 0, + sizeof(engine->semaphore.sync_seqno)); - init_waitqueue_head(&ring->irq_queue); + init_waitqueue_head(&engine->irq_queue); - ringbuf = intel_engine_create_ringbuffer(ring, 32 * PAGE_SIZE); + ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); if (IS_ERR(ringbuf)) { ret = PTR_ERR(ringbuf); goto error; } - ring->buffer = ringbuf; + engine->buffer 
= ringbuf; if (I915_NEED_GFX_HWS(dev)) { - ret = init_status_page(ring); + ret = init_status_page(engine); if (ret) goto error; } else { - WARN_ON(ring->id != RCS); - ret = init_phys_status_page(ring); + WARN_ON(engine->id != RCS); + ret = init_phys_status_page(engine); if (ret) goto error; } @@ -2249,58 +2256,58 @@ static int intel_init_ring_buffer(struct drm_device *dev, ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); if (ret) { DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", - ring->name, ret); + engine->name, ret); intel_destroy_ringbuffer_obj(ringbuf); goto error; } - ret = i915_cmd_parser_init_ring(ring); + ret = i915_cmd_parser_init_ring(engine); if (ret) goto error; return 0; error: - intel_cleanup_ring_buffer(ring); + intel_cleanup_engine(engine); return ret; } -void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) +void intel_cleanup_engine(struct intel_engine_cs *engine) { struct drm_i915_private *dev_priv; - if (!intel_ring_initialized(ring)) + if (!intel_engine_initialized(engine)) return; - dev_priv = to_i915(ring->dev); + dev_priv = to_i915(engine->dev); - if (ring->buffer) { - intel_stop_ring_buffer(ring); - WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); + if (engine->buffer) { + intel_stop_engine(engine); + WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); - intel_unpin_ringbuffer_obj(ring->buffer); - intel_ringbuffer_free(ring->buffer); - ring->buffer = NULL; + intel_unpin_ringbuffer_obj(engine->buffer); + intel_ringbuffer_free(engine->buffer); + engine->buffer = NULL; } - if (ring->cleanup) - ring->cleanup(ring); + if (engine->cleanup) + engine->cleanup(engine); - if (I915_NEED_GFX_HWS(ring->dev)) { - cleanup_status_page(ring); + if (I915_NEED_GFX_HWS(engine->dev)) { + cleanup_status_page(engine); } else { - WARN_ON(ring->id != RCS); - cleanup_phys_status_page(ring); + WARN_ON(engine->id != RCS); + cleanup_phys_status_page(engine); } - i915_cmd_parser_fini_ring(ring); - i915_gem_batch_pool_fini(&ring->batch_pool); - ring->dev = NULL; + i915_cmd_parser_fini_ring(engine); + i915_gem_batch_pool_fini(&engine->batch_pool); + engine->dev = NULL; } -static int ring_wait_for_space(struct intel_engine_cs *ring, int n) +static int ring_wait_for_space(struct intel_engine_cs *engine, int n) { - struct intel_ringbuffer *ringbuf = ring->buffer; + struct intel_ringbuffer *ringbuf = engine->buffer; struct drm_i915_gem_request *request; unsigned space; int ret; @@ -2311,14 +2318,14 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n) /* The whole point of reserving space is to not wait! 
*/ WARN_ON(ringbuf->reserved_in_use); - list_for_each_entry(request, &ring->request_list, list) { + list_for_each_entry(request, &engine->request_list, list) { space = __intel_ring_space(request->postfix, ringbuf->tail, ringbuf->size); if (space >= n) break; } - if (WARN_ON(&request->list == &ring->request_list)) + if (WARN_ON(&request->list == &engine->request_list)) return -ENOSPC; ret = i915_wait_request(request); @@ -2343,28 +2350,28 @@ static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) intel_ring_update_space(ringbuf); } -int intel_ring_idle(struct intel_engine_cs *ring) +int intel_engine_idle(struct intel_engine_cs *engine) { struct drm_i915_gem_request *req; /* Wait upon the last request to be completed */ - if (list_empty(&ring->request_list)) + if (list_empty(&engine->request_list)) return 0; - req = list_entry(ring->request_list.prev, - struct drm_i915_gem_request, - list); + req = list_entry(engine->request_list.prev, + struct drm_i915_gem_request, + list); /* Make sure we do not trigger any retires */ return __i915_wait_request(req, - atomic_read(&to_i915(ring->dev)->gpu_error.reset_counter), - to_i915(ring->dev)->mm.interruptible, + atomic_read(&to_i915(engine->dev)->gpu_error.reset_counter), + to_i915(engine->dev)->mm.interruptible, NULL, NULL); } int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) { - request->ringbuf = request->ring->buffer; + request->ringbuf = request->engine->buffer; return 0; } @@ -2430,9 +2437,9 @@ void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) ringbuf->reserved_in_use = false; } -static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) +static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) { - struct intel_ringbuffer *ringbuf = ring->buffer; + struct intel_ringbuffer *ringbuf = engine->buffer; int remain_usable = ringbuf->effective_size - ringbuf->tail; int remain_actual = ringbuf->size - ringbuf->tail; int ret, total_bytes, wait_bytes = 0; @@ -2454,11 +2461,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) if (unlikely(total_bytes > remain_usable)) { /* * The base request will fit but the reserved space - * falls off the end. So only need to to wait for the - * reserved size after flushing out the remainder. + * falls off the end. So we don't need an immediate wrap; + * we only need to wait for the reserved space to become + * available from the start of the ringbuffer. */ wait_bytes = remain_actual + ringbuf->reserved_size; - need_wrap = true; } else if (total_bytes > ringbuf->space) { /* No wrapping required, just waiting.
*/ wait_bytes = total_bytes; @@ -2466,7 +2473,7 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) } if (wait_bytes) { - ret = ring_wait_for_space(ring, wait_bytes); + ret = ring_wait_for_space(engine, wait_bytes); if (unlikely(ret)) return ret; @@ -2480,32 +2487,32 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes) int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) { - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct drm_i915_private *dev_priv; int ret; WARN_ON(req == NULL); - ring = req->ring; - dev_priv = ring->dev->dev_private; + engine = req->engine; + dev_priv = req->i915; ret = i915_gem_check_wedge(&dev_priv->gpu_error, dev_priv->mm.interruptible); if (ret) return ret; - ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); + ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); if (ret) return ret; - ring->buffer->space -= num_dwords * sizeof(uint32_t); + engine->buffer->space -= num_dwords * sizeof(uint32_t); return 0; } /* Align the ring tail to a cacheline boundary */ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; - int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); + struct intel_engine_cs *engine = req->engine; + int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); int ret; if (num_dwords == 0) @@ -2517,33 +2524,52 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req) return ret; while (num_dwords--) - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, MI_NOOP); - intel_ring_advance(ring); + intel_ring_advance(engine); return 0; } -void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno) +void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) { - struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = to_i915(engine->dev); - if (INTEL_INFO(dev)->gen == 6 || INTEL_INFO(dev)->gen == 7) { - I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); - I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); - if (HAS_VEBOX(dev)) - I915_WRITE(RING_SYNC_2(ring->mmio_base), 0); + /* Our semaphore implementation is strictly monotonic (i.e. we proceed + * so long as the semaphore value in the register/page is greater + * than the sync value), so whenever we reset the seqno, + * so long as we reset the tracking semaphore value to 0, it will + * always be before the next request's seqno. If we don't reset + * the semaphore value, then when the seqno moves backwards all + * future waits will complete instantly (causing rendering corruption). 
+ */ + if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { + I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); + I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); + if (HAS_VEBOX(dev_priv)) + I915_WRITE(RING_SYNC_2(engine->mmio_base), 0); + } + if (dev_priv->semaphore_obj) { + struct drm_i915_gem_object *obj = dev_priv->semaphore_obj; + struct page *page = i915_gem_object_get_dirty_page(obj, 0); + void *semaphores = kmap(page); + memset(semaphores + GEN8_SEMAPHORE_OFFSET(engine->id, 0), + 0, I915_NUM_ENGINES * gen8_semaphore_seqno_size); + kunmap(page); } + memset(engine->semaphore.sync_seqno, 0, + sizeof(engine->semaphore.sync_seqno)); + + engine->set_seqno(engine, seqno); + engine->last_submitted_seqno = seqno; - ring->set_seqno(ring, seqno); - ring->hangcheck.seqno = seqno; + engine->hangcheck.seqno = seqno; } -static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, +static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, u32 value) { - struct drm_i915_private *dev_priv = ring->dev->dev_private; + struct drm_i915_private *dev_priv = engine->dev->dev_private; /* Every tail move must follow the sequence below */ @@ -2563,8 +2589,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); /* Now that the ring is fully powered up, update the tail */ - I915_WRITE_TAIL(ring, value); - POSTING_READ(RING_TAIL(ring->mmio_base)); + I915_WRITE_TAIL(engine, value); + POSTING_READ(RING_TAIL(engine->mmio_base)); /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. @@ -2576,7 +2602,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring, static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; uint32_t cmd; int ret; @@ -2585,7 +2611,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(ring->dev)->gen >= 8) + if (INTEL_INFO(engine->dev)->gen >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2604,16 +2630,17 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, if (invalidate & I915_GEM_GPU_DOMAINS) cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD; - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(ring->dev)->gen >= 8) { - intel_ring_emit(ring, 0); /* upper addr */ - intel_ring_emit(ring, 0); /* value */ + intel_ring_emit(engine, cmd); + intel_ring_emit(engine, + I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + if (INTEL_INFO(engine->dev)->gen >= 8) { + intel_ring_emit(engine, 0); /* upper addr */ + intel_ring_emit(engine, 0); /* value */ } else { - intel_ring_emit(ring, 0); - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, MI_NOOP); } - intel_ring_advance(ring); + intel_ring_advance(engine); return 0; } @@ -2622,8 +2649,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_engine_cs *ring = req->ring; - bool ppgtt = USES_PPGTT(ring->dev) && + struct intel_engine_cs *engine = req->engine; + bool ppgtt = USES_PPGTT(engine->dev) && !(dispatch_flags & I915_DISPATCH_SECURE); int ret; @@ -2632,13 +2659,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, return ret; /* FIXME(BDW): Address space and 
security selectors. */ - intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | + intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) | (dispatch_flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0)); - intel_ring_emit(ring, lower_32_bits(offset)); - intel_ring_emit(ring, upper_32_bits(offset)); - intel_ring_emit(ring, MI_NOOP); - intel_ring_advance(ring); + intel_ring_emit(engine, lower_32_bits(offset)); + intel_ring_emit(engine, upper_32_bits(offset)); + intel_ring_emit(engine, MI_NOOP); + intel_ring_advance(engine); return 0; } @@ -2648,22 +2675,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, + intel_ring_emit(engine, MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) | (dispatch_flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0)); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(ring, offset); - intel_ring_advance(ring); + intel_ring_emit(engine, offset); + intel_ring_advance(engine); return 0; } @@ -2673,20 +2700,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, u64 offset, u32 len, unsigned dispatch_flags) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; ret = intel_ring_begin(req, 2); if (ret) return ret; - intel_ring_emit(ring, + intel_ring_emit(engine, MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965)); /* bit0-7 is the length on GEN6+ */ - intel_ring_emit(ring, offset); - intel_ring_advance(ring); + intel_ring_emit(engine, offset); + intel_ring_advance(engine); return 0; } @@ -2696,8 +2723,8 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req, static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 invalidate, u32 flush) { - struct intel_engine_cs *ring = req->ring; - struct drm_device *dev = ring->dev; + struct intel_engine_cs *engine = req->engine; + struct drm_device *dev = engine->dev; uint32_t cmd; int ret; @@ -2724,16 +2751,17 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, */ if (invalidate & I915_GEM_DOMAIN_RENDER) cmd |= MI_INVALIDATE_TLB; - intel_ring_emit(ring, cmd); - intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); + intel_ring_emit(engine, cmd); + intel_ring_emit(engine, + I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); if (INTEL_INFO(dev)->gen >= 8) { - intel_ring_emit(ring, 0); /* upper addr */ - intel_ring_emit(ring, 0); /* value */ + intel_ring_emit(engine, 0); /* upper addr */ + intel_ring_emit(engine, 0); /* value */ } else { - intel_ring_emit(ring, 0); - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(engine, 0); + intel_ring_emit(engine, MI_NOOP); } - intel_ring_advance(ring); + intel_ring_advance(engine); return 0; } @@ -2741,14 +2769,14 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, int intel_init_render_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; + struct intel_engine_cs *engine = &dev_priv->engine[RCS]; struct drm_i915_gem_object *obj; int ret; - ring->name = "render ring"; - ring->id = RCS; - ring->exec_id = I915_EXEC_RENDER; - ring->mmio_base = RENDER_RING_BASE; + engine->name = "render ring"; + engine->id = 
RCS; + engine->exec_id = I915_EXEC_RENDER; + engine->mmio_base = RENDER_RING_BASE; if (INTEL_INFO(dev)->gen >= 8) { if (i915_semaphore_is_enabled(dev)) { @@ -2768,34 +2796,36 @@ int intel_init_render_ring_buffer(struct drm_device *dev) } } - ring->init_context = intel_rcs_ctx_init; - ring->add_request = gen6_add_request; - ring->flush = gen8_render_ring_flush; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - ring->get_seqno = gen6_ring_get_seqno; - ring->set_seqno = ring_set_seqno; + engine->init_context = intel_rcs_ctx_init; + engine->add_request = gen6_add_request; + engine->flush = gen8_render_ring_flush; + engine->irq_get = gen8_ring_get_irq; + engine->irq_put = gen8_ring_put_irq; + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + engine->irq_seqno_barrier = gen6_seqno_barrier; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (i915_semaphore_is_enabled(dev)) { WARN_ON(!dev_priv->semaphore_obj); - ring->semaphore.sync_to = gen8_ring_sync; - ring->semaphore.signal = gen8_rcs_signal; - GEN8_RING_SEMAPHORE_INIT; + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_rcs_signal; + GEN8_RING_SEMAPHORE_INIT(engine); } } else if (INTEL_INFO(dev)->gen >= 6) { - ring->init_context = intel_rcs_ctx_init; - ring->add_request = gen6_add_request; - ring->flush = gen7_render_ring_flush; + engine->init_context = intel_rcs_ctx_init; + engine->add_request = gen6_add_request; + engine->flush = gen7_render_ring_flush; if (INTEL_INFO(dev)->gen == 6) - ring->flush = gen6_render_ring_flush; - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT; - ring->get_seqno = gen6_ring_get_seqno; - ring->set_seqno = ring_set_seqno; + engine->flush = gen6_render_ring_flush; + engine->irq_get = gen6_ring_get_irq; + engine->irq_put = gen6_ring_put_irq; + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT; + engine->irq_seqno_barrier = gen6_seqno_barrier; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; + engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; /* * The current semaphore is only applied on pre-gen8 * platform. And there is no VCS2 ring on the pre-gen8 @@ -2803,59 +2833,59 @@ int intel_init_render_ring_buffer(struct drm_device *dev) * initialized as INVALID. Gen8 will initialize the * sema between VCS2 and RCS later. 
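
The wait[]/signal[] assignments that follow encode the pre-gen8 mailbox
matrix: the MI_SEMAPHORE_SYNC_* selector one engine uses to wait on a peer
pairs with the GEN6_*SYNC register that peer signals, and the diagonal is
never used. A hypothetical sanity check, shown only to make that shape
explicit (not part of the patch):

    static void check_gen6_semaphore_table(struct intel_engine_cs *engine)
    {
            /* An engine never waits on itself, and VCS2 does not exist
             * before gen8, so both slots must stay invalid.
             */
            WARN_ON(engine->semaphore.mbox.wait[engine->id] !=
                    MI_SEMAPHORE_SYNC_INVALID);
            WARN_ON(engine->semaphore.mbox.wait[VCS2] !=
                    MI_SEMAPHORE_SYNC_INVALID);
    }
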
*/ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; + engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; + engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; + engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; + engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; + engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; + engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; + engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } } else if (IS_GEN5(dev)) { - ring->add_request = pc_render_add_request; - ring->flush = gen4_render_ring_flush; - ring->get_seqno = pc_render_get_seqno; - ring->set_seqno = pc_render_set_seqno; - ring->irq_get = gen5_ring_get_irq; - ring->irq_put = gen5_ring_put_irq; - ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT | + engine->add_request = pc_render_add_request; + engine->flush = gen4_render_ring_flush; + engine->get_seqno = pc_render_get_seqno; + engine->set_seqno = pc_render_set_seqno; + engine->irq_get = gen5_ring_get_irq; + engine->irq_put = gen5_ring_put_irq; + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT; } else { - ring->add_request = i9xx_add_request; + engine->add_request = i9xx_add_request; if (INTEL_INFO(dev)->gen < 4) - ring->flush = gen2_render_ring_flush; + engine->flush = gen2_render_ring_flush; else - ring->flush = gen4_render_ring_flush; - ring->get_seqno = ring_get_seqno; - ring->set_seqno = ring_set_seqno; + engine->flush = gen4_render_ring_flush; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (IS_GEN2(dev)) { - ring->irq_get = i8xx_ring_get_irq; - ring->irq_put = i8xx_ring_put_irq; + engine->irq_get = i8xx_ring_get_irq; + engine->irq_put = i8xx_ring_put_irq; } else { - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; + engine->irq_get = i9xx_ring_get_irq; + engine->irq_put = i9xx_ring_put_irq; } - ring->irq_enable_mask = I915_USER_INTERRUPT; + engine->irq_enable_mask = I915_USER_INTERRUPT; } - ring->write_tail = ring_write_tail; + engine->write_tail = ring_write_tail; if (IS_HASWELL(dev)) - ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; + engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; else if (IS_GEN8(dev)) - ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 6) - ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; else if (INTEL_INFO(dev)->gen >= 4) - ring->dispatch_execbuffer = i965_dispatch_execbuffer; + engine->dispatch_execbuffer = i965_dispatch_execbuffer; else if (IS_I830(dev) || IS_845G(dev)) - ring->dispatch_execbuffer = i830_dispatch_execbuffer; + engine->dispatch_execbuffer = i830_dispatch_execbuffer; else - ring->dispatch_execbuffer = i915_dispatch_execbuffer; 
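
All of the hooks wired up here are consumed by one emission pattern that
the rename leaves intact: reserve space against the request, write dwords
through the engine, then advance the tail. A minimal sketch using only
helpers visible in this patch (hypothetical function, error handling
trimmed):

    static int emit_noops(struct drm_i915_gem_request *req, int count)
    {
            struct intel_engine_cs *engine = req->engine;
            int ret;

            /* Reserve 'count' dwords in the ring, waiting for space
             * if the ring is full.
             */
            ret = intel_ring_begin(req, count);
            if (ret)
                    return ret;

            while (count--)
                    intel_ring_emit(engine, MI_NOOP);
            intel_ring_advance(engine);

            return 0;
    }
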
- ring->init_hw = init_render_ring; - ring->cleanup = render_ring_cleanup; + engine->dispatch_execbuffer = i915_dispatch_execbuffer; + engine->init_hw = init_render_ring; + engine->cleanup = render_ring_cleanup; /* Workaround batchbuffer to combat CS tlb bug. */ if (HAS_BROKEN_CS_TLB(dev)) { @@ -2872,16 +2902,16 @@ int intel_init_render_ring_buffer(struct drm_device *dev) return ret; } - ring->scratch.obj = obj; - ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); + engine->scratch.obj = obj; + engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); } - ret = intel_init_ring_buffer(dev, ring); + ret = intel_init_ring_buffer(dev, engine); if (ret) return ret; if (INTEL_INFO(dev)->gen >= 5) { - ret = intel_init_pipe_control(ring); + ret = intel_init_pipe_control(engine); if (ret) return ret; } @@ -2892,75 +2922,76 @@ int intel_init_render_ring_buffer(struct drm_device *dev) int intel_init_bsd_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[VCS]; + struct intel_engine_cs *engine = &dev_priv->engine[VCS]; - ring->name = "bsd ring"; - ring->id = VCS; - ring->exec_id = I915_EXEC_BSD; + engine->name = "bsd ring"; + engine->id = VCS; + engine->exec_id = I915_EXEC_BSD; - ring->write_tail = ring_write_tail; + engine->write_tail = ring_write_tail; if (INTEL_INFO(dev)->gen >= 6) { - ring->mmio_base = GEN6_BSD_RING_BASE; + engine->mmio_base = GEN6_BSD_RING_BASE; /* gen6 bsd needs a special wa for tail updates */ if (IS_GEN6(dev)) - ring->write_tail = gen6_bsd_ring_write_tail; - ring->flush = gen6_bsd_ring_flush; - ring->add_request = gen6_add_request; - ring->get_seqno = gen6_ring_get_seqno; - ring->set_seqno = ring_set_seqno; + engine->write_tail = gen6_bsd_ring_write_tail; + engine->flush = gen6_bsd_ring_flush; + engine->add_request = gen6_add_request; + engine->irq_seqno_barrier = gen6_seqno_barrier; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (INTEL_INFO(dev)->gen >= 8) { - ring->irq_enable_mask = + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - ring->dispatch_execbuffer = + engine->irq_get = gen8_ring_get_irq; + engine->irq_put = gen8_ring_put_irq; + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen8_ring_sync; - ring->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT; + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT(engine); } } else { - ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - ring->dispatch_execbuffer = + engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; + engine->irq_get = gen6_ring_get_irq; + engine->irq_put = gen6_ring_put_irq; + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; - 
ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; + engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR; + engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB; + engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE; + engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC; + engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC; + engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC; + engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC; + engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } } } else { - ring->mmio_base = BSD_RING_BASE; - ring->flush = bsd_ring_flush; - ring->add_request = i9xx_add_request; - ring->get_seqno = ring_get_seqno; - ring->set_seqno = ring_set_seqno; + engine->mmio_base = BSD_RING_BASE; + engine->flush = bsd_ring_flush; + engine->add_request = i9xx_add_request; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (IS_GEN5(dev)) { - ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT; - ring->irq_get = gen5_ring_get_irq; - ring->irq_put = gen5_ring_put_irq; + engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; + engine->irq_get = gen5_ring_get_irq; + engine->irq_put = gen5_ring_put_irq; } else { - ring->irq_enable_mask = I915_BSD_USER_INTERRUPT; - ring->irq_get = i9xx_ring_get_irq; - ring->irq_put = i9xx_ring_put_irq; + engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; + engine->irq_get = i9xx_ring_get_irq; + engine->irq_put = i9xx_ring_put_irq; } - ring->dispatch_execbuffer = i965_dispatch_execbuffer; + engine->dispatch_execbuffer = i965_dispatch_execbuffer; } - ring->init_hw = init_ring_common; + engine->init_hw = init_ring_common; - return intel_init_ring_buffer(dev, ring); + return intel_init_ring_buffer(dev, engine); } /** @@ -2969,68 +3000,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev) int intel_init_bsd2_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[VCS2]; - - ring->name = "bsd2 ring"; - ring->id = VCS2; - ring->exec_id = I915_EXEC_BSD; - - ring->write_tail = ring_write_tail; - ring->mmio_base = GEN8_BSD2_RING_BASE; - ring->flush = gen6_bsd_ring_flush; - ring->add_request = gen6_add_request; - ring->get_seqno = gen6_ring_get_seqno; - ring->set_seqno = ring_set_seqno; - ring->irq_enable_mask = + struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; + + engine->name = "bsd2 ring"; + engine->id = VCS2; + engine->exec_id = I915_EXEC_BSD; + + engine->write_tail = ring_write_tail; + engine->mmio_base = GEN8_BSD2_RING_BASE; + engine->flush = gen6_bsd_ring_flush; + engine->add_request = gen6_add_request; + engine->irq_seqno_barrier = gen6_seqno_barrier; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - ring->dispatch_execbuffer = + engine->irq_get = gen8_ring_get_irq; + engine->irq_put = gen8_ring_put_irq; + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen8_ring_sync; - ring->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT; + engine->semaphore.sync_to = 
gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT(engine); } - ring->init_hw = init_ring_common; + engine->init_hw = init_ring_common; - return intel_init_ring_buffer(dev, ring); + return intel_init_ring_buffer(dev, engine); } int intel_init_blt_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[BCS]; - - ring->name = "blitter ring"; - ring->id = BCS; - ring->exec_id = I915_EXEC_BLT; - - ring->mmio_base = BLT_RING_BASE; - ring->write_tail = ring_write_tail; - ring->flush = gen6_ring_flush; - ring->add_request = gen6_add_request; - ring->get_seqno = gen6_ring_get_seqno; - ring->set_seqno = ring_set_seqno; + struct intel_engine_cs *engine = &dev_priv->engine[BCS]; + + engine->name = "blitter ring"; + engine->id = BCS; + engine->exec_id = I915_EXEC_BLT; + + engine->mmio_base = BLT_RING_BASE; + engine->write_tail = ring_write_tail; + engine->flush = gen6_ring_flush; + engine->add_request = gen6_add_request; + engine->irq_seqno_barrier = gen6_seqno_barrier; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (INTEL_INFO(dev)->gen >= 8) { - ring->irq_enable_mask = + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + engine->irq_get = gen8_ring_get_irq; + engine->irq_put = gen8_ring_put_irq; + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen8_ring_sync; - ring->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT; + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT(engine); } } else { - ring->irq_enable_mask = GT_BLT_USER_INTERRUPT; - ring->irq_get = gen6_ring_get_irq; - ring->irq_put = gen6_ring_put_irq; - ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; + engine->irq_get = gen6_ring_get_irq; + engine->irq_put = gen6_ring_put_irq; + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.signal = gen6_signal; - ring->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; + engine->semaphore.sync_to = gen6_ring_sync; /* * The current semaphore is only applied on pre-gen8 * platform. And there is no VCS2 ring on the pre-gen8 @@ -3038,127 +3071,128 @@ int intel_init_blt_ring_buffer(struct drm_device *dev) * initialized as INVALID. Gen8 will initialize the * sema between BCS and VCS2 later. 
*/ - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; - ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR; + engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV; + engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE; + engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC; + engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC; + engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC; + engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC; + engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } } - ring->init_hw = init_ring_common; + engine->init_hw = init_ring_common; - return intel_init_ring_buffer(dev, ring); + return intel_init_ring_buffer(dev, engine); } int intel_init_vebox_ring_buffer(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[VECS]; + struct intel_engine_cs *engine = &dev_priv->engine[VECS]; - ring->name = "video enhancement ring"; - ring->id = VECS; - ring->exec_id = I915_EXEC_VEBOX; + engine->name = "video enhancement ring"; + engine->id = VECS; + engine->exec_id = I915_EXEC_VEBOX; - ring->mmio_base = VEBOX_RING_BASE; - ring->write_tail = ring_write_tail; - ring->flush = gen6_ring_flush; - ring->add_request = gen6_add_request; - ring->get_seqno = gen6_ring_get_seqno; - ring->set_seqno = ring_set_seqno; + engine->mmio_base = VEBOX_RING_BASE; + engine->write_tail = ring_write_tail; + engine->flush = gen6_ring_flush; + engine->add_request = gen6_add_request; + engine->irq_seqno_barrier = gen6_seqno_barrier; + engine->get_seqno = ring_get_seqno; + engine->set_seqno = ring_set_seqno; if (INTEL_INFO(dev)->gen >= 8) { - ring->irq_enable_mask = + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; - ring->irq_get = gen8_ring_get_irq; - ring->irq_put = gen8_ring_put_irq; - ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; + engine->irq_get = gen8_ring_get_irq; + engine->irq_put = gen8_ring_put_irq; + engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen8_ring_sync; - ring->semaphore.signal = gen8_xcs_signal; - GEN8_RING_SEMAPHORE_INIT; + engine->semaphore.sync_to = gen8_ring_sync; + engine->semaphore.signal = gen8_xcs_signal; + GEN8_RING_SEMAPHORE_INIT(engine); } } else { - ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; - ring->irq_get = hsw_vebox_get_irq; - ring->irq_put = hsw_vebox_put_irq; - ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; + engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; + engine->irq_get = hsw_vebox_get_irq; + engine->irq_put = hsw_vebox_put_irq; + engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer; if (i915_semaphore_is_enabled(dev)) { - ring->semaphore.sync_to = gen6_ring_sync; - ring->semaphore.signal = gen6_signal; - ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; - ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; - 
ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; - ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; - ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; - ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; - ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; - ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; - ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; + engine->semaphore.sync_to = gen6_ring_sync; + engine->semaphore.signal = gen6_signal; + engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER; + engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV; + engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB; + engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; + engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC; + engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC; + engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC; + engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC; + engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC; } } - ring->init_hw = init_ring_common; + engine->init_hw = init_ring_common; - return intel_init_ring_buffer(dev, ring); + return intel_init_ring_buffer(dev, engine); } int intel_ring_flush_all_caches(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; int ret; - if (!ring->gpu_caches_dirty) + if (!engine->gpu_caches_dirty) return 0; - ret = ring->flush(req, 0, I915_GEM_GPU_DOMAINS); + ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS); if (ret) return ret; trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS); - ring->gpu_caches_dirty = false; + engine->gpu_caches_dirty = false; return 0; } int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req) { - struct intel_engine_cs *ring = req->ring; + struct intel_engine_cs *engine = req->engine; uint32_t flush_domains; int ret; flush_domains = 0; - if (ring->gpu_caches_dirty) + if (engine->gpu_caches_dirty) flush_domains = I915_GEM_GPU_DOMAINS; - ret = ring->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); + ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains); if (ret) return ret; trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains); - ring->gpu_caches_dirty = false; + engine->gpu_caches_dirty = false; return 0; } void -intel_stop_ring_buffer(struct intel_engine_cs *ring) +intel_stop_engine(struct intel_engine_cs *engine) { int ret; - if (!intel_ring_initialized(ring)) + if (!intel_engine_initialized(engine)) return; - ret = intel_ring_idle(ring); - if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error)) + ret = intel_engine_idle(engine); + if (ret && !i915_reset_in_progress(&to_i915(engine->dev)->gpu_error)) DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", - ring->name, ret); + engine->name, ret); - stop_ring(ring); + stop_ring(engine); } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 566b0ae10ce0..2ade194bbea9 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -52,34 +52,32 @@ struct intel_hw_status_page { /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. 
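
The GEN8_SEMAPHORE_OFFSET macro introduced just below flattens the
(from, to) engine pairs into 8-byte cells. A worked example of the
arithmetic, taking the enum order RCS=0, BCS=1, VCS=2, VCS2=3, VECS=4 and
I915_NUM_ENGINES == 5 from this patch:

    /*
     * GEN8_SEMAPHORE_OFFSET(from, to) = (from * 5 + to) * 8, so:
     *   (RCS,  RCS ) -> (0 * 5 + 0) * 8 = 0x00
     *   (RCS,  VCS2) -> (0 * 5 + 3) * 8 = 0x18
     *   (VECS, RCS ) -> (4 * 5 + 0) * 8 = 0xa0
     * The full matrix spans 5 * 5 * 8 = 200 bytes of the semaphore page,
     * and clearing one engine's row, as intel_ring_init_seqno() now does,
     * is a single 5 * 8 = 40 byte memset.
     */
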
*/ -#define i915_semaphore_seqno_size sizeof(uint64_t) +#define gen8_semaphore_seqno_size sizeof(uint64_t) +#define GEN8_SEMAPHORE_OFFSET(__from, __to) \ + (((__from) * I915_NUM_ENGINES + (__to)) * gen8_semaphore_seqno_size) #define GEN8_SIGNAL_OFFSET(__ring, to) \ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ - ((__ring)->id * I915_NUM_RINGS * i915_semaphore_seqno_size) + \ - (i915_semaphore_seqno_size * (to))) - + GEN8_SEMAPHORE_OFFSET((__ring)->id, (to))) #define GEN8_WAIT_OFFSET(__ring, from) \ (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ - ((from) * I915_NUM_RINGS * i915_semaphore_seqno_size) + \ - (i915_semaphore_seqno_size * (__ring)->id)) + GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) -#define GEN8_RING_SEMAPHORE_INIT do { \ +#define GEN8_RING_SEMAPHORE_INIT(e) do { \ if (!dev_priv->semaphore_obj) { \ break; \ } \ - ring->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET(ring, RCS); \ - ring->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET(ring, VCS); \ - ring->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET(ring, BCS); \ - ring->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET(ring, VECS); \ - ring->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET(ring, VCS2); \ - ring->semaphore.signal_ggtt[ring->id] = MI_SEMAPHORE_SYNC_INVALID; \ + (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \ + (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \ + (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \ + (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \ + (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \ + (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \ } while(0) enum intel_ring_hangcheck_action { HANGCHECK_IDLE = 0, HANGCHECK_WAIT, HANGCHECK_ACTIVE, - HANGCHECK_ACTIVE_LOOP, HANGCHECK_KICK, HANGCHECK_HUNG, }; @@ -88,8 +86,8 @@ enum intel_ring_hangcheck_action { struct intel_ring_hangcheck { u64 acthd; - u64 max_acthd; u32 seqno; + unsigned user_interrupts; int score; enum intel_ring_hangcheck_action action; int deadlock; @@ -101,7 +99,7 @@ struct intel_ringbuffer { void __iomem *virtual_start; struct i915_vma *vma; - struct intel_engine_cs *ring; + struct intel_engine_cs *engine; struct list_head link; u32 head; @@ -125,7 +123,7 @@ struct intel_ringbuffer { }; struct intel_context; -struct drm_i915_reg_descriptor; +struct drm_i915_reg_table; /* * we use a single page to load ctx workarounds so all of these @@ -148,14 +146,14 @@ struct i915_ctx_workarounds { struct intel_engine_cs { const char *name; - enum intel_ring_id { + enum intel_engine_id { RCS = 0, BCS, VCS, VCS2, /* Keep instances of the same type engine together. */ VECS } id; -#define I915_NUM_RINGS 5 +#define I915_NUM_ENGINES 5 #define _VCS(n) (VCS + (n)) unsigned int exec_id; unsigned int guc_id; @@ -196,8 +194,8 @@ struct intel_engine_cs { * seen value is good enough. Note that the seqno will always be * monotonic, even if not coherent. */ - u32 (*get_seqno)(struct intel_engine_cs *ring, - bool lazy_coherency); + void (*irq_seqno_barrier)(struct intel_engine_cs *ring); + u32 (*get_seqno)(struct intel_engine_cs *ring); void (*set_seqno)(struct intel_engine_cs *ring, u32 seqno); int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, @@ -246,16 +244,16 @@ struct intel_engine_cs { * ie. 
transpose of f(x, y) */ struct { - u32 sync_seqno[I915_NUM_RINGS-1]; + u32 sync_seqno[I915_NUM_ENGINES-1]; union { struct { /* our mbox written by others */ - u32 wait[I915_NUM_RINGS]; + u32 wait[I915_NUM_ENGINES]; /* mboxes this ring signals to */ - i915_reg_t signal[I915_NUM_RINGS]; + i915_reg_t signal[I915_NUM_ENGINES]; } mbox; - u64 signal_ggtt[I915_NUM_RINGS]; + u64 signal_ggtt[I915_NUM_ENGINES]; }; /* AKA wait() */ @@ -268,10 +266,13 @@ struct intel_engine_cs { } semaphore; /* Execlists */ - spinlock_t execlist_lock; + struct tasklet_struct irq_tasklet; + spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ struct list_head execlist_queue; struct list_head execlist_retired_req_list; - u8 next_context_status_buffer; + unsigned int fw_domains; + unsigned int next_context_status_buffer; + unsigned int idle_lite_restore_wa; bool disable_lite_restore_wa; u32 ctx_desc_template; u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ @@ -306,6 +307,7 @@ struct intel_engine_cs { * inspecting request list. */ u32 last_submitted_seqno; + unsigned user_interrupts; bool gpu_caches_dirty; @@ -332,15 +334,8 @@ struct intel_engine_cs { /* * Table of registers allowed in commands that read/write registers. */ - const struct drm_i915_reg_descriptor *reg_table; - int reg_count; - - /* - * Table of registers allowed in commands that read/write registers, but - * only from the DRM master. - */ - const struct drm_i915_reg_descriptor *master_reg_table; - int master_reg_count; + const struct drm_i915_reg_table *reg_tables; + int reg_table_count; /* * Returns the bitmask for the length field of the specified command. @@ -356,19 +351,19 @@ struct intel_engine_cs { }; static inline bool -intel_ring_initialized(struct intel_engine_cs *ring) +intel_engine_initialized(struct intel_engine_cs *engine) { - return ring->dev != NULL; + return engine->dev != NULL; } static inline unsigned -intel_ring_flag(struct intel_engine_cs *ring) +intel_engine_flag(struct intel_engine_cs *engine) { - return 1 << ring->id; + return 1 << engine->id; } static inline u32 -intel_ring_sync_index(struct intel_engine_cs *ring, +intel_ring_sync_index(struct intel_engine_cs *engine, struct intel_engine_cs *other) { int idx; @@ -381,34 +376,33 @@ intel_ring_sync_index(struct intel_engine_cs *ring, * vcs2 -> 0 = rcs, 1 = vcs, 2 = bcs, 3 = vecs; */ - idx = (other - ring) - 1; + idx = (other - engine) - 1; if (idx < 0) - idx += I915_NUM_RINGS; + idx += I915_NUM_ENGINES; return idx; } static inline void -intel_flush_status_page(struct intel_engine_cs *ring, int reg) +intel_flush_status_page(struct intel_engine_cs *engine, int reg) { - drm_clflush_virt_range(&ring->status_page.page_addr[reg], - sizeof(uint32_t)); + mb(); + clflush(&engine->status_page.page_addr[reg]); + mb(); } static inline u32 -intel_read_status_page(struct intel_engine_cs *ring, - int reg) +intel_read_status_page(struct intel_engine_cs *engine, int reg) { /* Ensure that the compiler doesn't optimize away the load. 
*/ - barrier(); - return ring->status_page.page_addr[reg]; + return READ_ONCE(engine->status_page.page_addr[reg]); } static inline void -intel_write_status_page(struct intel_engine_cs *ring, +intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) { - ring->status_page.page_addr[reg] = value; + engine->status_page.page_addr[reg] = value; } /* @@ -439,42 +433,42 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); void intel_ringbuffer_free(struct intel_ringbuffer *ring); -void intel_stop_ring_buffer(struct intel_engine_cs *ring); -void intel_cleanup_ring_buffer(struct intel_engine_cs *ring); +void intel_stop_engine(struct intel_engine_cs *engine); +void intel_cleanup_engine(struct intel_engine_cs *engine); int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request); int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n); int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req); -static inline void intel_ring_emit(struct intel_engine_cs *ring, +static inline void intel_ring_emit(struct intel_engine_cs *engine, u32 data) { - struct intel_ringbuffer *ringbuf = ring->buffer; + struct intel_ringbuffer *ringbuf = engine->buffer; iowrite32(data, ringbuf->virtual_start + ringbuf->tail); ringbuf->tail += 4; } -static inline void intel_ring_emit_reg(struct intel_engine_cs *ring, +static inline void intel_ring_emit_reg(struct intel_engine_cs *engine, i915_reg_t reg) { - intel_ring_emit(ring, i915_mmio_reg_offset(reg)); + intel_ring_emit(engine, i915_mmio_reg_offset(reg)); } -static inline void intel_ring_advance(struct intel_engine_cs *ring) +static inline void intel_ring_advance(struct intel_engine_cs *engine) { - struct intel_ringbuffer *ringbuf = ring->buffer; + struct intel_ringbuffer *ringbuf = engine->buffer; ringbuf->tail &= ringbuf->size - 1; } int __intel_ring_space(int head, int tail, int size); void intel_ring_update_space(struct intel_ringbuffer *ringbuf); int intel_ring_space(struct intel_ringbuffer *ringbuf); -bool intel_ring_stopped(struct intel_engine_cs *ring); +bool intel_engine_stopped(struct intel_engine_cs *engine); -int __must_check intel_ring_idle(struct intel_engine_cs *ring); -void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno); +int __must_check intel_engine_idle(struct intel_engine_cs *engine); +void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); -void intel_fini_pipe_control(struct intel_engine_cs *ring); -int intel_init_pipe_control(struct intel_engine_cs *ring); +void intel_fini_pipe_control(struct intel_engine_cs *engine); +int intel_init_pipe_control(struct intel_engine_cs *engine); int intel_init_render_ring_buffer(struct drm_device *dev); int intel_init_bsd_ring_buffer(struct drm_device *dev); @@ -482,9 +476,9 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev); int intel_init_blt_ring_buffer(struct drm_device *dev); int intel_init_vebox_ring_buffer(struct drm_device *dev); -u64 intel_ring_get_active_head(struct intel_engine_cs *ring); +u64 intel_ring_get_active_head(struct intel_engine_cs *engine); -int init_workarounds_ring(struct intel_engine_cs *ring); +int init_workarounds_ring(struct intel_engine_cs *engine); static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) { diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c 
b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6e54d978d9d4..8f9797f17991 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -89,6 +89,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain) return "TRANSCODER_C"; case POWER_DOMAIN_TRANSCODER_EDP: return "TRANSCODER_EDP"; + case POWER_DOMAIN_TRANSCODER_DSI_A: + return "TRANSCODER_DSI_A"; + case POWER_DOMAIN_TRANSCODER_DSI_C: + return "TRANSCODER_DSI_C"; case POWER_DOMAIN_PORT_DDI_A_LANES: return "PORT_DDI_A_LANES"; case POWER_DOMAIN_PORT_DDI_B_LANES: @@ -419,8 +423,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv, BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \ BIT(POWER_DOMAIN_PIPE_A) | \ BIT(POWER_DOMAIN_TRANSCODER_EDP) | \ + BIT(POWER_DOMAIN_TRANSCODER_DSI_A) | \ + BIT(POWER_DOMAIN_TRANSCODER_DSI_C) | \ BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \ BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \ + BIT(POWER_DOMAIN_PORT_DSI) | \ BIT(POWER_DOMAIN_AUX_A) | \ BIT(POWER_DOMAIN_PLLS) | \ BIT(POWER_DOMAIN_INIT)) @@ -458,8 +465,6 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) { WARN(intel_irqs_enabled(dev_priv), "Interrupts not disabled yet.\n"); - WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9), - "DC9 already programmed to be disabled.\n"); WARN(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5, "DC5 still not disabled.\n"); @@ -472,24 +477,6 @@ static void assert_can_disable_dc9(struct drm_i915_private *dev_priv) */ } -static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) -{ - uint32_t val, mask; - - mask = DC_STATE_DEBUG_MASK_MEMORY_UP; - - if (IS_BROXTON(dev_priv)) - mask |= DC_STATE_DEBUG_MASK_CORES; - - /* The below bit doesn't need to be cleared ever afterwards */ - val = I915_READ(DC_STATE_DEBUG); - if ((val & mask) != mask) { - val |= mask; - I915_WRITE(DC_STATE_DEBUG, val); - POSTING_READ(DC_STATE_DEBUG); - } -} - static void gen9_write_dc_state(struct drm_i915_private *dev_priv, u32 state) { @@ -538,12 +525,8 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state) else mask |= DC_STATE_EN_UPTO_DC6; - WARN_ON_ONCE(state & ~mask); - - if (i915.enable_dc == 0) - state = DC_STATE_DISABLE; - else if (i915.enable_dc == 1 && state > DC_STATE_EN_UPTO_DC5) - state = DC_STATE_EN_UPTO_DC5; + if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask)) + state &= dev_priv->csr.allowed_dc_mask; val = I915_READ(DC_STATE_EN); DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n", @@ -606,18 +589,6 @@ static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) assert_csr_loaded(dev_priv); } -static void assert_can_disable_dc5(struct drm_i915_private *dev_priv) -{ - /* - * During initialization, the firmware may not be loaded yet. - * We still want to make sure that the DC enabling flag is cleared. - */ - if (dev_priv->power_domains.initializing) - return; - - assert_rpm_wakelock_held(dev_priv); -} - static void gen9_enable_dc5(struct drm_i915_private *dev_priv) { assert_can_enable_dc5(dev_priv); @@ -642,30 +613,6 @@ static void assert_can_enable_dc6(struct drm_i915_private *dev_priv) assert_csr_loaded(dev_priv); } -static void assert_can_disable_dc6(struct drm_i915_private *dev_priv) -{ - /* - * During initialization, the firmware may not be loaded yet. - * We still want to make sure that the DC enabling flag is cleared. 
- */ - if (dev_priv->power_domains.initializing) - return; - - WARN_ONCE(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6), - "DC6 already programmed to be disabled.\n"); -} - -static void gen9_disable_dc5_dc6(struct drm_i915_private *dev_priv) -{ - assert_can_disable_dc5(dev_priv); - - if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && - i915.enable_dc != 0 && i915.enable_dc != 1) - assert_can_disable_dc6(dev_priv); - - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); -} - void skl_enable_dc6(struct drm_i915_private *dev_priv) { assert_can_enable_dc6(dev_priv); @@ -678,8 +625,6 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv) void skl_disable_dc6(struct drm_i915_private *dev_priv) { - assert_can_disable_dc6(dev_priv); - DRM_DEBUG_KMS("Disabling DC6\n"); gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); @@ -833,32 +778,25 @@ static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - gen9_disable_dc5_dc6(dev_priv); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); } static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && - i915.enable_dc != 0 && i915.enable_dc != 1) + if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) skl_enable_dc6(dev_priv); - else + else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) gen9_enable_dc5(dev_priv); } static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - if (power_well->count > 0) { - gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); - } else { - if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) && - i915.enable_dc != 0 && - i915.enable_dc != 1) - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); - else - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5); - } + if (power_well->count > 0) + gen9_dc_off_power_well_enable(dev_priv, power_well); + else + gen9_dc_off_power_well_disable(dev_priv, power_well); } static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv, @@ -962,6 +900,17 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv, return enabled; } +static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); + + /* + * Disable trickle feed and enable pnd deadline calculation + */ + I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + I915_WRITE(CBR1_VLV, 0); +} + static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -984,6 +933,8 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) I915_WRITE(DPLL(pipe), val); } + vlv_init_display_clock_gating(dev_priv); + spin_lock_irq(&dev_priv->irq_lock); valleyview_enable_display_irqs(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); @@ -2023,6 +1974,55 @@ sanitize_disable_power_well_option(const struct drm_i915_private *dev_priv, return 1; } +static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv, + int enable_dc) +{ + uint32_t mask; + int requested_dc; + int max_dc; + + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { + max_dc = 2; + mask = 0; + } else if (IS_BROXTON(dev_priv)) { + max_dc = 1; + /* + * DC9 has a separate HW flow from the rest of the DC states, + * not depending on the DMC firmware. It's needed by system + * suspend/resume, so allow it unconditionally. 
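
    /* (Worked example, not part of the patch: on Broxton booted with
     * i915.enable_dc=2, and with i915.disable_power_well left at its
     * default, max_dc is 1 and the mask starts as DC_STATE_EN_DC9. The
     * request exceeds max_dc, so it is clamped to requested_dc = 1 with
     * a debug message, and only DC_STATE_EN_UPTO_DC5 is added; the
     * resulting DC9 | DC5 mask is then what gen9_set_dc_state() clamps
     * against, instead of re-checking i915.enable_dc at each call site.)
     */
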
+ */ + mask = DC_STATE_EN_DC9; + } else { + max_dc = 0; + mask = 0; + } + + if (!i915.disable_power_well) + max_dc = 0; + + if (enable_dc >= 0 && enable_dc <= max_dc) { + requested_dc = enable_dc; + } else if (enable_dc == -1) { + requested_dc = max_dc; + } else if (enable_dc > max_dc && enable_dc <= 2) { + DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", + enable_dc, max_dc); + requested_dc = max_dc; + } else { + DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc); + requested_dc = max_dc; + } + + if (requested_dc > 1) + mask |= DC_STATE_EN_UPTO_DC6; + if (requested_dc > 0) + mask |= DC_STATE_EN_UPTO_DC5; + + DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); + + return mask; +} + #define set_power_wells(power_domains, __power_wells) ({ \ (power_domains)->power_wells = (__power_wells); \ (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \ @@ -2041,6 +2041,8 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) i915.disable_power_well = sanitize_disable_power_well_option(dev_priv, i915.disable_power_well); + dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, + i915.enable_dc); BUILD_BUG_ON(POWER_DOMAIN_NUM > 31); @@ -2050,17 +2052,17 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) * The enabling order will be from lower to higher indexed wells, * the disabling order is reversed. */ - if (IS_HASWELL(dev_priv->dev)) { + if (IS_HASWELL(dev_priv)) { set_power_wells(power_domains, hsw_power_wells); - } else if (IS_BROADWELL(dev_priv->dev)) { + } else if (IS_BROADWELL(dev_priv)) { set_power_wells(power_domains, bdw_power_wells); - } else if (IS_SKYLAKE(dev_priv->dev) || IS_KABYLAKE(dev_priv->dev)) { + } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { set_power_wells(power_domains, skl_power_wells); - } else if (IS_BROXTON(dev_priv->dev)) { + } else if (IS_BROXTON(dev_priv)) { set_power_wells(power_domains, bxt_power_wells); - } else if (IS_CHERRYVIEW(dev_priv->dev)) { + } else if (IS_CHERRYVIEW(dev_priv)) { set_power_wells(power_domains, chv_power_wells); - } else if (IS_VALLEYVIEW(dev_priv->dev)) { + } else if (IS_VALLEYVIEW(dev_priv)) { set_power_wells(power_domains, vlv_power_wells); } else { set_power_wells(power_domains, i9xx_always_on_power_well); @@ -2141,8 +2143,8 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv, skl_init_cdclk(dev_priv); - if (dev_priv->csr.dmc_payload && intel_csr_load_program(dev_priv)) - gen9_set_dc_state_debugmask(dev_priv); + if (dev_priv->csr.dmc_payload) + intel_csr_load_program(dev_priv); } static void skl_display_core_uninit(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 4ecc076c4041..2128fae5687d 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1398,12 +1398,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder, } dotclock = pipe_config->port_clock; + if (pipe_config->pixel_multiplier) dotclock /= pipe_config->pixel_multiplier; - if (HAS_PCH_SPLIT(dev)) - ironlake_check_encoder_dotclock(pipe_config, dotclock); - pipe_config->base.adjusted_mode.crtc_clock = dotclock; /* Cross check the port pixel multiplier with the sdvo encoder state. 
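
    /* (Illustrative numbers, not from the patch: SDVO drives slow modes
     * at a multiple of the dot clock, so a 25175 kHz mode is typically
     * carried with pixel_multiplier == 4 and port_clock == 100700; the
     * division above then recovers crtc_clock == 25175.)
     */
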
*/ @@ -2262,9 +2260,9 @@ intel_sdvo_select_ddc_bus(struct drm_i915_private *dev_priv, struct sdvo_device_mapping *mapping; if (sdvo->port == PORT_B) - mapping = &(dev_priv->sdvo_mappings[0]); + mapping = &dev_priv->vbt.sdvo_mappings[0]; else - mapping = &(dev_priv->sdvo_mappings[1]); + mapping = &dev_priv->vbt.sdvo_mappings[1]; if (mapping->initialized) sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4); @@ -2280,9 +2278,9 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, u8 pin; if (sdvo->port == PORT_B) - mapping = &dev_priv->sdvo_mappings[0]; + mapping = &dev_priv->vbt.sdvo_mappings[0]; else - mapping = &dev_priv->sdvo_mappings[1]; + mapping = &dev_priv->vbt.sdvo_mappings[1]; if (mapping->initialized && intel_gmbus_is_valid_pin(dev_priv, mapping->i2c_pin)) @@ -2318,11 +2316,11 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) struct sdvo_device_mapping *my_mapping, *other_mapping; if (sdvo->port == PORT_B) { - my_mapping = &dev_priv->sdvo_mappings[0]; - other_mapping = &dev_priv->sdvo_mappings[1]; + my_mapping = &dev_priv->vbt.sdvo_mappings[0]; + other_mapping = &dev_priv->vbt.sdvo_mappings[1]; } else { - my_mapping = &dev_priv->sdvo_mappings[1]; - other_mapping = &dev_priv->sdvo_mappings[0]; + my_mapping = &dev_priv->vbt.sdvo_mappings[1]; + other_mapping = &dev_priv->vbt.sdvo_mappings[0]; } /* If the BIOS described our SDVO device, take advantage of it. */ diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index a2582c455b36..0f3e2303e0e9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -193,7 +193,7 @@ skl_update_plane(struct drm_plane *drm_plane, const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 surf_addr; u32 tile_height, plane_offset, plane_size; - unsigned int rotation; + unsigned int rotation = plane_state->base.rotation; int x_offset, y_offset; int crtc_x = plane_state->dst.x1; int crtc_y = plane_state->dst.y1; @@ -213,7 +213,6 @@ skl_update_plane(struct drm_plane *drm_plane, plane_ctl |= skl_plane_ctl_format(fb->pixel_format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]); - rotation = plane_state->base.rotation; plane_ctl |= skl_plane_ctl_rotation(rotation); stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0], @@ -351,6 +350,7 @@ vlv_update_plane(struct drm_plane *dplane, int plane = intel_plane->plane; u32 sprctl; u32 sprsurf_offset, linear_offset; + unsigned int rotation = dplane->state->rotation; int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->dst.x1; @@ -423,12 +423,11 @@ vlv_update_plane(struct drm_plane *dplane, crtc_h--; linear_offset = y * fb->pitches[0] + x * cpp; - sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], cpp, - fb->pitches[0]); + sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, + fb->pitches[0], rotation); linear_offset -= sprsurf_offset; - if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + if (rotation == BIT(DRM_ROTATE_180)) { sprctl |= SP_ROTATE_180; x += src_w; @@ -493,6 +492,7 @@ ivb_update_plane(struct drm_plane *plane, enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; u32 sprsurf_offset, linear_offset; + unsigned int rotation = plane_state->base.rotation; int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->dst.x1; @@ -556,12 +556,11 @@ 
ivb_update_plane(struct drm_plane *plane, sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; linear_offset = y * fb->pitches[0] + x * cpp; - sprsurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], cpp, - fb->pitches[0]); + sprsurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, + fb->pitches[0], rotation); linear_offset -= sprsurf_offset; - if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + if (rotation == BIT(DRM_ROTATE_180)) { sprctl |= SPRITE_ROTATE_180; /* HSW and BDW does this automagically in hardware */ @@ -634,6 +633,7 @@ ilk_update_plane(struct drm_plane *plane, int pipe = intel_plane->pipe; u32 dvscntr, dvsscale; u32 dvssurf_offset, linear_offset; + unsigned int rotation = plane_state->base.rotation; int cpp = drm_format_plane_cpp(fb->pixel_format, 0); const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; int crtc_x = plane_state->dst.x1; @@ -693,12 +693,11 @@ ilk_update_plane(struct drm_plane *plane, dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; linear_offset = y * fb->pitches[0] + x * cpp; - dvssurf_offset = intel_compute_tile_offset(dev_priv, &x, &y, - fb->modifier[0], cpp, - fb->pitches[0]); + dvssurf_offset = intel_compute_tile_offset(&x, &y, fb, 0, + fb->pitches[0], rotation); linear_offset -= dvssurf_offset; - if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) { + if (rotation == BIT(DRM_ROTATE_180)) { dvscntr |= DVS_ROTATE_180; x += src_w; @@ -1026,8 +1025,8 @@ static uint32_t skl_plane_formats[] = { int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) { - struct intel_plane *intel_plane; - struct intel_plane_state *state; + struct intel_plane *intel_plane = NULL; + struct intel_plane_state *state = NULL; unsigned long possible_crtcs; const uint32_t *plane_formats; int num_plane_formats; @@ -1037,13 +1036,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) return -ENODEV; intel_plane = kzalloc(sizeof(*intel_plane), GFP_KERNEL); - if (!intel_plane) - return -ENOMEM; + if (!intel_plane) { + ret = -ENOMEM; + goto fail; + } state = intel_create_plane_state(&intel_plane->base); if (!state) { - kfree(intel_plane); - return -ENOMEM; + ret = -ENOMEM; + goto fail; } intel_plane->base.state = &state->base; @@ -1098,28 +1099,34 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) num_plane_formats = ARRAY_SIZE(skl_plane_formats); break; default: - kfree(intel_plane); - return -ENODEV; + MISSING_CASE(INTEL_INFO(dev)->gen); + ret = -ENODEV; + goto fail; } intel_plane->pipe = pipe; intel_plane->plane = plane; intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane); intel_plane->check_plane = intel_check_sprite_plane; + possible_crtcs = (1 << pipe); + ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, &intel_plane_funcs, plane_formats, num_plane_formats, DRM_PLANE_TYPE_OVERLAY, NULL); - if (ret) { - kfree(intel_plane); - goto out; - } + if (ret) + goto fail; intel_create_rotation_property(dev, intel_plane); drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs); -out: + return 0; + +fail: + kfree(state); + kfree(intel_plane); + return ret; } diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 6745bad5bff0..223129d3c765 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -326,24 +326,12 @@ static const struct color_conversion sdtv_csc_yprpb = { .rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200, }; -static const struct color_conversion sdtv_csc_rgb = { 
- .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, - .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, - .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, -}; - static const struct color_conversion hdtv_csc_yprpb = { .ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145, .ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200, .rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200, }; -static const struct color_conversion hdtv_csc_rgb = { - .ry = 0x0000, .gy = 0x0f00, .by = 0x0000, .ay = 0x0166, - .ru = 0x0000, .gu = 0x0000, .bu = 0x0f00, .au = 0x0166, - .rv = 0x0f00, .gv = 0x0000, .bv = 0x0000, .av = 0x0166, -}; - static const struct video_levels component_levels = { .blank = 279, .black = 279, .burst = 0, }; @@ -1531,47 +1519,6 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = { .destroy = intel_encoder_destroy, }; -/* - * Enumerate the child dev array parsed from VBT to check whether - * the integrated TV is present. - * If it is present, return 1. - * If it is not present, return false. - * If no child dev is parsed from VBT, it assumes that the TV is present. - */ -static int tv_is_present_in_vbt(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - union child_device_config *p_child; - int i, ret; - - if (!dev_priv->vbt.child_dev_num) - return 1; - - ret = 0; - for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { - p_child = dev_priv->vbt.child_dev + i; - /* - * If the device type is not TV, continue. - */ - switch (p_child->old.device_type) { - case DEVICE_TYPE_INT_TV: - case DEVICE_TYPE_TV: - case DEVICE_TYPE_TV_SVIDEO_COMPOSITE: - break; - default: - continue; - } - /* Only when the addin_offset is non-zero, it is regarded - * as present. - */ - if (p_child->old.addin_offset) { - ret = 1; - break; - } - } - return ret; -} - void intel_tv_init(struct drm_device *dev) { @@ -1587,13 +1534,10 @@ intel_tv_init(struct drm_device *dev) if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED) return; - if (!tv_is_present_in_vbt(dev)) { + if (!intel_bios_is_tv_present(dev_priv)) { DRM_DEBUG_KMS("Integrated TV is not present.\n"); return; } - /* Even if we have an encoder we may not have a connector */ - if (!dev_priv->vbt.int_tv_support) - return; /* * Sanity check the TV output by checking to see if the diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 436d8f2b8682..144700cd360b 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -60,7 +60,11 @@ fw_domain_reset(const struct intel_uncore_forcewake_domain *d) static inline void fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d) { - mod_timer_pinned(&d->timer, jiffies + 1); + d->wake_count++; + hrtimer_start_range_ns(&d->timer, + ktime_set(0, NSEC_PER_MSEC), + NSEC_PER_MSEC, + HRTIMER_MODE_REL); } static inline void @@ -107,22 +111,22 @@ static void fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; - enum forcewake_domain_id id; - for_each_fw_domain_mask(d, fw_domains, dev_priv, id) { + for_each_fw_domain_masked(d, fw_domains, dev_priv) { fw_domain_wait_ack_clear(d); fw_domain_get(d); - fw_domain_wait_ack(d); } + + for_each_fw_domain_masked(d, fw_domains, dev_priv) + fw_domain_wait_ack(d); } static void fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; - enum forcewake_domain_id id; - for_each_fw_domain_mask(d, 
fw_domains, dev_priv, id) { + for_each_fw_domain_masked(d, fw_domains, dev_priv) { fw_domain_put(d); fw_domain_posting_read(d); } @@ -132,10 +136,9 @@ static void fw_domains_posting_read(struct drm_i915_private *dev_priv) { struct intel_uncore_forcewake_domain *d; - enum forcewake_domain_id id; /* No need to do for all, just do for first found */ - for_each_fw_domain(d, dev_priv, id) { + for_each_fw_domain(d, dev_priv) { fw_domain_posting_read(d); break; } @@ -145,12 +148,11 @@ static void fw_domains_reset(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *d; - enum forcewake_domain_id id; if (dev_priv->uncore.fw_domains == 0) return; - for_each_fw_domain_mask(d, fw_domains, dev_priv, id) + for_each_fw_domain_masked(d, fw_domains, dev_priv) fw_domain_reset(d); fw_domains_posting_read(dev_priv); @@ -204,7 +206,7 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) /* On VLV, FIFO will be shared by both SW and HW. * So, we need to read the FREE_ENTRIES everytime */ - if (IS_VALLEYVIEW(dev_priv->dev)) + if (IS_VALLEYVIEW(dev_priv)) dev_priv->uncore.fifo_count = fifo_free_entries(dev_priv); if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) { @@ -224,9 +226,11 @@ static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) return ret; } -static void intel_uncore_fw_release_timer(unsigned long arg) +static enum hrtimer_restart +intel_uncore_fw_release_timer(struct hrtimer *timer) { - struct intel_uncore_forcewake_domain *domain = (void *)arg; + struct intel_uncore_forcewake_domain *domain = + container_of(timer, struct intel_uncore_forcewake_domain, timer); unsigned long irqflags; assert_rpm_device_not_suspended(domain->i915); @@ -240,6 +244,8 @@ static void intel_uncore_fw_release_timer(unsigned long arg) 1 << domain->id); spin_unlock_irqrestore(&domain->i915->uncore.lock, irqflags); + + return HRTIMER_NORESTART; } void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) @@ -248,7 +254,6 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) unsigned long irqflags; struct intel_uncore_forcewake_domain *domain; int retry_count = 100; - enum forcewake_domain_id id; enum forcewake_domains fw = 0, active_domains; /* Hold uncore.lock across reset to prevent any register access @@ -258,18 +263,18 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) while (1) { active_domains = 0; - for_each_fw_domain(domain, dev_priv, id) { - if (del_timer_sync(&domain->timer) == 0) + for_each_fw_domain(domain, dev_priv) { + if (hrtimer_cancel(&domain->timer) == 0) continue; - intel_uncore_fw_release_timer((unsigned long)domain); + intel_uncore_fw_release_timer(&domain->timer); } spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - for_each_fw_domain(domain, dev_priv, id) { - if (timer_pending(&domain->timer)) - active_domains |= (1 << id); + for_each_fw_domain(domain, dev_priv) { + if (hrtimer_active(&domain->timer)) + active_domains |= domain->mask; } if (active_domains == 0) @@ -286,9 +291,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) WARN_ON(active_domains); - for_each_fw_domain(domain, dev_priv, id) + for_each_fw_domain(domain, dev_priv) if (domain->wake_count) - fw |= 1 << id; + fw |= domain->mask; if (fw) dev_priv->uncore.funcs.force_wake_put(dev_priv, fw); @@ -410,16 +415,15 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct 
intel_uncore_forcewake_domain *domain; - enum forcewake_domain_id id; if (!dev_priv->uncore.funcs.force_wake_get) return; fw_domains &= dev_priv->uncore.fw_domains; - for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { + for_each_fw_domain_masked(domain, fw_domains, dev_priv) { if (domain->wake_count++) - fw_domains &= ~(1 << id); + fw_domains &= ~domain->mask; } if (fw_domains) @@ -477,21 +481,19 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; - enum forcewake_domain_id id; if (!dev_priv->uncore.funcs.force_wake_put) return; fw_domains &= dev_priv->uncore.fw_domains; - for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { + for_each_fw_domain_masked(domain, fw_domains, dev_priv) { if (WARN_ON(domain->wake_count == 0)) continue; if (--domain->wake_count) continue; - domain->wake_count++; fw_domain_arm_timer(domain); } } @@ -539,18 +541,27 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv, void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) { struct intel_uncore_forcewake_domain *domain; - enum forcewake_domain_id id; if (!dev_priv->uncore.funcs.force_wake_get) return; - for_each_fw_domain(domain, dev_priv, id) + for_each_fw_domain(domain, dev_priv) WARN_ON(domain->wake_count); } /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) +#define __gen6_reg_read_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd; \ + if (NEEDS_FORCE_WAKE(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else \ + __fwd = 0; \ + __fwd; \ +}) + #define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end)) #define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \ @@ -564,6 +575,48 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) REG_RANGE((reg), 0x22000, 0x24000) || \ REG_RANGE((reg), 0x30000, 0x40000)) +#define __vlv_reg_read_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd = 0; \ + if (!NEEDS_FORCE_WAKE(offset)) \ + __fwd = 0; \ + else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_MEDIA; \ + __fwd; \ +}) + +static const i915_reg_t gen8_shadowed_regs[] = { + GEN6_RPNSWREQ, + GEN6_RC_VIDEO_FREQ, + RING_TAIL(RENDER_RING_BASE), + RING_TAIL(GEN6_BSD_RING_BASE), + RING_TAIL(VEBOX_RING_BASE), + RING_TAIL(BLT_RING_BASE), + /* TODO: Other registers are not yet used */ +}; + +static bool is_gen8_shadowed(u32 offset) +{ + int i; + for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) + if (offset == gen8_shadowed_regs[i].reg) + return true; + + return false; +} + +#define __gen8_reg_write_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd; \ + if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else \ + __fwd = 0; \ + __fwd; \ +}) + #define FORCEWAKE_CHV_RENDER_RANGE_OFFSET(reg) \ (REG_RANGE((reg), 0x2000, 0x4000) || \ REG_RANGE((reg), 0x5200, 0x8000) || \ @@ -586,6 +639,34 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) REG_RANGE((reg), 0x9000, 0xB000) || \ REG_RANGE((reg), 0xF000, 0x10000)) +#define __chv_reg_read_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd = 0; \ + if (!NEEDS_FORCE_WAKE(offset)) \ + __fwd = 0; \ + else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_MEDIA; \ + else if 
(FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + __fwd; \ +}) + +#define __chv_reg_write_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd = 0; \ + if (!NEEDS_FORCE_WAKE(offset) || is_gen8_shadowed(offset)) \ + __fwd = 0; \ + else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_MEDIA; \ + else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + __fwd; \ +}) + #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \ REG_RANGE((reg), 0xB00, 0x2000) @@ -618,6 +699,61 @@ void assert_forcewakes_inactive(struct drm_i915_private *dev_priv) !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \ !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) +#define SKL_NEEDS_FORCE_WAKE(reg) \ + ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) + +#define __gen9_reg_read_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd; \ + if (!SKL_NEEDS_FORCE_WAKE(offset)) \ + __fwd = 0; \ + else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_MEDIA; \ + else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + else \ + __fwd = FORCEWAKE_BLITTER; \ + __fwd; \ +}) + +static const i915_reg_t gen9_shadowed_regs[] = { + RING_TAIL(RENDER_RING_BASE), + RING_TAIL(GEN6_BSD_RING_BASE), + RING_TAIL(VEBOX_RING_BASE), + RING_TAIL(BLT_RING_BASE), + GEN6_RPNSWREQ, + GEN6_RC_VIDEO_FREQ, + /* TODO: Other registers are not yet used */ +}; + +static bool is_gen9_shadowed(u32 offset) +{ + int i; + for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) + if (offset == gen9_shadowed_regs[i].reg) + return true; + + return false; +} + +#define __gen9_reg_write_fw_domains(offset) \ +({ \ + enum forcewake_domains __fwd; \ + if (!SKL_NEEDS_FORCE_WAKE(offset) || is_gen9_shadowed(offset)) \ + __fwd = 0; \ + else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER; \ + else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_MEDIA; \ + else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ + __fwd = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + else \ + __fwd = FORCEWAKE_BLITTER; \ + __fwd; \ +}) + static void ilk_dummy_write(struct drm_i915_private *dev_priv) { @@ -633,15 +769,6 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv, const bool read, const bool before) { - /* XXX. We limit the auto arming traces for mmio - * debugs on these platforms. There are just too many - * revealed by these and CI/Bat suffers from the noise. - * Please fix and then re-enable the automatic traces. - */ - if (i915.mmio_debug < 2 && - (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) - return; - if (WARN(check_for_unclaimed_mmio(dev_priv), "Unclaimed register detected %s %s register 0x%x\n", before ? 
"before" : "after", @@ -716,23 +843,21 @@ __gen2_read(64) trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \ return val -static inline void __force_wake_get(struct drm_i915_private *dev_priv, - enum forcewake_domains fw_domains) +static inline void __force_wake_auto(struct drm_i915_private *dev_priv, + enum forcewake_domains fw_domains) { struct intel_uncore_forcewake_domain *domain; - enum forcewake_domain_id id; if (WARN_ON(!fw_domains)) return; /* Ideally GCC would be constant-fold and eliminate this loop */ - for_each_fw_domain_mask(domain, fw_domains, dev_priv, id) { + for_each_fw_domain_masked(domain, fw_domains, dev_priv) { if (domain->wake_count) { - fw_domains &= ~(1 << id); + fw_domains &= ~domain->mask; continue; } - domain->wake_count++; fw_domain_arm_timer(domain); } @@ -743,9 +868,11 @@ static inline void __force_wake_get(struct drm_i915_private *dev_priv, #define __gen6_read(x) \ static u##x \ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ + enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - if (NEEDS_FORCE_WAKE(offset)) \ - __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ + fw_engine = __gen6_reg_read_fw_domains(offset); \ + if (fw_engine) \ + __force_wake_auto(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } @@ -753,16 +880,11 @@ gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ #define __vlv_read(x) \ static u##x \ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ - enum forcewake_domains fw_engine = 0; \ + enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - if (!NEEDS_FORCE_WAKE(offset)) \ - fw_engine = 0; \ - else if (FORCEWAKE_VLV_RENDER_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_MEDIA; \ + fw_engine = __vlv_reg_read_fw_domains(offset); \ if (fw_engine) \ - __force_wake_get(dev_priv, fw_engine); \ + __force_wake_auto(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } @@ -770,42 +892,23 @@ vlv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ #define __chv_read(x) \ static u##x \ chv_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ - enum forcewake_domains fw_engine = 0; \ + enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - if (!NEEDS_FORCE_WAKE(offset)) \ - fw_engine = 0; \ - else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_MEDIA; \ - else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + fw_engine = __chv_reg_read_fw_domains(offset); \ if (fw_engine) \ - __force_wake_get(dev_priv, fw_engine); \ + __force_wake_auto(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } -#define SKL_NEEDS_FORCE_WAKE(reg) \ - ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg)) - #define __gen9_read(x) \ static u##x \ gen9_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_READ_HEADER(x); \ - if (!SKL_NEEDS_FORCE_WAKE(offset)) \ - fw_engine = 0; \ - else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_MEDIA; \ - else if 
(FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ - else \ - fw_engine = FORCEWAKE_BLITTER; \ + fw_engine = __gen9_reg_read_fw_domains(offset); \ if (fw_engine) \ - __force_wake_get(dev_priv, fw_engine); \ + __force_wake_auto(dev_priv, fw_engine); \ val = __raw_i915_read##x(dev_priv, reg); \ GEN6_READ_FOOTER; \ } @@ -942,34 +1045,14 @@ hsw_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool t GEN6_WRITE_FOOTER; \ } -static const i915_reg_t gen8_shadowed_regs[] = { - FORCEWAKE_MT, - GEN6_RPNSWREQ, - GEN6_RC_VIDEO_FREQ, - RING_TAIL(RENDER_RING_BASE), - RING_TAIL(GEN6_BSD_RING_BASE), - RING_TAIL(VEBOX_RING_BASE), - RING_TAIL(BLT_RING_BASE), - /* TODO: Other registers are not yet used */ -}; - -static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, - i915_reg_t reg) -{ - int i; - for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++) - if (i915_mmio_reg_equal(reg, gen8_shadowed_regs[i])) - return true; - - return false; -} - #define __gen8_write(x) \ static void \ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ + enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(dev_priv, reg)) \ - __force_wake_get(dev_priv, FORCEWAKE_RENDER); \ + fw_engine = __gen8_reg_write_fw_domains(offset); \ + if (fw_engine) \ + __force_wake_auto(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ GEN6_WRITE_FOOTER; \ } @@ -977,66 +1060,24 @@ gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool #define __chv_write(x) \ static void \ chv_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \ - enum forcewake_domains fw_engine = 0; \ + enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - if (!NEEDS_FORCE_WAKE(offset) || \ - is_gen8_shadowed(dev_priv, reg)) \ - fw_engine = 0; \ - else if (FORCEWAKE_CHV_RENDER_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_CHV_MEDIA_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_MEDIA; \ - else if (FORCEWAKE_CHV_COMMON_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ + fw_engine = __chv_reg_write_fw_domains(offset); \ if (fw_engine) \ - __force_wake_get(dev_priv, fw_engine); \ + __force_wake_auto(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ GEN6_WRITE_FOOTER; \ } -static const i915_reg_t gen9_shadowed_regs[] = { - RING_TAIL(RENDER_RING_BASE), - RING_TAIL(GEN6_BSD_RING_BASE), - RING_TAIL(VEBOX_RING_BASE), - RING_TAIL(BLT_RING_BASE), - FORCEWAKE_BLITTER_GEN9, - FORCEWAKE_RENDER_GEN9, - FORCEWAKE_MEDIA_GEN9, - GEN6_RPNSWREQ, - GEN6_RC_VIDEO_FREQ, - /* TODO: Other registers are not yet used */ -}; - -static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, - i915_reg_t reg) -{ - int i; - for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++) - if (i915_mmio_reg_equal(reg, gen9_shadowed_regs[i])) - return true; - - return false; -} - #define __gen9_write(x) \ static void \ gen9_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, \ bool trace) { \ enum forcewake_domains fw_engine; \ GEN6_WRITE_HEADER; \ - if (!SKL_NEEDS_FORCE_WAKE(offset) || \ - is_gen9_shadowed(dev_priv, reg)) \ - fw_engine = 0; \ - else if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_RENDER; \ - else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(offset)) \ - fw_engine = FORCEWAKE_MEDIA; \ - else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(offset)) \ - 
fw_engine = FORCEWAKE_RENDER | FORCEWAKE_MEDIA; \ - else \ - fw_engine = FORCEWAKE_BLITTER; \ + fw_engine = __gen9_reg_write_fw_domains(offset); \ if (fw_engine) \ - __force_wake_get(dev_priv, fw_engine); \ + __force_wake_auto(dev_priv, fw_engine); \ __raw_i915_write##x(dev_priv, reg, val); \ GEN6_WRITE_FOOTER; \ } @@ -1150,7 +1191,14 @@ static void fw_domain_init(struct drm_i915_private *dev_priv, d->i915 = dev_priv; d->id = domain_id; - setup_timer(&d->timer, intel_uncore_fw_release_timer, (unsigned long)d); + BUILD_BUG_ON(FORCEWAKE_RENDER != (1 << FW_DOMAIN_ID_RENDER)); + BUILD_BUG_ON(FORCEWAKE_BLITTER != (1 << FW_DOMAIN_ID_BLITTER)); + BUILD_BUG_ON(FORCEWAKE_MEDIA != (1 << FW_DOMAIN_ID_MEDIA)); + + d->mask = 1 << domain_id; + + hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + d->timer.function = intel_uncore_fw_release_timer; dev_priv->uncore.fw_domains |= (1 << domain_id); @@ -1161,7 +1209,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - if (INTEL_INFO(dev_priv->dev)->gen <= 5) + if (INTEL_INFO(dev_priv)->gen <= 5) return; if (IS_GEN9(dev)) { @@ -1433,7 +1481,7 @@ static int i915_reset_complete(struct drm_device *dev) return (gdrst & GRDOM_RESET_STATUS) == 0; } -static int i915_do_reset(struct drm_device *dev) +static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) { /* assert reset for at least 20 usec */ pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); @@ -1450,13 +1498,13 @@ static int g4x_reset_complete(struct drm_device *dev) return (gdrst & GRDOM_RESET_ENABLE) == 0; } -static int g33_do_reset(struct drm_device *dev) +static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) { pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); return wait_for(g4x_reset_complete(dev), 500); } -static int g4x_do_reset(struct drm_device *dev) +static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -1486,7 +1534,7 @@ static int g4x_do_reset(struct drm_device *dev) return 0; } -static int ironlake_do_reset(struct drm_device *dev) +static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) { struct drm_i915_private *dev_priv = dev->dev_private; int ret; @@ -1510,75 +1558,132 @@ static int ironlake_do_reset(struct drm_device *dev) return 0; } -static int gen6_do_reset(struct drm_device *dev) +/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */ +static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, + u32 hw_domain_mask) { - struct drm_i915_private *dev_priv = dev->dev_private; - int ret; - - /* Reset the chip */ + int ret; /* GEN6_GDRST is not in the gt power well, no need to check * for fifo space for the write or forcewake the chip for * the read */ - __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL); + __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); - /* Spin waiting for the device to ack the reset request */ - ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); +#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0) + /* Spin waiting for the device to ack the reset requests */ + ret = wait_for(ACKED, 500); +#undef ACKED + + return ret; +} + +/** + * gen6_reset_engines - reset individual engines + * @dev: DRM device + * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset + * + * This function will reset the individual engines 
that are set in engine_mask. + * If you provide ALL_ENGINES as mask, full global domain reset will be issued. + * + * Note: It is the responsibility of the caller to handle the difference between + * asking full domain reset versus reset for all available individual engines. + * + * Returns 0 on success, nonzero on error. + */ +static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_engine_cs *engine; + const u32 hw_engine_mask[I915_NUM_ENGINES] = { + [RCS] = GEN6_GRDOM_RENDER, + [BCS] = GEN6_GRDOM_BLT, + [VCS] = GEN6_GRDOM_MEDIA, + [VCS2] = GEN8_GRDOM_MEDIA2, + [VECS] = GEN6_GRDOM_VECS, + }; + u32 hw_mask; + int ret; + + if (engine_mask == ALL_ENGINES) { + hw_mask = GEN6_GRDOM_FULL; + } else { + hw_mask = 0; + for_each_engine_masked(engine, dev_priv, engine_mask) + hw_mask |= hw_engine_mask[engine->id]; + } + + ret = gen6_hw_domain_reset(dev_priv, hw_mask); intel_uncore_forcewake_reset(dev, true); return ret; } -static int wait_for_register(struct drm_i915_private *dev_priv, - i915_reg_t reg, - const u32 mask, - const u32 value, - const unsigned long timeout_ms) +static int wait_for_register_fw(struct drm_i915_private *dev_priv, + i915_reg_t reg, + const u32 mask, + const u32 value, + const unsigned long timeout_ms) +{ + return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms); +} + +static int gen8_request_engine_reset(struct intel_engine_cs *engine) { - return wait_for((I915_READ(reg) & mask) == value, timeout_ms); + int ret; + struct drm_i915_private *dev_priv = engine->dev->dev_private; + + I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); + + ret = wait_for_register_fw(dev_priv, + RING_RESET_CTL(engine->mmio_base), + RESET_CTL_READY_TO_RESET, + RESET_CTL_READY_TO_RESET, + 700); + if (ret) + DRM_ERROR("%s: reset request timeout\n", engine->name); + + return ret; } -static int gen8_do_reset(struct drm_device *dev) +static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) +{ + struct drm_i915_private *dev_priv = engine->dev->dev_private; + + I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), + _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); +} + +static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_engine_cs *engine; - int i; - - for_each_ring(engine, dev_priv, i) { - I915_WRITE(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); - if (wait_for_register(dev_priv, - RING_RESET_CTL(engine->mmio_base), - RESET_CTL_READY_TO_RESET, - RESET_CTL_READY_TO_RESET, - 700)) { - DRM_ERROR("%s: reset request timeout\n", engine->name); + for_each_engine_masked(engine, dev_priv, engine_mask) + if (gen8_request_engine_reset(engine)) goto not_ready; - } - } - return gen6_do_reset(dev); + return gen6_reset_engines(dev, engine_mask); not_ready: - for_each_ring(engine, dev_priv, i) - I915_WRITE(RING_RESET_CTL(engine->mmio_base), - _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); + for_each_engine_masked(engine, dev_priv, engine_mask) + gen8_unrequest_engine_reset(engine); return -EIO; } -static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *) +static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, + unsigned engine_mask) { if (!i915.reset) return NULL; if (INTEL_INFO(dev)->gen >= 8) - return gen8_do_reset; + return gen8_reset_engines; else if (INTEL_INFO(dev)->gen >= 6) - return 
gen6_do_reset; + return gen6_reset_engines; else if (IS_GEN5(dev)) return ironlake_do_reset; else if (IS_G4X(dev)) @@ -1591,10 +1696,10 @@ static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *) return NULL; } -int intel_gpu_reset(struct drm_device *dev) +int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) { struct drm_i915_private *dev_priv = to_i915(dev); - int (*reset)(struct drm_device *); + int (*reset)(struct drm_device *, unsigned); int ret; reset = intel_get_gpu_reset(dev); @@ -1605,7 +1710,7 @@ int intel_gpu_reset(struct drm_device *dev) * request may be dropped and never completes (causing -EIO). */ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); - ret = reset(dev); + ret = reset(dev, engine_mask); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; @@ -1616,6 +1721,25 @@ bool intel_has_gpu_reset(struct drm_device *dev) return intel_get_gpu_reset(dev) != NULL; } +int intel_guc_reset(struct drm_i915_private *dev_priv) +{ + int ret; + unsigned long irqflags; + + if (!i915.enable_guc_submission) + return -EINVAL; + + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + ret = gen6_hw_domain_reset(dev_priv, GEN9_GRDOM_GUC); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + + return ret; +} + bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv) { return check_for_unclaimed_mmio(dev_priv); @@ -1639,3 +1763,111 @@ intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv) return false; } + +static enum forcewake_domains +intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, + i915_reg_t reg) +{ + enum forcewake_domains fw_domains; + + if (intel_vgpu_active(dev_priv->dev)) + return 0; + + switch (INTEL_INFO(dev_priv)->gen) { + case 9: + fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); + break; + case 8: + if (IS_CHERRYVIEW(dev_priv)) + fw_domains = __chv_reg_read_fw_domains(i915_mmio_reg_offset(reg)); + else + fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg)); + break; + case 7: + case 6: + if (IS_VALLEYVIEW(dev_priv)) + fw_domains = __vlv_reg_read_fw_domains(i915_mmio_reg_offset(reg)); + else + fw_domains = __gen6_reg_read_fw_domains(i915_mmio_reg_offset(reg)); + break; + default: + MISSING_CASE(INTEL_INFO(dev_priv)->gen); + case 5: /* forcewake was introduced with gen6 */ + case 4: + case 3: + case 2: + return 0; + } + + WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); + + return fw_domains; +} + +static enum forcewake_domains +intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, + i915_reg_t reg) +{ + enum forcewake_domains fw_domains; + + if (intel_vgpu_active(dev_priv->dev)) + return 0; + + switch (INTEL_INFO(dev_priv)->gen) { + case 9: + fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); + break; + case 8: + if (IS_CHERRYVIEW(dev_priv)) + fw_domains = __chv_reg_write_fw_domains(i915_mmio_reg_offset(reg)); + else + fw_domains = __gen8_reg_write_fw_domains(i915_mmio_reg_offset(reg)); + break; + case 7: + case 6: + fw_domains = FORCEWAKE_RENDER; + break; + default: + MISSING_CASE(INTEL_INFO(dev_priv)->gen); + case 5: + case 4: + case 3: + case 2: + return 0; + } + + WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains); + + return fw_domains; +} + +/** + * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access + * a register + * @dev_priv: pointer to struct 
drm_i915_private + * @reg: register in question + * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE + * + * Returns a set of forcewake domains required to be taken with for example + * intel_uncore_forcewake_get for the specified register to be accessible in the + * specified mode (read, write or read/write) with raw mmio accessors. + * + * NOTE: On Gen6 and Gen7 write forcewake domain (FORCEWAKE_RENDER) requires the + * callers to do FIFO management on their own or risk losing writes. + */ +enum forcewake_domains +intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, + i915_reg_t reg, unsigned int op) +{ + enum forcewake_domains fw_domains = 0; + + WARN_ON(!op); + + if (op & FW_REG_READ) + fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); + + if (op & FW_REG_WRITE) + fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); + + return fw_domains; +} diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h new file mode 100644 index 000000000000..9ff1e960d617 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h @@ -0,0 +1,832 @@ +/* + * Copyright © 2006-2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * + */ + +/* + * This information is private to VBT parsing in intel_bios.c. + * + * Please do NOT include anywhere else. 
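The intel_uncore_forcewake_for_reg() kernel-doc above pairs the helper with the __locked forcewake variants and the raw I915_READ_FW()/I915_WRITE_FW() accessors. A minimal caller sketch, not taken from this patch; the function name is illustrative and error handling is elided:

static u32 sample_read_fw(struct drm_i915_private *dev_priv, i915_reg_t reg)
{
	enum forcewake_domains fw;
	unsigned long irqflags;
	u32 val;

	/* ask which domains this register needs for a read */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (fw)
		intel_uncore_forcewake_get__locked(dev_priv, fw);
	val = I915_READ_FW(reg);	/* raw mmio, no automatic forcewake */
	if (fw)
		intel_uncore_forcewake_put__locked(dev_priv, fw);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return val;
}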
+ */ +#ifndef _INTEL_BIOS_PRIVATE +#error "intel_vbt_defs.h is private to intel_bios.c" +#endif + +#ifndef _INTEL_VBT_DEFS_H_ +#define _INTEL_VBT_DEFS_H_ + +#include "intel_bios.h" + +/** + * struct vbt_header - VBT Header structure + * @signature: VBT signature, always starts with "$VBT" + * @version: Version of this structure + * @header_size: Size of this structure + * @vbt_size: Size of VBT (VBT Header, BDB Header and data blocks) + * @vbt_checksum: Checksum + * @reserved0: Reserved + * @bdb_offset: Offset of &struct bdb_header from beginning of VBT + * @aim_offset: Offsets of add-in data blocks from beginning of VBT + */ +struct vbt_header { + u8 signature[20]; + u16 version; + u16 header_size; + u16 vbt_size; + u8 vbt_checksum; + u8 reserved0; + u32 bdb_offset; + u32 aim_offset[4]; +} __packed; + +/** + * struct bdb_header - BDB Header structure + * @signature: BDB signature "BIOS_DATA_BLOCK" + * @version: Version of the data block definitions + * @header_size: Size of this structure + * @bdb_size: Size of BDB (BDB Header and data blocks) + */ +struct bdb_header { + u8 signature[16]; + u16 version; + u16 header_size; + u16 bdb_size; +} __packed; + +/* strictly speaking, this is a "skip" block, but it has interesting info */ +struct vbios_data { + u8 type; /* 0 == desktop, 1 == mobile */ + u8 relstage; + u8 chipset; + u8 lvds_present:1; + u8 tv_present:1; + u8 rsvd2:6; /* finish byte */ + u8 rsvd3[4]; + u8 signon[155]; + u8 copyright[61]; + u16 code_segment; + u8 dos_boot_mode; + u8 bandwidth_percent; + u8 rsvd4; /* popup memory size */ + u8 resize_pci_bios; + u8 rsvd5; /* is crt already on ddc2 */ +} __packed; + +/* + * There are several types of BIOS data blocks (BDBs), each block has + * an ID and size in the first 3 bytes (ID in first, size in next 2). + * Known types are listed below. 
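For orientation, the block layout that comment describes (one ID byte followed by a little-endian 16-bit size, then the payload) can be walked roughly as below. This is only a sketch of the format, not the lookup helper intel_bios.c actually uses, and the bounds checks are simplified:

static const u8 *find_bdb_block(const struct bdb_header *bdb, u8 section_id)
{
	const u8 *base = (const u8 *)bdb;
	u32 index = bdb->header_size;

	while (index + 3 <= bdb->bdb_size) {
		u8 id = base[index];
		u16 size = base[index + 1] | (base[index + 2] << 8);

		index += 3;
		if (index + size > bdb->bdb_size)
			break;
		if (id == section_id)
			return base + index;	/* start of block payload */
		index += size;
	}

	return NULL;	/* e.g. BDB_GENERAL_FEATURES not present */
}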
+ */ +#define BDB_GENERAL_FEATURES 1 +#define BDB_GENERAL_DEFINITIONS 2 +#define BDB_OLD_TOGGLE_LIST 3 +#define BDB_MODE_SUPPORT_LIST 4 +#define BDB_GENERIC_MODE_TABLE 5 +#define BDB_EXT_MMIO_REGS 6 +#define BDB_SWF_IO 7 +#define BDB_SWF_MMIO 8 +#define BDB_PSR 9 +#define BDB_MODE_REMOVAL_TABLE 10 +#define BDB_CHILD_DEVICE_TABLE 11 +#define BDB_DRIVER_FEATURES 12 +#define BDB_DRIVER_PERSISTENCE 13 +#define BDB_EXT_TABLE_PTRS 14 +#define BDB_DOT_CLOCK_OVERRIDE 15 +#define BDB_DISPLAY_SELECT 16 +/* 17 rsvd */ +#define BDB_DRIVER_ROTATION 18 +#define BDB_DISPLAY_REMOVE 19 +#define BDB_OEM_CUSTOM 20 +#define BDB_EFP_LIST 21 /* workarounds for VGA hsync/vsync */ +#define BDB_SDVO_LVDS_OPTIONS 22 +#define BDB_SDVO_PANEL_DTDS 23 +#define BDB_SDVO_LVDS_PNP_IDS 24 +#define BDB_SDVO_LVDS_POWER_SEQ 25 +#define BDB_TV_OPTIONS 26 +#define BDB_EDP 27 +#define BDB_LVDS_OPTIONS 40 +#define BDB_LVDS_LFP_DATA_PTRS 41 +#define BDB_LVDS_LFP_DATA 42 +#define BDB_LVDS_BACKLIGHT 43 +#define BDB_LVDS_POWER 44 +#define BDB_MIPI_CONFIG 52 +#define BDB_MIPI_SEQUENCE 53 +#define BDB_SKIP 254 /* VBIOS private block, ignore */ + +struct bdb_general_features { + /* bits 1 */ + u8 panel_fitting:2; + u8 flexaim:1; + u8 msg_enable:1; + u8 clear_screen:3; + u8 color_flip:1; + + /* bits 2 */ + u8 download_ext_vbt:1; + u8 enable_ssc:1; + u8 ssc_freq:1; + u8 enable_lfp_on_override:1; + u8 disable_ssc_ddt:1; + u8 rsvd7:1; + u8 display_clock_mode:1; + u8 rsvd8:1; /* finish byte */ + + /* bits 3 */ + u8 disable_smooth_vision:1; + u8 single_dvi:1; + u8 rsvd9:1; + u8 fdi_rx_polarity_inverted:1; + u8 rsvd10:4; /* finish byte */ + + /* bits 4 */ + u8 legacy_monitor_detect; + + /* bits 5 */ + u8 int_crt_support:1; + u8 int_tv_support:1; + u8 int_efp_support:1; + u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ + u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ + u8 rsvd11:3; /* finish byte */ +} __packed; + +/* pre-915 */ +#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */ +#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */ +#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */ +#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */ + +/* Pre 915 */ +#define DEVICE_TYPE_NONE 0x00 +#define DEVICE_TYPE_CRT 0x01 +#define DEVICE_TYPE_TV 0x09 +#define DEVICE_TYPE_EFP 0x12 +#define DEVICE_TYPE_LFP 0x22 +/* On 915+ */ +#define DEVICE_TYPE_CRT_DPMS 0x6001 +#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001 +#define DEVICE_TYPE_TV_COMPOSITE 0x0209 +#define DEVICE_TYPE_TV_MACROVISION 0x0289 +#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c +#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609 +#define DEVICE_TYPE_TV_SCART 0x0209 +#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009 +#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012 +#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052 +#define DEVICE_TYPE_EFP_DVI_I 0x6053 +#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152 +#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2 +#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062 +#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162 +#define DEVICE_TYPE_LFP_PANELLINK 0x5012 +#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042 +#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062 +#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162 +#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2 + +#define DEVICE_CFG_NONE 0x00 +#define DEVICE_CFG_12BIT_DVOB 0x01 +#define DEVICE_CFG_12BIT_DVOC 0x02 +#define DEVICE_CFG_24BIT_DVOBC 0x09 +#define DEVICE_CFG_24BIT_DVOCB 0x0a +#define DEVICE_CFG_DUAL_DVOB 0x11 +#define DEVICE_CFG_DUAL_DVOC 0x12 +#define DEVICE_CFG_DUAL_DVOBC 0x13 +#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19 +#define 
DEVICE_CFG_DUAL_LINK_DVOCB 0x1a + +#define DEVICE_WIRE_NONE 0x00 +#define DEVICE_WIRE_DVOB 0x01 +#define DEVICE_WIRE_DVOC 0x02 +#define DEVICE_WIRE_DVOBC 0x03 +#define DEVICE_WIRE_DVOBB 0x05 +#define DEVICE_WIRE_DVOCC 0x06 +#define DEVICE_WIRE_DVOB_MASTER 0x0d +#define DEVICE_WIRE_DVOC_MASTER 0x0e + +#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */ +#define DEVICE_PORT_DVOB 0x01 +#define DEVICE_PORT_DVOC 0x02 + +/* + * We used to keep this struct but without any version control. We should avoid + * using it in the future, but it should be safe to keep using it in the old + * code. Do not change; we rely on its size. + */ +struct old_child_dev_config { + u16 handle; + u16 device_type; + u8 device_id[10]; /* ascii string */ + u16 addin_offset; + u8 dvo_port; /* See Device_PORT_* above */ + u8 i2c_pin; + u8 slave_addr; + u8 ddc_pin; + u16 edid_ptr; + u8 dvo_cfg; /* See DEVICE_CFG_* above */ + u8 dvo2_port; + u8 i2c2_pin; + u8 slave2_addr; + u8 ddc2_pin; + u8 capabilities; + u8 dvo_wiring;/* See DEVICE_WIRE_* above */ + u8 dvo2_wiring; + u16 extended_type; + u8 dvo_function; +} __packed; + +/* This one contains field offsets that are known to be common for all BDB + * versions. Notice that the meaning of the contents may still change, + * but at least the offsets are consistent. */ + +struct common_child_dev_config { + u16 handle; + u16 device_type; + u8 not_common1[12]; + u8 dvo_port; + u8 not_common2[2]; + u8 ddc_pin; + u16 edid_ptr; + u8 dvo_cfg; /* See DEVICE_CFG_* above */ + u8 efp_routed:1; + u8 lane_reversal:1; + u8 lspcon:1; + u8 iboost:1; + u8 hpd_invert:1; + u8 flag_reserved:3; + u8 hdmi_support:1; + u8 dp_support:1; + u8 tmds_support:1; + u8 support_reserved:5; + u8 not_common3[12]; + u8 iboost_level; +} __packed; + + +/* This field changes depending on the BDB version, so the most reliable way to + * read it is by checking the BDB version and reading the raw pointer. */ +union child_device_config { + /* This one is safe to be used anywhere, but the code should still check + * the BDB version. */ + u8 raw[33]; + /* This one should only be kept for legacy code. */ + struct old_child_dev_config old; + /* This one should also be safe to use anywhere, even without version + * checks. */ + struct common_child_dev_config common; +} __packed; + +struct bdb_general_definitions { + /* DDC GPIO */ + u8 crt_ddc_gmbus_pin; + + /* DPMS bits */ + u8 dpms_acpi:1; + u8 skip_boot_crt_detect:1; + u8 dpms_aim:1; + u8 rsvd1:5; /* finish byte */ + + /* boot device bits */ + u8 boot_display[2]; + u8 child_dev_size; + + /* + * Device info: + * If TV is present, it'll be at devices[0]. + * LVDS will be next, either devices[0] or [1], if present. + * On some platforms the number of devices is 6. But it could be as few as + * 4 if both TV and LVDS are missing. + * And the device count is related to the size of the general definition + * block. It is obtained by using the following formula: + * number = (block_size - sizeof(bdb_general_definitions))/ + * defs->child_dev_size; + */ + uint8_t devices[0]; +} __packed; + +/* Mask for DRRS / Panel Channel / SSC / BLT control bits extraction */ +#define MODE_MASK 0x3 + +struct bdb_lvds_options { + u8 panel_type; + u8 rsvd1; + /* LVDS capabilities, stored in a dword */ + u8 pfit_mode:2; + u8 pfit_text_mode_enhanced:1; + u8 pfit_gfx_mode_enhanced:1; + u8 pfit_ratio_auto:1; + u8 pixel_dither:1; + u8 lvds_edid:1; + u8 rsvd2:1; + u8 rsvd4; + /* LVDS Panel channel bits stored here */ + u32 lvds_panel_channel_bits; + /* LVDS SSC (Spread Spectrum Clock) bits stored here. 
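Putting the child_dev_size formula from bdb_general_definitions together with union child_device_config, a walk over the child devices looks roughly like this. It is a sketch that assumes block_size is the payload size of the general definitions block; the per-version size validation intel_bios.c performs is left out:

static void sketch_walk_child_devices(const struct bdb_general_definitions *defs,
				      u32 block_size)
{
	int count = (block_size - sizeof(*defs)) / defs->child_dev_size;
	int i;

	for (i = 0; i < count; i++) {
		const union child_device_config *child =
			(const void *)&defs->devices[i * defs->child_dev_size];

		/* a zero device_type marks an unused slot */
		if (child->common.device_type == 0)
			continue;
		/* inspect child->common.dvo_port, ddc_pin, ... here */
	}
}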
*/ + u16 ssc_bits; + u16 ssc_freq; + u16 ssc_ddt; + /* Panel color depth defined here */ + u16 panel_color_depth; + /* LVDS panel type bits stored here */ + u32 dps_panel_type_bits; + /* LVDS backlight control type bits stored here */ + u32 blt_control_type_bits; +} __packed; + +/* LFP pointer table contains entries to the struct below */ +struct bdb_lvds_lfp_data_ptr { + u16 fp_timing_offset; /* offsets are from start of bdb */ + u8 fp_table_size; + u16 dvo_timing_offset; + u8 dvo_table_size; + u16 panel_pnp_id_offset; + u8 pnp_table_size; +} __packed; + +struct bdb_lvds_lfp_data_ptrs { + u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */ + struct bdb_lvds_lfp_data_ptr ptr[16]; +} __packed; + +/* LFP data has 3 blocks per entry */ +struct lvds_fp_timing { + u16 x_res; + u16 y_res; + u32 lvds_reg; + u32 lvds_reg_val; + u32 pp_on_reg; + u32 pp_on_reg_val; + u32 pp_off_reg; + u32 pp_off_reg_val; + u32 pp_cycle_reg; + u32 pp_cycle_reg_val; + u32 pfit_reg; + u32 pfit_reg_val; + u16 terminator; +} __packed; + +struct lvds_dvo_timing { + u16 clock; /**< In 10khz */ + u8 hactive_lo; + u8 hblank_lo; + u8 hblank_hi:4; + u8 hactive_hi:4; + u8 vactive_lo; + u8 vblank_lo; + u8 vblank_hi:4; + u8 vactive_hi:4; + u8 hsync_off_lo; + u8 hsync_pulse_width; + u8 vsync_pulse_width:4; + u8 vsync_off:4; + u8 rsvd0:6; + u8 hsync_off_hi:2; + u8 h_image; + u8 v_image; + u8 max_hv; + u8 h_border; + u8 v_border; + u8 rsvd1:3; + u8 digital:2; + u8 vsync_positive:1; + u8 hsync_positive:1; + u8 rsvd2:1; +} __packed; + +struct lvds_pnp_id { + u16 mfg_name; + u16 product_code; + u32 serial; + u8 mfg_week; + u8 mfg_year; +} __packed; + +struct bdb_lvds_lfp_data_entry { + struct lvds_fp_timing fp_timing; + struct lvds_dvo_timing dvo_timing; + struct lvds_pnp_id pnp_id; +} __packed; + +struct bdb_lvds_lfp_data { + struct bdb_lvds_lfp_data_entry data[16]; +} __packed; + +#define BDB_BACKLIGHT_TYPE_NONE 0 +#define BDB_BACKLIGHT_TYPE_PWM 2 + +struct bdb_lfp_backlight_data_entry { + u8 type:2; + u8 active_low_pwm:1; + u8 obsolete1:5; + u16 pwm_freq_hz; + u8 min_brightness; + u8 obsolete2; + u8 obsolete3; +} __packed; + +struct bdb_lfp_backlight_data { + u8 entry_size; + struct bdb_lfp_backlight_data_entry data[16]; + u8 level[16]; +} __packed; + +struct aimdb_header { + char signature[16]; + char oem_device[20]; + u16 aimdb_version; + u16 aimdb_header_size; + u16 aimdb_size; +} __packed; + +struct aimdb_block { + u8 aimdb_id; + u16 aimdb_size; +} __packed; + +struct vch_panel_data { + u16 fp_timing_offset; + u8 fp_timing_size; + u16 dvo_timing_offset; + u8 dvo_timing_size; + u16 text_fitting_offset; + u8 text_fitting_size; + u16 graphics_fitting_offset; + u8 graphics_fitting_size; +} __packed; + +struct vch_bdb_22 { + struct aimdb_block aimdb_block; + struct vch_panel_data panels[16]; +} __packed; + +struct bdb_sdvo_lvds_options { + u8 panel_backlight; + u8 h40_set_panel_type; + u8 panel_type; + u8 ssc_clk_freq; + u16 als_low_trip; + u16 als_high_trip; + u8 sclalarcoeff_tab_row_num; + u8 sclalarcoeff_tab_row_size; + u8 coefficient[8]; + u8 panel_misc_bits_1; + u8 panel_misc_bits_2; + u8 panel_misc_bits_3; + u8 panel_misc_bits_4; +} __packed; + + +#define BDB_DRIVER_FEATURE_NO_LVDS 0 +#define BDB_DRIVER_FEATURE_INT_LVDS 1 +#define BDB_DRIVER_FEATURE_SDVO_LVDS 2 +#define BDB_DRIVER_FEATURE_EDP 3 + +struct bdb_driver_features { + u8 boot_dev_algorithm:1; + u8 block_display_switch:1; + u8 allow_display_switch:1; + u8 hotplug_dvo:1; + u8 dual_view_zoom:1; + u8 int15h_hook:1; + u8 sprite_in_clone:1; + u8 
primary_lfp_id:1; + + u16 boot_mode_x; + u16 boot_mode_y; + u8 boot_mode_bpp; + u8 boot_mode_refresh; + + u16 enable_lfp_primary:1; + u16 selective_mode_pruning:1; + u16 dual_frequency:1; + u16 render_clock_freq:1; /* 0: high freq; 1: low freq */ + u16 nt_clone_support:1; + u16 power_scheme_ui:1; /* 0: CUI; 1: 3rd party */ + u16 sprite_display_assign:1; /* 0: secondary; 1: primary */ + u16 cui_aspect_scaling:1; + u16 preserve_aspect_ratio:1; + u16 sdvo_device_power_down:1; + u16 crt_hotplug:1; + u16 lvds_config:2; + u16 tv_hotplug:1; + u16 hdmi_config:2; + + u8 static_display:1; + u8 reserved2:7; + u16 legacy_crt_max_x; + u16 legacy_crt_max_y; + u8 legacy_crt_max_refresh; + + u8 hdmi_termination; + u8 custom_vbt_version; + /* Driver features data block */ + u16 rmpm_enabled:1; + u16 s2ddt_enabled:1; + u16 dpst_enabled:1; + u16 bltclt_enabled:1; + u16 adb_enabled:1; + u16 drrs_enabled:1; + u16 grs_enabled:1; + u16 gpmt_enabled:1; + u16 tbt_enabled:1; + u16 psr_enabled:1; + u16 ips_enabled:1; + u16 reserved3:4; + u16 pc_feature_valid:1; +} __packed; + +#define EDP_18BPP 0 +#define EDP_24BPP 1 +#define EDP_30BPP 2 +#define EDP_RATE_1_62 0 +#define EDP_RATE_2_7 1 +#define EDP_LANE_1 0 +#define EDP_LANE_2 1 +#define EDP_LANE_4 3 +#define EDP_PREEMPHASIS_NONE 0 +#define EDP_PREEMPHASIS_3_5dB 1 +#define EDP_PREEMPHASIS_6dB 2 +#define EDP_PREEMPHASIS_9_5dB 3 +#define EDP_VSWING_0_4V 0 +#define EDP_VSWING_0_6V 1 +#define EDP_VSWING_0_8V 2 +#define EDP_VSWING_1_2V 3 + + +struct edp_link_params { + u8 rate:4; + u8 lanes:4; + u8 preemphasis:4; + u8 vswing:4; +} __packed; + +struct bdb_edp { + struct edp_power_seq power_seqs[16]; + u32 color_depth; + struct edp_link_params link_params[16]; + u32 sdrrs_msa_timing_delay; + + /* ith bit indicates enabled/disabled for (i+1)th panel */ + u16 edp_s3d_feature; + u16 edp_t3_optimization; + u64 edp_vswing_preemph; /* v173 */ +} __packed; + +struct psr_table { + /* Feature bits */ + u8 full_link:1; + u8 require_aux_to_wakeup:1; + u8 feature_bits_rsvd:6; + + /* Wait times */ + u8 idle_frames:4; + u8 lines_to_wait:3; + u8 wait_times_rsvd:1; + + /* TP wake up time in multiple of 100 */ + u16 tp1_wakeup_time; + u16 tp2_tp3_wakeup_time; +} __packed; + +struct bdb_psr { + struct psr_table psr_table[16]; +} __packed; + +/* + * Driver<->VBIOS interaction occurs through scratch bits in + * GR18 & SWF*. 
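The EDP_* values above decode the 4-bit fields of struct edp_link_params, which struct bdb_edp stores per panel, indexed by panel_type. A rough illustration; the mapping to lane counts and link rates in kHz is an assumption spelled out here, not code from this patch:

static void sketch_parse_edp_link(const struct bdb_edp *edp, int panel_type)
{
	const struct edp_link_params *params = &edp->link_params[panel_type];
	int lanes = 0, rate_khz = 0;

	switch (params->rate) {
	case EDP_RATE_1_62: rate_khz = 162000; break;	/* assumed mapping */
	case EDP_RATE_2_7:  rate_khz = 270000; break;
	}

	switch (params->lanes) {
	case EDP_LANE_1: lanes = 1; break;
	case EDP_LANE_2: lanes = 2; break;
	case EDP_LANE_4: lanes = 4; break;
	}

	/* params->preemphasis and params->vswing decode the same way
	 * via the EDP_PREEMPHASIS_* / EDP_VSWING_* values */
	(void)lanes;
	(void)rate_khz;
}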
+ */ + +/* GR18 bits are set on display switch and hotkey events */ +#define GR18_DRIVER_SWITCH_EN (1<<7) /* 0: VBIOS control, 1: driver control */ +#define GR18_HOTKEY_MASK 0x78 /* See also SWF4 15:0 */ +#define GR18_HK_NONE (0x0<<3) +#define GR18_HK_LFP_STRETCH (0x1<<3) +#define GR18_HK_TOGGLE_DISP (0x2<<3) +#define GR18_HK_DISP_SWITCH (0x4<<3) /* see SWF14 15:0 for what to enable */ +#define GR18_HK_POPUP_DISABLED (0x6<<3) +#define GR18_HK_POPUP_ENABLED (0x7<<3) +#define GR18_HK_PFIT (0x8<<3) +#define GR18_HK_APM_CHANGE (0xa<<3) +#define GR18_HK_MULTIPLE (0xc<<3) +#define GR18_USER_INT_EN (1<<2) +#define GR18_A0000_FLUSH_EN (1<<1) +#define GR18_SMM_EN (1<<0) + +/* Set by driver, cleared by VBIOS */ +#define SWF00_YRES_SHIFT 16 +#define SWF00_XRES_SHIFT 0 +#define SWF00_RES_MASK 0xffff + +/* Set by VBIOS at boot time and driver at runtime */ +#define SWF01_TV2_FORMAT_SHIFT 8 +#define SWF01_TV1_FORMAT_SHIFT 0 +#define SWF01_TV_FORMAT_MASK 0xffff + +#define SWF10_VBIOS_BLC_I2C_EN (1<<29) +#define SWF10_GTT_OVERRIDE_EN (1<<28) +#define SWF10_LFP_DPMS_OVR (1<<27) /* override DPMS on display switch */ +#define SWF10_ACTIVE_TOGGLE_LIST_MASK (7<<24) +#define SWF10_OLD_TOGGLE 0x0 +#define SWF10_TOGGLE_LIST_1 0x1 +#define SWF10_TOGGLE_LIST_2 0x2 +#define SWF10_TOGGLE_LIST_3 0x3 +#define SWF10_TOGGLE_LIST_4 0x4 +#define SWF10_PANNING_EN (1<<23) +#define SWF10_DRIVER_LOADED (1<<22) +#define SWF10_EXTENDED_DESKTOP (1<<21) +#define SWF10_EXCLUSIVE_MODE (1<<20) +#define SWF10_OVERLAY_EN (1<<19) +#define SWF10_PLANEB_HOLDOFF (1<<18) +#define SWF10_PLANEA_HOLDOFF (1<<17) +#define SWF10_VGA_HOLDOFF (1<<16) +#define SWF10_ACTIVE_DISP_MASK 0xffff +#define SWF10_PIPEB_LFP2 (1<<15) +#define SWF10_PIPEB_EFP2 (1<<14) +#define SWF10_PIPEB_TV2 (1<<13) +#define SWF10_PIPEB_CRT2 (1<<12) +#define SWF10_PIPEB_LFP (1<<11) +#define SWF10_PIPEB_EFP (1<<10) +#define SWF10_PIPEB_TV (1<<9) +#define SWF10_PIPEB_CRT (1<<8) +#define SWF10_PIPEA_LFP2 (1<<7) +#define SWF10_PIPEA_EFP2 (1<<6) +#define SWF10_PIPEA_TV2 (1<<5) +#define SWF10_PIPEA_CRT2 (1<<4) +#define SWF10_PIPEA_LFP (1<<3) +#define SWF10_PIPEA_EFP (1<<2) +#define SWF10_PIPEA_TV (1<<1) +#define SWF10_PIPEA_CRT (1<<0) + +#define SWF11_MEMORY_SIZE_SHIFT 16 +#define SWF11_SV_TEST_EN (1<<15) +#define SWF11_IS_AGP (1<<14) +#define SWF11_DISPLAY_HOLDOFF (1<<13) +#define SWF11_DPMS_REDUCED (1<<12) +#define SWF11_IS_VBE_MODE (1<<11) +#define SWF11_PIPEB_ACCESS (1<<10) /* 0 here means pipe a */ +#define SWF11_DPMS_MASK 0x07 +#define SWF11_DPMS_OFF (1<<2) +#define SWF11_DPMS_SUSPEND (1<<1) +#define SWF11_DPMS_STANDBY (1<<0) +#define SWF11_DPMS_ON 0 + +#define SWF14_GFX_PFIT_EN (1<<31) +#define SWF14_TEXT_PFIT_EN (1<<30) +#define SWF14_LID_STATUS_CLOSED (1<<29) /* 0 here means open */ +#define SWF14_POPUP_EN (1<<28) +#define SWF14_DISPLAY_HOLDOFF (1<<27) +#define SWF14_DISP_DETECT_EN (1<<26) +#define SWF14_DOCKING_STATUS_DOCKED (1<<25) /* 0 here means undocked */ +#define SWF14_DRIVER_STATUS (1<<24) +#define SWF14_OS_TYPE_WIN9X (1<<23) +#define SWF14_OS_TYPE_WINNT (1<<22) +/* 21:19 rsvd */ +#define SWF14_PM_TYPE_MASK 0x00070000 +#define SWF14_PM_ACPI_VIDEO (0x4 << 16) +#define SWF14_PM_ACPI (0x3 << 16) +#define SWF14_PM_APM_12 (0x2 << 16) +#define SWF14_PM_APM_11 (0x1 << 16) +#define SWF14_HK_REQUEST_MASK 0x0000ffff /* see GR18 6:3 for event type */ + /* if GR18 indicates a display switch */ +#define SWF14_DS_PIPEB_LFP2_EN (1<<15) +#define SWF14_DS_PIPEB_EFP2_EN (1<<14) +#define SWF14_DS_PIPEB_TV2_EN (1<<13) +#define SWF14_DS_PIPEB_CRT2_EN (1<<12) +#define 
SWF14_DS_PIPEB_LFP_EN (1<<11) +#define SWF14_DS_PIPEB_EFP_EN (1<<10) +#define SWF14_DS_PIPEB_TV_EN (1<<9) +#define SWF14_DS_PIPEB_CRT_EN (1<<8) +#define SWF14_DS_PIPEA_LFP2_EN (1<<7) +#define SWF14_DS_PIPEA_EFP2_EN (1<<6) +#define SWF14_DS_PIPEA_TV2_EN (1<<5) +#define SWF14_DS_PIPEA_CRT2_EN (1<<4) +#define SWF14_DS_PIPEA_LFP_EN (1<<3) +#define SWF14_DS_PIPEA_EFP_EN (1<<2) +#define SWF14_DS_PIPEA_TV_EN (1<<1) +#define SWF14_DS_PIPEA_CRT_EN (1<<0) + /* if GR18 indicates a panel fitting request */ +#define SWF14_PFIT_EN (1<<0) /* 0 means disable */ + /* if GR18 indicates an APM change request */ +#define SWF14_APM_HIBERNATE 0x4 +#define SWF14_APM_SUSPEND 0x3 +#define SWF14_APM_STANDBY 0x1 +#define SWF14_APM_RESTORE 0x0 + +/* Add the device class for LFP, TV, HDMI */ +#define DEVICE_TYPE_INT_LFP 0x1022 +#define DEVICE_TYPE_INT_TV 0x1009 +#define DEVICE_TYPE_HDMI 0x60D2 +#define DEVICE_TYPE_DP 0x68C6 +#define DEVICE_TYPE_eDP 0x78C6 + +#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) +#define DEVICE_TYPE_POWER_MANAGEMENT (1 << 14) +#define DEVICE_TYPE_HOTPLUG_SIGNALING (1 << 13) +#define DEVICE_TYPE_INTERNAL_CONNECTOR (1 << 12) +#define DEVICE_TYPE_NOT_HDMI_OUTPUT (1 << 11) +#define DEVICE_TYPE_MIPI_OUTPUT (1 << 10) +#define DEVICE_TYPE_COMPOSITE_OUTPUT (1 << 9) +#define DEVICE_TYPE_DUAL_CHANNEL (1 << 8) +#define DEVICE_TYPE_HIGH_SPEED_LINK (1 << 6) +#define DEVICE_TYPE_LVDS_SINGALING (1 << 5) +#define DEVICE_TYPE_TMDS_DVI_SIGNALING (1 << 4) +#define DEVICE_TYPE_VIDEO_SIGNALING (1 << 3) +#define DEVICE_TYPE_DISPLAYPORT_OUTPUT (1 << 2) +#define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) +#define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) + +/* + * Bits we care about when checking for DEVICE_TYPE_eDP + * Depending on the system, the other bits may or may not + * be set for eDP outputs. + */ +#define DEVICE_TYPE_eDP_BITS \ + (DEVICE_TYPE_INTERNAL_CONNECTOR | \ + DEVICE_TYPE_MIPI_OUTPUT | \ + DEVICE_TYPE_COMPOSITE_OUTPUT | \ + DEVICE_TYPE_DUAL_CHANNEL | \ + DEVICE_TYPE_LVDS_SINGALING | \ + DEVICE_TYPE_TMDS_DVI_SIGNALING | \ + DEVICE_TYPE_VIDEO_SIGNALING | \ + DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ + DEVICE_TYPE_ANALOG_OUTPUT) + +/* define the DVO port for HDMI output type */ +#define DVO_B 1 +#define DVO_C 2 +#define DVO_D 3 + +/* Possible values for the "DVO Port" field for versions >= 155: */ +#define DVO_PORT_HDMIA 0 +#define DVO_PORT_HDMIB 1 +#define DVO_PORT_HDMIC 2 +#define DVO_PORT_HDMID 3 +#define DVO_PORT_LVDS 4 +#define DVO_PORT_TV 5 +#define DVO_PORT_CRT 6 +#define DVO_PORT_DPB 7 +#define DVO_PORT_DPC 8 +#define DVO_PORT_DPD 9 +#define DVO_PORT_DPA 10 +#define DVO_PORT_DPE 11 +#define DVO_PORT_HDMIE 12 +#define DVO_PORT_MIPIA 21 +#define DVO_PORT_MIPIB 22 +#define DVO_PORT_MIPIC 23 +#define DVO_PORT_MIPID 24 + +/* Block 52 contains MIPI configuration block + * 6 * bdb_mipi_config, followed by 6 pps data block + * block below + */ +#define MAX_MIPI_CONFIGURATIONS 6 + +struct bdb_mipi_config { + struct mipi_config config[MAX_MIPI_CONFIGURATIONS]; + struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS]; +} __packed; + +/* Block 53 contains MIPI sequences as needed by the panel + * for enabling it. 
This block can be variable in size and + * can be maximum of 6 blocks + */ +struct bdb_mipi_sequence { + u8 version; + u8 data[0]; +} __packed; + +enum mipi_gpio_pin_index { + MIPI_GPIO_UNDEFINED = 0, + MIPI_GPIO_PANEL_ENABLE, + MIPI_GPIO_BL_ENABLE, + MIPI_GPIO_PWM_ENABLE, + MIPI_GPIO_RESET_N, + MIPI_GPIO_PWR_DOWN_R, + MIPI_GPIO_STDBY_RST_N, + MIPI_GPIO_MAX +}; + +#endif /* _INTEL_VBT_DEFS_H_ */ diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 2a95d10e9d92..a24631fdf4ad 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -225,8 +225,6 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, if (!iores) return -ENXIO; - platform_set_drvdata(pdev, hdmi); - encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); /* * If we failed to find the CRTC(s) which this encoder is @@ -245,7 +243,16 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, drm_encoder_init(drm, encoder, &dw_hdmi_imx_encoder_funcs, DRM_MODE_ENCODER_TMDS, NULL); - return dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); + ret = dw_hdmi_bind(dev, master, data, encoder, iores, irq, plat_data); + + /* + * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(), + * which would have called the encoder cleanup. Do it manually. + */ + if (ret) + drm_encoder_cleanup(encoder); + + return ret; } static void dw_hdmi_imx_unbind(struct device *dev, struct device *master, diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 9876e0f0c3e1..e26dcdec2aba 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -326,7 +326,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, { struct imx_drm_device *imxdrm = drm->dev_private; struct imx_drm_crtc *imx_drm_crtc; - int ret; /* * The vblank arrays are dimensioned by MAX_CRTC - we can't @@ -351,10 +350,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, *new_crtc = imx_drm_crtc; - ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256); - if (ret) - goto err_register; - drm_crtc_helper_add(crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs); @@ -362,11 +357,6 @@ int imx_drm_add_crtc(struct drm_device *drm, struct drm_crtc *crtc, imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs, NULL); return 0; - -err_register: - imxdrm->crtc[--imxdrm->pipes] = NULL; - kfree(imx_drm_crtc); - return ret; } EXPORT_SYMBOL_GPL(imx_drm_add_crtc); diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 588827844f30..681ec6eb77d9 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -72,22 +72,101 @@ static inline int calc_bandwidth(int width, int height, unsigned int vref) int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, int x, int y) { - struct drm_gem_cma_object *cma_obj; - unsigned long eba; - int active; - - cma_obj = drm_fb_cma_get_gem_obj(fb, 0); - if (!cma_obj) { - DRM_DEBUG_KMS("entry is null.\n"); - return -EFAULT; + struct drm_gem_cma_object *cma_obj[3]; + unsigned long eba, ubo, vbo; + int active, i; + + for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { + cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i); + if (!cma_obj[i]) { + DRM_DEBUG_KMS("plane %d entry is null.\n", i); + return -EFAULT; + } } - dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", - &cma_obj->paddr, x, y); - - eba = cma_obj->paddr + fb->offsets[0] + + eba 
= cma_obj[0]->paddr + fb->offsets[0] + fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; + if (eba & 0x7) { + DRM_DEBUG_KMS("base address must be a multiple of 8.\n"); + return -EINVAL; + } + + if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) { + DRM_DEBUG_KMS("pitches out of range.\n"); + return -EINVAL; + } + + if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) { + DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n"); + return -EINVAL; + } + + ipu_plane->stride[0] = fb->pitches[0]; + + switch (fb->pixel_format) { + case DRM_FORMAT_YUV420: + case DRM_FORMAT_YVU420: + /* + * Multiplanar formats have to meet the following restrictions: + * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO + * - EBA, UBO and VBO are a multiple of 8 + * - UBO and VBO are unsigned and not larger than 0xfffff8 + * - Only EBA may be changed while scanout is active + * - The strides of U and V planes must be identical. + */ + ubo = cma_obj[1]->paddr + fb->offsets[1] + + fb->pitches[1] * y / 2 + x / 2 - eba; + vbo = cma_obj[2]->paddr + fb->offsets[2] + + fb->pitches[2] * y / 2 + x / 2 - eba; + + if ((ubo & 0x7) || (vbo & 0x7)) { + DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n"); + return -EINVAL; + } + + if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) { + DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n"); + return -EINVAL; + } + + if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) || + (ipu_plane->v_offset != vbo))) { + DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n"); + return -EINVAL; + } + + if (fb->pitches[1] != fb->pitches[2]) { + DRM_DEBUG_KMS("U/V pitches must be identical.\n"); + return -EINVAL; + } + + if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) { + DRM_DEBUG_KMS("U/V pitches out of range.\n"); + return -EINVAL; + } + + if (ipu_plane->enabled && + (ipu_plane->stride[1] != fb->pitches[1])) { + DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n"); + return -EINVAL; + } + + ipu_plane->u_offset = ubo; + ipu_plane->v_offset = vbo; + ipu_plane->stride[1] = fb->pitches[1]; + + dev_dbg(ipu_plane->base.dev->dev, + "phys = %pad %pad %pad, x = %d, y = %d", + &cma_obj[0]->paddr, &cma_obj[1]->paddr, + &cma_obj[2]->paddr, x, y); + break; + default: + dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", + &cma_obj[0]->paddr, x, y); + break; + } + if (ipu_plane->enabled) { active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); @@ -201,12 +280,6 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, } } - ret = ipu_dmfc_init_channel(ipu_plane->dmfc, crtc_w); - if (ret) { - dev_err(dev, "initializing dmfc channel failed with %d\n", ret); - return ret; - } - ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc, calc_bandwidth(crtc_w, crtc_h, calc_vref(mode)), 64); @@ -215,6 +288,8 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, return ret; } + ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w); + ipu_cpmem_zero(ipu_plane->ipu_ch); ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h); ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format); @@ -233,6 +308,18 @@ int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc, if (interlaced) ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]); + if (fb->pixel_format == DRM_FORMAT_YUV420) { + ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, + ipu_plane->stride[1], + ipu_plane->u_offset, + 
ipu_plane->v_offset); + } else if (fb->pixel_format == DRM_FORMAT_YVU420) { + ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, + ipu_plane->stride[1], + ipu_plane->v_offset, + ipu_plane->u_offset); + } + ipu_plane->w = src_w; ipu_plane->h = src_h; diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index 3a443b413c60..4448fd4ad4eb 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -29,6 +29,10 @@ struct ipu_plane { int w; int h; + unsigned int u_offset; + unsigned int v_offset; + unsigned int stride[2]; + bool enabled; }; diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index b0af77454d52..ebb470ff7200 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -116,10 +116,8 @@ static struct pci_driver mgag200_pci_driver = { static int __init mgag200_init(void) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && mgag200_modeset == -1) return -EINVAL; -#endif if (mgag200_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h index 16641cec18a2..b5370cb56e3c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h @@ -11,6 +11,7 @@ struct nvkm_device_tegra { struct reset_control *rst; struct clk *clk; + struct clk *clk_ref; struct clk *clk_pwr; struct regulator *vdd; @@ -36,6 +37,10 @@ struct nvkm_device_tegra_func { * bypassed). A value of 0 means an IOMMU is never used. */ u8 iommu_bit; + /* + * Whether the chip requires a reference clock + */ + bool require_ref_clk; }; int nvkm_device_tegra_new(const struct nvkm_device_tegra_func *, diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d06877d9c1ed..db5c7d0cc25c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1083,10 +1083,8 @@ nouveau_drm_init(void) nouveau_display_options(); if (nouveau_modeset == -1) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force()) nouveau_modeset = 0; -#endif } if (!nouveau_modeset) diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index 2dfe58af12e4..4c4cc2260257 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -55,6 +55,11 @@ static const struct nvkm_device_tegra_func gk20a_platform_data = { .iommu_bit = 34, }; +static const struct nvkm_device_tegra_func gm20b_platform_data = { + .iommu_bit = 34, + .require_ref_clk = true, +}; + static const struct of_device_id nouveau_platform_match[] = { { .compatible = "nvidia,gk20a", @@ -62,7 +67,7 @@ static const struct of_device_id nouveau_platform_match[] = { }, { .compatible = "nvidia,gm20b", - .data = &gk20a_platform_data, + .data = &gm20b_platform_data, }, { } }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index 9afa5f3e3c1c..ec12efb4689a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -35,6 +35,11 @@ nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) ret = clk_prepare_enable(tdev->clk); if (ret) goto err_clk; + if (tdev->clk_ref) { + ret = clk_prepare_enable(tdev->clk_ref); + if (ret) + goto err_clk_ref; + } ret = clk_prepare_enable(tdev->clk_pwr); if (ret) goto err_clk_pwr; @@ -57,6 +62,9 @@ 
nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) err_clamp: clk_disable_unprepare(tdev->clk_pwr); err_clk_pwr: + if (tdev->clk_ref) + clk_disable_unprepare(tdev->clk_ref); +err_clk_ref: clk_disable_unprepare(tdev->clk); err_clk: regulator_disable(tdev->vdd); @@ -71,6 +79,8 @@ nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev) udelay(10); clk_disable_unprepare(tdev->clk_pwr); + if (tdev->clk_ref) + clk_disable_unprepare(tdev->clk_ref); clk_disable_unprepare(tdev->clk); udelay(10); @@ -274,6 +284,13 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, goto free; } + if (func->require_ref_clk) + tdev->clk_ref = devm_clk_get(&pdev->dev, "ref"); + if (IS_ERR(tdev->clk_ref)) { + ret = PTR_ERR(tdev->clk_ref); + goto free; + } + tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); if (IS_ERR(tdev->clk_pwr)) { ret = PTR_ERR(tdev->clk_pwr); diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 43e5f503d1c5..030409a3ee4e 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, qxl_bo_kunmap(user_bo); + qcrtc->cur_x += qcrtc->hot_spot_x - hot_x; + qcrtc->cur_y += qcrtc->hot_spot_y - hot_y; + qcrtc->hot_spot_x = hot_x; + qcrtc->hot_spot_y = hot_y; + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; - cmd->u.set.position.x = qcrtc->cur_x; - cmd->u.set.position.y = qcrtc->cur_y; + cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x; + cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); @@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_MOVE; - cmd->u.position.x = qcrtc->cur_x; - cmd->u.position.y = qcrtc->cur_y; + cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x; + cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y; qxl_release_unmap(qdev, release, &cmd->release_info); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 7307b07fe06b..dc9df5fe50ba 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -272,10 +272,8 @@ static struct drm_driver qxl_driver = { static int __init qxl_init(void) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && qxl_modeset == -1) return -EINVAL; -#endif if (qxl_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 6e6b9b1519b8..3f3897eb458c 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -135,6 +135,8 @@ struct qxl_crtc { int index; int cur_x; int cur_y; + int hot_spot_x; + int hot_spot_y; }; struct qxl_output { diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index cf61e0856f4a..b80b08f71cb4 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -275,13 +275,15 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); atombios_blank_crtc(crtc, ATOM_DISABLE); - drm_vblank_on(dev, radeon_crtc->crtc_id); + if (dev->num_crtcs > radeon_crtc->crtc_id) + drm_vblank_on(dev, radeon_crtc->crtc_id); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: 
case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, radeon_crtc->crtc_id); + if (dev->num_crtcs > radeon_crtc->crtc_id) + drm_vblank_off(dev, radeon_crtc->crtc_id); if (radeon_crtc->enabled) atombios_blank_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index ccd4ad4ee592..5d44ed0d104a 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -566,12 +566,10 @@ static struct pci_driver radeon_kms_pci_driver = { static int __init radeon_init(void) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && radeon_modeset == -1) { DRM_INFO("VGACON disable radeon kernel modesetting.\n"); radeon_modeset = 0; } -#endif /* set to modesetting by default if not nomodeset */ if (radeon_modeset == -1) radeon_modeset = 1; diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index 979f3bf65f2c..1e9304d1c88f 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -291,6 +291,8 @@ int radeon_irq_kms_init(struct radeon_device *rdev) if (r) { return r; } + rdev->ddev->vblank_disable_allowed = true; + /* enable msi */ rdev->msi_enabled = 0; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 24152dfef199..478d4099b0d0 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -331,13 +331,15 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode) RADEON_CRTC_DISP_REQ_EN_B)); WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); } - drm_vblank_on(dev, radeon_crtc->crtc_id); + if (dev->num_crtcs > radeon_crtc->crtc_id) + drm_vblank_on(dev, radeon_crtc->crtc_id); radeon_crtc_load_lut(crtc); break; case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - drm_vblank_off(dev, radeon_crtc->crtc_id); + if (dev->num_crtcs > radeon_crtc->crtc_id) + drm_vblank_off(dev, radeon_crtc->crtc_id); if (radeon_crtc->crtc_id) WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); else { diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c008312e1bcd..7dddfdce85e6 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -615,7 +615,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm) set_page_dirty(page); mark_page_accessed(page); - page_cache_release(page); + put_page(page); } sg_free_table(ttm->sg); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index ed6006bf6bd8..644db36d0d20 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -278,10 +278,7 @@ static int rcar_du_remove(struct platform_device *pdev) struct rcar_du_device *rcdu = platform_get_drvdata(pdev); struct drm_device *ddev = rcdu->ddev; - mutex_lock(&ddev->mode_config.mutex); - drm_connector_unplug_all(ddev); - mutex_unlock(&ddev->mode_config.mutex); - + drm_connector_unregister_all(ddev); drm_dev_unregister(ddev); if (rcdu->fbdev) diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 76b3362c5e59..d30bdc38a760 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -16,6 +16,15 @@ config DRM_ROCKCHIP 2D or 3D acceleration; acceleration is performed by other IP found on the SoC. 
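Several hunks in this series (mgag200, nouveau, radeon above, and qxl, virtio-gpu and vmwgfx below) drop the #ifdef CONFIG_VGA_CONSOLE guard around vgacon_text_force(). That only compiles if a stub exists when the option is off. What follows is a minimal userspace sketch of that pattern, assuming a constant-false stub along the lines of what the kernel's console header provides; it is an illustration, not the kernel's actual header.

#include <stdbool.h>
#include <stdio.h>

/* #define CONFIG_VGA_CONSOLE */	/* toggle to emulate the Kconfig option */

#ifdef CONFIG_VGA_CONSOLE
bool vgacon_text_force(void);		/* real implementation lives elsewhere */
#else
static inline bool vgacon_text_force(void) { return false; }
#endif

static int modeset = -1;		/* stand-in for the drivers' module parameter */

static int example_driver_init(void)
{
	/* the call site no longer needs its own #ifdef */
	if (vgacon_text_force() && modeset == -1)
		return -1;		/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("init: %d\n", example_driver_init());
	return 0;
}

With the stub evaluating to a constant false, the compiler folds the branch away when CONFIG_VGA_CONSOLE is unset, which is why the per-driver preprocessor guards became redundant.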
+config ROCKCHIP_ANALOGIX_DP
+	tristate "Rockchip specific extensions for Analogix DP driver"
+	depends on DRM_ROCKCHIP
+	select DRM_ANALOGIX_DP
+	help
+	  This selects support for Rockchip SoC specific extensions
+	  for the Analogix Core DP driver. If you want to enable DP
+	  on an RK3288 based SoC, you should select this option.
+
 config ROCKCHIP_DW_HDMI
 	tristate "Rockchip specific extensions for Synopsys DW HDMI"
 	depends on DRM_ROCKCHIP
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index df8fbef17791..05d07138a2b2 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -6,6 +6,7 @@ rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o \
 		rockchip_drm_gem.o rockchip_drm_vop.o
 rockchipdrm-$(CONFIG_DRM_FBDEV_EMULATION) += rockchip_drm_fbdev.o
 
+obj-$(CONFIG_ROCKCHIP_ANALOGIX_DP) += analogix_dp-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
 obj-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
 obj-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
new file mode 100644
index 000000000000..a1d94d8d9443
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -0,0 +1,384 @@
+/*
+ * Rockchip SoC DP (Display Port) interface driver.
+ *
+ * Copyright (C) Fuzhou Rockchip Electronics Co., Ltd.
+ * Author: Andy Yan <andy.yan@rock-chips.com>
+ *         Yakir Yang <ykk@rock-chips.com>
+ *         Jeff Chen <jeff.chen@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/component.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_graph.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+
+#include <video/of_videomode.h>
+#include <video/videomode.h>
+
+#include <drm/bridge/analogix_dp.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define to_dp(nm)	container_of(nm, struct rockchip_dp_device, nm)
+
+/* dp grf register offset */
+#define GRF_SOC_CON6		0x025c
+#define GRF_EDP_LCD_SEL_MASK	BIT(5)
+#define GRF_EDP_SEL_VOP_LIT	BIT(5)
+#define GRF_EDP_SEL_VOP_BIG	0
+
+struct rockchip_dp_device {
+	struct drm_device	*drm_dev;
+	struct device		*dev;
+	struct drm_encoder	encoder;
+	struct drm_display_mode	mode;
+
+	struct clk		*pclk;
+	struct regmap		*grf;
+	struct reset_control	*rst;
+
+	struct analogix_dp_plat_data plat_data;
+};
+
+static int rockchip_dp_pre_init(struct rockchip_dp_device *dp)
+{
+	reset_control_assert(dp->rst);
+	usleep_range(10, 20);
+	reset_control_deassert(dp->rst);
+
+	return 0;
+}
+
+static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
+{
+	struct rockchip_dp_device *dp = to_dp(plat_data);
+	int ret;
+
+	ret = clk_prepare_enable(dp->pclk);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+		return ret;
+	}
+
+	ret = rockchip_dp_pre_init(dp);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to dp pre init %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
+{
+	struct rockchip_dp_device *dp = to_dp(plat_data);
+
+	clk_disable_unprepare(dp->pclk);
+
+	return 0;
+}
+
+static bool
+rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+				   const struct drm_display_mode *mode,
+				   struct drm_display_mode *adjusted_mode)
+{
+	/* do nothing */
+	return true;
+}
+
+static void rockchip_dp_drm_encoder_mode_set(struct drm_encoder *encoder,
+					     struct drm_display_mode *mode,
+					     struct drm_display_mode *adjusted)
+{
+	/* do nothing */
+}
+
+static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
+{
+	struct rockchip_dp_device *dp = to_dp(encoder);
+	int ret;
+	u32 val;
+
+	/*
+	 * FIXME(Yakir): the driver should configure the CRTC output video
+	 * mode using the display information that indicates which
+	 * colorimetry the monitor supports.
+	 *
+	 * It is not clear why the CRTC driver only produces correct output
+	 * in RGBaaa mode. For example, with the "innolux,n116bge" eDP
+	 * screen connected, the EDID indicates that the screen accepts
+	 * only 6bpc mode, yet configuring the CRTC for RGB666 output makes
+	 * the screen show a blue picture (RGB888 shows a green picture).
+	 * If the CRTC is configured for RGBaaa while the eDP driver keeps
+	 * the RGB666 input video mode, the screen works perfectly.
+	 */
+	ret = rockchip_drm_crtc_mode_config(encoder->crtc,
+					    DRM_MODE_CONNECTOR_eDP,
+					    ROCKCHIP_OUT_MODE_AAAA);
+	if (ret < 0) {
+		dev_err(dp->dev, "Could not set crtc mode config (%d)\n", ret);
+		return;
+	}
+
+	ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder);
+	if (ret < 0)
+		return;
+
+	if (ret)
+		val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16);
+	else
+		val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16);
+
+	dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
+
+	ret = regmap_write(dp->grf, GRF_SOC_CON6, val);
+	if (ret != 0) {
+		dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
+		return;
+	}
+}
+
+static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
+{
+	/* do nothing */
+}
+
+static struct drm_encoder_helper_funcs rockchip_dp_encoder_helper_funcs = {
+	.mode_fixup = rockchip_dp_drm_encoder_mode_fixup,
+	.mode_set = rockchip_dp_drm_encoder_mode_set,
+	.enable = rockchip_dp_drm_encoder_enable,
+	.disable = rockchip_dp_drm_encoder_nop,
+};
+
+static void rockchip_dp_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static struct drm_encoder_funcs rockchip_dp_encoder_funcs = {
+	.destroy = rockchip_dp_drm_encoder_destroy,
+};
+
+static int rockchip_dp_init(struct rockchip_dp_device *dp)
+{
+	struct device *dev = dp->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+	if (IS_ERR(dp->grf)) {
+		dev_err(dev, "failed to get rockchip,grf property\n");
+		return PTR_ERR(dp->grf);
+	}
+
+	dp->pclk = devm_clk_get(dev, "pclk");
+	if (IS_ERR(dp->pclk)) {
+		dev_err(dev, "failed to get pclk property\n");
+		return PTR_ERR(dp->pclk);
+	}
+
+	dp->rst = devm_reset_control_get(dev, "dp");
+	if (IS_ERR(dp->rst)) {
+		dev_err(dev, "failed to get dp reset control\n");
+		return PTR_ERR(dp->rst);
+	}
+
+	ret = clk_prepare_enable(dp->pclk);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to enable pclk %d\n", ret);
+		return ret;
+	}
+
+	ret = rockchip_dp_pre_init(dp);
+	if (ret < 0) {
+		dev_err(dp->dev, "failed to pre init %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int rockchip_dp_drm_create_encoder(struct rockchip_dp_device *dp)
+{
+	struct drm_encoder *encoder = &dp->encoder;
+	struct drm_device *drm_dev = dp->drm_dev;
+	struct device *dev = dp->dev;
+	int ret;
+
+	encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev,
+							     dev->of_node);
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	ret = drm_encoder_init(drm_dev, encoder, &rockchip_dp_encoder_funcs,
+			       DRM_MODE_ENCODER_TMDS, NULL);
+	if (ret) {
+		DRM_ERROR("failed to initialize encoder with drm\n");
+		return ret;
+	}
+
+	drm_encoder_helper_add(encoder, &rockchip_dp_encoder_helper_funcs);
+
+	return 0;
+}
+
+static int rockchip_dp_bind(struct device *dev, struct device *master,
+			    void *data)
+{
+	struct rockchip_dp_device *dp = dev_get_drvdata(dev);
+	struct drm_device *drm_dev = data;
+	int ret;
+
+	/*
+	 * As the comment in the probe function explains, we no longer
+	 * need the device drvdata here; set it to NULL so the analogix
+	 * dp driver can take charge of it.
+	 */
+	dev_set_drvdata(dev, NULL);
+
+	ret = rockchip_dp_init(dp);
+	if (ret < 0)
+		return ret;
+
+	dp->drm_dev = drm_dev;
+
+	ret = rockchip_dp_drm_create_encoder(dp);
+	if (ret) {
+		DRM_ERROR("failed to create drm encoder\n");
+		return ret;
+	}
+
+	dp->plat_data.encoder = &dp->encoder;
+
+	dp->plat_data.dev_type = RK3288_DP;
+	dp->plat_data.power_on = rockchip_dp_poweron;
+	dp->plat_data.power_off = rockchip_dp_powerdown;
+
+	return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
+}
+
+static void rockchip_dp_unbind(struct device *dev, struct device *master,
+			       void *data)
+{
+	return analogix_dp_unbind(dev, master, data);
+}
+
+static const struct component_ops rockchip_dp_component_ops = {
+	.bind = rockchip_dp_bind,
+	.unbind = rockchip_dp_unbind,
+};
+
+static int rockchip_dp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *panel_node, *port, *endpoint;
+	struct rockchip_dp_device *dp;
+	struct drm_panel *panel;
+
+	port = of_graph_get_port_by_id(dev->of_node, 1);
+	if (!port) {
+		dev_err(dev, "can't find output port\n");
+		return -EINVAL;
+	}
+
+	endpoint = of_get_child_by_name(port, "endpoint");
+	of_node_put(port);
+	if (!endpoint) {
+		dev_err(dev, "no output endpoint found\n");
+		return -EINVAL;
+	}
+
+	panel_node = of_graph_get_remote_port_parent(endpoint);
+	of_node_put(endpoint);
+	if (!panel_node) {
+		dev_err(dev, "no output node found\n");
+		return -EINVAL;
+	}
+
+	panel = of_drm_find_panel(panel_node);
+	if (!panel) {
+		DRM_ERROR("failed to find panel\n");
+		of_node_put(panel_node);
+		return -EPROBE_DEFER;
+	}
+
+	of_node_put(panel_node);
+
+	dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
+	if (!dp)
+		return -ENOMEM;
+
+	dp->dev = dev;
+
+	dp->plat_data.panel = panel;
+
+	/*
+	 * We only use the drvdata until the component bind callback runs;
+	 * there we set drvdata to NULL so that the analogix dp driver can
+	 * take charge of it.
+	 */
+	platform_set_drvdata(pdev, dp);
+
+	return component_add(dev, &rockchip_dp_component_ops);
+}
+
+static int rockchip_dp_remove(struct platform_device *pdev)
+{
+	component_del(&pdev->dev, &rockchip_dp_component_ops);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_dp_suspend(struct device *dev)
+{
+	return analogix_dp_suspend(dev);
+}
+
+static int rockchip_dp_resume(struct device *dev)
+{
+	return analogix_dp_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops rockchip_dp_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume)
+};
+
+static const struct of_device_id rockchip_dp_dt_ids[] = {
+	{.compatible = "rockchip,rk3288-dp",},
+	{}
+};
+MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
+
+static struct platform_driver rockchip_dp_driver = {
+	.probe = rockchip_dp_probe,
+	.remove = rockchip_dp_remove,
+	.driver = {
+		   .name = "rockchip-dp",
+		   .owner = THIS_MODULE,
+		   .pm = &rockchip_dp_pm_ops,
+		   .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
+	},
+};
+
+module_platform_driver(rockchip_dp_driver);
+
+MODULE_AUTHOR("Yakir Yang <ykk@rock-chips.com>");
+MODULE_AUTHOR("Jeff Chen <jeff.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip Specific Analogix-DP Driver Extension");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index b433b9f040c9..f92325800f8a 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -2,9 +2,10 @@
 # Makefile for the drm device driver.
This driver provides support for the ccflags-y := -Iinclude/drm -ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \ +ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \ ttm_bo_manager.o ttm_page_alloc_dma.o +ttm-$(CONFIG_AGP) += ttm_agp_backend.o obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 764be36397fd..028ab6007873 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -34,7 +34,6 @@ #include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_page_alloc.h> -#ifdef TTM_HAS_AGP #include <drm/ttm/ttm_placement.h> #include <linux/agp_backend.h> #include <linux/module.h> @@ -148,5 +147,3 @@ void ttm_agp_tt_unpopulate(struct ttm_tt *ttm) ttm_pool_unpopulate(ttm); } EXPORT_SYMBOL(ttm_agp_tt_unpopulate); - -#endif diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 025c429050c0..a37de5db5731 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -48,7 +48,7 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_page_alloc.h> -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) #include <asm/agp.h> #endif @@ -219,7 +219,7 @@ static struct ttm_pool_manager *_manager; #ifndef CONFIG_X86 static int set_pages_array_wb(struct page **pages, int addrinarray) { -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) int i; for (i = 0; i < addrinarray; i++) @@ -230,7 +230,7 @@ static int set_pages_array_wb(struct page **pages, int addrinarray) static int set_pages_array_wc(struct page **pages, int addrinarray) { -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) int i; for (i = 0; i < addrinarray; i++) @@ -241,7 +241,7 @@ static int set_pages_array_wc(struct page **pages, int addrinarray) static int set_pages_array_uc(struct page **pages, int addrinarray) { -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) int i; for (i = 0; i < addrinarray; i++) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 624d941aaad1..bef9f6feb635 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -50,7 +50,7 @@ #include <linux/kthread.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_page_alloc.h> -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) #include <asm/agp.h> #endif @@ -271,7 +271,7 @@ static struct kobj_type ttm_pool_kobj_type = { #ifndef CONFIG_X86 static int set_pages_array_wb(struct page **pages, int addrinarray) { -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) int i; for (i = 0; i < addrinarray; i++) @@ -282,7 +282,7 @@ static int set_pages_array_wb(struct page **pages, int addrinarray) static int set_pages_array_wc(struct page **pages, int addrinarray) { -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) int i; for (i = 0; i < addrinarray; i++) @@ -293,7 +293,7 @@ static int set_pages_array_wc(struct page **pages, int addrinarray) static int set_pages_array_uc(struct page **pages, int addrinarray) { -#ifdef TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) int i; for (i = 0; i < addrinarray; i++) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 4e19d0f9cc30..077ae9b2865d 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm) goto out_err; copy_highpage(to_page, from_page); - 
page_cache_release(from_page); + put_page(from_page); } if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP)) @@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage) copy_highpage(to_page, from_page); set_page_dirty(to_page); mark_page_accessed(to_page); - page_cache_release(to_page); + put_page(to_page); } ttm_tt_unpopulate(ttm); diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 772ec9e1f590..c20408940cd0 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -94,7 +94,7 @@ static void udl_usb_disconnect(struct usb_interface *interface) struct drm_device *dev = usb_get_intfdata(interface); drm_kms_helper_poll_disable(dev); - drm_connector_unplug_all(dev); + drm_connector_unregister_all(dev); udl_fbdev_unplug(dev); udl_drop_usb(dev); drm_unplug_dev(dev); diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c index e797dfc07ae3..7e2a12c4fed2 100644 --- a/drivers/gpu/drm/via/via_dmablit.c +++ b/drivers/gpu/drm/via/via_dmablit.c @@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) if (NULL != (page = vsg->pages[i])) { if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) SetPageDirty(page); - page_cache_release(page); + put_page(page); } } case dr_via_pages_alloc: diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 7f898cfdc746..3cc7afa77a35 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -42,10 +42,8 @@ module_param_named(modeset, virtio_gpu_modeset, int, 0400); static int virtio_gpu_probe(struct virtio_device *vdev) { -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && virtio_gpu_modeset == -1) return -EINVAL; -#endif if (virtio_gpu_modeset == 0) return -EINVAL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6cbb7d4bdd11..e7335a48ebf6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1530,10 +1530,8 @@ static int __init vmwgfx_init(void) { int ret; -#ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force()) return -EINVAL; -#endif ret = drm_pci_init(&driver, &vmw_pci_driver); if (ret) diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c index 883a314cd83a..6494a4d28171 100644 --- a/drivers/gpu/ipu-v3/ipu-cpmem.c +++ b/drivers/gpu/ipu-v3/ipu-cpmem.c @@ -395,60 +395,48 @@ void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format) EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_interleaved); void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, - u32 pixel_format, int stride, - int u_offset, int v_offset) + unsigned int uv_stride, + unsigned int u_offset, unsigned int v_offset) { - switch (pixel_format) { - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YUV422P: - ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1); - ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8); - ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8); - break; - case V4L2_PIX_FMT_YVU420: - ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, (stride / 2) - 1); - ipu_ch_param_write_field(ch, IPU_FIELD_UBO, v_offset / 8); - ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8); - break; - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV16: - ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, stride - 1); - ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8); - ipu_ch_param_write_field(ch, IPU_FIELD_VBO, u_offset / 8); - break; - } 
+ ipu_ch_param_write_field(ch, IPU_FIELD_SLUV, uv_stride - 1); + ipu_ch_param_write_field(ch, IPU_FIELD_UBO, u_offset / 8); + ipu_ch_param_write_field(ch, IPU_FIELD_VBO, v_offset / 8); } EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar_full); void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, u32 pixel_format, int stride, int height) { - int u_offset, v_offset; + int fourcc, u_offset, v_offset; int uv_stride = 0; - switch (pixel_format) { - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YVU420: + fourcc = v4l2_pix_fmt_to_drm_fourcc(pixel_format); + switch (fourcc) { + case DRM_FORMAT_YUV420: uv_stride = stride / 2; u_offset = stride * height; v_offset = u_offset + (uv_stride * height / 2); - ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, - u_offset, v_offset); break; - case V4L2_PIX_FMT_YUV422P: + case DRM_FORMAT_YVU420: + uv_stride = stride / 2; + v_offset = stride * height; + u_offset = v_offset + (uv_stride * height / 2); + break; + case DRM_FORMAT_YUV422: uv_stride = stride / 2; u_offset = stride * height; v_offset = u_offset + (uv_stride * height); - ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, - u_offset, v_offset); break; - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV16: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV16: + uv_stride = stride; u_offset = stride * height; - ipu_cpmem_set_yuv_planar_full(ch, pixel_format, stride, - u_offset, 0); + v_offset = 0; break; + default: + return; } + ipu_cpmem_set_yuv_planar_full(ch, uv_stride, u_offset, v_offset); } EXPORT_SYMBOL_GPL(ipu_cpmem_set_yuv_planar); @@ -684,17 +672,25 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) switch (pix->pixelformat) { case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YVU420: offset = Y_OFFSET(pix, image->rect.left, image->rect.top); u_offset = U_OFFSET(pix, image->rect.left, image->rect.top) - offset; v_offset = V_OFFSET(pix, image->rect.left, image->rect.top) - offset; - ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, - pix->bytesperline, + ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2, u_offset, v_offset); break; + case V4L2_PIX_FMT_YVU420: + offset = Y_OFFSET(pix, image->rect.left, image->rect.top); + u_offset = U_OFFSET(pix, image->rect.left, + image->rect.top) - offset; + v_offset = V_OFFSET(pix, image->rect.left, + image->rect.top) - offset; + + ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2, + v_offset, u_offset); + break; case V4L2_PIX_FMT_YUV422P: offset = Y_OFFSET(pix, image->rect.left, image->rect.top); u_offset = U2_OFFSET(pix, image->rect.left, @@ -702,8 +698,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) v_offset = V2_OFFSET(pix, image->rect.left, image->rect.top) - offset; - ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, - pix->bytesperline, + ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline / 2, u_offset, v_offset); break; case V4L2_PIX_FMT_NV12: @@ -712,8 +707,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) image->rect.top) - offset; v_offset = 0; - ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, - pix->bytesperline, + ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline, u_offset, v_offset); break; case V4L2_PIX_FMT_NV16: @@ -722,8 +716,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image) image->rect.top) - offset; v_offset = 0; - ipu_cpmem_set_yuv_planar_full(ch, pix->pixelformat, - pix->bytesperline, + ipu_cpmem_set_yuv_planar_full(ch, pix->bytesperline, u_offset, v_offset); break; case V4L2_PIX_FMT_UYVY: diff --git 
a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c index 042c3958e2a0..837b1ec22800 100644 --- a/drivers/gpu/ipu-v3/ipu-dmfc.c +++ b/drivers/gpu/ipu-v3/ipu-dmfc.c @@ -350,11 +350,13 @@ out: } EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth); -int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width) +void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width) { struct ipu_dmfc_priv *priv = dmfc->priv; u32 dmfc_gen1; + mutex_lock(&priv->mutex); + dmfc_gen1 = readl(priv->base + DMFC_GENERAL1); if ((dmfc->slots * 64 * 4) / width > dmfc->data->max_fifo_lines) @@ -364,9 +366,9 @@ int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width) writel(dmfc_gen1, priv->base + DMFC_GENERAL1); - return 0; + mutex_unlock(&priv->mutex); } -EXPORT_SYMBOL_GPL(ipu_dmfc_init_channel); +EXPORT_SYMBOL_GPL(ipu_dmfc_config_wait4eot); struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipu_channel) { diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c index f325663c27c5..ba14a863b451 100644 --- a/drivers/i2c/busses/i2c-jz4780.c +++ b/drivers/i2c/busses/i2c-jz4780.c @@ -771,11 +771,16 @@ static int jz4780_i2c_probe(struct platform_device *pdev) ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clk_freq); if (ret) { - dev_err(&pdev->dev, "clock-frequency not specified in DT"); + dev_err(&pdev->dev, "clock-frequency not specified in DT\n"); goto err; } i2c->speed = clk_freq / 1000; + if (i2c->speed == 0) { + ret = -EINVAL; + dev_err(&pdev->dev, "clock-frequency minimum is 1000\n"); + goto err; + } jz4780_i2c_set_speed(i2c); dev_info(&pdev->dev, "Bus frequency is %d KHz\n", i2c->speed); diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 0f2f8484e8ec..e584d88ee337 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -525,22 +525,16 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv) return 0; } - -/* uevent helps with hotplug: modprobe -q $(MODALIAS) */ static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env) { - struct i2c_client *client = to_i2c_client(dev); + struct i2c_client *client = to_i2c_client(dev); int rc; rc = acpi_device_uevent_modalias(dev, env); if (rc != -ENODEV) return rc; - if (add_uevent_var(env, "MODALIAS=%s%s", - I2C_MODULE_PREFIX, client->name)) - return -ENOMEM; - dev_dbg(dev, "uevent\n"); - return 0; + return add_uevent_var(env, "MODALIAS=%s%s", I2C_MODULE_PREFIX, client->name); } /* i2c bus recovery routines */ diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index 7748a0a5ddb9..8de073aed001 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c @@ -140,22 +140,34 @@ static int i2c_demux_change_master(struct i2c_demux_pinctrl_priv *priv, u32 new_ return i2c_demux_activate_master(priv, new_chan); } -static ssize_t cur_master_show(struct device *dev, struct device_attribute *attr, - char *buf) +static ssize_t available_masters_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); int count = 0, i; for (i = 0; i < priv->num_chan && count < PAGE_SIZE; i++) - count += scnprintf(buf + count, PAGE_SIZE - count, "%c %d - %s\n", - i == priv->cur_chan ? '*' : ' ', i, - priv->chan[i].parent_np->full_name); + count += scnprintf(buf + count, PAGE_SIZE - count, "%d:%s%c", + i, priv->chan[i].parent_np->full_name, + i == priv->num_chan - 1 ? 
'\n' : ' '); return count; } +static DEVICE_ATTR_RO(available_masters); -static ssize_t cur_master_store(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t current_master_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", priv->cur_chan); +} + +static ssize_t current_master_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct i2c_demux_pinctrl_priv *priv = dev_get_drvdata(dev); unsigned int val; @@ -172,7 +184,7 @@ static ssize_t cur_master_store(struct device *dev, struct device_attribute *att return ret < 0 ? ret : count; } -static DEVICE_ATTR_RW(cur_master); +static DEVICE_ATTR_RW(current_master); static int i2c_demux_pinctrl_probe(struct platform_device *pdev) { @@ -218,12 +230,18 @@ static int i2c_demux_pinctrl_probe(struct platform_device *pdev) /* switch to first parent as active master */ i2c_demux_activate_master(priv, 0); - err = device_create_file(&pdev->dev, &dev_attr_cur_master); + err = device_create_file(&pdev->dev, &dev_attr_available_masters); if (err) goto err_rollback; + err = device_create_file(&pdev->dev, &dev_attr_current_master); + if (err) + goto err_rollback_available; + return 0; +err_rollback_available: + device_remove_file(&pdev->dev, &dev_attr_available_masters); err_rollback: for (j = 0; j < i; j++) { of_node_put(priv->chan[j].parent_np); @@ -238,7 +256,8 @@ static int i2c_demux_pinctrl_remove(struct platform_device *pdev) struct i2c_demux_pinctrl_priv *priv = platform_get_drvdata(pdev); int i; - device_remove_file(&pdev->dev, &dev_attr_cur_master); + device_remove_file(&pdev->dev, &dev_attr_current_master); + device_remove_file(&pdev->dev, &dev_attr_available_masters); i2c_demux_deactivate_master(priv); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index ba947df5a8c7..c6935de425fa 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -660,6 +660,35 @@ static struct cpuidle_state skl_cstates[] = { .enter = NULL } }; +static struct cpuidle_state skx_cstates[] = { + { + .name = "C1-SKX", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00), + .exit_latency = 2, + .target_residency = 2, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { + .name = "C1E-SKX", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01), + .exit_latency = 10, + .target_residency = 20, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { + .name = "C6-SKX", + .desc = "MWAIT 0x20", + .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 133, + .target_residency = 600, + .enter = &intel_idle, + .enter_freeze = intel_idle_freeze, }, + { + .enter = NULL } +}; + static struct cpuidle_state atom_cstates[] = { { .name = "C1E-ATM", @@ -818,8 +847,11 @@ static int cpu_hotplug_notify(struct notifier_block *n, * driver in this case */ dev = per_cpu_ptr(intel_idle_cpuidle_devices, hotcpu); - if (!dev->registered) - intel_idle_cpu_init(hotcpu); + if (dev->registered) + break; + + if (intel_idle_cpu_init(hotcpu)) + return NOTIFY_BAD; break; } @@ -904,6 +936,10 @@ static const struct idle_cpu idle_cpu_skl = { .disable_promotion_to_c1e = true, }; +static const struct idle_cpu idle_cpu_skx = { + .state_table = skx_cstates, + .disable_promotion_to_c1e = true, +}; static const struct idle_cpu idle_cpu_avn = { .state_table = avn_cstates, @@ -945,6 +981,9 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst 
= { ICPU(0x56, idle_cpu_bdw), ICPU(0x4e, idle_cpu_skl), ICPU(0x5e, idle_cpu_skl), + ICPU(0x8e, idle_cpu_skl), + ICPU(0x9e, idle_cpu_skl), + ICPU(0x55, idle_cpu_skx), ICPU(0x57, idle_cpu_knl), {} }; @@ -987,22 +1026,15 @@ static int __init intel_idle_probe(void) icpu = (const struct idle_cpu *)id->driver_data; cpuidle_state_table = icpu->state_table; - if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ - lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; - else - on_each_cpu(__setup_broadcast_timer, (void *)true, 1); - pr_debug(PREFIX "v" INTEL_IDLE_VERSION " model 0x%X\n", boot_cpu_data.x86_model); - pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", - lapic_timer_reliable_states); return 0; } /* * intel_idle_cpuidle_devices_uninit() - * unregister, free cpuidle_devices + * Unregisters the cpuidle devices. */ static void intel_idle_cpuidle_devices_uninit(void) { @@ -1013,9 +1045,6 @@ static void intel_idle_cpuidle_devices_uninit(void) dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); cpuidle_unregister_device(dev); } - - free_percpu(intel_idle_cpuidle_devices); - return; } /* @@ -1111,7 +1140,7 @@ static void intel_idle_state_table_update(void) * intel_idle_cpuidle_driver_init() * allocate, initialize cpuidle_states */ -static int __init intel_idle_cpuidle_driver_init(void) +static void __init intel_idle_cpuidle_driver_init(void) { int cstate; struct cpuidle_driver *drv = &intel_idle_driver; @@ -1163,18 +1192,10 @@ static int __init intel_idle_cpuidle_driver_init(void) drv->state_count += 1; } - if (icpu->auto_demotion_disable_flags) - on_each_cpu(auto_demotion_disable, NULL, 1); - if (icpu->byt_auto_demotion_disable_flag) { wrmsrl(MSR_CC6_DEMOTION_POLICY_CONFIG, 0); wrmsrl(MSR_MC6_DEMOTION_POLICY_CONFIG, 0); } - - if (icpu->disable_promotion_to_c1e) /* each-cpu is redundant */ - on_each_cpu(c1e_promotion_disable, NULL, 1); - - return 0; } @@ -1193,7 +1214,6 @@ static int intel_idle_cpu_init(int cpu) if (cpuidle_register_device(dev)) { pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu); - intel_idle_cpuidle_devices_uninit(); return -EIO; } @@ -1218,40 +1238,51 @@ static int __init intel_idle_init(void) if (retval) return retval; + intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); + if (intel_idle_cpuidle_devices == NULL) + return -ENOMEM; + intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { struct cpuidle_driver *drv = cpuidle_get_driver(); printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", drv ? 
drv->name : "none"); + free_percpu(intel_idle_cpuidle_devices); return retval; } - intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device); - if (intel_idle_cpuidle_devices == NULL) - return -ENOMEM; - cpu_notifier_register_begin(); for_each_online_cpu(i) { retval = intel_idle_cpu_init(i); if (retval) { + intel_idle_cpuidle_devices_uninit(); cpu_notifier_register_done(); cpuidle_unregister_driver(&intel_idle_driver); + free_percpu(intel_idle_cpuidle_devices); return retval; } } __register_cpu_notifier(&cpu_hotplug_notifier); + if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */ + lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE; + else + on_each_cpu(__setup_broadcast_timer, (void *)true, 1); + cpu_notifier_register_done(); + pr_debug(PREFIX "lapic_timer_reliable_states 0x%x\n", + lapic_timer_reliable_states); + return 0; } static void __exit intel_idle_exit(void) { - intel_idle_cpuidle_devices_uninit(); - cpuidle_unregister_driver(&intel_idle_driver); + struct cpuidle_device *dev; + int i; cpu_notifier_register_begin(); @@ -1259,9 +1290,15 @@ static void __exit intel_idle_exit(void) on_each_cpu(__setup_broadcast_timer, (void *)false, 1); __unregister_cpu_notifier(&cpu_hotplug_notifier); + for_each_possible_cpu(i) { + dev = per_cpu_ptr(intel_idle_cpuidle_devices, i); + cpuidle_unregister_device(dev); + } + cpu_notifier_register_done(); - return; + cpuidle_unregister_driver(&intel_idle_driver); + free_percpu(intel_idle_cpuidle_devices); } module_init(intel_idle_init); diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c index c73331f7782b..2072a31e813b 100644 --- a/drivers/iio/accel/bmc150-accel-core.c +++ b/drivers/iio/accel/bmc150-accel-core.c @@ -547,7 +547,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data, { int ret; int axis = chan->scan_index; - unsigned int raw_val; + __le16 raw_val; mutex_lock(&data->mutex); ret = bmc150_accel_set_power_state(data, true); @@ -557,14 +557,14 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data, } ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis), - &raw_val, 2); + &raw_val, sizeof(raw_val)); if (ret < 0) { dev_err(data->dev, "Error reading axis %d\n", axis); bmc150_accel_set_power_state(data, false); mutex_unlock(&data->mutex); return ret; } - *val = sign_extend32(raw_val >> chan->scan_type.shift, + *val = sign_extend32(le16_to_cpu(raw_val) >> chan->scan_type.shift, chan->scan_type.realbits - 1); ret = bmc150_accel_set_power_state(data, false); mutex_unlock(&data->mutex); @@ -988,6 +988,7 @@ static const struct iio_event_spec bmc150_accel_event = { .realbits = (bits), \ .storagebits = 16, \ .shift = 16 - (bits), \ + .endianness = IIO_LE, \ }, \ .event_spec = &bmc150_accel_event, \ .num_event_specs = 1 \ diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index af4aea7b20f9..82c718c515a0 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -134,6 +134,7 @@ config AT91_ADC config AT91_SAMA5D2_ADC tristate "Atmel AT91 SAMA5D2 ADC" depends on ARCH_AT91 || COMPILE_TEST + depends on HAS_IOMEM help Say yes here to build support for Atmel SAMA5D2 ADC which is available on SAMA5D2 SoC family. 
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index 929508e5266c..998dc3caad4c 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -1386,7 +1386,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11644] = { .bits = 12, - .int_vref_mv = 2048, + .int_vref_mv = 4096, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1396,7 +1396,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11645] = { .bits = 12, - .int_vref_mv = 4096, + .int_vref_mv = 2048, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1406,7 +1406,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11646] = { .bits = 10, - .int_vref_mv = 2048, + .int_vref_mv = 4096, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1416,7 +1416,7 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = { }, [max11647] = { .bits = 10, - .int_vref_mv = 4096, + .int_vref_mv = 2048, .mode_list = max11644_mode_list, .num_modes = ARRAY_SIZE(max11644_mode_list), .default_mode = s0to1, @@ -1680,6 +1680,10 @@ static const struct i2c_device_id max1363_id[] = { { "max11615", max11615 }, { "max11616", max11616 }, { "max11617", max11617 }, + { "max11644", max11644 }, + { "max11645", max11645 }, + { "max11646", max11646 }, + { "max11647", max11647 }, {} }; diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c index bbce3b09ac45..4dac567e75b4 100644 --- a/drivers/iio/gyro/bmg160_core.c +++ b/drivers/iio/gyro/bmg160_core.c @@ -452,7 +452,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val) static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val) { int ret; - unsigned int raw_val; + __le16 raw_val; mutex_lock(&data->mutex); ret = bmg160_set_power_state(data, true); @@ -462,7 +462,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val) } ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val, - 2); + sizeof(raw_val)); if (ret < 0) { dev_err(data->dev, "Error reading axis %d\n", axis); bmg160_set_power_state(data, false); @@ -470,7 +470,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val) return ret; } - *val = sign_extend32(raw_val, 15); + *val = sign_extend32(le16_to_cpu(raw_val), 15); ret = bmg160_set_power_state(data, false); mutex_unlock(&data->mutex); if (ret < 0) @@ -733,6 +733,7 @@ static const struct iio_event_spec bmg160_event = { .sign = 's', \ .realbits = 16, \ .storagebits = 16, \ + .endianness = IIO_LE, \ }, \ .event_spec = &bmg160_event, \ .num_event_specs = 1 \ @@ -780,7 +781,7 @@ static irqreturn_t bmg160_trigger_handler(int irq, void *p) mutex_unlock(&data->mutex); goto err; } - data->buffer[i++] = ret; + data->buffer[i++] = val; } mutex_unlock(&data->mutex); diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c index 09db89359544..90ab8a2d2846 100644 --- a/drivers/iio/health/max30100.c +++ b/drivers/iio/health/max30100.c @@ -238,12 +238,13 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private) mutex_lock(&data->lock); - while (cnt-- || (cnt = max30100_fifo_count(data) > 0)) { + while (cnt || (cnt = max30100_fifo_count(data) > 0)) { ret = max30100_read_measurement(data); if (ret) break; iio_push_to_buffers(data->indio_dev, data->buffer); + cnt--; } mutex_unlock(&data->lock); diff --git 
a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig index a7f557af4389..847455a2d6bb 100644 --- a/drivers/iio/imu/inv_mpu6050/Kconfig +++ b/drivers/iio/imu/inv_mpu6050/Kconfig @@ -9,9 +9,8 @@ config INV_MPU6050_IIO config INV_MPU6050_I2C tristate "Invensense MPU6050 devices (I2C)" - depends on I2C + depends on I2C_MUX select INV_MPU6050_IIO - select I2C_MUX select REGMAP_I2C help This driver supports the Invensense MPU6050 devices. diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index b976332d45d3..90462fcf5436 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -653,6 +653,7 @@ static int iio_verify_update(struct iio_dev *indio_dev, unsigned int modes; memset(config, 0, sizeof(*config)); + config->watermark = ~0; /* * If there is just one buffer and we are removing it there is nothing diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c index f6a07dc32ae4..a6af56ad10e1 100644 --- a/drivers/iio/light/apds9960.c +++ b/drivers/iio/light/apds9960.c @@ -769,7 +769,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data) mutex_lock(&data->lock); data->gesture_mode_running = 1; - while (cnt-- || (cnt = apds9660_fifo_is_empty(data) > 0)) { + while (cnt || (cnt = apds9660_fifo_is_empty(data) > 0)) { ret = regmap_bulk_read(data->regmap, APDS9960_REG_GFIFO_BASE, &data->buffer, 4); @@ -777,6 +777,7 @@ static void apds9960_read_gesture_fifo(struct apds9960_data *data) goto err_read; iio_push_to_buffers(data->indio_dev, data->buffer); + cnt--; } err_read: diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h index 06a4d9c35581..9daca4681922 100644 --- a/drivers/iio/magnetometer/st_magn.h +++ b/drivers/iio/magnetometer/st_magn.h @@ -44,6 +44,7 @@ static inline int st_magn_allocate_ring(struct iio_dev *indio_dev) static inline void st_magn_deallocate_ring(struct iio_dev *indio_dev) { } +#define ST_MAGN_TRIGGER_SET_STATE NULL #endif /* CONFIG_IIO_BUFFER */ #endif /* ST_MAGN_H */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 92745d755272..38f917a6c778 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -1992,7 +1992,6 @@ static int i40iw_addr_resolve_neigh(struct i40iw_device *iwdev, /** * i40iw_get_dst_ipv6 */ -#if IS_ENABLED(CONFIG_IPV6) static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, struct sockaddr_in6 *dst_addr) { @@ -2008,7 +2007,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, dst = ip6_route_output(&init_net, NULL, &fl6); return dst; } -#endif /** * i40iw_addr_resolve_neigh_ipv6 - resolve neighbor ipv6 address @@ -2016,7 +2014,6 @@ static struct dst_entry *i40iw_get_dst_ipv6(struct sockaddr_in6 *src_addr, * @dst_ip: remote ip address * @arpindex: if there is an arp entry */ -#if IS_ENABLED(CONFIG_IPV6) static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, u32 *src, u32 *dest, @@ -2089,7 +2086,6 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev, dst_release(dst); return rc; } -#endif /** * i40iw_ipv4_is_loopback - check if loopback @@ -2190,13 +2186,13 @@ static struct i40iw_cm_node *i40iw_make_cm_node( cm_info->loc_addr[0], cm_info->rem_addr[0], oldarpindex); -#if IS_ENABLED(CONFIG_IPV6) - else + else if (IS_ENABLED(CONFIG_IPV6)) arpindex = i40iw_addr_resolve_neigh_ipv6(iwdev, cm_info->loc_addr, cm_info->rem_addr, oldarpindex); -#endif + else + 
arpindex = -EINVAL; } if (arpindex < 0) { i40iw_pr_err("cm_node arpindex\n"); diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index f16c818ad2e6..b46c25542a7c 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -776,15 +776,6 @@ void mlx5_ib_qp_disable_pagefaults(struct mlx5_ib_qp *qp); void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp); void mlx5_ib_invalidate_range(struct ib_umem *umem, unsigned long start, unsigned long end); -int mlx5_ib_get_vf_config(struct ib_device *device, int vf, - u8 port, struct ifla_vf_info *info); -int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, - u8 port, int state); -int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, - u8 port, struct ifla_vf_stats *stats); -int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port, - u64 guid, int type); - #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev) { @@ -801,6 +792,15 @@ static inline void mlx5_ib_qp_enable_pagefaults(struct mlx5_ib_qp *qp) {} #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */ +int mlx5_ib_get_vf_config(struct ib_device *device, int vf, + u8 port, struct ifla_vf_info *info); +int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf, + u8 port, int state); +int mlx5_ib_get_vf_stats(struct ib_device *device, int vf, + u8 port, struct ifla_vf_stats *stats); +int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port, + u64 guid, int type); + __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num, int index); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 0bd3cb2f3c67..8b42401d4795 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1264,26 +1264,40 @@ free_mem: */ static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch) { - struct se_session *se_sess; struct srpt_send_ioctx *ioctx; - int tag; + unsigned long flags; BUG_ON(!ch); - se_sess = ch->sess; - tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); - if (tag < 0) { - pr_err("Unable to obtain tag for srpt_send_ioctx\n"); - return NULL; + ioctx = NULL; + spin_lock_irqsave(&ch->spinlock, flags); + if (!list_empty(&ch->free_list)) { + ioctx = list_first_entry(&ch->free_list, + struct srpt_send_ioctx, free_list); + list_del(&ioctx->free_list); } - ioctx = &((struct srpt_send_ioctx *)se_sess->sess_cmd_map)[tag]; - memset(ioctx, 0, sizeof(struct srpt_send_ioctx)); - ioctx->ch = ch; + spin_unlock_irqrestore(&ch->spinlock, flags); + + if (!ioctx) + return ioctx; + + BUG_ON(ioctx->ch != ch); spin_lock_init(&ioctx->spinlock); ioctx->state = SRPT_STATE_NEW; + ioctx->n_rbuf = 0; + ioctx->rbufs = NULL; + ioctx->n_rdma = 0; + ioctx->n_rdma_wrs = 0; + ioctx->rdma_wrs = NULL; + ioctx->mapped_sg_count = 0; init_completion(&ioctx->tx_done); - - ioctx->cmd.map_tag = tag; + ioctx->queue_status_only = false; + /* + * transport_init_se_cmd() does not initialize all fields, so do it + * here. 
+ */ + memset(&ioctx->cmd, 0, sizeof(ioctx->cmd)); + memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data)); return ioctx; } @@ -2021,7 +2035,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, struct ib_cm_rep_param *rep_param; struct srpt_rdma_ch *ch, *tmp_ch; u32 it_iu_len; - int ret = 0; + int i, ret = 0; unsigned char *p; WARN_ON_ONCE(irqs_disabled()); @@ -2143,6 +2157,12 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, if (!ch->ioctx_ring) goto free_ch; + INIT_LIST_HEAD(&ch->free_list); + for (i = 0; i < ch->rq_size; i++) { + ch->ioctx_ring[i]->ch = ch; + list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list); + } + ret = srpt_create_ch_ib(ch); if (ret) { rej->reason = cpu_to_be32( @@ -2173,8 +2193,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id, p = &ch->sess_name[0]; try_again: - ch->sess = target_alloc_session(&sport->port_tpg_1, ch->rq_size, - sizeof(struct srpt_send_ioctx), + ch->sess = target_alloc_session(&sport->port_tpg_1, 0, 0, TARGET_PROT_NORMAL, p, ch, NULL); if (IS_ERR(ch->sess)) { pr_info("Rejected login because no ACL has been" @@ -2881,7 +2900,7 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) struct srpt_send_ioctx *ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd); struct srpt_rdma_ch *ch = ioctx->ch; - struct se_session *se_sess = ch->sess; + unsigned long flags; WARN_ON(ioctx->state != SRPT_STATE_DONE); WARN_ON(ioctx->mapped_sg_count != 0); @@ -2892,7 +2911,9 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) ioctx->n_rbuf = 0; } - percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); + spin_lock_irqsave(&ch->spinlock, flags); + list_add(&ioctx->free_list, &ch->free_list); + spin_unlock_irqrestore(&ch->spinlock, flags); } /** diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index ca288f019315..af9b8b527340 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -179,6 +179,7 @@ struct srpt_recv_ioctx { * struct srpt_send_ioctx - SRPT send I/O context. * @ioctx: See above. * @ch: Channel pointer. + * @free_list: Node in srpt_rdma_ch.free_list. * @n_rbuf: Number of data buffers in the received SRP command. * @rbufs: Pointer to SRP data buffer array. * @single_rbuf: SRP data buffer if the command has only a single buffer. 
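The ib_srpt hunks above replace the percpu_ida tag pool with a plain per-channel free list protected by the channel lock. Below is a minimal userspace sketch of that get/release pattern; the structure and function names are illustrative stand-ins, with a pthread mutex in place of the kernel spinlock and a singly linked list in place of list_head.

#include <pthread.h>
#include <stdio.h>

struct ioctx {
	struct ioctx *next;	/* stands in for the free_list list_head */
	int state;
};

struct channel {
	pthread_mutex_t lock;	/* stands in for ch->spinlock */
	struct ioctx *free_list;
};

static struct ioctx *get_send_ioctx(struct channel *ch)
{
	struct ioctx *ioctx;

	pthread_mutex_lock(&ch->lock);
	ioctx = ch->free_list;			/* list_first_entry() */
	if (ioctx)
		ch->free_list = ioctx->next;	/* list_del() */
	pthread_mutex_unlock(&ch->lock);

	if (ioctx)
		ioctx->state = 0;	/* re-init fields instead of a full memset */
	return ioctx;
}

static void release_ioctx(struct channel *ch, struct ioctx *ioctx)
{
	pthread_mutex_lock(&ch->lock);
	ioctx->next = ch->free_list;	/* list_add() back onto the free list */
	ch->free_list = ioctx;
	pthread_mutex_unlock(&ch->lock);
}

int main(void)
{
	struct channel ch = { PTHREAD_MUTEX_INITIALIZER, NULL };
	struct ioctx ring[4];
	struct ioctx *io;

	/* pre-populate from the ioctx ring, as done at channel creation */
	for (int i = 0; i < 4; i++)
		release_ioctx(&ch, &ring[i]);

	io = get_send_ioctx(&ch);
	printf("got ioctx: %p\n", (void *)io);
	release_ioctx(&ch, io);
	return 0;
}

Pre-populating the list from the ioctx ring mirrors the INIT_LIST_HEAD()/list_add_tail() loop the patch adds to srpt_cm_req_recv(), and releasing back onto the list mirrors the new srpt_release_cmd().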
@@ -201,6 +202,7 @@ struct srpt_send_ioctx { struct srp_direct_buf *rbufs; struct srp_direct_buf single_rbuf; struct scatterlist *sg; + struct list_head free_list; spinlock_t spinlock; enum srpt_command_state state; struct se_cmd cmd; diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 72d6182666cb..58f2fe687a24 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, unsigned int s_length = sg_dma_len(s); unsigned int s_dma_len = s->length; - s->offset = s_offset; + s->offset += s_offset; s->length = s_length; sg_dma_address(s) = dma_addr + s_offset; dma_addr += s_dma_len; @@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) for_each_sg(sg, s, nents, i) { if (sg_dma_address(s) != DMA_ERROR_CODE) - s->offset = sg_dma_address(s); + s->offset += sg_dma_address(s); if (sg_dma_len(s)) s->length = sg_dma_len(s); sg_dma_address(s) = DMA_ERROR_CODE; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a2e1b7f14df2..e1852e845d21 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -2458,7 +2458,7 @@ static struct dmar_domain *get_domain_for_dev(struct device *dev, int gaw) } /* register PCI DMA alias device */ - if (req_id != dma_alias && dev_is_pci(dev)) { + if (dev_is_pci(dev) && req_id != dma_alias) { tmp = dmar_insert_one_dev_info(iommu, PCI_BUS_NUM(dma_alias), dma_alias & 0xff, NULL, domain); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index bfd4f7c3b1d8..b9df1411c894 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -848,7 +848,8 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (!group->default_domain) { group->default_domain = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); - group->domain = group->default_domain; + if (!group->domain) + group->domain = group->default_domain; } ret = iommu_group_add_device(group, dev); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index a6f593a0a29e..5710a06c3049 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -315,8 +315,8 @@ static bool rk_iommu_is_stall_active(struct rk_iommu *iommu) int i; for (i = 0; i < iommu->num_mmu; i++) - active &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & - RK_MMU_STATUS_STALL_ACTIVE; + active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & + RK_MMU_STATUS_STALL_ACTIVE); return active; } @@ -327,8 +327,8 @@ static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu) int i; for (i = 0; i < iommu->num_mmu; i++) - enable &= rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & - RK_MMU_STATUS_PAGING_ENABLED; + enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) & + RK_MMU_STATUS_PAGING_ENABLED); return enable; } diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 0ddf638d60f3..043828d541f7 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -361,8 +361,6 @@ static int __init acpi_pcc_probe(void) struct acpi_generic_address *db_reg; struct acpi_pcct_hw_reduced *pcct_ss; pcc_mbox_channels[i].con_priv = pcct_entry; - pcct_entry = (struct acpi_subtable_header *) - ((unsigned long) pcct_entry + pcct_entry->length); /* If doorbell is in system memory cache the virt address */ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry; @@ -370,6 +368,8 @@ static int __init acpi_pcc_probe(void) if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) pcc_doorbell_vaddr[i] = 
acpi_os_ioremap(db_reg->address, db_reg->bit_width/8); + pcct_entry = (struct acpi_subtable_header *) + ((unsigned long) pcct_entry + pcct_entry->length); } pcc_mbox_ctrl.num_chans = count; diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 7df6b4f1548a..3fe86b54d50b 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -322,7 +322,7 @@ __clear_page_buffers(struct page *page) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } static void free_buffers(struct page *page) { @@ -1673,6 +1673,9 @@ static void bitmap_free(struct bitmap *bitmap) if (!bitmap) /* there was no bitmap */ return; + if (bitmap->sysfs_can_clear) + sysfs_put(bitmap->sysfs_can_clear); + if (mddev_is_clustered(bitmap->mddev) && bitmap->mddev->cluster_info && bitmap->cluster_slot == md_cluster_ops->slot_number(bitmap->mddev)) md_cluster_stop(bitmap->mddev); @@ -1712,15 +1715,13 @@ void bitmap_destroy(struct mddev *mddev) if (mddev->thread) mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT; - if (bitmap->sysfs_can_clear) - sysfs_put(bitmap->sysfs_can_clear); - bitmap_free(bitmap); } /* * initialize the bitmap structure * if this returns an error, bitmap_destroy must be called to do clean up + * once mddev->bitmap is set */ struct bitmap *bitmap_create(struct mddev *mddev, int slot) { @@ -1865,8 +1866,10 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot, struct bitmap_counts *counts; struct bitmap *bitmap = bitmap_create(mddev, slot); - if (IS_ERR(bitmap)) + if (IS_ERR(bitmap)) { + bitmap_free(bitmap); return PTR_ERR(bitmap); + } rv = bitmap_init_from_disk(bitmap, 0); if (rv) @@ -2170,14 +2173,14 @@ location_store(struct mddev *mddev, const char *buf, size_t len) else { mddev->bitmap = bitmap; rv = bitmap_load(mddev); - if (rv) { - bitmap_destroy(mddev); + if (rv) mddev->bitmap_info.offset = 0; - } } mddev->pers->quiesce(mddev, 0); - if (rv) + if (rv) { + bitmap_destroy(mddev); return rv; + } } } } diff --git a/drivers/md/md.c b/drivers/md/md.c index c068f171b4eb..194580fba7fd 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -718,6 +718,7 @@ static void super_written(struct bio *bio) if (atomic_dec_and_test(&mddev->pending_writes)) wake_up(&mddev->sb_wait); + rdev_dec_pending(rdev, mddev); bio_put(bio); } @@ -732,6 +733,8 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, */ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); + atomic_inc(&rdev->nr_pending); + bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; bio->bi_iter.bi_sector = sector; bio_add_page(bio, page, size, 0); @@ -6883,7 +6886,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, case ADD_NEW_DISK: /* We can support ADD_NEW_DISK on read-only arrays - * on if we are re-adding a preexisting device. + * only if we are re-adding a preexisting device. * So require mddev->pers and MD_DISK_SYNC. 
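In the md.c hunks above, md_super_write() now takes a reference on the rdev (atomic_inc(&rdev->nr_pending)) before queueing the superblock bio, and super_written() drops it via rdev_dec_pending() on completion, so the member device cannot be released while the write is still in flight. The general submit/complete pairing looks like this (names are illustrative, and the two-argument submit_bio() matches the 4.6-era API):

#include <linux/bio.h>
#include <linux/atomic.h>

static void my_end_io(struct bio *bio)
{
	struct my_rdev *rdev = bio->bi_private;

	/* ... process the completion ... */
	atomic_dec(&rdev->nr_pending);	/* pairs with the inc in submit */
	bio_put(bio);
}

static void my_super_write(struct my_rdev *rdev, struct bio *bio)
{
	atomic_inc(&rdev->nr_pending);	/* bio now pins the device */
	bio->bi_private = rdev;
	bio->bi_end_io = my_end_io;
	submit_bio(WRITE, bio);
}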
*/ if (mddev->pers) { diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 39fb21e048e6..a7f2b9c9f8a0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -570,7 +570,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if (best_dist_disk < 0) { if (is_badblock(rdev, this_sector, sectors, &first_bad, &bad_sectors)) { - if (first_bad < this_sector) + if (first_bad <= this_sector) /* Cannot use this */ continue; best_good_sectors = first_bad - this_sector; diff --git a/drivers/media/usb/au0828/au0828-cards.c b/drivers/media/usb/au0828/au0828-cards.c index ca861aea68a5..6b469e8c4c6e 100644 --- a/drivers/media/usb/au0828/au0828-cards.c +++ b/drivers/media/usb/au0828/au0828-cards.c @@ -228,10 +228,6 @@ void au0828_card_analog_fe_setup(struct au0828_dev *dev) "au8522", 0x8e >> 1, NULL); if (sd == NULL) pr_err("analog subdev registration failed\n"); -#ifdef CONFIG_MEDIA_CONTROLLER - if (sd) - dev->decoder = &sd->entity; -#endif } /* Setup tuners */ diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index 5dc82e8c8670..cc22b32776ad 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -137,8 +137,14 @@ static void au0828_unregister_media_device(struct au0828_dev *dev) #ifdef CONFIG_MEDIA_CONTROLLER if (dev->media_dev && media_devnode_is_registered(&dev->media_dev->devnode)) { + /* clear enable_source, disable_source */ + dev->media_dev->source_priv = NULL; + dev->media_dev->enable_source = NULL; + dev->media_dev->disable_source = NULL; + media_device_unregister(dev->media_dev); media_device_cleanup(dev->media_dev); + kfree(dev->media_dev); dev->media_dev = NULL; } #endif @@ -166,7 +172,7 @@ static void au0828_usb_disconnect(struct usb_interface *interface) Set the status so poll routines can check and avoid access after disconnect. */ - dev->dev_state = DEV_DISCONNECTED; + set_bit(DEV_DISCONNECTED, &dev->dev_state); au0828_rc_unregister(dev); /* Digital TV */ @@ -192,7 +198,7 @@ static int au0828_media_device_init(struct au0828_dev *dev, #ifdef CONFIG_MEDIA_CONTROLLER struct media_device *mdev; - mdev = media_device_get_devres(&udev->dev); + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); if (!mdev) return -ENOMEM; @@ -456,7 +462,8 @@ static int au0828_media_device_register(struct au0828_dev *dev, { #ifdef CONFIG_MEDIA_CONTROLLER int ret; - struct media_entity *entity, *demod = NULL, *tuner = NULL; + struct media_entity *entity, *demod = NULL; + struct media_link *link; if (!dev->media_dev) return 0; @@ -482,26 +489,37 @@ static int au0828_media_device_register(struct au0828_dev *dev, } /* - * Find tuner and demod to disable the link between - * the two to avoid disable step when tuner is requested - * by video or audio. Note that this step can't be done - * until dvb graph is created during dvb register. + * Find tuner, decoder and demod. + * + * The tuner and decoder should be cached, as they'll be used by + * au0828_enable_source. + * + * It also needs to disable the link between tuner and + * decoder/demod, to avoid disable step when tuner is requested + * by video or audio. Note that this step can't be done until dvb + * graph is created during dvb register. 
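The raid1 read_balance() fix above is a boundary condition: a known-bad range beginning exactly at this_sector must disqualify the disk too, so the test tightens from first_bad < this_sector to first_bad <= this_sector. Stated as a tiny predicate:

#include <linux/types.h>

/* True when a bad range starting at 'first_bad' makes a read that
 * begins at 'sector' unusable. With a strict '<', a range starting
 * exactly at 'sector' would slip through and be read anyway.
 */
static bool bad_range_blocks_read(sector_t first_bad, sector_t sector)
{
	return first_bad <= sector;
}

When the bad range instead starts past the read (first_bad > sector), the disk can still serve the first_bad - sector good sectors in front of it, which is the best_good_sectors computation the hunk keeps.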
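The au0828 conversion running through these hunks turns dev_state from an enum of mask values compared with == and updated with |= into plain bit numbers driven through set_bit()/test_bit() on an unsigned long. The atomic bitops make the disconnect path safe against concurrent V4L2 callers, and testing individual bits keeps working once several state bits are set at once (an == comparison would not); the printk churn from %d to %ld simply follows from the storage type change. The idiom in isolation, with hypothetical names:

#include <linux/bitops.h>

enum my_state_bits {		/* bit numbers, not masks */
	STATE_INITIALIZED = 0,
	STATE_DISCONNECTED = 1,
	STATE_MISCONFIGURED = 2,
};

struct my_dev {
	unsigned long state;	/* backing word for the atomic bitops */
};

static void my_disconnect(struct my_dev *dev)
{
	set_bit(STATE_DISCONNECTED, &dev->state);	/* atomic RMW */
}

static int my_check(struct my_dev *dev)
{
	if (test_bit(STATE_DISCONNECTED, &dev->state))
		return -ENODEV;
	if (test_bit(STATE_MISCONFIGURED, &dev->state))
		return -EIO;
	return 0;
}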
*/ media_device_for_each_entity(entity, dev->media_dev) { - if (entity->function == MEDIA_ENT_F_DTV_DEMOD) + switch (entity->function) { + case MEDIA_ENT_F_TUNER: + dev->tuner = entity; + break; + case MEDIA_ENT_F_ATV_DECODER: + dev->decoder = entity; + break; + case MEDIA_ENT_F_DTV_DEMOD: demod = entity; - else if (entity->function == MEDIA_ENT_F_TUNER) - tuner = entity; + break; + } } - /* Disable link between tuner and demod */ - if (tuner && demod) { - struct media_link *link; - list_for_each_entry(link, &demod->links, list) { - if (link->sink->entity == demod && - link->source->entity == tuner) { + /* Disable link between tuner->demod and/or tuner->decoder */ + if (dev->tuner) { + list_for_each_entry(link, &dev->tuner->links, list) { + if (demod && link->sink->entity == demod) + media_entity_setup_link(link, 0); + if (dev->decoder && link->sink->entity == dev->decoder) media_entity_setup_link(link, 0); - } } } diff --git a/drivers/media/usb/au0828/au0828-input.c b/drivers/media/usb/au0828/au0828-input.c index b0f067971979..3d6687f0407d 100644 --- a/drivers/media/usb/au0828/au0828-input.c +++ b/drivers/media/usb/au0828/au0828-input.c @@ -130,7 +130,7 @@ static int au0828_get_key_au8522(struct au0828_rc *ir) bool first = true; /* do nothing if device is disconnected */ - if (ir->dev->dev_state == DEV_DISCONNECTED) + if (test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) return 0; /* Check IR int */ @@ -260,7 +260,7 @@ static void au0828_rc_stop(struct rc_dev *rc) cancel_delayed_work_sync(&ir->work); /* do nothing if device is disconnected */ - if (ir->dev->dev_state != DEV_DISCONNECTED) { + if (!test_bit(DEV_DISCONNECTED, &ir->dev->dev_state)) { /* Disable IR */ au8522_rc_clear(ir, 0xe0, 1 << 4); } diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c index 13f6dab9ccc2..32d7db96479c 100644 --- a/drivers/media/usb/au0828/au0828-video.c +++ b/drivers/media/usb/au0828/au0828-video.c @@ -106,14 +106,13 @@ static inline void print_err_status(struct au0828_dev *dev, static int check_dev(struct au0828_dev *dev) { - if (dev->dev_state & DEV_DISCONNECTED) { + if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) { pr_info("v4l2 ioctl: device not present\n"); return -ENODEV; } - if (dev->dev_state & DEV_MISCONFIGURED) { - pr_info("v4l2 ioctl: device is misconfigured; " - "close and open it again\n"); + if (test_bit(DEV_MISCONFIGURED, &dev->dev_state)) { + pr_info("v4l2 ioctl: device is misconfigured; close and open it again\n"); return -EIO; } return 0; @@ -521,8 +520,8 @@ static inline int au0828_isoc_copy(struct au0828_dev *dev, struct urb *urb) if (!dev) return 0; - if ((dev->dev_state & DEV_DISCONNECTED) || - (dev->dev_state & DEV_MISCONFIGURED)) + if (test_bit(DEV_DISCONNECTED, &dev->dev_state) || + test_bit(DEV_MISCONFIGURED, &dev->dev_state)) return 0; if (urb->status < 0) { @@ -824,10 +823,10 @@ static int au0828_stream_interrupt(struct au0828_dev *dev) int ret = 0; dev->stream_state = STREAM_INTERRUPT; - if (dev->dev_state == DEV_DISCONNECTED) + if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) return -ENODEV; else if (ret) { - dev->dev_state = DEV_MISCONFIGURED; + set_bit(DEV_MISCONFIGURED, &dev->dev_state); dprintk(1, "%s device is misconfigured!\n", __func__); return ret; } @@ -1026,7 +1025,7 @@ static int au0828_v4l2_open(struct file *filp) int ret; dprintk(1, - "%s called std_set %d dev_state %d stream users %d users %d\n", + "%s called std_set %d dev_state %ld stream users %d users %d\n", __func__, dev->std_set_in_tuner_core, dev->dev_state, 
dev->streaming_users, dev->users); @@ -1045,7 +1044,7 @@ static int au0828_v4l2_open(struct file *filp) au0828_analog_stream_enable(dev); au0828_analog_stream_reset(dev); dev->stream_state = STREAM_OFF; - dev->dev_state |= DEV_INITIALIZED; + set_bit(DEV_INITIALIZED, &dev->dev_state); } dev->users++; mutex_unlock(&dev->lock); @@ -1059,7 +1058,7 @@ static int au0828_v4l2_close(struct file *filp) struct video_device *vdev = video_devdata(filp); dprintk(1, - "%s called std_set %d dev_state %d stream users %d users %d\n", + "%s called std_set %d dev_state %ld stream users %d users %d\n", __func__, dev->std_set_in_tuner_core, dev->dev_state, dev->streaming_users, dev->users); @@ -1075,7 +1074,7 @@ static int au0828_v4l2_close(struct file *filp) del_timer_sync(&dev->vbi_timeout); } - if (dev->dev_state == DEV_DISCONNECTED) + if (test_bit(DEV_DISCONNECTED, &dev->dev_state)) goto end; if (dev->users == 1) { @@ -1135,7 +1134,7 @@ static void au0828_init_tuner(struct au0828_dev *dev) .type = V4L2_TUNER_ANALOG_TV, }; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); if (dev->std_set_in_tuner_core) @@ -1207,7 +1206,7 @@ static int vidioc_querycap(struct file *file, void *priv, struct video_device *vdev = video_devdata(file); struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); strlcpy(cap->driver, "au0828", sizeof(cap->driver)); @@ -1250,7 +1249,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); f->fmt.pix.width = dev->width; @@ -1269,7 +1268,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); return au0828_set_format(dev, VIDIOC_TRY_FMT, f); @@ -1281,7 +1280,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct au0828_dev *dev = video_drvdata(file); int rc; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); rc = check_dev(dev); @@ -1303,7 +1302,7 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm) { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); if (norm == dev->std) @@ -1335,7 +1334,7 @@ static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *norm) { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); *norm = dev->std; @@ -1357,7 +1356,7 @@ static int vidioc_enum_input(struct file *file, void *priv, [AU0828_VMUX_DVB] = "DVB", }; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, 
dev->std_set_in_tuner_core, dev->dev_state); tmp = input->index; @@ -1387,7 +1386,7 @@ static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); *i = dev->ctrl_input; @@ -1398,7 +1397,7 @@ static void au0828_s_input(struct au0828_dev *dev, int index) { int i; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); switch (AUVI_INPUT(index).type) { @@ -1496,7 +1495,7 @@ static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); a->index = dev->ctrl_ainput; @@ -1516,7 +1515,7 @@ static int vidioc_s_audio(struct file *file, void *priv, const struct v4l2_audio if (a->index != dev->ctrl_ainput) return -EINVAL; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); return 0; } @@ -1534,7 +1533,7 @@ static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) if (ret) return ret; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); strcpy(t->name, "Auvitek tuner"); @@ -1554,7 +1553,7 @@ static int vidioc_s_tuner(struct file *file, void *priv, if (t->index != 0) return -EINVAL; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); au0828_init_tuner(dev); @@ -1576,7 +1575,7 @@ static int vidioc_g_frequency(struct file *file, void *priv, if (freq->tuner != 0) return -EINVAL; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); freq->frequency = dev->ctrl_freq; return 0; @@ -1591,7 +1590,7 @@ static int vidioc_s_frequency(struct file *file, void *priv, if (freq->tuner != 0) return -EINVAL; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); au0828_init_tuner(dev); @@ -1617,7 +1616,7 @@ static int vidioc_g_fmt_vbi_cap(struct file *file, void *priv, { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); format->fmt.vbi.samples_per_line = dev->vbi_width; @@ -1643,7 +1642,7 @@ static int vidioc_cropcap(struct file *file, void *priv, if (cc->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); cc->bounds.left = 0; @@ -1665,7 +1664,7 @@ static int vidioc_g_register(struct file *file, void *priv, { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, 
"%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); reg->val = au0828_read(dev, reg->reg); @@ -1678,7 +1677,7 @@ static int vidioc_s_register(struct file *file, void *priv, { struct au0828_dev *dev = video_drvdata(file); - dprintk(1, "%s called std_set %d dev_state %d\n", __func__, + dprintk(1, "%s called std_set %d dev_state %ld\n", __func__, dev->std_set_in_tuner_core, dev->dev_state); return au0828_writereg(dev, reg->reg, reg->val); diff --git a/drivers/media/usb/au0828/au0828.h b/drivers/media/usb/au0828/au0828.h index ff7f8510fb77..87f32846f1c0 100644 --- a/drivers/media/usb/au0828/au0828.h +++ b/drivers/media/usb/au0828/au0828.h @@ -21,6 +21,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/bitops.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> @@ -121,9 +122,9 @@ enum au0828_stream_state { /* device state */ enum au0828_dev_state { - DEV_INITIALIZED = 0x01, - DEV_DISCONNECTED = 0x02, - DEV_MISCONFIGURED = 0x04 + DEV_INITIALIZED = 0, + DEV_DISCONNECTED = 1, + DEV_MISCONFIGURED = 2 }; struct au0828_dev; @@ -247,7 +248,7 @@ struct au0828_dev { int input_type; int std_set_in_tuner_core; unsigned int ctrl_input; - enum au0828_dev_state dev_state; + long unsigned int dev_state; /* defined at enum au0828_dev_state */; enum au0828_stream_state stream_state; wait_queue_head_t open; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index df4c052c6bd6..f300f060b3f3 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -349,7 +349,7 @@ int videobuf_dma_free(struct videobuf_dmabuf *dma) if (dma->pages) { for (i = 0; i < dma->nr_pages; i++) - page_cache_release(dma->pages[i]); + put_page(dma->pages[i]); kfree(dma->pages); dma->pages = NULL; } diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c index e8b933111e0d..9c677f3f3c26 100644 --- a/drivers/misc/ibmasm/ibmasmfs.c +++ b/drivers/misc/ibmasm/ibmasmfs.c @@ -116,8 +116,8 @@ static int ibmasmfs_fill_super (struct super_block *sb, void *data, int silent) { struct inode *root; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = IBMASMFS_MAGIC; sb->s_op = &ibmasmfs_s_ops; sb->s_time_gran = 1; diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index f42d9c4e4561..f84a4275ca29 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -728,7 +728,7 @@ static void qp_release_pages(struct page **pages, if (dirty) set_page_dirty(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); pages[i] = NULL; } } diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 1d94607611d8..6e4c55a4aab5 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -356,11 +356,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) * They have to set these according to their abilities. 
*/ host->max_segs = 1; - host->max_seg_size = PAGE_CACHE_SIZE; + host->max_seg_size = PAGE_SIZE; - host->max_req_size = PAGE_CACHE_SIZE; + host->max_req_size = PAGE_SIZE; host->max_blk_size = 512; - host->max_blk_count = PAGE_CACHE_SIZE / 512; + host->max_blk_count = PAGE_SIZE / 512; return host; } diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 62aa5d0efcee..79e19017343e 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -390,6 +390,7 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot) slot->cd_idx = 0; slot->cd_override_level = true; if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD || + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXTM_SD || slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD) slot->host->mmc_host_ops.get_cd = bxt_get_cd; @@ -1173,6 +1174,30 @@ static const struct pci_device_id pci_ids[] = { { .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BXTM_EMMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BXTM_SDIO, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BXTM_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_APL_EMMC, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index d1a0b4db60db..89e7151684a1 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -28,6 +28,9 @@ #define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca #define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc #define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 +#define PCI_DEVICE_ID_INTEL_BXTM_SD 0x1aca +#define PCI_DEVICE_ID_INTEL_BXTM_EMMC 0x1acc +#define PCI_DEVICE_ID_INTEL_BXTM_SDIO 0x1ad0 #define PCI_DEVICE_ID_INTEL_APL_SD 0x5aca #define PCI_DEVICE_ID_INTEL_APL_EMMC 0x5acc #define PCI_DEVICE_ID_INTEL_APL_SDIO 0x5ad0 diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index aca439d3ca83..30132500aa1c 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -309,8 +309,30 @@ static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) __func__, uhs, ctrl_2); } +static void pxav3_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + u8 pwr = host->pwr; + + sdhci_set_power(host, mode, vdd); + + if (host->pwr == pwr) + return; + + if (host->pwr == 0) + vdd = 0; + + if (!IS_ERR(mmc->supply.vmmc)) { + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + } +} + static const struct sdhci_ops pxav3_sdhci_ops = { .set_clock = sdhci_set_clock, + .set_power = pxav3_set_power, .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .set_bus_width = sdhci_set_bus_width, diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 8670f162dec7..6bd3d1794966 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -1210,10 +1210,24 @@ clock_set: } EXPORT_SYMBOL_GPL(sdhci_set_clock); -static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, - 
unsigned short vdd) +static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) { struct mmc_host *mmc = host->mmc; + + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + spin_lock_irq(&host->lock); + + if (mode != MMC_POWER_OFF) + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); + else + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); +} + +void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ u8 pwr = 0; if (mode != MMC_POWER_OFF) { @@ -1245,7 +1259,6 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) sdhci_runtime_pm_bus_off(host); - vdd = 0; } else { /* * Spec says that we should clear the power reg before setting @@ -1276,12 +1289,20 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) mdelay(10); } +} +EXPORT_SYMBOL_GPL(sdhci_set_power); - if (!IS_ERR(mmc->supply.vmmc)) { - spin_unlock_irq(&host->lock); - mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); - spin_lock_irq(&host->lock); - } +static void __sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + if (host->ops->set_power) + host->ops->set_power(host, mode, vdd); + else if (!IS_ERR(mmc->supply.vmmc)) + sdhci_set_power_reg(host, mode, vdd); + else + sdhci_set_power(host, mode, vdd); } /*****************************************************************************\ @@ -1431,7 +1452,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) } } - sdhci_set_power(host, ios->power_mode, ios->vdd); + __sdhci_set_power(host, ios->power_mode, ios->vdd); if (host->ops->platform_send_init_74_clocks) host->ops->platform_send_init_74_clocks(host, ios->power_mode); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 3bd28033dbd9..0f39f4f84d10 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -529,6 +529,8 @@ struct sdhci_ops { #endif void (*set_clock)(struct sdhci_host *host, unsigned int clock); + void (*set_power)(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); int (*enable_dma)(struct sdhci_host *host); unsigned int (*get_max_clock)(struct sdhci_host *host); @@ -660,6 +662,8 @@ static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) } void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); +void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd); void sdhci_set_bus_width(struct sdhci_host *host, int width); void sdhci_reset(struct sdhci_host *host, u8 mask); void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 8d870ce9f944..d9a655f47d41 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -1513,7 +1513,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) mmc->caps |= pd->caps; mmc->max_segs = 32; mmc->max_blk_size = 512; - mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; + mmc->max_req_size = PAGE_SIZE * mmc->max_segs; mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; mmc->max_seg_size = mmc->max_req_size; diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 675435873823..7fb0c034dcb6 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ 
b/drivers/mmc/host/tmio_mmc_dma.c @@ -63,7 +63,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) } } - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || (align & PAGE_MASK))) || !multiple) { ret = -EINVAL; goto pio; @@ -133,7 +133,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) } } - if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || + if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_SIZE || (align & PAGE_MASK))) || !multiple) { ret = -EINVAL; goto pio; diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 03f6e74c1906..0521b4662748 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -1125,7 +1125,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, mmc->caps2 |= pdata->capabilities2; mmc->max_segs = 32; mmc->max_blk_size = 512; - mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * + mmc->max_blk_count = (PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index b2752fe711f2..807c06e203c3 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1789,7 +1789,7 @@ static int usdhi6_probe(struct platform_device *pdev) /* Set .max_segs to some random number. Feel free to adjust. */ mmc->max_segs = 32; mmc->max_blk_size = 512; - mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; + mmc->max_req_size = PAGE_SIZE * mmc->max_segs; mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; /* * Setting .max_seg_size to 1 page would simplify our page-mapping code, diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c index e2c0057737e6..7c887f111a7d 100644 --- a/drivers/mtd/devices/block2mtd.c +++ b/drivers/mtd/devices/block2mtd.c @@ -75,7 +75,7 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len) break; } - page_cache_release(page); + put_page(page); pages--; index++; } @@ -124,7 +124,7 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len, return PTR_ERR(page); memcpy(buf, page_address(page) + offset, cpylen); - page_cache_release(page); + put_page(page); if (retlen) *retlen += cpylen; @@ -164,7 +164,7 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf, unlock_page(page); balance_dirty_pages_ratelimited(mapping); } - page_cache_release(page); + put_page(page); if (retlen) *retlen += cpylen; diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index 1fd519503bb1..a58169a28741 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -1339,7 +1339,7 @@ static void put_pages(struct nandsim *ns) int i; for (i = 0; i < ns->held_cnt; i++) - page_cache_release(ns->held_pages[i]); + put_page(ns->held_pages[i]); } /* Get page cache pages in advance to provide NOFS memory allocation */ @@ -1349,8 +1349,8 @@ static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t struct page *page; struct address_space *mapping = file->f_mapping; - start_index = pos >> PAGE_CACHE_SHIFT; - end_index = (pos + count - 1) >> PAGE_CACHE_SHIFT; + start_index = pos >> PAGE_SHIFT; + end_index = (pos + count - 1) >> PAGE_SHIFT; if (end_index - start_index + 1 > NS_MAX_HELD_PAGES) return -EINVAL; ns->held_cnt = 0; diff --git 
a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 06bc2d2e7a73..a2cdfc1261dc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -166,6 +166,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */ + CH_PCI_ID_TABLE_FENTRY(0x509c), /* Custom T520-CR*/ /* T6 adapters: */ diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 3fc7bde699ba..ae90d4f12b70 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3106,7 +3106,7 @@ static int e1000_maybe_stop_tx(struct net_device *netdev, return __e1000_maybe_stop_tx(netdev, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) +#define TXD_USE_COUNT(S, X) (((S) + ((1 << (X)) - 1)) >> (X)) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -3256,12 +3256,29 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, nr_frags, mss); if (count) { + /* The descriptors needed is higher than other Intel drivers + * due to a number of workarounds. The breakdown is below: + * Data descriptors: MAX_SKB_FRAGS + 1 + * Context Descriptor: 1 + * Keep head from touching tail: 2 + * Workarounds: 3 + */ + int desc_needed = MAX_SKB_FRAGS + 7; + netdev_sent_queue(netdev, skb->len); skb_tx_timestamp(skb); e1000_tx_queue(adapter, tx_ring, tx_flags, count); + + /* 82544 potentially requires twice as many data descriptors + * in order to guarantee buffers don't end on evenly-aligned + * dwords + */ + if (adapter->pcix_82544) + desc_needed += MAX_SKB_FRAGS + 1; + /* Make sure there is space in the ring for the next send. */ - e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + e1000_maybe_stop_tx(netdev, tx_ring, desc_needed); if (!skb->xmit_more || netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 67006431726a..344912957cab 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8559,6 +8559,7 @@ static int i40e_sw_init(struct i40e_pf *pf) I40E_FLAG_OUTER_UDP_CSUM_CAPABLE | I40E_FLAG_WB_ON_ITR_CAPABLE | I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE | + I40E_FLAG_NO_PCI_LINK_CHECK | I40E_FLAG_100M_SGMII_CAPABLE | I40E_FLAG_USE_SET_LLDP_MIB | I40E_FLAG_GENEVE_OFFLOAD_CAPABLE; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 78464fa7fe1f..fcbd4be562e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -288,10 +288,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv) (priv->pcs == STMMAC_PCS_RTBI)) goto out; - /* Never init EEE in case of a switch is attached */ - if (priv->phydev->is_pseudo_fixed_link) - goto out; - /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { int tx_lpi_timer = priv->tx_lpi_timer; @@ -771,10 +767,16 @@ static void stmmac_adjust_link(struct net_device *dev) spin_unlock_irqrestore(&priv->lock, flags); - /* At this stage, it could be needed to setup the EEE or adjust some - * MAC related HW registers. 
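The e1000 TXD_USE_COUNT rewrite above replaces ((S) >> (X)) + 1, which always rounds up by a full unit and therefore over-counts whenever S is an exact multiple of 1 << X, with the standard power-of-two ceiling division. Reduced to the arithmetic:

/* Ceiling division by 2^x: add (2^x - 1) before shifting.
 * The old ((s >> x) + 1) form yields 2 for s == 1 << x, one too
 * many, wasting a descriptor per exactly-aligned buffer.
 */
#define CEIL_DIV_SHIFT(s, x)	(((s) + ((1 << (x)) - 1)) >> (x))

/* With x = 12 (4 KiB units):
 *   CEIL_DIV_SHIFT(4096, 12) == 1   (old macro: 2)
 *   CEIL_DIV_SHIFT(4097, 12) == 2   (old macro: 2)
 */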
- */ - priv->eee_enabled = stmmac_eee_init(priv); + if (phydev->is_pseudo_fixed_link) + /* Stop PHY layer to call the hook to adjust the link in case + * of a switch is attached to the stmmac driver. + */ + phydev->irq = PHY_IGNORE_INTERRUPT; + else + /* At this stage, init the EEE if supported. + * Never called in case of fixed_link. + */ + priv->eee_enabled = stmmac_eee_init(priv); } /** @@ -865,10 +867,6 @@ static int stmmac_init_phy(struct net_device *dev) return -ENODEV; } - /* If attached to a switch, there is no reason to poll phy handler */ - if (phydev->is_pseudo_fixed_link) - phydev->irq = PHY_IGNORE_INTERRUPT; - pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" " Link = %d\n", dev->name, phydev->phy_id, phydev->link); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 510e90a6bb26..2c9e45f50edb 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1015,7 +1015,6 @@ static void tun_net_init(struct net_device *dev) /* Zero header length */ dev->type = ARPHRD_NONE; dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; - dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ break; case IFF_TAP: @@ -1027,7 +1026,6 @@ static void tun_net_init(struct net_device *dev) eth_hw_addr_random(dev); - dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ break; } } @@ -1481,6 +1479,8 @@ static void tun_setup(struct net_device *dev) dev->ethtool_ops = &tun_ethtool_ops; dev->destructor = tun_free_netdev; + /* We prefer our own queue length */ + dev->tx_queue_len = TUN_READQ_SIZE; } /* Trivial set of netlink ops to allow deleting tun or tap diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index c32cbb593600..f068b6513cd2 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1204,7 +1204,7 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, { struct btt *btt = bdev->bd_disk->private_data; - btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector); + btt_do_bvec(btt, NULL, page, PAGE_SIZE, 0, rw, sector); page_endio(page, rw & WRITE, 0); return 0; } diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index fc82743aefb6..19f822d7f652 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -407,7 +407,7 @@ static const struct nd_cmd_desc __nd_cmd_dimm_descs[] = { [ND_CMD_IMPLEMENTED] = { }, [ND_CMD_SMART] = { .out_num = 2, - .out_sizes = { 4, 8, }, + .out_sizes = { 4, 128, }, }, [ND_CMD_SMART_THRESHOLD] = { .out_num = 2, diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 79646d0c3277..182a93fe3712 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -417,8 +417,8 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len) set_badblock(bb, start_sector, num_sectors); } -static void namespace_add_poison(struct list_head *poison_list, - struct badblocks *bb, struct resource *res) +static void badblocks_populate(struct list_head *poison_list, + struct badblocks *bb, const struct resource *res) { struct nd_poison *pl; @@ -460,36 +460,35 @@ static void namespace_add_poison(struct list_head *poison_list, } /** - * nvdimm_namespace_add_poison() - Convert a list of poison ranges to badblocks - * @ndns: the namespace containing poison ranges - * @bb: badblocks instance to populate - * @offset: offset at the start of the namespace before 'sector 0' + * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks + * @region: parent region of the range to interrogate + * @bb: badblocks instance to populate + * @res: resource range to 
consider * - * The poison list generated during NFIT initialization may contain multiple, - * possibly overlapping ranges in the SPA (System Physical Address) space. - * Compare each of these ranges to the namespace currently being initialized, - * and add badblocks to the gendisk for all matching sub-ranges + * The poison list generated during bus initialization may contain + * multiple, possibly overlapping physical address ranges. Compare each + * of these ranges to the resource range currently being initialized, + * and add badblocks entries for all matching sub-ranges */ -void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns, - struct badblocks *bb, resource_size_t offset) +void nvdimm_badblocks_populate(struct nd_region *nd_region, + struct badblocks *bb, const struct resource *res) { - struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); - struct nd_region *nd_region = to_nd_region(ndns->dev.parent); struct nvdimm_bus *nvdimm_bus; struct list_head *poison_list; - struct resource res = { - .start = nsio->res.start + offset, - .end = nsio->res.end, - }; - nvdimm_bus = to_nvdimm_bus(nd_region->dev.parent); + if (!is_nd_pmem(&nd_region->dev)) { + dev_WARN_ONCE(&nd_region->dev, 1, + "%s only valid for pmem regions\n", __func__); + return; + } + nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev); poison_list = &nvdimm_bus->poison_list; nvdimm_bus_lock(&nvdimm_bus->dev); - namespace_add_poison(poison_list, bb, &res); + badblocks_populate(poison_list, bb, res); nvdimm_bus_unlock(&nvdimm_bus->dev); } -EXPORT_SYMBOL_GPL(nvdimm_namespace_add_poison); +EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate); static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length) { diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index 1799bd97a9ce..875c524fafb0 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -266,8 +266,8 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns); int nvdimm_namespace_detach_btt(struct nd_namespace_common *ndns); const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns, char *name); -void nvdimm_namespace_add_poison(struct nd_namespace_common *ndns, - struct badblocks *bb, resource_size_t offset); +void nvdimm_badblocks_populate(struct nd_region *nd_region, + struct badblocks *bb, const struct resource *res); int nd_blk_region_init(struct nd_region *nd_region); void __nd_iostat_start(struct bio *bio, unsigned long *start); static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 254d3bc13f70..e071e214feba 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -376,7 +376,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn) } else { /* from init we validate */ if (memcmp(nd_pfn->uuid, pfn_sb->uuid, 16) != 0) - return -EINVAL; + return -ENODEV; } if (nd_pfn->align > nvdimm_namespace_capacity(ndns)) { diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index cc31c6f1f88e..8e09c544d892 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -151,7 +151,7 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, struct pmem_device *pmem = bdev->bd_disk->private_data; int rc; - rc = pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector); + rc = pmem_do_bvec(pmem, page, PAGE_SIZE, 0, rw, sector); if (rw & WRITE) wmb_pmem(); @@ -244,7 +244,9 @@ static void pmem_detach_disk(struct pmem_device *pmem) static int pmem_attach_disk(struct device *dev, struct nd_namespace_common *ndns, 
struct pmem_device *pmem) { + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); int nid = dev_to_node(dev); + struct resource bb_res; struct gendisk *disk; blk_queue_make_request(pmem->pmem_queue, pmem_make_request); @@ -271,8 +273,17 @@ static int pmem_attach_disk(struct device *dev, devm_exit_badblocks(dev, &pmem->bb); if (devm_init_badblocks(dev, &pmem->bb)) return -ENOMEM; - nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset); - + bb_res.start = nsio->res.start + pmem->data_offset; + bb_res.end = nsio->res.end; + if (is_nd_pfn(dev)) { + struct nd_pfn *nd_pfn = to_nd_pfn(dev); + struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; + + bb_res.start += __le32_to_cpu(pfn_sb->start_pad); + bb_res.end -= __le32_to_cpu(pfn_sb->end_trunc); + } + nvdimm_badblocks_populate(to_nd_region(dev->parent), &pmem->bb, + &bb_res); disk->bb = &pmem->bb; add_disk(disk); revalidate_disk(disk); @@ -553,7 +564,7 @@ static int nd_pmem_probe(struct device *dev) ndns->rw_bytes = pmem_rw_bytes; if (devm_init_badblocks(dev, &pmem->bb)) return -ENOMEM; - nvdimm_namespace_add_poison(ndns, &pmem->bb, 0); + nvdimm_badblocks_populate(nd_region, &pmem->bb, &nsio->res); if (is_nd_btt(dev)) { /* btt allocates its own request_queue */ @@ -595,14 +606,25 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event) { struct pmem_device *pmem = dev_get_drvdata(dev); struct nd_namespace_common *ndns = pmem->ndns; + struct nd_region *nd_region = to_nd_region(dev->parent); + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + struct resource res = { + .start = nsio->res.start + pmem->data_offset, + .end = nsio->res.end, + }; if (event != NVDIMM_REVALIDATE_POISON) return; - if (is_nd_btt(dev)) - nvdimm_namespace_add_poison(ndns, &pmem->bb, 0); - else - nvdimm_namespace_add_poison(ndns, &pmem->bb, pmem->data_offset); + if (is_nd_pfn(dev)) { + struct nd_pfn *nd_pfn = to_nd_pfn(dev); + struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb; + + res.start += __le32_to_cpu(pfn_sb->start_pad); + res.end -= __le32_to_cpu(pfn_sb->end_trunc); + } + + nvdimm_badblocks_populate(nd_region, &pmem->bb, &res); } MODULE_ALIAS("pmem"); diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c index b48ac6300c79..a0e5260bd006 100644 --- a/drivers/oprofile/oprofilefs.c +++ b/drivers/oprofile/oprofilefs.c @@ -239,8 +239,8 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *root_inode; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = OPROFILEFS_MAGIC; sb->s_op = &s_ops; sb->s_time_gran = 1; diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 4c2fa05b4589..944674ee3464 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -56,6 +56,7 @@ struct db1x_pcmcia_sock { int stschg_irq; /* card-status-change irq */ int card_irq; /* card irq */ int eject_irq; /* db1200/pb1200 have these */ + int insert_gpio; /* db1000 carddetect gpio */ #define BOARD_TYPE_DEFAULT 0 /* most boards */ #define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */ @@ -83,7 +84,7 @@ static int db1200_card_inserted(struct db1x_pcmcia_sock *sock) /* carddetect gpio: low-active */ static int db1000_card_inserted(struct db1x_pcmcia_sock *sock) { - return !gpio_get_value(irq_to_gpio(sock->insert_irq)); + return !gpio_get_value(sock->insert_gpio); } static int db1x_card_inserted(struct db1x_pcmcia_sock *sock) @@ -457,9 +458,15 @@ static int 
db1x_pcmcia_socket_probe(struct platform_device *pdev) r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "card"); sock->card_irq = r ? r->start : 0; - /* insert: irq which triggers on card insertion/ejection */ + /* insert: irq which triggers on card insertion/ejection + * BIG FAT NOTE: on DB1000/1100/1500/1550 we pass a GPIO here! + */ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "insert"); sock->insert_irq = r ? r->start : -1; + if (sock->board_type == BOARD_TYPE_DEFAULT) { + sock->insert_gpio = r ? r->start : -1; + sock->insert_irq = r ? gpio_to_irq(r->start) : -1; + } /* stschg: irq which trigger on card status change (optional) */ r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "stschg"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 46210512d8ec..9cfa544072b5 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -762,19 +762,18 @@ int imx_pinctrl_probe(struct platform_device *pdev, if (of_property_read_bool(dev_np, "fsl,input-sel")) { np = of_parse_phandle(dev_np, "fsl,input-sel", 0); - if (np) { - ipctl->input_sel_base = of_iomap(np, 0); - if (IS_ERR(ipctl->input_sel_base)) { - of_node_put(np); - dev_err(&pdev->dev, - "iomuxc input select base address not found\n"); - return PTR_ERR(ipctl->input_sel_base); - } - } else { + if (!np) { dev_err(&pdev->dev, "iomuxc fsl,input-sel property not found\n"); return -EINVAL; } + + ipctl->input_sel_base = of_iomap(np, 0); of_node_put(np); + if (!ipctl->input_sel_base) { + dev_err(&pdev->dev, + "iomuxc input select base address not found\n"); + return -ENOMEM; + } } imx_pinctrl_desc.name = dev_name(&pdev->dev); diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 85536b467c25..6c2c816f8e5f 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -665,6 +665,35 @@ static void intel_gpio_irq_ack(struct irq_data *d) spin_unlock(&pctrl->lock); } +static void intel_gpio_irq_enable(struct irq_data *d) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct intel_pinctrl *pctrl = gpiochip_get_data(gc); + const struct intel_community *community; + unsigned pin = irqd_to_hwirq(d); + unsigned long flags; + + spin_lock_irqsave(&pctrl->lock, flags); + + community = intel_get_community(pctrl, pin); + if (community) { + unsigned padno = pin_to_padno(community, pin); + unsigned gpp_size = community->gpp_size; + unsigned gpp_offset = padno % gpp_size; + unsigned gpp = padno / gpp_size; + u32 value; + + /* Clear interrupt status first to avoid unexpected interrupt */ + writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4); + + value = readl(community->regs + community->ie_offset + gpp * 4); + value |= BIT(gpp_offset); + writel(value, community->regs + community->ie_offset + gpp * 4); + } + + spin_unlock_irqrestore(&pctrl->lock, flags); +} + static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -741,8 +770,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) value |= PADCFG0_RXINV; } else if (type & IRQ_TYPE_EDGE_RISING) { value |= PADCFG0_RXEVCFG_EDGE << PADCFG0_RXEVCFG_SHIFT; - } else if (type & IRQ_TYPE_LEVEL_LOW) { - value |= PADCFG0_RXINV; + } else if (type & IRQ_TYPE_LEVEL_MASK) { + if (type & IRQ_TYPE_LEVEL_LOW) + value |= PADCFG0_RXINV; } else { value |= PADCFG0_RXEVCFG_DISABLED << PADCFG0_RXEVCFG_SHIFT; } @@ -852,6 +882,7 @@ static 
irqreturn_t intel_gpio_irq(int irq, void *data) static struct irq_chip intel_gpio_irqchip = { .name = "intel-gpio", + .irq_enable = intel_gpio_irq_enable, .irq_ack = intel_gpio_irq_ack, .irq_mask = intel_gpio_irq_mask, .irq_unmask = intel_gpio_irq_unmask, diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index 352406108fa0..c8969dd49449 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -990,7 +990,7 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s, int val; if (pull) - pullidx = data_out ? 1 : 2; + pullidx = data_out ? 2 : 1; seq_printf(s, " gpio-%-3d (%-20.20s) in %s %s", gpio, diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c index 856f736cb1a6..2673cd9d106e 100644 --- a/drivers/pinctrl/pinctrl-pistachio.c +++ b/drivers/pinctrl/pinctrl-pistachio.c @@ -469,27 +469,27 @@ static const char * const pistachio_mips_pll_lock_groups[] = { "mfio83", }; -static const char * const pistachio_sys_pll_lock_groups[] = { +static const char * const pistachio_audio_pll_lock_groups[] = { "mfio84", }; -static const char * const pistachio_wifi_pll_lock_groups[] = { +static const char * const pistachio_rpu_v_pll_lock_groups[] = { "mfio85", }; -static const char * const pistachio_bt_pll_lock_groups[] = { +static const char * const pistachio_rpu_l_pll_lock_groups[] = { "mfio86", }; -static const char * const pistachio_rpu_v_pll_lock_groups[] = { +static const char * const pistachio_sys_pll_lock_groups[] = { "mfio87", }; -static const char * const pistachio_rpu_l_pll_lock_groups[] = { +static const char * const pistachio_wifi_pll_lock_groups[] = { "mfio88", }; -static const char * const pistachio_audio_pll_lock_groups[] = { +static const char * const pistachio_bt_pll_lock_groups[] = { "mfio89", }; @@ -559,12 +559,12 @@ enum pistachio_mux_option { PISTACHIO_FUNCTION_DREQ4, PISTACHIO_FUNCTION_DREQ5, PISTACHIO_FUNCTION_MIPS_PLL_LOCK, + PISTACHIO_FUNCTION_AUDIO_PLL_LOCK, + PISTACHIO_FUNCTION_RPU_V_PLL_LOCK, + PISTACHIO_FUNCTION_RPU_L_PLL_LOCK, PISTACHIO_FUNCTION_SYS_PLL_LOCK, PISTACHIO_FUNCTION_WIFI_PLL_LOCK, PISTACHIO_FUNCTION_BT_PLL_LOCK, - PISTACHIO_FUNCTION_RPU_V_PLL_LOCK, - PISTACHIO_FUNCTION_RPU_L_PLL_LOCK, - PISTACHIO_FUNCTION_AUDIO_PLL_LOCK, PISTACHIO_FUNCTION_DEBUG_RAW_CCA_IND, PISTACHIO_FUNCTION_DEBUG_ED_SEC20_CCA_IND, PISTACHIO_FUNCTION_DEBUG_ED_SEC40_CCA_IND, @@ -620,12 +620,12 @@ static const struct pistachio_function pistachio_functions[] = { FUNCTION(dreq4), FUNCTION(dreq5), FUNCTION(mips_pll_lock), + FUNCTION(audio_pll_lock), + FUNCTION(rpu_v_pll_lock), + FUNCTION(rpu_l_pll_lock), FUNCTION(sys_pll_lock), FUNCTION(wifi_pll_lock), FUNCTION(bt_pll_lock), - FUNCTION(rpu_v_pll_lock), - FUNCTION(rpu_l_pll_lock), - FUNCTION(audio_pll_lock), FUNCTION(debug_raw_cca_ind), FUNCTION(debug_ed_sec20_cca_ind), FUNCTION(debug_ed_sec40_cca_ind), diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index 412c6b78140a..a13f2b6f6fc0 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -1573,6 +1573,22 @@ static int xway_gpio_dir_out(struct gpio_chip *chip, unsigned int pin, int val) return 0; } +/* + * gpiolib gpiod_to_irq callback function. + * Returns the mapped IRQ (external interrupt) number for a given GPIO pin. 
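The xway .to_irq hook added in the hunk that follows gives gpiolib's gpiod_to_irq() a backend: it scans the table of external-interrupt-capable pins and translates a match into an EIU interrupt number. The general shape of such a callback is a table lookup; a sketch with hypothetical field names:

#include <linux/gpio/driver.h>

static int my_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct my_priv *priv = gpiochip_get_data(chip);
	int i;

	/* Only a handful of pins can raise interrupts; find ours. */
	for (i = 0; i < priv->num_exin; i++)
		if (priv->exin_pin[i] == offset)
			return priv->exin_irq_base + i;

	return -ENXIO;		/* pin cannot generate an interrupt */
}

Returning a negative errno such as -ENXIO is the conventional failure value; the driver's return -1 also works in practice because callers only test for a negative result.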
+ */ +static int xway_gpio_to_irq(struct gpio_chip *chip, unsigned offset) +{ + struct ltq_pinmux_info *info = dev_get_drvdata(chip->parent); + int i; + + for (i = 0; i < info->num_exin; i++) + if (info->exin[i] == offset) + return ltq_eiu_get_irq(i); + + return -1; +} + static struct gpio_chip xway_chip = { .label = "gpio-xway", .direction_input = xway_gpio_dir_in, @@ -1581,6 +1597,7 @@ static struct gpio_chip xway_chip = { .set = xway_gpio_set, .request = gpiochip_generic_request, .free = gpiochip_generic_free, + .to_irq = xway_gpio_to_irq, .base = -1, }; diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c index b5d81ced6ce6..b68ae424cee2 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c @@ -237,7 +237,7 @@ DECLARE_QCA_GPIO_PINS(99); .pins = gpio##id##_pins, \ .npins = (unsigned)ARRAY_SIZE(gpio##id##_pins), \ .funcs = (int[]){ \ - qca_mux_NA, /* gpio mode */ \ + qca_mux_gpio, /* gpio mode */ \ qca_mux_##f1, \ qca_mux_##f2, \ qca_mux_##f3, \ @@ -254,11 +254,11 @@ DECLARE_QCA_GPIO_PINS(99); qca_mux_##f14 \ }, \ .nfuncs = 15, \ - .ctl_reg = 0x1000 + 0x10 * id, \ - .io_reg = 0x1004 + 0x10 * id, \ - .intr_cfg_reg = 0x1008 + 0x10 * id, \ - .intr_status_reg = 0x100c + 0x10 * id, \ - .intr_target_reg = 0x400 + 0x4 * id, \ + .ctl_reg = 0x0 + 0x1000 * id, \ + .io_reg = 0x4 + 0x1000 * id, \ + .intr_cfg_reg = 0x8 + 0x1000 * id, \ + .intr_status_reg = 0xc + 0x1000 * id, \ + .intr_target_reg = 0x8 + 0x1000 * id, \ .mux_bit = 2, \ .pull_bit = 0, \ .drv_bit = 6, \ @@ -414,7 +414,7 @@ static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { .nfunctions = ARRAY_SIZE(ipq4019_functions), .groups = ipq4019_groups, .ngroups = ARRAY_SIZE(ipq4019_groups), - .ngpios = 70, + .ngpios = 100, }; static int ipq4019_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index dc3609f0c60b..ee0c1f2567d9 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -546,7 +546,9 @@ static int sh_pfc_probe(struct platform_device *pdev) return ret; } - pinctrl_provide_dummies(); + /* Enable dummy states for those platforms without pinctrl support */ + if (!of_have_populated_dt()) + pinctrl_provide_dummies(); ret = sh_pfc_init_ranges(pfc); if (ret < 0) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c index 00265f0435a7..8b381d69df86 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a33.c @@ -485,6 +485,7 @@ static const struct sunxi_pinctrl_desc sun8i_a33_pinctrl_data = { .pins = sun8i_a33_pins, .npins = ARRAY_SIZE(sun8i_a33_pins), .irq_banks = 2, + .irq_bank_base = 1, }; static int sun8i_a33_pinctrl_probe(struct platform_device *pdev) diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 12a1dfabb1af..3b017dbd289c 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c @@ -579,7 +579,7 @@ static void sunxi_pinctrl_irq_release_resources(struct irq_data *d) static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) { struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); - u32 reg = sunxi_irq_cfg_reg(d->hwirq); + u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base); u8 index = sunxi_irq_cfg_offset(d->hwirq); unsigned long flags; u32 regval; @@ -626,7 +626,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type) 
static void sunxi_pinctrl_irq_ack(struct irq_data *d) { struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); - u32 status_reg = sunxi_irq_status_reg(d->hwirq); + u32 status_reg = sunxi_irq_status_reg(d->hwirq, + pctl->desc->irq_bank_base); u8 status_idx = sunxi_irq_status_offset(d->hwirq); /* Clear the IRQ */ @@ -636,7 +637,7 @@ static void sunxi_pinctrl_irq_ack(struct irq_data *d) static void sunxi_pinctrl_irq_mask(struct irq_data *d) { struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); - u32 reg = sunxi_irq_ctrl_reg(d->hwirq); + u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); unsigned long flags; u32 val; @@ -653,7 +654,7 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d) static void sunxi_pinctrl_irq_unmask(struct irq_data *d) { struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d); - u32 reg = sunxi_irq_ctrl_reg(d->hwirq); + u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base); u8 idx = sunxi_irq_ctrl_offset(d->hwirq); unsigned long flags; u32 val; @@ -745,7 +746,7 @@ static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) if (bank == pctl->desc->irq_banks) return; - reg = sunxi_irq_status_reg_from_bank(bank); + reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base); val = readl(pctl->membase + reg); if (val) { @@ -1024,9 +1025,11 @@ int sunxi_pinctrl_init(struct platform_device *pdev, for (i = 0; i < pctl->desc->irq_banks; i++) { /* Mask and clear all IRQs before registering a handler */ - writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i)); + writel(0, pctl->membase + sunxi_irq_ctrl_reg_from_bank(i, + pctl->desc->irq_bank_base)); writel(0xffffffff, - pctl->membase + sunxi_irq_status_reg_from_bank(i)); + pctl->membase + sunxi_irq_status_reg_from_bank(i, + pctl->desc->irq_bank_base)); irq_set_chained_handler_and_data(pctl->irq[i], sunxi_pinctrl_irq_handler, diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index e248e81a0f9e..0afce1ab12d0 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h @@ -97,6 +97,7 @@ struct sunxi_pinctrl_desc { int npins; unsigned pin_base; unsigned irq_banks; + unsigned irq_bank_base; bool irq_read_needs_mux; }; @@ -233,12 +234,12 @@ static inline u32 sunxi_pull_offset(u16 pin) return pin_num * PULL_PINS_BITS; } -static inline u32 sunxi_irq_cfg_reg(u16 irq) +static inline u32 sunxi_irq_cfg_reg(u16 irq, unsigned bank_base) { u8 bank = irq / IRQ_PER_BANK; u8 reg = (irq % IRQ_PER_BANK) / IRQ_CFG_IRQ_PER_REG * 0x04; - return IRQ_CFG_REG + bank * IRQ_MEM_SIZE + reg; + return IRQ_CFG_REG + (bank_base + bank) * IRQ_MEM_SIZE + reg; } static inline u32 sunxi_irq_cfg_offset(u16 irq) @@ -247,16 +248,16 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq) return irq_num * IRQ_CFG_IRQ_BITS; } -static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank) +static inline u32 sunxi_irq_ctrl_reg_from_bank(u8 bank, unsigned bank_base) { - return IRQ_CTRL_REG + bank * IRQ_MEM_SIZE; + return IRQ_CTRL_REG + (bank_base + bank) * IRQ_MEM_SIZE; } -static inline u32 sunxi_irq_ctrl_reg(u16 irq) +static inline u32 sunxi_irq_ctrl_reg(u16 irq, unsigned bank_base) { u8 bank = irq / IRQ_PER_BANK; - return sunxi_irq_ctrl_reg_from_bank(bank); + return sunxi_irq_ctrl_reg_from_bank(bank, bank_base); } static inline u32 sunxi_irq_ctrl_offset(u16 irq) @@ -265,16 +266,16 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq) return irq_num * IRQ_CTRL_IRQ_BITS; } -static inline u32 
sunxi_irq_status_reg_from_bank(u8 bank) +static inline u32 sunxi_irq_status_reg_from_bank(u8 bank, unsigned bank_base) { - return IRQ_STATUS_REG + bank * IRQ_MEM_SIZE; + return IRQ_STATUS_REG + (bank_base + bank) * IRQ_MEM_SIZE; } -static inline u32 sunxi_irq_status_reg(u16 irq) +static inline u32 sunxi_irq_status_reg(u16 irq, unsigned bank_base) { u8 bank = irq / IRQ_PER_BANK; - return sunxi_irq_status_reg_from_bank(bank); + return sunxi_irq_status_reg_from_bank(bank, bank_base); } static inline u32 sunxi_irq_status_offset(u16 irq) diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index cdfd01f0adb8..8fad0a7044d3 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -1091,6 +1091,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = { RAPL_CPU(0x3f, rapl_defaults_hsw_server),/* Haswell servers */ RAPL_CPU(0x4f, rapl_defaults_hsw_server),/* Broadwell servers */ RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */ + RAPL_CPU(0x46, rapl_defaults_core),/* Haswell */ RAPL_CPU(0x47, rapl_defaults_core),/* Broadwell-H */ RAPL_CPU(0x4E, rapl_defaults_core),/* Skylake */ RAPL_CPU(0x4C, rapl_defaults_cht),/* Braswell/Cherryview */ diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 21a67ed047e8..ff6caab8cc8b 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -452,10 +452,11 @@ static int aac_slave_configure(struct scsi_device *sdev) else if (depth < 2) depth = 2; scsi_change_queue_depth(sdev, depth); - } else + } else { scsi_change_queue_depth(sdev, 1); sdev->tagged_supported = 1; + } return 0; } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 35968bdb4866..8fb9643fe6e3 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -289,7 +289,7 @@ static void context_reset(struct afu_cmd *cmd) atomic64_set(&afu->room, room); if (room) goto write_rrin; - udelay(nretry); + udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); pr_err("%s: no cmd_room to send reset\n", __func__); @@ -303,7 +303,7 @@ write_rrin: if (rrin != 0x1) break; /* Double delay each time */ - udelay(2 << nretry); + udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); } @@ -338,7 +338,7 @@ retry: atomic64_set(&afu->room, room); if (room) goto write_ioarrin; - udelay(nretry); + udelay(1 << nretry); } while (nretry++ < MC_ROOM_RETRY_CNT); dev_err(dev, "%s: no cmd_room to send 0x%X\n", @@ -352,7 +352,7 @@ retry: * afu->room. */ if (nretry++ < MC_ROOM_RETRY_CNT) { - udelay(nretry); + udelay(1 << nretry); goto retry; } @@ -683,28 +683,23 @@ static void stop_afu(struct cxlflash_cfg *cfg) } /** - * term_mc() - terminates the master context + * term_intr() - disables all AFU interrupts * @cfg: Internal structure associated with the host. * @level: Depth of allocation, where to begin waterfall tear down. * * Safe to call with AFU/MC in partially allocated/initialized state. 
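The cxlflash rework here splits the old term_mc() into term_intr() (interrupt teardown) and a slimmer term_mc() (context stop) so the two steps can be ordered independently. term_intr() keeps the waterfall idiom: setup reports how deep it got, and teardown enters the switch at that depth and falls through every shallower case. A compact sketch of the pattern, with the enum values taken from the new main.h and the undo bodies left as placeholders:

enum undo_level { UNDO_NOOP = 0, FREE_IRQ, UNMAP_ONE, UNMAP_TWO, UNMAP_THREE };

static void unwind(enum undo_level level)
{
        switch (level) {
        case UNMAP_THREE:
                /* undo the third mapping */
                /* fall through */
        case UNMAP_TWO:
                /* undo the second mapping */
                /* fall through */
        case UNMAP_ONE:
                /* undo the first mapping */
                /* fall through */
        case FREE_IRQ:
                /* release the IRQs themselves */
                /* fall through */
        case UNDO_NOOP:
                break;  /* nothing was set up */
        }
}

A setup step that fails simply returns the level already reached, and the caller hands that value straight to the unwinder; that is why init_intr() below returns an enum undo_level rather than an errno.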
*/ -static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level) +static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level) { - int rc = 0; struct afu *afu = cfg->afu; struct device *dev = &cfg->dev->dev; if (!afu || !cfg->mcctx) { - dev_err(dev, "%s: returning from term_mc with NULL afu or MC\n", - __func__); + dev_err(dev, "%s: returning with NULL afu or MC\n", __func__); return; } switch (level) { - case UNDO_START: - rc = cxl_stop_context(cfg->mcctx); - BUG_ON(rc); case UNMAP_THREE: cxl_unmap_afu_irq(cfg->mcctx, 3, afu); case UNMAP_TWO: @@ -713,9 +708,34 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level) cxl_unmap_afu_irq(cfg->mcctx, 1, afu); case FREE_IRQ: cxl_free_afu_irqs(cfg->mcctx); - case RELEASE_CONTEXT: - cfg->mcctx = NULL; + /* fall through */ + case UNDO_NOOP: + /* No action required */ + break; + } +} + +/** + * term_mc() - terminates the master context + * @cfg: Internal structure associated with the host. + * @level: Depth of allocation, where to begin waterfall tear down. + * + * Safe to call with AFU/MC in partially allocated/initialized state. + */ +static void term_mc(struct cxlflash_cfg *cfg) +{ + int rc = 0; + struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; + + if (!afu || !cfg->mcctx) { + dev_err(dev, "%s: returning with NULL afu or MC\n", __func__); + return; } + + rc = cxl_stop_context(cfg->mcctx); + WARN_ON(rc); + cfg->mcctx = NULL; } /** @@ -726,10 +746,20 @@ static void term_mc(struct cxlflash_cfg *cfg, enum undo_level level) */ static void term_afu(struct cxlflash_cfg *cfg) { + /* + * Tear down is carefully orchestrated to ensure + * no interrupts can come in when the problem state + * area is unmapped. + * + * 1) Disable all AFU interrupts + * 2) Unmap the problem state area + * 3) Stop the master context + */ + term_intr(cfg, UNMAP_THREE); if (cfg->afu) stop_afu(cfg); - term_mc(cfg, UNDO_START); + term_mc(cfg); pr_debug("%s: returning\n", __func__); } @@ -1597,41 +1627,24 @@ static int start_afu(struct cxlflash_cfg *cfg) } /** - * init_mc() - create and register as the master context + * init_intr() - setup interrupt handlers for the master context * @cfg: Internal structure associated with the host. * * Return: 0 on success, -errno on failure */ -static int init_mc(struct cxlflash_cfg *cfg) +static enum undo_level init_intr(struct cxlflash_cfg *cfg, + struct cxl_context *ctx) { - struct cxl_context *ctx; - struct device *dev = &cfg->dev->dev; struct afu *afu = cfg->afu; + struct device *dev = &cfg->dev->dev; int rc = 0; - enum undo_level level; - - ctx = cxl_get_context(cfg->dev); - if (unlikely(!ctx)) - return -ENOMEM; - cfg->mcctx = ctx; - - /* Set it up as a master with the CXL */ - cxl_set_master(ctx); - - /* During initialization reset the AFU to start from a clean slate */ - rc = cxl_afu_reset(cfg->mcctx); - if (unlikely(rc)) { - dev_err(dev, "%s: initial AFU reset failed rc=%d\n", - __func__, rc); - level = RELEASE_CONTEXT; - goto out; - } + enum undo_level level = UNDO_NOOP; rc = cxl_allocate_afu_irqs(ctx, 3); if (unlikely(rc)) { dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n", __func__, rc); - level = RELEASE_CONTEXT; + level = UNDO_NOOP; goto out; } @@ -1661,8 +1674,47 @@ static int init_mc(struct cxlflash_cfg *cfg) level = UNMAP_TWO; goto out; } +out: + return level; +} - rc = 0; +/** + * init_mc() - create and register as the master context + * @cfg: Internal structure associated with the host. 
+ * + * Return: 0 on success, -errno on failure + */ +static int init_mc(struct cxlflash_cfg *cfg) +{ + struct cxl_context *ctx; + struct device *dev = &cfg->dev->dev; + int rc = 0; + enum undo_level level; + + ctx = cxl_get_context(cfg->dev); + if (unlikely(!ctx)) { + rc = -ENOMEM; + goto ret; + } + cfg->mcctx = ctx; + + /* Set it up as a master with the CXL */ + cxl_set_master(ctx); + + /* During initialization reset the AFU to start from a clean slate */ + rc = cxl_afu_reset(cfg->mcctx); + if (unlikely(rc)) { + dev_err(dev, "%s: initial AFU reset failed rc=%d\n", + __func__, rc); + goto ret; + } + + level = init_intr(cfg, ctx); + if (unlikely(level)) { + dev_err(dev, "%s: setting up interrupts failed rc=%d\n", + __func__, rc); + goto out; + } /* This performs the equivalent of the CXL_IOCTL_START_WORK. * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process @@ -1678,7 +1730,7 @@ ret: pr_debug("%s: returning rc=%d\n", __func__, rc); return rc; out: - term_mc(cfg, level); + term_intr(cfg, level); goto ret; } @@ -1751,7 +1803,8 @@ out: err2: kref_put(&afu->mapcount, afu_unmap); err1: - term_mc(cfg, UNDO_START); + term_intr(cfg, UNMAP_THREE); + term_mc(cfg); goto out; } @@ -2488,8 +2541,7 @@ static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, if (unlikely(rc)) dev_err(dev, "%s: Failed to mark user contexts!(%d)\n", __func__, rc); - stop_afu(cfg); - term_mc(cfg, UNDO_START); + term_afu(cfg); return PCI_ERS_RESULT_NEED_RESET; case pci_channel_io_perm_failure: cfg->state = STATE_FAILTERM; diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h index 0faed422c7f4..eb9d8f730b38 100644 --- a/drivers/scsi/cxlflash/main.h +++ b/drivers/scsi/cxlflash/main.h @@ -79,12 +79,11 @@ #define WWPN_BUF_LEN (WWPN_LEN + 1) enum undo_level { - RELEASE_CONTEXT = 0, + UNDO_NOOP = 0, FREE_IRQ, UNMAP_ONE, UNMAP_TWO, - UNMAP_THREE, - UNDO_START + UNMAP_THREE }; struct dev_dependent_vals { diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index a404a41e871c..8eaed0522aa3 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -1112,9 +1112,9 @@ static void alua_bus_detach(struct scsi_device *sdev) h->sdev = NULL; spin_unlock(&h->pg_lock); if (pg) { - spin_lock(&pg->lock); + spin_lock_irq(&pg->lock); list_del_rcu(&h->node); - spin_unlock(&pg->lock); + spin_unlock_irq(&pg->lock); kref_put(&pg->kref, release_port_group); } sdev->handler_data = NULL; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index e4db5fb3239a..8c44b9c424af 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5030,7 +5030,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, static int _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) { - int r, i; + int r, i, index; unsigned long flags; u32 reply_address; u16 smid; @@ -5039,8 +5039,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next; u8 hide_flag; struct adapter_reply_queue *reply_q; - long reply_post_free; - u32 reply_post_free_sz, index = 0; + Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__)); @@ -5124,27 +5123,27 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) _base_assign_reply_queues(ioc); /* initialize Reply Post Free Queue */ - 
reply_post_free_sz = ioc->reply_post_queue_depth * - sizeof(Mpi2DefaultReplyDescriptor_t); - reply_post_free = (long)ioc->reply_post[index].reply_post_free; + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. + */ + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + reply_q->reply_post_host_index = 0; - reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) - reply_post_free; for (i = 0; i < ioc->reply_post_queue_depth; i++) reply_q->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX); if (!_base_is_controller_msix_enabled(ioc)) goto skip_init_reply_post_free_queue; - /* - * If RDPQ is enabled, switch to the next allocation. - * Otherwise advance within the contiguous region. - */ - if (ioc->rdpq_array_enable) - reply_post_free = (long) - ioc->reply_post[++index].reply_post_free; - else - reply_post_free += reply_post_free_sz; } skip_init_reply_post_free_queue: diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index b1bf42b93fcc..1deb6adc411f 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -784,8 +784,9 @@ void scsi_attach_vpd(struct scsi_device *sdev) int pg83_supported = 0; unsigned char __rcu *vpd_buf, *orig_vpd_buf = NULL; - if (sdev->skip_vpd_pages) + if (!scsi_device_supports_vpd(sdev)) return; + retry_pg0: vpd_buf = kmalloc(vpd_len, GFP_KERNEL); if (!vpd_buf) diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 92ffd2406f97..2b642b145be1 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -81,6 +81,7 @@ const char *scsi_host_state_name(enum scsi_host_state state) return name; } +#ifdef CONFIG_SCSI_DH static const struct { unsigned char value; char *name; @@ -94,7 +95,7 @@ static const struct { { SCSI_ACCESS_STATE_TRANSITIONING, "transitioning" }, }; -const char *scsi_access_state_name(unsigned char state) +static const char *scsi_access_state_name(unsigned char state) { int i; char *name = NULL; @@ -107,6 +108,7 @@ const char *scsi_access_state_name(unsigned char state) } return name; } +#endif static int check_set(unsigned long long *val, char *src) { @@ -226,7 +228,7 @@ show_shost_state(struct device *dev, struct device_attribute *attr, char *buf) } /* DEVICE_ATTR(state) clashes with dev_attr_state for sdev */ -struct device_attribute dev_attr_hstate = +static struct device_attribute dev_attr_hstate = __ATTR(state, S_IRUGO | S_IWUSR, show_shost_state, store_shost_state); static ssize_t @@ -401,7 +403,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = { NULL }; -struct attribute_group scsi_shost_attr_group = { +static struct attribute_group scsi_shost_attr_group = { .attrs = scsi_sysfs_shost_attrs, }; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 5a5457ac9cdb..f52b74cf8d1e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1275,18 +1275,19 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk); struct scsi_device *sdp = sdkp->device; struct Scsi_Host *host = sdp->host; + sector_t capacity = logical_to_sectors(sdp, sdkp->capacity); int diskinfo[4]; /* default to most commonly used values */ - diskinfo[0] = 0x40; /* 1 << 6 */ - diskinfo[1] = 0x20; /* 1 << 5 
*/ - diskinfo[2] = sdkp->capacity >> 11; - + diskinfo[0] = 0x40; /* 1 << 6 */ + diskinfo[1] = 0x20; /* 1 << 5 */ + diskinfo[2] = capacity >> 11; + /* override with calculated, extended default, or driver values */ if (host->hostt->bios_param) - host->hostt->bios_param(sdp, bdev, sdkp->capacity, diskinfo); + host->hostt->bios_param(sdp, bdev, capacity, diskinfo); else - scsicam_bios_param(bdev, sdkp->capacity, diskinfo); + scsicam_bios_param(bdev, capacity, diskinfo); geo->heads = diskinfo[0]; geo->sectors = diskinfo[1]; @@ -2337,14 +2338,6 @@ got_data: if (sdkp->capacity > 0xffffffff) sdp->use_16_for_rw = 1; - /* Rescale capacity to 512-byte units */ - if (sector_size == 4096) - sdkp->capacity <<= 3; - else if (sector_size == 2048) - sdkp->capacity <<= 2; - else if (sector_size == 1024) - sdkp->capacity <<= 1; - blk_queue_physical_block_size(sdp->request_queue, sdkp->physical_block_size); sdkp->device->sector_size = sector_size; @@ -2795,28 +2788,6 @@ static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer) sdkp->ws10 = 1; } -static int sd_try_extended_inquiry(struct scsi_device *sdp) -{ - /* Attempt VPD inquiry if the device blacklist explicitly calls - * for it. - */ - if (sdp->try_vpd_pages) - return 1; - /* - * Although VPD inquiries can go to SCSI-2 type devices, - * some USB ones crash on receiving them, and the pages - * we currently ask for are for SPC-3 and beyond - */ - if (sdp->scsi_level > SCSI_SPC_2 && !sdp->skip_vpd_pages) - return 1; - return 0; -} - -static inline u32 logical_to_sectors(struct scsi_device *sdev, u32 blocks) -{ - return blocks << (ilog2(sdev->sector_size) - 9); -} - /** * sd_revalidate_disk - called the first time a new disk is seen, * performs disk spin up, read_capacity, etc. @@ -2856,7 +2827,7 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->media_present) { sd_read_capacity(sdkp, buffer); - if (sd_try_extended_inquiry(sdp)) { + if (scsi_device_supports_vpd(sdp)) { sd_read_block_provisioning(sdkp); sd_read_block_limits(sdkp); sd_read_block_characteristics(sdkp); @@ -2891,7 +2862,7 @@ static int sd_revalidate_disk(struct gendisk *disk) if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max && sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && - sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE) + sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) rw_max = q->limits.io_opt = sdkp->opt_xfer_blocks * sdp->sector_size; else @@ -2900,7 +2871,7 @@ static int sd_revalidate_disk(struct gendisk *disk) /* Combine with controller limits */ q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); - set_capacity(disk, sdkp->capacity); + set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); sd_config_write_same(sdkp); kfree(buffer); diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 5f2a84aff29f..654630bb7d0e 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -65,7 +65,7 @@ struct scsi_disk { struct device dev; struct gendisk *disk; atomic_t openers; - sector_t capacity; /* size in 512-byte sectors */ + sector_t capacity; /* size in logical blocks */ u32 max_xfer_blocks; u32 opt_xfer_blocks; u32 max_ws_blocks; @@ -146,6 +146,11 @@ static inline int scsi_medium_access_command(struct scsi_cmnd *scmd) return 0; } +static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blocks) +{ + return blocks << (ilog2(sdev->sector_size) - 9); +} + /* * A DIF-capable target device can be formatted with different * protection schemes. 
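The sd changes above stop pre-scaling sdkp->capacity to 512-byte units at read-capacity time; capacity now stays in logical blocks and is converted only at the block-layer boundary by logical_to_sectors(), which shifts by ilog2(sector_size) - 9. A standalone check of that arithmetic (for a power of two, ilog2 equals the trailing-zero count, so __builtin_ctz stands in for the kernel helper here):

#include <stdio.h>
#include <stdint.h>

/* userspace mirror of logical_to_sectors(): logical blocks -> 512-byte sectors */
static uint64_t logical_to_sectors(unsigned int sector_size, uint64_t blocks)
{
        return blocks << (__builtin_ctz(sector_size) - 9);
}

int main(void)
{
        /* a 4096-byte-sector disk: each logical block is 8 sectors */
        printf("%llu\n", (unsigned long long)logical_to_sectors(4096, 1000000));
        /* prints 8000000 */
        return 0;
}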
Currently 0 through 3 are defined: diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index 71c5138ddf94..dbf1882cfbac 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -4941,7 +4941,7 @@ static int sgl_map_user_pages(struct st_buffer *STbp, out_unmap: if (res > 0) { for (j=0; j < res; j++) - page_cache_release(pages[j]); + put_page(pages[j]); res = 0; } kfree(pages); @@ -4963,7 +4963,7 @@ static int sgl_unmap_user_pages(struct st_buffer *STbp, /* FIXME: cache flush missing for rw==READ * FIXME: call the correct reference counting function */ - page_cache_release(page); + put_page(page); } kfree(STbp->mapped_pages); STbp->mapped_pages = NULL; diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index e7a19be87c38..50769078e72e 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -211,11 +211,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(master); - unsigned int bpw = transfer->bits_per_word; + unsigned int bpw; if (!master->dma_rx) return false; + if (!transfer) + return false; + + bpw = transfer->bits_per_word; if (!bpw) bpw = spi->bits_per_word; @@ -333,8 +337,9 @@ static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx) static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, struct spi_imx_config *config) { - u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0; + u32 ctrl = MX51_ECSPI_CTRL_ENABLE; u32 clk = config->speed_hz, delay, reg; + u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); /* * The hardware seems to have a race condition when changing modes. The @@ -358,13 +363,20 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, if (config->mode & SPI_CPHA) cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs); + else + cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs); if (config->mode & SPI_CPOL) { cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs); cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs); + } else { + cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs); + cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs); } if (config->mode & SPI_CS_HIGH) cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs); + else + cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs); if (spi_imx->usedma) ctrl |= MX51_ECSPI_CTRL_SMC; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 0caa3c8bef46..43a02e377b3b 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -423,16 +423,12 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi, if (mcspi_dma->dma_tx) { struct dma_async_tx_descriptor *tx; - struct scatterlist sg; dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); - sg_init_table(&sg, 1); - sg_dma_address(&sg) = xfer->tx_dma; - sg_dma_len(&sg) = xfer->len; - - tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl, + xfer->tx_sg.nents, DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (tx) { tx->callback = omap2_mcspi_tx_callback; tx->callback_param = spi; @@ -478,20 +474,15 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, if (mcspi_dma->dma_rx) { struct dma_async_tx_descriptor *tx; - struct scatterlist sg; dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0) dma_count -= es; - sg_init_table(&sg, 1); - sg_dma_address(&sg) = xfer->rx_dma; - sg_dma_len(&sg) = dma_count; - - tx = 
dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, - DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | - DMA_CTRL_ACK); + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl, + xfer->rx_sg.nents, DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (tx) { tx->callback = omap2_mcspi_rx_callback; tx->callback_param = spi; @@ -505,8 +496,6 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer, omap2_mcspi_set_dma_req(spi, 1, 1); wait_for_completion(&mcspi_dma->dma_rx_completion); - dma_unmap_single(mcspi->dev, xfer->rx_dma, count, - DMA_FROM_DEVICE); if (mcspi->fifo_depth > 0) return count; @@ -619,8 +608,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) if (tx != NULL) { wait_for_completion(&mcspi_dma->dma_tx_completion); - dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len, - DMA_TO_DEVICE); if (mcspi->fifo_depth > 0) { irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS; @@ -1087,6 +1074,16 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) gpio_free(spi->cs_gpio); } +static bool omap2_mcspi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + if (xfer->len < DMA_MIN_BYTES) + return false; + + return true; +} + static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi, struct spi_device *spi, struct spi_transfer *t) { @@ -1268,32 +1265,6 @@ static int omap2_mcspi_transfer_one(struct spi_master *master, return -EINVAL; } - if (len < DMA_MIN_BYTES) - goto skip_dma_map; - - if (mcspi_dma->dma_tx && tx_buf != NULL) { - t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf, - len, DMA_TO_DEVICE); - if (dma_mapping_error(mcspi->dev, t->tx_dma)) { - dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", - 'T', len); - return -EINVAL; - } - } - if (mcspi_dma->dma_rx && rx_buf != NULL) { - t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(mcspi->dev, t->rx_dma)) { - dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", - 'R', len); - if (tx_buf != NULL) - dma_unmap_single(mcspi->dev, t->tx_dma, - len, DMA_TO_DEVICE); - return -EINVAL; - } - } - -skip_dma_map: return omap2_mcspi_work_one(mcspi, spi, t); } @@ -1377,6 +1348,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev) master->transfer_one = omap2_mcspi_transfer_one; master->set_cs = omap2_mcspi_set_cs; master->cleanup = omap2_mcspi_cleanup; + master->can_dma = omap2_mcspi_can_dma; master->dev.of_node = node; master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index 8f50a4020f6f..6c6c0013ec7a 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -534,7 +534,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs) if (WARN_ON(rs->speed > MAX_SCLK_OUT)) rs->speed = MAX_SCLK_OUT; - /* the minimum divsor is 2 */ + /* the minimum divisor is 2 */ if (rs->max_freq < 2 * rs->speed) { clk_set_rate(rs->spiclk, 2 * rs->speed); rs->max_freq = clk_get_rate(rs->spiclk); @@ -730,23 +730,27 @@ static int rockchip_spi_probe(struct platform_device *pdev) master->transfer_one = rockchip_spi_transfer_one; master->handle_err = rockchip_spi_handle_err; - rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); - if (IS_ERR_OR_NULL(rs->dma_tx.ch)) { + rs->dma_tx.ch = dma_request_chan(rs->dev, "tx"); + if (IS_ERR(rs->dma_tx.ch)) { /* Check tx to see if we need defer probing driver */ if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; goto err_get_fifo_len; } 
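The rockchip conversion in this hunk is the standard optional-DMA probe pattern: dma_request_slave_channel() returned NULL on every failure, which hid -EPROBE_DEFER, while dma_request_chan() returns an ERR_PTR that lets the driver defer when the DMA provider simply has not bound yet and fall back to PIO otherwise. Sketched in isolation, assuming a struct device *dev and struct dma_chan *chan in scope:

chan = dma_request_chan(dev, "tx");
if (IS_ERR(chan)) {
        if (PTR_ERR(chan) == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* provider not ready: retry probe later */
        dev_warn(dev, "no TX DMA channel, using PIO\n");
        chan = NULL;                    /* NULL signals the PIO fallback paths */
}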
dev_warn(rs->dev, "Failed to request TX DMA channel\n"); + rs->dma_tx.ch = NULL; } - rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx"); - if (!rs->dma_rx.ch) { - if (rs->dma_tx.ch) { + rs->dma_rx.ch = dma_request_chan(rs->dev, "rx"); + if (IS_ERR(rs->dma_rx.ch)) { + if (PTR_ERR(rs->dma_rx.ch) == -EPROBE_DEFER) { dma_release_channel(rs->dma_tx.ch); rs->dma_tx.ch = NULL; + ret = -EPROBE_DEFER; + goto err_get_fifo_len; } dev_warn(rs->dev, "Failed to request RX DMA channel\n"); + rs->dma_rx.ch = NULL; } if (rs->dma_tx.ch && rs->dma_rx.ch) { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index de2f2f90d799..0239b45eed92 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1209,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work) struct spi_master *master = container_of(work, struct spi_master, pump_messages); - __spi_pump_messages(master, true, false); + __spi_pump_messages(master, true, master->bus_lock_flag); } static int spi_init_queue(struct spi_master *master) @@ -2853,7 +2853,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message, */ int spi_sync(struct spi_device *spi, struct spi_message *message) { - return __spi_sync(spi, message, 0); + return __spi_sync(spi, message, spi->master->bus_lock_flag); } EXPORT_SYMBOL_GPL(spi_sync); diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index cf84581287b9..5bac28a3944e 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -30,6 +30,8 @@ source "drivers/staging/wlan-ng/Kconfig" source "drivers/staging/comedi/Kconfig" +source "drivers/staging/olpc_dcon/Kconfig" + source "drivers/staging/rtl8192u/Kconfig" source "drivers/staging/rtl8192e/Kconfig" diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index 7d6448d20464..a954242b0f2c 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -4,6 +4,7 @@ obj-y += media/ obj-$(CONFIG_SLICOSS) += slicoss/ obj-$(CONFIG_PRISM2_USB) += wlan-ng/ obj-$(CONFIG_COMEDI) += comedi/ +obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/ obj-$(CONFIG_RTL8192U) += rtl8192u/ obj-$(CONFIG_RTL8192E) += rtl8192e/ obj-$(CONFIG_R8712U) += rtl8712/ diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h index dab486261154..13335437c69c 100644 --- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h +++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h @@ -88,7 +88,7 @@ do { \ } while (0) #ifndef LIBCFS_VMALLOC_SIZE -#define LIBCFS_VMALLOC_SIZE (2 << PAGE_CACHE_SHIFT) /* 2 pages */ +#define LIBCFS_VMALLOC_SIZE (2 << PAGE_SHIFT) /* 2 pages */ #endif #define LIBCFS_ALLOC_PRE(size, mask) \ diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h index 0f2fd79e5ec8..837eb22749c3 100644 --- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h +++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h @@ -57,7 +57,7 @@ #include "../libcfs_cpu.h" #endif -#define CFS_PAGE_MASK (~((__u64)PAGE_CACHE_SIZE-1)) +#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1)) #define page_index(p) ((p)->index) #define memory_pressure_get() (current->flags & PF_MEMALLOC) @@ -67,7 +67,7 @@ #if BITS_PER_LONG == 32 /* limit to lowmem on 32-bit systems */ #define NUM_CACHEPAGES \ - min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4) + min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4) #else #define NUM_CACHEPAGES totalram_pages #endif diff --git 
a/drivers/staging/lustre/include/linux/lnet/types.h b/drivers/staging/lustre/include/linux/lnet/types.h index 08f193c341c5..1c679cb72785 100644 --- a/drivers/staging/lustre/include/linux/lnet/types.h +++ b/drivers/staging/lustre/include/linux/lnet/types.h @@ -514,7 +514,7 @@ typedef struct { /** * Starting offset of the fragment within the page. Note that the * end of the fragment must not pass the end of the page; i.e., - * kiov_len + kiov_offset <= PAGE_CACHE_SIZE. + * kiov_len + kiov_offset <= PAGE_SIZE. */ unsigned int kiov_offset; } lnet_kiov_t; diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c index 3e1f24e77f64..d4ce06d0aeeb 100644 --- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c +++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c @@ -291,7 +291,7 @@ ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov, for (nob = i = 0; i < niov; i++) { if ((kiov[i].kiov_offset && i > 0) || - (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_CACHE_SIZE && i < niov - 1)) + (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) return NULL; pages[i] = kiov[i].kiov_page; diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c index c90e5102fe06..c3d628bac5b8 100644 --- a/drivers/staging/lustre/lnet/libcfs/debug.c +++ b/drivers/staging/lustre/lnet/libcfs/debug.c @@ -517,7 +517,7 @@ int libcfs_debug_init(unsigned long bufsize) max = TCD_MAX_PAGES; } else { max = max / num_possible_cpus(); - max <<= (20 - PAGE_CACHE_SHIFT); + max <<= (20 - PAGE_SHIFT); } rc = cfs_tracefile_init(max); diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c index ec3bc04bd89f..244eb89eef68 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.c +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c @@ -182,7 +182,7 @@ cfs_trace_get_tage_try(struct cfs_trace_cpu_data *tcd, unsigned long len) if (tcd->tcd_cur_pages > 0) { __LASSERT(!list_empty(&tcd->tcd_pages)); tage = cfs_tage_from_list(tcd->tcd_pages.prev); - if (tage->used + len <= PAGE_CACHE_SIZE) + if (tage->used + len <= PAGE_SIZE) return tage; } @@ -260,7 +260,7 @@ static struct cfs_trace_page *cfs_trace_get_tage(struct cfs_trace_cpu_data *tcd, * from here: this will lead to infinite recursion. 
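Nearly all of the lustre churn in this series is one mechanical substitution: PAGE_CACHE_SIZE/PAGE_CACHE_SHIFT and page_cache_get()/page_cache_release() have long been straight aliases of PAGE_SIZE/PAGE_SHIFT and get_page()/put_page(), and these patches spell the base names directly ahead of the aliases' removal. The only spots worth reading closely are the unit conversions, which shift between megabytes and pages. A quick standalone check of that idiom, with PAGE_SHIFT hardcoded to the common 4 KiB value (the kernel's is per-architecture):

#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, assumed for the example */

int main(void)
{
        unsigned long mb = 5;
        unsigned long pages = mb << (20 - PAGE_SHIFT); /* MiB -> pages */

        printf("%lu MiB = %lu pages\n", mb, pages);    /* 5 MiB = 1280 pages */
        printf("back: %lu MiB\n", pages >> (20 - PAGE_SHIFT));
        return 0;
}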
*/ - if (len > PAGE_CACHE_SIZE) { + if (len > PAGE_SIZE) { pr_err("cowardly refusing to write %lu bytes in a page\n", len); return NULL; } @@ -349,7 +349,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, for (i = 0; i < 2; i++) { tage = cfs_trace_get_tage(tcd, needed + known_size + 1); if (!tage) { - if (needed + known_size > PAGE_CACHE_SIZE) + if (needed + known_size > PAGE_SIZE) mask |= D_ERROR; cfs_trace_put_tcd(tcd); @@ -360,7 +360,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, string_buf = (char *)page_address(tage->page) + tage->used + known_size; - max_nob = PAGE_CACHE_SIZE - tage->used - known_size; + max_nob = PAGE_SIZE - tage->used - known_size; if (max_nob <= 0) { printk(KERN_EMERG "negative max_nob: %d\n", max_nob); @@ -424,7 +424,7 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata, __LASSERT(debug_buf == string_buf); tage->used += needed; - __LASSERT(tage->used <= PAGE_CACHE_SIZE); + __LASSERT(tage->used <= PAGE_SIZE); console: if ((mask & libcfs_printk) == 0) { @@ -835,7 +835,7 @@ EXPORT_SYMBOL(cfs_trace_copyout_string); int cfs_trace_allocate_string_buffer(char **str, int nob) { - if (nob > 2 * PAGE_CACHE_SIZE) /* string must be "sensible" */ + if (nob > 2 * PAGE_SIZE) /* string must be "sensible" */ return -EINVAL; *str = kmalloc(nob, GFP_KERNEL | __GFP_ZERO); @@ -951,7 +951,7 @@ int cfs_trace_set_debug_mb(int mb) } mb /= num_possible_cpus(); - pages = mb << (20 - PAGE_CACHE_SHIFT); + pages = mb << (20 - PAGE_SHIFT); cfs_tracefile_write_lock(); @@ -977,7 +977,7 @@ int cfs_trace_get_debug_mb(void) cfs_tracefile_read_unlock(); - return (total_pages >> (20 - PAGE_CACHE_SHIFT)) + 1; + return (total_pages >> (20 - PAGE_SHIFT)) + 1; } static int tracefiled(void *arg) diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.h b/drivers/staging/lustre/lnet/libcfs/tracefile.h index 4c77f9044dd3..ac84e7f4c859 100644 --- a/drivers/staging/lustre/lnet/libcfs/tracefile.h +++ b/drivers/staging/lustre/lnet/libcfs/tracefile.h @@ -87,7 +87,7 @@ void libcfs_unregister_panic_notifier(void); extern int libcfs_panic_in_progress; int cfs_trace_max_debug_mb(void); -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) +#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) #define TCD_STOCK_PAGES (TCD_MAX_PAGES) #define CFS_TRACEFILE_SIZE (500 << 20) @@ -96,7 +96,7 @@ int cfs_trace_max_debug_mb(void); /* * Private declare for tracefile */ -#define TCD_MAX_PAGES (5 << (20 - PAGE_CACHE_SHIFT)) +#define TCD_MAX_PAGES (5 << (20 - PAGE_SHIFT)) #define TCD_STOCK_PAGES (TCD_MAX_PAGES) #define CFS_TRACEFILE_SIZE (500 << 20) @@ -257,7 +257,7 @@ do { \ do { \ __LASSERT(tage); \ __LASSERT(tage->page); \ - __LASSERT(tage->used <= PAGE_CACHE_SIZE); \ + __LASSERT(tage->used <= PAGE_SIZE); \ __LASSERT(page_count(tage->page) > 0); \ } while (0) diff --git a/drivers/staging/lustre/lnet/lnet/lib-md.c b/drivers/staging/lustre/lnet/lnet/lib-md.c index c74514f99f90..75d31217bf92 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-md.c +++ b/drivers/staging/lustre/lnet/lnet/lib-md.c @@ -139,7 +139,7 @@ lnet_md_build(lnet_libmd_t *lmd, lnet_md_t *umd, int unlink) for (i = 0; i < (int)niov; i++) { /* We take the page pointer on trust */ if (lmd->md_iov.kiov[i].kiov_offset + - lmd->md_iov.kiov[i].kiov_len > PAGE_CACHE_SIZE) + lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE) return -EINVAL; /* invalid length */ total_length += lmd->md_iov.kiov[i].kiov_len; diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c index 
0009a8de77d5..f19aa9320e34 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-move.c +++ b/drivers/staging/lustre/lnet/lnet/lib-move.c @@ -549,12 +549,12 @@ lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst, if (len <= frag_len) { dst->kiov_len = len; LASSERT(dst->kiov_offset + dst->kiov_len - <= PAGE_CACHE_SIZE); + <= PAGE_SIZE); return niov; } dst->kiov_len = frag_len; - LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_CACHE_SIZE); + LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE); len -= frag_len; dst++; @@ -887,7 +887,7 @@ lnet_msg2bufpool(lnet_msg_t *msg) rbp = &the_lnet.ln_rtrpools[cpt][0]; LASSERT(msg->msg_len <= LNET_MTU); - while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_CACHE_SIZE) { + while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) { rbp++; LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]); } diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c index cc0c2753dd63..891fd59401d7 100644 --- a/drivers/staging/lustre/lnet/lnet/lib-socket.c +++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c @@ -166,9 +166,9 @@ lnet_ipif_enumerate(char ***namesp) nalloc = 16; /* first guess at max interfaces */ toobig = 0; for (;;) { - if (nalloc * sizeof(*ifr) > PAGE_CACHE_SIZE) { + if (nalloc * sizeof(*ifr) > PAGE_SIZE) { toobig = 1; - nalloc = PAGE_CACHE_SIZE / sizeof(*ifr); + nalloc = PAGE_SIZE / sizeof(*ifr); CWARN("Too many interfaces: only enumerating first %d\n", nalloc); } diff --git a/drivers/staging/lustre/lnet/lnet/router.c b/drivers/staging/lustre/lnet/lnet/router.c index 61459cf9d58f..b01dc424c514 100644 --- a/drivers/staging/lustre/lnet/lnet/router.c +++ b/drivers/staging/lustre/lnet/lnet/router.c @@ -27,8 +27,8 @@ #define LNET_NRB_SMALL_PAGES 1 #define LNET_NRB_LARGE_MIN 256 /* min value for each CPT */ #define LNET_NRB_LARGE (LNET_NRB_LARGE_MIN * 4) -#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_CACHE_SIZE - 1) >> \ - PAGE_CACHE_SHIFT) +#define LNET_NRB_LARGE_PAGES ((LNET_MTU + PAGE_SIZE - 1) >> \ + PAGE_SHIFT) static char *forwarding = ""; module_param(forwarding, charp, 0444); @@ -1338,7 +1338,7 @@ lnet_new_rtrbuf(lnet_rtrbufpool_t *rbp, int cpt) return NULL; } - rb->rb_kiov[i].kiov_len = PAGE_CACHE_SIZE; + rb->rb_kiov[i].kiov_len = PAGE_SIZE; rb->rb_kiov[i].kiov_offset = 0; rb->rb_kiov[i].kiov_page = page; } diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c index eebc92412061..dcb6e506f592 100644 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c @@ -90,7 +90,7 @@ brw_client_init(sfw_test_instance_t *tsi) * NB: this is not going to work for variable page size, * but we have to keep it for compatibility */ - len = npg * PAGE_CACHE_SIZE; + len = npg * PAGE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; @@ -104,7 +104,7 @@ brw_client_init(sfw_test_instance_t *tsi) opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; } if (npg > LNET_MAX_IOV || npg <= 0) @@ -167,13 +167,13 @@ brw_fill_page(struct page *pg, int pattern, __u64 magic) if (pattern == LST_BRW_CHECK_SIMPLE) { memcpy(addr, &magic, BRW_MSIZE); - addr += PAGE_CACHE_SIZE - BRW_MSIZE; + addr += PAGE_SIZE - BRW_MSIZE; memcpy(addr, &magic, BRW_MSIZE); return; } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) + for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) 
memcpy(addr + i * BRW_MSIZE, &magic, BRW_MSIZE); return; } @@ -198,7 +198,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) if (data != magic) goto bad_data; - addr += PAGE_CACHE_SIZE - BRW_MSIZE; + addr += PAGE_SIZE - BRW_MSIZE; data = *((__u64 *)addr); if (data != magic) goto bad_data; @@ -207,7 +207,7 @@ brw_check_page(struct page *pg, int pattern, __u64 magic) } if (pattern == LST_BRW_CHECK_FULL) { - for (i = 0; i < PAGE_CACHE_SIZE / BRW_MSIZE; i++) { + for (i = 0; i < PAGE_SIZE / BRW_MSIZE; i++) { data = *(((__u64 *)addr) + i); if (data != magic) goto bad_data; @@ -278,7 +278,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, opc = breq->blk_opc; flags = breq->blk_flags; npg = breq->blk_npg; - len = npg * PAGE_CACHE_SIZE; + len = npg * PAGE_SIZE; } else { test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1; @@ -292,7 +292,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu, opc = breq->blk_opc; flags = breq->blk_flags; len = breq->blk_len; - npg = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; } rc = sfw_create_test_rpc(tsu, dest, sn->sn_features, npg, len, &rpc); @@ -463,10 +463,10 @@ brw_server_handle(struct srpc_server_rpc *rpc) reply->brw_status = EINVAL; return 0; } - npg = reqst->brw_len >> PAGE_CACHE_SHIFT; + npg = reqst->brw_len >> PAGE_SHIFT; } else { - npg = (reqst->brw_len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + npg = (reqst->brw_len + PAGE_SIZE - 1) >> PAGE_SHIFT; } replymsg->msg_ses_feats = reqstmsg->msg_ses_feats; diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c index 5c7cb72eac9a..79ee6c0bf7c1 100644 --- a/drivers/staging/lustre/lnet/selftest/conctl.c +++ b/drivers/staging/lustre/lnet/selftest/conctl.c @@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args) if (args->lstio_tes_param && (args->lstio_tes_param_len <= 0 || args->lstio_tes_param_len > - PAGE_CACHE_SIZE - sizeof(lstcon_test_t))) + PAGE_SIZE - sizeof(lstcon_test_t))) return -EINVAL; LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1); @@ -819,7 +819,7 @@ lstcon_ioctl_entry(unsigned int cmd, struct libcfs_ioctl_hdr *hdr) opc = data->ioc_u32[0]; - if (data->ioc_plen1 > PAGE_CACHE_SIZE) + if (data->ioc_plen1 > PAGE_SIZE) return -EINVAL; LIBCFS_ALLOC(buf, data->ioc_plen1); diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c index bcd78888f9cc..35a227d0c657 100644 --- a/drivers/staging/lustre/lnet/selftest/conrpc.c +++ b/drivers/staging/lustre/lnet/selftest/conrpc.c @@ -786,8 +786,8 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req) test_bulk_req_t *brq = &req->tsr_u.bulk_v0; brq->blk_opc = param->blk_opc; - brq->blk_npg = (param->blk_size + PAGE_CACHE_SIZE - 1) / - PAGE_CACHE_SIZE; + brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) / + PAGE_SIZE; brq->blk_flags = param->blk_flags; return 0; @@ -822,7 +822,7 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, if (transop == LST_TRANS_TSBCLIADD) { npg = sfw_id_pages(test->tes_span); nob = !(feats & LST_FEAT_BULK_LEN) ? - npg * PAGE_CACHE_SIZE : + npg * PAGE_SIZE : sizeof(lnet_process_id_packed_t) * test->tes_span; } @@ -851,8 +851,8 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats, LASSERT(nob > 0); len = !(feats & LST_FEAT_BULK_LEN) ? 
- PAGE_CACHE_SIZE : - min_t(int, nob, PAGE_CACHE_SIZE); + PAGE_SIZE : + min_t(int, nob, PAGE_SIZE); nob -= len; bulk->bk_iovs[i].kiov_offset = 0; diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c index 926c3970c498..e2c532399366 100644 --- a/drivers/staging/lustre/lnet/selftest/framework.c +++ b/drivers/staging/lustre/lnet/selftest/framework.c @@ -1161,7 +1161,7 @@ sfw_add_test(struct srpc_server_rpc *rpc) int len; if (!(sn->sn_features & LST_FEAT_BULK_LEN)) { - len = npg * PAGE_CACHE_SIZE; + len = npg * PAGE_SIZE; } else { len = sizeof(lnet_process_id_packed_t) * diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c index 69be7d6f48fa..7d7748d96332 100644 --- a/drivers/staging/lustre/lnet/selftest/rpc.c +++ b/drivers/staging/lustre/lnet/selftest/rpc.c @@ -90,7 +90,7 @@ void srpc_set_counters(const srpc_counters_t *cnt) static int srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob) { - nob = min_t(int, nob, PAGE_CACHE_SIZE); + nob = min_t(int, nob, PAGE_SIZE); LASSERT(nob > 0); LASSERT(i >= 0 && i < bk->bk_niov); diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h index 288522d4d7b9..e689ca1846e1 100644 --- a/drivers/staging/lustre/lnet/selftest/selftest.h +++ b/drivers/staging/lustre/lnet/selftest/selftest.h @@ -390,10 +390,10 @@ typedef struct sfw_test_instance { } tsi_u; } sfw_test_instance_t; -/* XXX: trailing (PAGE_CACHE_SIZE % sizeof(lnet_process_id_t)) bytes at - * the end of pages are not used */ +/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of + * pages are not used */ #define SFW_MAX_CONCUR LST_MAX_CONCUR -#define SFW_ID_PER_PAGE (PAGE_CACHE_SIZE / sizeof(lnet_process_id_packed_t)) +#define SFW_ID_PER_PAGE (PAGE_SIZE / sizeof(lnet_process_id_packed_t)) #define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE) #define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE) diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h index 33e0b99e1fb4..c6c7f54637fb 100644 --- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h +++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h @@ -52,7 +52,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page) return; if (PagePrivate(page)) - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); cancel_dirty_page(page); ClearPageMappedToDisk(page); diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h index b5088b13a305..242bb1ef6245 100644 --- a/drivers/staging/lustre/lustre/include/lu_object.h +++ b/drivers/staging/lustre/lustre/include/lu_object.h @@ -1118,7 +1118,7 @@ struct lu_context_key { { \ type *value; \ \ - CLASSERT(PAGE_CACHE_SIZE >= sizeof (*value)); \ + CLASSERT(PAGE_SIZE >= sizeof (*value)); \ \ value = kzalloc(sizeof(*value), GFP_NOFS); \ if (!value) \ diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h index da8bc6eadd13..5aae1d06a5fa 100644 --- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h +++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h @@ -1022,16 +1022,16 @@ static inline int lu_dirent_size(struct lu_dirent *ent) * 
MDS_READPAGE page size * * This is the directory page size packed in MDS_READPAGE RPC. - * It's different than PAGE_CACHE_SIZE because the client needs to + * It's different than PAGE_SIZE because the client needs to * access the struct lu_dirpage header packed at the beginning of * the "page" and without this there isn't any way to know find the - * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ. + * lu_dirpage header is if client and server PAGE_SIZE differ. */ #define LU_PAGE_SHIFT 12 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT) #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1)) -#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT)) +#define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT)) /** @} lu_dir */ diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h index df94f9f3bef2..af77eb359c43 100644 --- a/drivers/staging/lustre/lustre/include/lustre_mdc.h +++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h @@ -155,12 +155,12 @@ static inline void mdc_update_max_ea_from_body(struct obd_export *exp, if (cli->cl_max_mds_easize < body->max_mdsize) { cli->cl_max_mds_easize = body->max_mdsize; cli->cl_default_mds_easize = - min_t(__u32, body->max_mdsize, PAGE_CACHE_SIZE); + min_t(__u32, body->max_mdsize, PAGE_SIZE); } if (cli->cl_max_mds_cookiesize < body->max_cookiesize) { cli->cl_max_mds_cookiesize = body->max_cookiesize; cli->cl_default_mds_cookiesize = - min_t(__u32, body->max_cookiesize, PAGE_CACHE_SIZE); + min_t(__u32, body->max_cookiesize, PAGE_SIZE); } } } diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h index 4fa1a18b7d15..69586a522eb7 100644 --- a/drivers/staging/lustre/lustre/include/lustre_net.h +++ b/drivers/staging/lustre/lustre/include/lustre_net.h @@ -99,21 +99,21 @@ */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) #define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) -#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) #define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) #define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE -#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) #define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! 
*/ # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) # error "PTLRPC_MAX_BRW_PAGES isn't a power of two" # endif -# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE)) -# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE" +# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_SIZE)) +# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_SIZE" # endif # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT) # error "PTLRPC_MAX_BRW_SIZE too big" diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h index 4a0f2e8b19f6..4264d97650ec 100644 --- a/drivers/staging/lustre/lustre/include/obd.h +++ b/drivers/staging/lustre/lustre/include/obd.h @@ -272,7 +272,7 @@ struct client_obd { int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine - * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) + * the extent size. A chunk is max(PAGE_SIZE, OST block size) */ int cl_chunkbits; int cl_chunk; @@ -1318,7 +1318,7 @@ bad_format: static inline int cli_brw_size(struct obd_device *obd) { - return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT; } #endif /* __OBD_H */ diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h index 225262fa67b6..f8ee3a3254ba 100644 --- a/drivers/staging/lustre/lustre/include/obd_support.h +++ b/drivers/staging/lustre/lustre/include/obd_support.h @@ -500,7 +500,7 @@ extern char obd_jobid_var[]; #ifdef POISON_BULK #define POISON_PAGE(page, val) do { \ - memset(kmap(page), val, PAGE_CACHE_SIZE); \ + memset(kmap(page), val, PAGE_SIZE); \ kunmap(page); \ } while (0) #else diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c index aced41ab93a1..96141d17d07f 100644 --- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c +++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c @@ -758,9 +758,9 @@ int ccc_prep_size(const struct lu_env *env, struct cl_object *obj, * --bug 17336 */ loff_t size = cl_isize_read(inode); - loff_t cur_index = start >> PAGE_CACHE_SHIFT; + loff_t cur_index = start >> PAGE_SHIFT; loff_t size_index = (size - 1) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((size == 0 && cur_index != 0) || size_index < cur_index) diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c index b586d5a88d00..7dd7df59aa1f 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c @@ -307,8 +307,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) cli->cl_avail_grant = 0; /* FIXME: Should limit this for the sum of all cl_dirty_max. */ cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; - if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) - cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); + if (cli->cl_dirty_max >> PAGE_SHIFT > totalram_pages / 8) + cli->cl_dirty_max = totalram_pages << (PAGE_SHIFT - 3); INIT_LIST_HEAD(&cli->cl_cache_waiters); INIT_LIST_HEAD(&cli->cl_loi_ready_list); INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); @@ -353,15 +353,15 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) * In the future this should likely be increased. 
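client_obd_setup() sizes its defaults off installed RAM: cl_dirty_max (above) is clamped to an eighth of memory, and the RPCs-in-flight tiers (just below) step up with total RAM, using totalram_pages >> (20 - PAGE_SHIFT) as a pages-to-MiB conversion. The tiering, restated standalone; PAGE_SHIFT and the 8-RPC ceiling named OSC_MAX_RIF_DEFAULT are assumed values for the sketch:

#include <stdio.h>

#define PAGE_SHIFT 12           /* assumed 4 KiB pages */
#define OSC_MAX_RIF_DEFAULT 8   /* assumed ceiling */

static int rpcs_in_flight(unsigned long totalram_pages)
{
        unsigned long mb = totalram_pages >> (20 - PAGE_SHIFT);

        if (mb <= 128) return 2;
        if (mb <= 256) return 3;
        if (mb <= 512) return 4;
        return OSC_MAX_RIF_DEFAULT;
}

int main(void)
{
        printf("%d\n", rpcs_in_flight(262144)); /* 1 GiB of 4 KiB pages -> 8 */
        return 0;
}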
LU-1431 */ cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES, - LNET_MTU >> PAGE_CACHE_SHIFT); + LNET_MTU >> PAGE_SHIFT); if (!strcmp(name, LUSTRE_MDC_NAME)) { cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) { cli->cl_max_rpcs_in_flight = 2; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 256 /* MB */) { cli->cl_max_rpcs_in_flight = 3; - } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) { + } else if (totalram_pages >> (20 - PAGE_SHIFT) <= 512 /* MB */) { cli->cl_max_rpcs_in_flight = 4; } else { cli->cl_max_rpcs_in_flight = OSC_MAX_RIF_DEFAULT; diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c index 3e937b050203..b913ba9cf97c 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c @@ -107,7 +107,7 @@ /* * 50 ldlm locks for 1MB of RAM. */ -#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_CACHE_SHIFT)) * 50) +#define LDLM_POOL_HOST_L ((NUM_CACHEPAGES >> (20 - PAGE_SHIFT)) * 50) /* * Maximal possible grant step plan in %. diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c index c7904a96f9af..74e193e52cd6 100644 --- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c +++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c @@ -546,7 +546,7 @@ static inline int ldlm_req_handles_avail(int req_size, int off) { int avail; - avail = min_t(int, LDLM_MAXREQSIZE, PAGE_CACHE_SIZE - 512) - req_size; + avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size; if (likely(avail >= 0)) avail /= (int)sizeof(struct lustre_handle); else diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c index 4e0a3e583330..e4c82883e580 100644 --- a/drivers/staging/lustre/lustre/llite/dir.c +++ b/drivers/staging/lustre/lustre/llite/dir.c @@ -134,9 +134,8 @@ * a header lu_dirpage which describes the start/end hash, and whether this * page is empty (contains no dir entry) or hash collide with next page. * After client receives reply, several pages will be integrated into dir page - * in PAGE_CACHE_SIZE (if PAGE_CACHE_SIZE greater than LU_PAGE_SIZE), and the - * lu_dirpage for this integrated page will be adjusted. See - * lmv_adjust_dirpages(). + * in PAGE_SIZE (if PAGE_SIZE greater than LU_PAGE_SIZE), and the lu_dirpage + * for this integrated page will be adjusted. See lmv_adjust_dirpages(). 
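The LU_PAGE constants that comment leans on fix the wire unit at 4 KiB (LU_PAGE_SHIFT = 12) regardless of the client's PAGE_SIZE, so a client with larger VM pages knows exactly where each packed lu_dirpage header sits; LU_PAGE_COUNT is how many wire pages fit in one VM page. With 64 KiB pages (PAGE_SHIFT = 16, as on some ppc64 and arm64 configs), LU_PAGE_COUNT = 1 << (16 - 12) = 16, and lmv_adjust_dirpages() is what stitches those sixteen headers into a single directory page.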
* */ @@ -153,7 +152,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) struct page **page_pool; struct page *page; struct lu_dirpage *dp; - int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_CACHE_SHIFT; + int max_pages = ll_i2sbi(inode)->ll_md_brw_size >> PAGE_SHIFT; int nrdpgs = 0; /* number of pages read actually */ int npages; int i; @@ -193,8 +192,8 @@ static int ll_dir_filler(void *_hash, struct page *page0) if (body->valid & OBD_MD_FLSIZE) cl_isize_write(inode, body->size); - nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_CACHE_SIZE-1) - >> PAGE_CACHE_SHIFT; + nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1) + >> PAGE_SHIFT; SetPageUptodate(page0); } unlock_page(page0); @@ -209,7 +208,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) page = page_pool[i]; if (rc < 0 || i >= nrdpgs) { - page_cache_release(page); + put_page(page); continue; } @@ -230,7 +229,7 @@ static int ll_dir_filler(void *_hash, struct page *page0) CDEBUG(D_VFSTRACE, "page %lu add to page cache failed: %d\n", offset, ret); } - page_cache_release(page); + put_page(page); } if (page_pool != &page0) @@ -247,7 +246,7 @@ void ll_release_page(struct page *page, int remove) truncate_complete_page(page->mapping, page); unlock_page(page); } - page_cache_release(page); + put_page(page); } /* @@ -273,7 +272,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, if (found > 0 && !radix_tree_exceptional_entry(page)) { struct lu_dirpage *dp; - page_cache_get(page); + get_page(page); spin_unlock_irq(&mapping->tree_lock); /* * In contrast to find_lock_page() we are sure that directory @@ -313,7 +312,7 @@ static struct page *ll_dir_page_locate(struct inode *dir, __u64 *hash, page = NULL; } } else { - page_cache_release(page); + put_page(page); page = ERR_PTR(-EIO); } @@ -1507,7 +1506,7 @@ skip_lmm: st.st_gid = body->gid; st.st_rdev = body->rdev; st.st_size = body->size; - st.st_blksize = PAGE_CACHE_SIZE; + st.st_blksize = PAGE_SIZE; st.st_blocks = body->blocks; st.st_atime = body->atime; st.st_mtime = body->mtime; diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index 3e1572cb457b..e3c0f1dd4d31 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -310,10 +310,10 @@ static inline struct ll_inode_info *ll_i2info(struct inode *inode) /* default to about 40meg of readahead on a given system. That much tied * up in 512k readahead requests serviced at 40ms each is about 1GB/s. 
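The arithmetic in that readahead comment checks out: 40 MiB of readahead is 80 concurrent 512 KiB requests, and at 40 ms apiece the window turns over 25 times per second, i.e. 40 MiB x 25 = 1000 MiB/s, which is the quoted ~1 GB/s.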
*/ -#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_CACHE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_MAX (40UL << (20 - PAGE_SHIFT)) /* default to read-ahead full files smaller than 2MB on the second read */ -#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_CACHE_SHIFT)) +#define SBI_DEFAULT_READAHEAD_WHOLE_MAX (2UL << (20 - PAGE_SHIFT)) enum ra_stat { RA_STAT_HIT = 0, @@ -975,13 +975,13 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr, static inline void ll_invalidate_page(struct page *vmpage) { struct address_space *mapping = vmpage->mapping; - loff_t offset = vmpage->index << PAGE_CACHE_SHIFT; + loff_t offset = vmpage->index << PAGE_SHIFT; LASSERT(PageLocked(vmpage)); if (!mapping) return; - ll_teardown_mmaps(mapping, offset, offset + PAGE_CACHE_SIZE); + ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE); truncate_complete_page(mapping, vmpage); } diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index 6d6bb33e3655..b57a992688a8 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c @@ -85,7 +85,7 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb) si_meminfo(&si); pages = si.totalram - si.totalhigh; - if (pages >> (20 - PAGE_CACHE_SHIFT) < 512) + if (pages >> (20 - PAGE_SHIFT) < 512) lru_page_max = pages / 2; else lru_page_max = (pages / 4) * 3; @@ -272,12 +272,12 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, valid != CLIENT_CONNECT_MDT_REQD) { char *buf; - buf = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto out_md_fid; } - obd_connect_flags2str(buf, PAGE_CACHE_SIZE, + obd_connect_flags2str(buf, PAGE_SIZE, valid ^ CLIENT_CONNECT_MDT_REQD, ","); LCONSOLE_ERROR_MSG(0x170, "Server %s does not support feature(s) needed for correct operation of this client (%s). 
Please upgrade server or downgrade client.\n", sbi->ll_md_exp->exp_obd->obd_name, buf); @@ -335,7 +335,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, if (data->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) sbi->ll_md_brw_size = data->ocd_brw_size; else - sbi->ll_md_brw_size = PAGE_CACHE_SIZE; + sbi->ll_md_brw_size = PAGE_SIZE; if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) { LCONSOLE_INFO("Layout lock feature supported.\n"); diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c index 69445a9f2011..5b484e62ffd0 100644 --- a/drivers/staging/lustre/lustre/llite/llite_mmap.c +++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c @@ -58,7 +58,7 @@ void policy_from_vma(ldlm_policy_data_t *policy, size_t count) { policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) + - (vma->vm_pgoff << PAGE_CACHE_SHIFT); + (vma->vm_pgoff << PAGE_SHIFT); policy->l_extent.end = (policy->l_extent.start + count - 1) | ~CFS_PAGE_MASK; } @@ -321,7 +321,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf) vmpage = vio->u.fault.ft_vmpage; if (result != 0 && vmpage) { - page_cache_release(vmpage); + put_page(vmpage); vmf->page = NULL; } } @@ -360,7 +360,7 @@ restart: lock_page(vmpage); if (unlikely(!vmpage->mapping)) { /* unlucky */ unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); vmf->page = NULL; if (!printed && ++count > 16) { @@ -457,7 +457,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last) LASSERTF(last > first, "last %llu first %llu\n", last, first); if (mapping_mapped(mapping)) { rc = 0; - unmap_mapping_range(mapping, first + PAGE_CACHE_SIZE - 1, + unmap_mapping_range(mapping, first + PAGE_SIZE - 1, last - first + 1, 0); } diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c index b725fc16cf49..f169c0db63b4 100644 --- a/drivers/staging/lustre/lustre/llite/lloop.c +++ b/drivers/staging/lustre/lustre/llite/lloop.c @@ -218,7 +218,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset; bio_for_each_segment(bvec, bio, iter) { BUG_ON(bvec.bv_offset != 0); - BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE); + BUG_ON(bvec.bv_len != PAGE_SIZE); pages[page_count] = bvec.bv_page; offsets[page_count] = offset; @@ -232,7 +232,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head) (rw == WRITE) ? 
LPROC_LL_BRW_WRITE : LPROC_LL_BRW_READ, page_count); - pvec->ldp_size = page_count << PAGE_CACHE_SHIFT; + pvec->ldp_size = page_count << PAGE_SHIFT; pvec->ldp_nr = page_count; /* FIXME: in ll_direct_rw_pages, it has to allocate many cl_page{}s to @@ -507,7 +507,7 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); - lo->lo_blocksize = PAGE_CACHE_SIZE; + lo->lo_blocksize = PAGE_SIZE; lo->lo_device = bdev; lo->lo_flags = lo_flags; lo->lo_backing_file = file; @@ -525,11 +525,11 @@ static int loop_set_fd(struct lloop_device *lo, struct file *unused, lo->lo_queue->queuedata = lo; /* queue parameters */ - CLASSERT(PAGE_CACHE_SIZE < (1 << (sizeof(unsigned short) * 8))); + CLASSERT(PAGE_SIZE < (1 << (sizeof(unsigned short) * 8))); blk_queue_logical_block_size(lo->lo_queue, - (unsigned short)PAGE_CACHE_SIZE); + (unsigned short)PAGE_SIZE); blk_queue_max_hw_sectors(lo->lo_queue, - LLOOP_MAX_SEGMENTS << (PAGE_CACHE_SHIFT - 9)); + LLOOP_MAX_SEGMENTS << (PAGE_SHIFT - 9)); blk_queue_max_segments(lo->lo_queue, LLOOP_MAX_SEGMENTS); set_capacity(disks[lo->lo_number], size); diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c index 45941a6600fe..27ab1261400e 100644 --- a/drivers/staging/lustre/lustre/llite/lproc_llite.c +++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c @@ -233,7 +233,7 @@ static ssize_t max_read_ahead_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -251,12 +251,12 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj, if (rc) return rc; - pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ + pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number > totalram_pages / 2) { CERROR("can't set file readahead more than %lu MB\n", - totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/ + totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/ return -ERANGE; } @@ -281,7 +281,7 @@ static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_pages_per_file; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -326,7 +326,7 @@ static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj, pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages; spin_unlock(&sbi->ll_lock); - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult); } @@ -349,7 +349,7 @@ static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj, */ if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) { CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n", - sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_CACHE_SHIFT)); + sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT)); return -ERANGE; } @@ -366,7 +366,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v) struct super_block *sb = m->private; struct ll_sb_info *sbi = ll_s2sbi(sb); struct cl_client_cache *cache = &sbi->ll_cache; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; int max_cached_mb; int unused_mb; @@ -405,7 +405,7 @@ static ssize_t 
ll_max_cached_mb_seq_write(struct file *file, return -EFAULT; kernbuf[count] = 0; - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) - kernbuf; rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); @@ -415,7 +415,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, if (pages_number < 0 || pages_number > totalram_pages) { CERROR("%s: can't set max cache more than %lu MB\n", ll_get_fsname(sb, NULL, 0), - totalram_pages >> (20 - PAGE_CACHE_SHIFT)); + totalram_pages >> (20 - PAGE_SHIFT)); return -ERANGE; } diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c index 34614acf3f8e..edab6c5b7e50 100644 --- a/drivers/staging/lustre/lustre/llite/rw.c +++ b/drivers/staging/lustre/lustre/llite/rw.c @@ -146,10 +146,10 @@ static struct ll_cl_context *ll_cl_init(struct file *file, */ io->ci_lockreq = CILR_NEVER; - pos = vmpage->index << PAGE_CACHE_SHIFT; + pos = vmpage->index << PAGE_SHIFT; /* Create a temp IO to serve write. */ - result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_CACHE_SIZE); + result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE); if (result == 0) { cio->cui_fd = LUSTRE_FPRIVATE(file); cio->cui_iter = NULL; @@ -498,7 +498,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, } if (rc != 1) unlock_page(vmpage); - page_cache_release(vmpage); + put_page(vmpage); } else { which = RA_STAT_FAILED_GRAB_PAGE; msg = "g_c_p_n failed"; @@ -521,13 +521,13 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io, * striped over, rather than having a constant value for all files here. */ -/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_CACHE_SHIFT)). +/* RAS_INCREASE_STEP should be (1UL << (inode->i_blkbits - PAGE_SHIFT)). * Temporarily set RAS_INCREASE_STEP to 1MB. After 4MB RPC is enabled * by default, this should be adjusted corresponding with max_read_ahead_mb * and max_read_ahead_per_file_mb otherwise the readahead budget can be used * up quickly which will affect read performance significantly. 
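 * (With 4KB pages, the 1MB step works out to 256 pages per window increase.)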
See LU-2816 */ -#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_CACHE_SHIFT) +#define RAS_INCREASE_STEP(inode) (ONE_MB_BRW_SIZE >> PAGE_SHIFT) static inline int stride_io_mode(struct ll_readahead_state *ras) { @@ -739,7 +739,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, end = rpc_boundary; /* Truncate RA window to end of file */ - end = min(end, (unsigned long)((kms - 1) >> PAGE_CACHE_SHIFT)); + end = min(end, (unsigned long)((kms - 1) >> PAGE_SHIFT)); ras->ras_next_readahead = max(end, end + 1); RAS_CDEBUG(ras); @@ -776,7 +776,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io, if (reserved != 0) ll_ra_count_put(ll_i2sbi(inode), reserved); - if (ra_end == end + 1 && ra_end == (kms >> PAGE_CACHE_SHIFT)) + if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT)) ll_ra_stats_inc(mapping, RA_STAT_EOF); /* if we didn't get to the end of the region we reserved from @@ -985,8 +985,8 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode, if (ras->ras_requests == 2 && !ras->ras_request_index) { __u64 kms_pages; - kms_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + kms_pages = (i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT; CDEBUG(D_READA, "kmsp %llu mwp %lu mp %lu\n", kms_pages, ra->ra_max_read_ahead_whole_pages, ra->ra_max_pages_per_file); @@ -1173,7 +1173,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc) * PageWriteback or clean the page. */ result = cl_sync_file_range(inode, offset, - offset + PAGE_CACHE_SIZE - 1, + offset + PAGE_SIZE - 1, CL_FSYNC_LOCAL, 1); if (result > 0) { /* actually we may have written more than one page. @@ -1211,7 +1211,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) int ignore_layout = 0; if (wbc->range_cyclic) { - start = mapping->writeback_index << PAGE_CACHE_SHIFT; + start = mapping->writeback_index << PAGE_SHIFT; end = OBD_OBJECT_EOF; } else { start = wbc->range_start; @@ -1241,7 +1241,7 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc) if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) { if (end == OBD_OBJECT_EOF) end = i_size_read(inode); - mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1; + mapping->writeback_index = (end >> PAGE_SHIFT) + 1; } return result; } diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c index 7a5db67bc680..69aa15e8e3ef 100644 --- a/drivers/staging/lustre/lustre/llite/rw26.c +++ b/drivers/staging/lustre/lustre/llite/rw26.c @@ -87,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset, * below because they are run with page locked and all our io is * happening with locked page too */ - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0 && length == PAGE_SIZE) { env = cl_env_get(&refcheck); if (!IS_ERR(env)) { inode = vmpage->mapping->host; @@ -193,8 +193,8 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr, return -EFBIG; } - *max_pages = (user_addr + size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - *max_pages -= user_addr >> PAGE_CACHE_SHIFT; + *max_pages = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT; + *max_pages -= user_addr >> PAGE_SHIFT; *pages = libcfs_kvzalloc(*max_pages * sizeof(**pages), GFP_NOFS); if (*pages) { @@ -217,7 +217,7 @@ static void ll_free_user_pages(struct page **pages, int npages, int do_dirty) for (i = 0; i < npages; i++) { if (do_dirty) set_page_dirty_lock(pages[i]); - page_cache_release(pages[i]); + 
put_page(pages[i]); } kvfree(pages); } @@ -357,7 +357,7 @@ static ssize_t ll_direct_IO_26_seg(const struct lu_env *env, struct cl_io *io, * up to 22MB for 128kB kmalloc and up to 682MB for 4MB kmalloc. */ #define MAX_DIO_SIZE ((KMALLOC_MAX_SIZE / sizeof(struct brw_page) * \ - PAGE_CACHE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) + PAGE_SIZE) & ~(DT_MAX_BRW_SIZE - 1)) static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, loff_t file_offset) { @@ -382,8 +382,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n", inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE, - file_offset, file_offset, count >> PAGE_CACHE_SHIFT, - MAX_DIO_SIZE >> PAGE_CACHE_SHIFT); + file_offset, file_offset, count >> PAGE_SHIFT, + MAX_DIO_SIZE >> PAGE_SHIFT); /* Check that all user buffers are aligned as well */ if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK) @@ -432,8 +432,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter, * page worth of page pointers = 4MB on i386. */ if (result == -ENOMEM && - size > (PAGE_CACHE_SIZE / sizeof(*pages)) * - PAGE_CACHE_SIZE) { + size > (PAGE_SIZE / sizeof(*pages)) * + PAGE_SIZE) { size = ((((size / 2) - 1) | ~CFS_PAGE_MASK) + 1) & CFS_PAGE_MASK; @@ -474,10 +474,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int rc; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -488,7 +488,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping, rc = ll_prepare_write(file, page, from, from + len); if (rc) { unlock_page(page); - page_cache_release(page); + put_page(page); } return rc; } @@ -497,12 +497,12 @@ static int ll_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int rc; rc = ll_commit_write(file, page, from, from + copied); unlock_page(page); - page_cache_release(page); + put_page(page); return rc ?: copied; } diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c index fb0c26ee7ff3..85a835976174 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_io.c +++ b/drivers/staging/lustre/lustre/llite/vvp_io.c @@ -512,9 +512,9 @@ static int vvp_io_read_start(const struct lu_env *env, vio->cui_ra_window_set = 1; bead->lrr_start = cl_index(obj, pos); /* - * XXX: explicit PAGE_CACHE_SIZE + * XXX: explicit PAGE_SIZE */ - bead->lrr_count = cl_index(obj, tot + PAGE_CACHE_SIZE - 1); + bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1); ll_ra_read_in(file, bead); } @@ -959,7 +959,7 @@ static int vvp_io_prepare_write(const struct lu_env *env, * We're completely overwriting an existing page, so _don't_ * set it up to date until commit_write */ - if (from == 0 && to == PAGE_CACHE_SIZE) { + if (from == 0 && to == PAGE_SIZE) { CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n"); POISON_PAGE(page, 0x11); } else @@ -1022,7 +1022,7 @@ static int vvp_io_commit_write(const struct lu_env *env, set_page_dirty(vmpage); vvp_write_pending(cl2ccc(obj), cp); } else if (result == 
-EDQUOT) { - pgoff_t last_index = i_size_read(inode) >> PAGE_CACHE_SHIFT; + pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT; bool need_clip = true; /* @@ -1040,7 +1040,7 @@ static int vvp_io_commit_write(const struct lu_env *env, * being. */ if (last_index > pg->cp_index) { - to = PAGE_CACHE_SIZE; + to = PAGE_SIZE; need_clip = false; } else if (last_index == pg->cp_index) { int size_to = i_size_read(inode) & ~CFS_PAGE_MASK; diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c index 850bae734075..33ca3eb34965 100644 --- a/drivers/staging/lustre/lustre/llite/vvp_page.c +++ b/drivers/staging/lustre/lustre/llite/vvp_page.c @@ -57,7 +57,7 @@ static void vvp_page_fini_common(struct ccc_page *cp) struct page *vmpage = cp->cpg_page; LASSERT(vmpage); - page_cache_release(vmpage); + put_page(vmpage); } static void vvp_page_fini(const struct lu_env *env, @@ -164,12 +164,12 @@ static int vvp_page_unmap(const struct lu_env *env, LASSERT(vmpage); LASSERT(PageLocked(vmpage)); - offset = vmpage->index << PAGE_CACHE_SHIFT; + offset = vmpage->index << PAGE_SHIFT; /* * XXX is it safe to call this with the page lock held? */ - ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_CACHE_SIZE); + ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE); return 0; } @@ -537,7 +537,7 @@ int vvp_page_init(const struct lu_env *env, struct cl_object *obj, CLOBINVRNT(env, obj, ccc_object_invariant(obj)); cpg->cpg_page = vmpage; - page_cache_get(vmpage); + get_page(vmpage); INIT_LIST_HEAD(&cpg->cpg_pending_linkage); if (page->cp_type == CPT_CACHEABLE) { diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c index 0f776cf8a5aa..9abb7c2b9231 100644 --- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c +++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c @@ -2017,7 +2017,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * |s|e|f|p|ent| 0 | ... | 0 | * '----------------- -----' * - * However, on hosts where the native VM page size (PAGE_CACHE_SIZE) is + * However, on hosts where the native VM page size (PAGE_SIZE) is * larger than LU_PAGE_SIZE, a single host page may contain multiple * lu_dirpages. After reading the lu_dirpages from the MDS, the * ldp_hash_end of the first lu_dirpage refers to the one immediately @@ -2048,7 +2048,7 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span * to the first entry of the next lu_dirpage. 
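 * (Concretely, a 64KB host page holds sixteen 4KB lu_dirpages, so the fifteen interior lu_dirpage headers in each host page are stitched over as described.)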
*/ -#if PAGE_CACHE_SIZE > LU_PAGE_SIZE +#if PAGE_SIZE > LU_PAGE_SIZE static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) { int i; @@ -2101,7 +2101,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs) } #else #define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0) -#endif /* PAGE_CACHE_SIZE > LU_PAGE_SIZE */ +#endif /* PAGE_SIZE > LU_PAGE_SIZE */ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct page **pages, struct ptlrpc_request **request) @@ -2110,7 +2110,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, struct lmv_obd *lmv = &obd->u.lmv; __u64 offset = op_data->op_offset; int rc; - int ncfspgs; /* pages read in PAGE_CACHE_SIZE */ + int ncfspgs; /* pages read in PAGE_SIZE */ int nlupgs; /* pages read in LU_PAGE_SIZE */ struct lmv_tgt_desc *tgt; @@ -2129,8 +2129,8 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, if (rc != 0) return rc; - ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1) - >> PAGE_CACHE_SHIFT; + ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1) + >> PAGE_SHIFT; nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); LASSERT(ncfspgs > 0 && ncfspgs <= op_data->op_npages); diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c index 55dd8ef9525b..b91d3ff18b02 100644 --- a/drivers/staging/lustre/lustre/mdc/mdc_request.c +++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c @@ -1002,10 +1002,10 @@ restart_bulk: /* NB req now owns desc and will free it when it gets freed */ for (i = 0; i < op_data->op_npages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); mdc_readdir_pack(req, op_data->op_offset, - PAGE_CACHE_SIZE * op_data->op_npages, + PAGE_SIZE * op_data->op_npages, &op_data->op_fid1); ptlrpc_request_set_replen(req); @@ -1037,7 +1037,7 @@ restart_bulk: if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { CERROR("Unexpected # bytes transferred: %d (%ld expected)\n", req->rq_bulk->bd_nob_transferred, - PAGE_CACHE_SIZE * op_data->op_npages); + PAGE_SIZE * op_data->op_npages); ptlrpc_req_finished(req); return -EPROTO; } diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c index b7dc87248032..3924b095bfb0 100644 --- a/drivers/staging/lustre/lustre/mgc/mgc_request.c +++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c @@ -1113,7 +1113,7 @@ static int mgc_import_event(struct obd_device *obd, } enum { - CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_CACHE_SHIFT), + CONFIG_READ_NRPAGES_INIT = 1 << (20 - PAGE_SHIFT), CONFIG_READ_NRPAGES = 4 }; @@ -1137,19 +1137,19 @@ static int mgc_apply_recover_logs(struct obd_device *mgc, LASSERT(cfg->cfg_instance); LASSERT(cfg->cfg_sb == cfg->cfg_instance); - inst = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + inst = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!inst) return -ENOMEM; - pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance); - if (pos >= PAGE_CACHE_SIZE) { + pos = snprintf(inst, PAGE_SIZE, "%p", cfg->cfg_instance); + if (pos >= PAGE_SIZE) { kfree(inst); return -E2BIG; } ++pos; buf = inst + pos; - bufsz = PAGE_CACHE_SIZE - pos; + bufsz = PAGE_SIZE - pos; while (datalen > 0) { int entry_len = sizeof(*entry); @@ -1181,7 +1181,7 @@ static int mgc_apply_recover_logs(struct 
obd_device *mgc, /* Keep this swab for normal mixed endian handling. LU-1644 */ if (mne_swab) lustre_swab_mgs_nidtbl_entry(entry); - if (entry->mne_length > PAGE_CACHE_SIZE) { + if (entry->mne_length > PAGE_SIZE) { CERROR("MNE too large (%u)\n", entry->mne_length); break; } @@ -1371,7 +1371,7 @@ again: } body->mcb_offset = cfg->cfg_last_idx + 1; body->mcb_type = cld->cld_type; - body->mcb_bits = PAGE_CACHE_SHIFT; + body->mcb_bits = PAGE_SHIFT; body->mcb_units = nrpages; /* allocate bulk transfer descriptor */ @@ -1383,7 +1383,7 @@ again: } for (i = 0; i < nrpages; i++) - ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_CACHE_SIZE); + ptlrpc_prep_bulk_page_pin(desc, pages[i], 0, PAGE_SIZE); ptlrpc_request_set_replen(req); rc = ptlrpc_queue_wait(req); @@ -1411,7 +1411,7 @@ again: goto out; } - if (ealen > nrpages << PAGE_CACHE_SHIFT) { + if (ealen > nrpages << PAGE_SHIFT) { rc = -EINVAL; goto out; } @@ -1439,7 +1439,7 @@ again: ptr = kmap(pages[i]); rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr, - min_t(int, ealen, PAGE_CACHE_SIZE), + min_t(int, ealen, PAGE_SIZE), mne_swab); kunmap(pages[i]); if (rc2 < 0) { @@ -1448,7 +1448,7 @@ again: break; } - ealen -= PAGE_CACHE_SIZE; + ealen -= PAGE_SIZE; } out: diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c index 231a2f26c693..394580016638 100644 --- a/drivers/staging/lustre/lustre/obdclass/cl_page.c +++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c @@ -1477,7 +1477,7 @@ loff_t cl_offset(const struct cl_object *obj, pgoff_t idx) /* * XXX for now. */ - return (loff_t)idx << PAGE_CACHE_SHIFT; + return (loff_t)idx << PAGE_SHIFT; } EXPORT_SYMBOL(cl_offset); @@ -1489,13 +1489,13 @@ pgoff_t cl_index(const struct cl_object *obj, loff_t offset) /* * XXX for now. */ - return offset >> PAGE_CACHE_SHIFT; + return offset >> PAGE_SHIFT; } EXPORT_SYMBOL(cl_index); int cl_page_size(const struct cl_object *obj) { - return 1 << PAGE_CACHE_SHIFT; + return 1 << PAGE_SHIFT; } EXPORT_SYMBOL(cl_page_size); diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c index 1a938e1376f9..c2cf015962dd 100644 --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c +++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c @@ -461,9 +461,9 @@ static int obd_init_checks(void) CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len); ret = -EINVAL; } - if ((u64val & ~CFS_PAGE_MASK) >= PAGE_CACHE_SIZE) { + if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) { CWARN("mask failed: u64val %llu >= %llu\n", u64val, - (__u64)PAGE_CACHE_SIZE); + (__u64)PAGE_SIZE); ret = -EINVAL; } @@ -509,7 +509,7 @@ static int __init obdclass_init(void) * For clients with less memory, a larger fraction is needed * for other purposes (mostly for BGL). 
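 * (512 << (20 - PAGE_SHIFT) is just 512MB counted in pages: with 4KB pages, 512 * 256 = 131072 pages.)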
*/ - if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT)) + if (totalram_pages <= 512 << (20 - PAGE_SHIFT)) obd_max_dirty_pages = totalram_pages / 4; else obd_max_dirty_pages = totalram_pages / 2; diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c index 9496c09b2b69..b41b65e2f021 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c @@ -47,7 +47,6 @@ #include "../../include/lustre/lustre_idl.h" #include <linux/fs.h> -#include <linux/pagemap.h> /* for PAGE_CACHE_SIZE */ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) { @@ -71,8 +70,8 @@ void obdo_refresh_inode(struct inode *dst, struct obdo *src, u32 valid) if (valid & OBD_MD_FLBLKSZ && src->o_blksize > (1 << dst->i_blkbits)) dst->i_blkbits = ffs(src->o_blksize) - 1; - if (dst->i_blkbits < PAGE_CACHE_SHIFT) - dst->i_blkbits = PAGE_CACHE_SHIFT; + if (dst->i_blkbits < PAGE_SHIFT) + dst->i_blkbits = PAGE_SHIFT; /* allocation of space */ if (valid & OBD_MD_FLBLOCKS && src->o_blocks > dst->i_blocks) diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c index fd333b9e968c..e6bf414a4444 100644 --- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c +++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c @@ -100,7 +100,7 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj, struct attribute *attr, char *buf) { return sprintf(buf, "%ul\n", - obd_max_dirty_pages / (1 << (20 - PAGE_CACHE_SHIFT))); + obd_max_dirty_pages / (1 << (20 - PAGE_SHIFT))); } static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, @@ -113,14 +113,14 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, struct attribute *attr, if (rc) return rc; - val *= 1 << (20 - PAGE_CACHE_SHIFT); /* convert to pages */ + val *= 1 << (20 - PAGE_SHIFT); /* convert to pages */ if (val > ((totalram_pages / 10) * 9)) { /* Somebody wants to assign too much memory to dirty pages */ return -EINVAL; } - if (val < 4 << (20 - PAGE_CACHE_SHIFT)) { + if (val < 4 << (20 - PAGE_SHIFT)) { /* Less than 4 Mb for dirty cache is also bad */ return -EINVAL; } diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c index 65a4746c89ca..978568ada8e9 100644 --- a/drivers/staging/lustre/lustre/obdclass/lu_object.c +++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c @@ -840,8 +840,8 @@ static int lu_htable_order(void) #if BITS_PER_LONG == 32 /* limit hashtable size for lowmem systems to low RAM */ - if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT)) - cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4; + if (cache_size > 1 << (30 - PAGE_SHIFT)) + cache_size = 1 << (30 - PAGE_SHIFT) * 3 / 4; #endif /* clear off unreasonable cache setting. 
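 * (an out-of-range lu_cache_percent falls back to LU_CACHE_PERCENT_DEFAULT)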
*/ @@ -853,7 +853,7 @@ static int lu_htable_order(void) lu_cache_percent = LU_CACHE_PERCENT_DEFAULT; } cache_size = cache_size / 100 * lu_cache_percent * - (PAGE_CACHE_SIZE / 1024); + (PAGE_SIZE / 1024); for (bits = 1; (1 << bits) < cache_size; ++bits) { ; diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c index 64ffe243f870..1e83669c204d 100644 --- a/drivers/staging/lustre/lustre/obdecho/echo_client.c +++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c @@ -278,7 +278,7 @@ static void echo_page_fini(const struct lu_env *env, struct page *vmpage = ep->ep_vmpage; atomic_dec(&eco->eo_npages); - page_cache_release(vmpage); + put_page(vmpage); } static int echo_page_prep(const struct lu_env *env, @@ -373,7 +373,7 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj, struct echo_object *eco = cl2echo_obj(obj); ep->ep_vmpage = vmpage; - page_cache_get(vmpage); + get_page(vmpage); mutex_init(&ep->ep_lock); cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops); atomic_inc(&eco->eo_npages); @@ -1138,7 +1138,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset, LASSERT(rc == 0); rc = cl_echo_enqueue0(env, eco, offset, - offset + npages * PAGE_CACHE_SIZE - 1, + offset + npages * PAGE_SIZE - 1, rw == READ ? LCK_PR : LCK_PW, &lh.cookie, CEF_NEVER); if (rc < 0) @@ -1311,11 +1311,11 @@ echo_client_page_debug_setup(struct page *page, int rw, u64 id, int delta; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { if (rw == OBD_BRW_WRITE) { stripe_off = offset + delta; stripe_id = id; @@ -1341,11 +1341,11 @@ static int echo_client_page_debug_check(struct page *page, u64 id, int rc2; /* no partial pages on the client */ - LASSERT(count == PAGE_CACHE_SIZE); + LASSERT(count == PAGE_SIZE); addr = kmap(page); - for (rc = delta = 0; delta < PAGE_CACHE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { + for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) { stripe_off = offset + delta; stripe_id = id; @@ -1391,7 +1391,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, return -EINVAL; /* XXX think again with misaligned I/O */ - npages = count >> PAGE_CACHE_SHIFT; + npages = count >> PAGE_SHIFT; if (rw == OBD_BRW_WRITE) brw_flags = OBD_BRW_ASYNC; @@ -1408,7 +1408,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, for (i = 0, pgp = pga, off = offset; i < npages; - i++, pgp++, off += PAGE_CACHE_SIZE) { + i++, pgp++, off += PAGE_SIZE) { LASSERT(!pgp->pg); /* for cleanup */ @@ -1418,7 +1418,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa, goto out; pages[i] = pgp->pg; - pgp->count = PAGE_CACHE_SIZE; + pgp->count = PAGE_SIZE; pgp->off = off; pgp->flag = brw_flags; @@ -1473,8 +1473,8 @@ static int echo_client_prep_commit(const struct lu_env *env, if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0) return -EINVAL; - npages = batch >> PAGE_CACHE_SHIFT; - tot_pages = count >> PAGE_CACHE_SHIFT; + npages = batch >> PAGE_SHIFT; + tot_pages = count >> PAGE_SHIFT; lnb = kcalloc(npages, sizeof(struct niobuf_local), GFP_NOFS); rnb = kcalloc(npages, sizeof(struct niobuf_remote), GFP_NOFS); @@ -1497,9 +1497,9 @@ static int echo_client_prep_commit(const struct lu_env *env, if (tot_pages < npages) npages 
= tot_pages; - for (i = 0; i < npages; i++, off += PAGE_CACHE_SIZE) { + for (i = 0; i < npages; i++, off += PAGE_SIZE) { rnb[i].offset = off; - rnb[i].len = PAGE_CACHE_SIZE; + rnb[i].len = PAGE_SIZE; rnb[i].flags = brw_flags; } @@ -1878,7 +1878,7 @@ static int __init obdecho_init(void) { LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n"); - LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); + LASSERT(PAGE_SIZE % OBD_ECHO_BLOCK_SIZE == 0); return echo_client_init(); } diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c index 57c43c506ef2..a3358c39b2f1 100644 --- a/drivers/staging/lustre/lustre/osc/lproc_osc.c +++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c @@ -162,15 +162,15 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj, if (rc) return rc; - pages_number *= 1 << (20 - PAGE_CACHE_SHIFT); /* MB -> pages */ + pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */ if (pages_number <= 0 || - pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) || + pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_SHIFT) || pages_number > totalram_pages / 4) /* 1/4 of RAM */ return -ERANGE; client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_dirty_max = (u32)(pages_number << PAGE_CACHE_SHIFT); + cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT); osc_wake_cache_waiters(cli); client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -182,7 +182,7 @@ static int osc_cached_mb_seq_show(struct seq_file *m, void *v) { struct obd_device *dev = m->private; struct client_obd *cli = &dev->u.cli; - int shift = 20 - PAGE_CACHE_SHIFT; + int shift = 20 - PAGE_SHIFT; seq_printf(m, "used_mb: %d\n" @@ -211,7 +211,7 @@ static ssize_t osc_cached_mb_seq_write(struct file *file, return -EFAULT; kernbuf[count] = 0; - mult = 1 << (20 - PAGE_CACHE_SHIFT); + mult = 1 << (20 - PAGE_SHIFT); buffer += lprocfs_find_named_value(kernbuf, "used_mb:", &count) - kernbuf; rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult); @@ -569,12 +569,12 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj, /* if the max_pages is specified in bytes, convert to pages */ if (val >= ONE_MB_BRW_SIZE) - val >>= PAGE_CACHE_SHIFT; + val >>= PAGE_SHIFT; - chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_CACHE_SHIFT)) - 1); + chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1); /* max_pages_per_rpc must be chunk aligned */ val = (val + ~chunk_mask) & chunk_mask; - if (val == 0 || val > ocd->ocd_brw_size >> PAGE_CACHE_SHIFT) { + if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) { return -ERANGE; } client_obd_list_lock(&cli->cl_loi_list_lock); diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c index 63363111380c..5f25bf83dcfc 100644 --- a/drivers/staging/lustre/lustre/osc/osc_cache.c +++ b/drivers/staging/lustre/lustre/osc/osc_cache.c @@ -544,7 +544,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur, return -ERANGE; LASSERT(cur->oe_osclock == victim->oe_osclock); - ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT; + ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT; chunk_start = cur->oe_start >> ppc_bits; chunk_end = cur->oe_end >> ppc_bits; if (chunk_start != (victim->oe_end >> ppc_bits) + 1 && @@ -647,8 +647,8 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env, lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0); LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE); - LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT); - 
ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + LASSERT(cli->cl_chunkbits >= PAGE_SHIFT); + ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; chunk_mask = ~((1 << ppc_bits) - 1); chunksize = 1 << cli->cl_chunkbits; chunk = index >> ppc_bits; @@ -871,8 +871,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (!sent) { lost_grant = ext->oe_grants; - } else if (blocksize < PAGE_CACHE_SIZE && - last_count != PAGE_CACHE_SIZE) { + } else if (blocksize < PAGE_SIZE && + last_count != PAGE_SIZE) { /* For short writes we shouldn't count parts of pages that * span a whole chunk on the OST side, or our accounting goes * wrong. Should match the code in filter_grant_check. @@ -884,7 +884,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext, if (end) count += blocksize - end; - lost_grant = PAGE_CACHE_SIZE - count; + lost_grant = PAGE_SIZE - count; } if (ext->oe_grants > 0) osc_free_grant(cli, nr_pages, lost_grant); @@ -967,7 +967,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index, struct osc_async_page *oap; struct osc_async_page *tmp; int pages_in_chunk = 0; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; __u64 trunc_chunk = trunc_index >> ppc_bits; int grants = 0; int nr_pages = 0; @@ -1125,7 +1125,7 @@ static int osc_extent_make_ready(const struct lu_env *env, if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) { last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE); LASSERT(last->oap_count > 0); - LASSERT(last->oap_page_off + last->oap_count <= PAGE_CACHE_SIZE); + LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE); last->oap_async_flags |= ASYNC_COUNT_STABLE; } @@ -1134,7 +1134,7 @@ static int osc_extent_make_ready(const struct lu_env *env, */ list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) { if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) { - oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off; + oap->oap_count = PAGE_SIZE - oap->oap_page_off; oap->oap_async_flags |= ASYNC_COUNT_STABLE; } } @@ -1158,7 +1158,7 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants) struct osc_object *obj = ext->oe_obj; struct client_obd *cli = osc_cli(obj); struct osc_extent *next; - int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT; + int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT; pgoff_t chunk = index >> ppc_bits; pgoff_t end_chunk; pgoff_t end_index; @@ -1293,9 +1293,9 @@ static int osc_refresh_count(const struct lu_env *env, return 0; else if (cl_offset(obj, page->cp_index + 1) > kms) /* catch sub-page write at end of file */ - return kms % PAGE_CACHE_SIZE; + return kms % PAGE_SIZE; else - return PAGE_CACHE_SIZE; + return PAGE_SIZE; } static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, @@ -1376,10 +1376,10 @@ static void osc_consume_write_grant(struct client_obd *cli, assert_spin_locked(&cli->cl_loi_list_lock.lock); LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); atomic_inc(&obd_dirty_pages); - cli->cl_dirty += PAGE_CACHE_SIZE; + cli->cl_dirty += PAGE_SIZE; pga->flag |= OBD_BRW_FROM_GRANT; CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n", - PAGE_CACHE_SIZE, pga, pga->pg); + PAGE_SIZE, pga, pga->pg); osc_update_next_shrink(cli); } @@ -1396,11 +1396,11 @@ static void osc_release_write_grant(struct client_obd *cli, pga->flag &= ~OBD_BRW_FROM_GRANT; atomic_dec(&obd_dirty_pages); - cli->cl_dirty -= PAGE_CACHE_SIZE; + cli->cl_dirty -= PAGE_SIZE; if (pga->flag & OBD_BRW_NOCACHE) { pga->flag &= 
~OBD_BRW_NOCACHE; atomic_dec(&obd_dirty_transit_pages); - cli->cl_dirty_transit -= PAGE_CACHE_SIZE; + cli->cl_dirty_transit -= PAGE_SIZE; } } @@ -1456,7 +1456,7 @@ static void osc_unreserve_grant(struct client_obd *cli, * used, we should return these grants to OST. There're two cases where grants * can be lost: * 1. truncate; - * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was + * 2. blocksize at OST is less than PAGE_SIZE and a partial page was * written. In this case OST may use less chunks to serve this partial * write. OSTs don't actually know the page size on the client side. so * clients have to calculate lost grant by the blocksize on the OST. @@ -1469,7 +1469,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, client_obd_list_lock(&cli->cl_loi_list_lock); atomic_sub(nr_pages, &obd_dirty_pages); - cli->cl_dirty -= nr_pages << PAGE_CACHE_SHIFT; + cli->cl_dirty -= nr_pages << PAGE_SHIFT; cli->cl_lost_grant += lost_grant; if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) { /* borrow some grant from truncate to avoid the case that @@ -1512,11 +1512,11 @@ static int osc_enter_cache_try(struct client_obd *cli, if (rc < 0) return 0; - if (cli->cl_dirty + PAGE_CACHE_SIZE <= cli->cl_dirty_max && + if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max && atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) { osc_consume_write_grant(cli, &oap->oap_brw_page); if (transient) { - cli->cl_dirty_transit += PAGE_CACHE_SIZE; + cli->cl_dirty_transit += PAGE_SIZE; atomic_inc(&obd_dirty_transit_pages); oap->oap_brw_flags |= OBD_BRW_NOCACHE; } @@ -1562,7 +1562,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, * of queued writes and create a discontiguous rpc stream */ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) || - cli->cl_dirty_max < PAGE_CACHE_SIZE || + cli->cl_dirty_max < PAGE_SIZE || cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) { rc = -EDQUOT; goto out; @@ -1632,7 +1632,7 @@ void osc_wake_cache_waiters(struct client_obd *cli) ocw->ocw_rc = -EDQUOT; /* we can't dirty more */ - if ((cli->cl_dirty + PAGE_CACHE_SIZE > cli->cl_dirty_max) || + if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) || (atomic_read(&obd_dirty_pages) + 1 > obd_max_dirty_pages)) { CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n", diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c index d720b1a1c18c..ce9ddd515f64 100644 --- a/drivers/staging/lustre/lustre/osc/osc_page.c +++ b/drivers/staging/lustre/lustre/osc/osc_page.c @@ -410,7 +410,7 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj, int result; opg->ops_from = 0; - opg->ops_to = PAGE_CACHE_SIZE; + opg->ops_to = PAGE_SIZE; result = osc_prep_async_page(osc, opg, vmpage, cl_offset(obj, page->cp_index)); @@ -487,9 +487,9 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0); /* LRU pages are freed in batch mode. OSC should at least free this * number of pages to avoid running out of LRU budget, and.. */ -static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */ +static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */ /* free this number at most otherwise it will take too long time to finish. */ -static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */ +static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */ /* Check if we can free LRU slots from this OSC. If there exists LRU waiters, * we should free slots aggressively. 
In this way, slots are freed in a steady diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c index 74805f1ae888..30526ebcad04 100644 --- a/drivers/staging/lustre/lustre/osc/osc_request.c +++ b/drivers/staging/lustre/lustre/osc/osc_request.c @@ -826,7 +826,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, oa->o_undirty = 0; } else { long max_in_flight = (cli->cl_max_pages_per_rpc << - PAGE_CACHE_SHIFT)* + PAGE_SHIFT)* (cli->cl_max_rpcs_in_flight + 1); oa->o_undirty = max(cli->cl_dirty_max, max_in_flight); } @@ -909,11 +909,11 @@ static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) static int osc_shrink_grant(struct client_obd *cli) { __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * - (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); + (cli->cl_max_pages_per_rpc << PAGE_SHIFT); client_obd_list_lock(&cli->cl_loi_list_lock); if (cli->cl_avail_grant <= target_bytes) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; client_obd_list_unlock(&cli->cl_loi_list_lock); return osc_shrink_grant_to_target(cli, target_bytes); @@ -929,8 +929,8 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) * We don't want to shrink below a single RPC, as that will negatively * impact block allocation and long-term performance. */ - if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT) - target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT) + target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT; if (target_bytes >= cli->cl_avail_grant) { client_obd_list_unlock(&cli->cl_loi_list_lock); @@ -978,7 +978,7 @@ static int osc_should_shrink_grant(struct client_obd *client) * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export) * Keep comment here so that it can be found by searching. */ - int brw_size = client->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; + int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT; if (client->cl_import->imp_state == LUSTRE_IMP_FULL && client->cl_avail_grant > brw_size) @@ -1052,7 +1052,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) } /* determine the appropriate chunk size used by osc_extent. 
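 * (a chunk must cover at least one client page, so cl_chunkbits below is the larger of PAGE_SHIFT and the server-advertised ocd_blocksize)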
*/ - cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); + cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize); client_obd_list_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n", @@ -1317,9 +1317,9 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli, LASSERT(pg->count > 0); /* make sure there is no gap in the middle of page array */ LASSERTF(page_count == 1 || - (ergo(i == 0, poff + pg->count == PAGE_CACHE_SIZE) && + (ergo(i == 0, poff + pg->count == PAGE_SIZE) && ergo(i > 0 && i < page_count - 1, - poff == 0 && pg->count == PAGE_CACHE_SIZE) && + poff == 0 && pg->count == PAGE_SIZE) && ergo(i == page_count - 1, poff == 0)), "i: %d/%d pg: %p off: %llu, count: %u\n", i, page_count, pg, pg->off, pg->count); @@ -1877,7 +1877,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, oap->oap_count; else LASSERT(oap->oap_page_off + oap->oap_count == - PAGE_CACHE_SIZE); + PAGE_SIZE); } } @@ -1993,7 +1993,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, tmp->oap_request = ptlrpc_request_addref(req); client_obd_list_lock(&cli->cl_loi_list_lock); - starting_offset >>= PAGE_CACHE_SHIFT; + starting_offset >>= PAGE_SHIFT; if (cmd == OBD_BRW_READ) { cli->cl_r_in_flight++; lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count); @@ -2790,12 +2790,12 @@ out: CFS_PAGE_MASK; if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <= - fm_key->fiemap.fm_start + PAGE_CACHE_SIZE - 1) + fm_key->fiemap.fm_start + PAGE_SIZE - 1) policy.l_extent.end = OBD_OBJECT_EOF; else policy.l_extent.end = (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length + - PAGE_CACHE_SIZE - 1) & CFS_PAGE_MASK; + PAGE_SIZE - 1) & CFS_PAGE_MASK; ostid_build_res_name(&fm_key->oa.o_oi, &res_id); mode = ldlm_lock_match(exp->exp_obd->obd_namespace, diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c index 1b7673eec4d7..cf3ac8eee9ee 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/client.c +++ b/drivers/staging/lustre/lustre/ptlrpc/client.c @@ -174,12 +174,12 @@ void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, LASSERT(page); LASSERT(pageoffset >= 0); LASSERT(len > 0); - LASSERT(pageoffset + len <= PAGE_CACHE_SIZE); + LASSERT(pageoffset + len <= PAGE_SIZE); desc->bd_nob += len; if (pin) - page_cache_get(page); + get_page(page); ptlrpc_add_bulk_page(desc, page, pageoffset, len); } @@ -206,7 +206,7 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin) if (unpin) { for (i = 0; i < desc->bd_iov_count; i++) - page_cache_release(desc->bd_iov[i].kiov_page); + put_page(desc->bd_iov[i].kiov_page); } kfree(desc); diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c index b4eddf291269..cd94fed0ffdf 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/import.c +++ b/drivers/staging/lustre/lustre/ptlrpc/import.c @@ -1092,7 +1092,7 @@ finish: if (ocd->ocd_connect_flags & OBD_CONNECT_BRW_SIZE) cli->cl_max_pages_per_rpc = - min(ocd->ocd_brw_size >> PAGE_CACHE_SHIFT, + min(ocd->ocd_brw_size >> PAGE_SHIFT, cli->cl_max_pages_per_rpc); else if (imp->imp_connect_op == MDS_CONNECT || imp->imp_connect_op == MGS_CONNECT) diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c index cee04efb6fb5..c95a91ce26c9 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c +++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c @@ 
-308,7 +308,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, * hose a kernel by allowing the request history to grow too * far. */ - bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + bufpages = (svc->srv_buf_size + PAGE_SIZE - 1) >> PAGE_SHIFT; if (val > totalram_pages / (2 * bufpages)) return -ERANGE; @@ -1226,7 +1226,7 @@ int lprocfs_wr_import(struct file *file, const char __user *buffer, const char prefix[] = "connection="; const int prefix_len = sizeof(prefix) - 1; - if (count > PAGE_CACHE_SIZE - 1 || count <= prefix_len) + if (count > PAGE_SIZE - 1 || count <= prefix_len) return -EINVAL; kbuf = kzalloc(count + 1, GFP_NOFS); diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c index 5f27d9c2e4ef..30d9a164e52d 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/recover.c +++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c @@ -195,7 +195,7 @@ int ptlrpc_resend(struct obd_import *imp) } list_for_each_entry_safe(req, next, &imp->imp_sending_list, rq_list) { - LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, + LASSERTF((long)req > PAGE_SIZE && req != LP_POISON, "req %p bad\n", req); LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); if (!ptlrpc_no_resend(req)) diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c index 72d5b9bf5b29..d3872b8c9a6e 100644 --- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c +++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c @@ -58,7 +58,7 @@ * bulk encryption page pools * ****************************************/ -#define POINTERS_PER_PAGE (PAGE_CACHE_SIZE / sizeof(void *)) +#define POINTERS_PER_PAGE (PAGE_SIZE / sizeof(void *)) #define PAGES_PER_POOL (POINTERS_PER_PAGE) #define IDLE_IDX_MAX (100) diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig new file mode 100644 index 000000000000..d277f048789e --- /dev/null +++ b/drivers/staging/olpc_dcon/Kconfig @@ -0,0 +1,35 @@ +config FB_OLPC_DCON + tristate "One Laptop Per Child Display CONtroller support" + depends on OLPC && FB + depends on I2C + depends on (GPIO_CS5535 || GPIO_CS5535=n) + select BACKLIGHT_CLASS_DEVICE + ---help--- + In order to support very low power operation, the XO laptop uses a + secondary Display CONtroller, or DCON. This secondary controller + is present in the video pipeline between the primary display + controller (integrated into the processor or chipset) and the LCD + panel. It allows the main processor/display controller to be + completely powered off while still retaining an image on the display. + This controller is only available on OLPC platforms. Unless you have + one of these platforms, you will want to say 'N'. + +config FB_OLPC_DCON_1 + bool "OLPC XO-1 DCON support" + depends on FB_OLPC_DCON && GPIO_CS5535 + default y + ---help--- + Enable support for the DCON in XO-1 model laptops. The kernel + communicates with the DCON using model-specific code. If you + have an XO-1 (or if you're unsure what model you have), you should + say 'Y'. + +config FB_OLPC_DCON_1_5 + bool "OLPC XO-1.5 DCON support" + depends on FB_OLPC_DCON && ACPI + default y + ---help--- + Enable support for the DCON in XO-1.5 model laptops. The kernel + communicates with the DCON using model-specific code. If you + have an XO-1.5 (or if you're unsure what model you have), you + should say 'Y'.
diff --git a/drivers/staging/olpc_dcon/Makefile b/drivers/staging/olpc_dcon/Makefile new file mode 100644 index 000000000000..36c7e67fec20 --- /dev/null +++ b/drivers/staging/olpc_dcon/Makefile @@ -0,0 +1,6 @@ +olpc-dcon-objs += olpc_dcon.o +olpc-dcon-$(CONFIG_FB_OLPC_DCON_1) += olpc_dcon_xo_1.o +olpc-dcon-$(CONFIG_FB_OLPC_DCON_1_5) += olpc_dcon_xo_1_5.o +obj-$(CONFIG_FB_OLPC_DCON) += olpc-dcon.o + + diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO new file mode 100644 index 000000000000..61c2e65ac354 --- /dev/null +++ b/drivers/staging/olpc_dcon/TODO @@ -0,0 +1,9 @@ +TODO: + - see if vx855 gpio API can be made similar enough to cs5535 so we can + share more code + - allow simultaneous XO-1 and XO-1.5 support + +Please send patches to Greg Kroah-Hartman <greg@kroah.com> and +copy: + Daniel Drake <dsd@laptop.org> + Jens Frederich <jfrederich@gmail.com> diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c new file mode 100644 index 000000000000..f45b2ef05f48 --- /dev/null +++ b/drivers/staging/olpc_dcon/olpc_dcon.c @@ -0,0 +1,813 @@ +/* + * Mainly by David Woodhouse, somewhat modified by Jordan Crouse + * + * Copyright © 2006-2007 Red Hat, Inc. + * Copyright © 2006-2007 Advanced Micro Devices, Inc. + * Copyright © 2009 VIA Technology, Inc. + * Copyright (c) 2010-2011 Andres Salomon <dilinger@queued.net> + * + * This program is free software. You can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/kernel.h> +#include <linux/fb.h> +#include <linux/console.h> +#include <linux/i2c.h> +#include <linux/platform_device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/backlight.h> +#include <linux/device.h> +#include <linux/uaccess.h> +#include <linux/ctype.h> +#include <linux/reboot.h> +#include <linux/olpc-ec.h> +#include <asm/tsc.h> +#include <asm/olpc.h> + +#include "olpc_dcon.h" + +/* Module definitions */ + +static ushort resumeline = 898; +module_param(resumeline, ushort, 0444); + +static struct dcon_platform_data *pdata; + +/* I2C structures */ + +/* Platform devices */ +static struct platform_device *dcon_device; + +static unsigned short normal_i2c[] = { 0x0d, I2C_CLIENT_END }; + +static s32 dcon_write(struct dcon_priv *dcon, u8 reg, u16 val) +{ + return i2c_smbus_write_word_data(dcon->client, reg, val); +} + +static s32 dcon_read(struct dcon_priv *dcon, u8 reg) +{ + return i2c_smbus_read_word_data(dcon->client, reg); +} + +/* ===== API functions - these are called by a variety of users ==== */ + +static int dcon_hw_init(struct dcon_priv *dcon, int is_init) +{ + u16 ver; + int rc = 0; + + ver = dcon_read(dcon, DCON_REG_ID); + if ((ver >> 8) != 0xDC) { + pr_err("DCON ID not 0xDCxx: 0x%04x instead.\n", ver); + rc = -ENXIO; + goto err; + } + + if (is_init) { + pr_info("Discovered DCON version %x\n", ver & 0xFF); + rc = pdata->init(dcon); + if (rc != 0) { + pr_err("Unable to init.\n"); + goto err; + } + } + + if (ver < 0xdc02) { + dev_err(&dcon->client->dev, + "DCON v1 is unsupported, giving up..\n"); + rc = -ENODEV; + goto err; + } + + /* SDRAM setup/hold time */ + dcon_write(dcon, 0x3a, 0xc040); + dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */ + dcon_write(dcon, DCON_REG_MEM_OPT_A, + MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN); + dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET); + + 
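/* On resume (is_init is 0) disp_mode keeps its cached value and is simply rewritten below. */ +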
/* Colour swizzle, AA, no passthrough, backlight */ + if (is_init) { + dcon->disp_mode = MODE_PASSTHRU | MODE_BL_ENABLE | + MODE_CSWIZZLE | MODE_COL_AA; + } + dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); + + /* Set the scanline to interrupt on during resume */ + dcon_write(dcon, DCON_REG_SCAN_INT, resumeline); + +err: + return rc; +} + +/* + * The smbus doesn't always come back due to what is believed to be + * hardware (power rail) bugs. For older models where this is known to + * occur, our solution is to attempt to wait for the bus to stabilize; + * if it doesn't happen, cut power to the dcon, repower it, and wait + * for the bus to stabilize. Rinse, repeat until we have a working + * smbus. For newer models, we simply BUG(); we want to know if this + * still happens despite the power fixes that have been made! + */ +static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down) +{ + unsigned long timeout; + u8 pm; + int x; + +power_up: + if (is_powered_down) { + pm = 1; + x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0); + if (x) { + pr_warn("unable to force dcon to power up: %d!\n", x); + return x; + } + usleep_range(10000, 11000); /* we'll be conservative */ + } + + pdata->bus_stabilize_wiggle(); + + for (x = -1, timeout = 50; timeout && x < 0; timeout--) { + usleep_range(1000, 1100); + x = dcon_read(dcon, DCON_REG_ID); + } + if (x < 0) { + pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n"); + BUG_ON(olpc_board_at_least(olpc_board(0xc2))); + pm = 0; + olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0); + msleep(100); + is_powered_down = 1; + goto power_up; /* argh, stupid hardware.. */ + } + + if (is_powered_down) + return dcon_hw_init(dcon, 0); + return 0; +} + +static void dcon_set_backlight(struct dcon_priv *dcon, u8 level) +{ + dcon->bl_val = level; + dcon_write(dcon, DCON_REG_BRIGHT, dcon->bl_val); + + /* Purposely turn off the backlight when we go to level 0 */ + if (dcon->bl_val == 0) { + dcon->disp_mode &= ~MODE_BL_ENABLE; + dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); + } else if (!(dcon->disp_mode & MODE_BL_ENABLE)) { + dcon->disp_mode |= MODE_BL_ENABLE; + dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); + } +} + +/* Set the output type to either color or mono */ +static int dcon_set_mono_mode(struct dcon_priv *dcon, bool enable_mono) +{ + if (dcon->mono == enable_mono) + return 0; + + dcon->mono = enable_mono; + + if (enable_mono) { + dcon->disp_mode &= ~(MODE_CSWIZZLE | MODE_COL_AA); + dcon->disp_mode |= MODE_MONO_LUMA; + } else { + dcon->disp_mode &= ~(MODE_MONO_LUMA); + dcon->disp_mode |= MODE_CSWIZZLE | MODE_COL_AA; + } + + dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode); + return 0; +} + +/* For now, this will be really stupid - we need to address how + * DCONLOAD works in a sleep and account for it accordingly + */ + +static void dcon_sleep(struct dcon_priv *dcon, bool sleep) +{ + int x; + + /* Turn off the backlight and put the DCON to sleep */ + + if (dcon->asleep == sleep) + return; + + if (!olpc_board_at_least(olpc_board(0xc2))) + return; + + if (sleep) { + u8 pm = 0; + + x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0); + if (x) + pr_warn("unable to force dcon to power down: %d!\n", x); + else + dcon->asleep = sleep; + } else { + /* Only re-enable the backlight if the backlight value is set */ + if (dcon->bl_val != 0) + dcon->disp_mode |= MODE_BL_ENABLE; + x = dcon_bus_stabilize(dcon, 1); + if (x) + pr_warn("unable to reinit dcon hardware: %d!\n", x); + else + dcon->asleep = sleep; + + /* Restore backlight 
*/ + dcon_set_backlight(dcon, dcon->bl_val); + } + + /* We should turn off some stuff in the framebuffer - but what? */ +} + +/* the DCON seems to get confused if we change DCONLOAD too + * frequently -- i.e., approximately faster than frame time. + * normally we don't change it this fast, so in general we won't + * delay here. + */ +static void dcon_load_holdoff(struct dcon_priv *dcon) +{ + ktime_t delta_t, now; + + while (1) { + now = ktime_get(); + delta_t = ktime_sub(now, dcon->load_time); + if (ktime_to_ns(delta_t) > NSEC_PER_MSEC * 20) + break; + mdelay(4); + } +} + +static bool dcon_blank_fb(struct dcon_priv *dcon, bool blank) +{ + int err; + + console_lock(); + if (!lock_fb_info(dcon->fbinfo)) { + console_unlock(); + dev_err(&dcon->client->dev, "unable to lock framebuffer\n"); + return false; + } + + dcon->ignore_fb_events = true; + err = fb_blank(dcon->fbinfo, + blank ? FB_BLANK_POWERDOWN : FB_BLANK_UNBLANK); + dcon->ignore_fb_events = false; + unlock_fb_info(dcon->fbinfo); + console_unlock(); + + if (err) { + dev_err(&dcon->client->dev, "couldn't %sblank framebuffer\n", + blank ? "" : "un"); + return false; + } + return true; +} + +/* Set the source of the display (CPU or DCON) */ +static void dcon_source_switch(struct work_struct *work) +{ + struct dcon_priv *dcon = container_of(work, struct dcon_priv, + switch_source); + int source = dcon->pending_src; + + if (dcon->curr_src == source) + return; + + dcon_load_holdoff(dcon); + + dcon->switched = false; + + switch (source) { + case DCON_SOURCE_CPU: + pr_info("dcon_source_switch to CPU\n"); + /* Enable the scanline interrupt bit */ + if (dcon_write(dcon, DCON_REG_MODE, + dcon->disp_mode | MODE_SCAN_INT)) + pr_err("couldn't enable scanline interrupt!\n"); + else + /* Wait up to one second for the scanline interrupt */ + wait_event_timeout(dcon->waitq, dcon->switched, HZ); + + if (!dcon->switched) + pr_err("Timeout entering CPU mode; expect a screen glitch.\n"); + + /* Turn off the scanline interrupt */ + if (dcon_write(dcon, DCON_REG_MODE, dcon->disp_mode)) + pr_err("couldn't disable scanline interrupt!\n"); + + /* + * Ideally we'd like to disable interrupts here so that the + * fb unblanking and DCON turn on happen at a known time value; + * however, we can't do that right now with fb_blank + * messing with semaphores. + * + * For now, we just hope.. + */ + if (!dcon_blank_fb(dcon, false)) { + pr_err("Failed to enter CPU mode\n"); + dcon->pending_src = DCON_SOURCE_DCON; + return; + } + + /* And turn off the DCON */ + pdata->set_dconload(1); + dcon->load_time = ktime_get(); + + pr_info("The CPU has control\n"); + break; + case DCON_SOURCE_DCON: + { + ktime_t delta_t; + + pr_info("dcon_source_switch to DCON\n"); + + /* Clear DCONLOAD - this implies that the DCON is in control */ + pdata->set_dconload(0); + dcon->load_time = ktime_get(); + + wait_event_timeout(dcon->waitq, dcon->switched, HZ/2); + + if (!dcon->switched) { + pr_err("Timeout entering DCON mode; expect a screen glitch.\n"); + } else { + /* sometimes the DCON doesn't follow its own rules, + * and doesn't wait for two vsync pulses before + * ack'ing the frame load with an IRQ. the result + * is that the display shows the *previously* + * loaded frame. we can detect this by looking at + * the time between asserting DCONLOAD and the IRQ -- + * if it's less than 20msec, then the DCON couldn't + * have seen two VSYNC pulses. in that case we + * deassert and reassert, and hope for the best. 
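dcon_load_holdoff() above is a rate limiter built on ktime stamps: it spins until at least 20 ms (roughly one frame time) have elapsed since the last DCONLOAD change. The idiom in isolation, as a sketch with the same 20 ms window:

#include <linux/delay.h>
#include <linux/ktime.h>

/*
 * Busy-wait until at least 20 ms have passed since @last_op. A sketch
 * of the holdoff loop above; a context that may sleep could use
 * msleep() instead of mdelay().
 */
static void demo_holdoff(ktime_t last_op)
{
	while (ktime_to_ns(ktime_sub(ktime_get(), last_op)) <
	       20 * NSEC_PER_MSEC)
		mdelay(4);
}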
+ * see http://dev.laptop.org/ticket/9664 + */ + delta_t = ktime_sub(dcon->irq_time, dcon->load_time); + if (dcon->switched && ktime_to_ns(delta_t) + < NSEC_PER_MSEC * 20) { + pr_err("missed loading, retrying\n"); + pdata->set_dconload(1); + mdelay(41); + pdata->set_dconload(0); + dcon->load_time = ktime_get(); + mdelay(41); + } + } + + dcon_blank_fb(dcon, true); + pr_info("The DCON has control\n"); + break; + } + default: + BUG(); + } + + dcon->curr_src = source; +} + +static void dcon_set_source(struct dcon_priv *dcon, int arg) +{ + if (dcon->pending_src == arg) + return; + + dcon->pending_src = arg; + + if (dcon->curr_src != arg) + schedule_work(&dcon->switch_source); +} + +static void dcon_set_source_sync(struct dcon_priv *dcon, int arg) +{ + dcon_set_source(dcon, arg); + flush_scheduled_work(); +} + +static ssize_t dcon_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcon_priv *dcon = dev_get_drvdata(dev); + + return sprintf(buf, "%4.4X\n", dcon->disp_mode); +} + +static ssize_t dcon_sleep_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcon_priv *dcon = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", dcon->asleep); +} + +static ssize_t dcon_freeze_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcon_priv *dcon = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", dcon->curr_src == DCON_SOURCE_DCON ? 1 : 0); +} + +static ssize_t dcon_mono_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dcon_priv *dcon = dev_get_drvdata(dev); + + return sprintf(buf, "%d\n", dcon->mono); +} + +static ssize_t dcon_resumeline_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", resumeline); +} + +static ssize_t dcon_mono_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long enable_mono; + int rc; + + rc = kstrtoul(buf, 10, &enable_mono); + if (rc) + return rc; + + dcon_set_mono_mode(dev_get_drvdata(dev), enable_mono ? true : false); + + return count; +} + +static ssize_t dcon_freeze_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct dcon_priv *dcon = dev_get_drvdata(dev); + unsigned long output; + int ret; + + ret = kstrtoul(buf, 10, &output); + if (ret) + return ret; + + pr_info("dcon_freeze_store: %lu\n", output); + + switch (output) { + case 0: + dcon_set_source(dcon, DCON_SOURCE_CPU); + break; + case 1: + dcon_set_source_sync(dcon, DCON_SOURCE_DCON); + break; + case 2: /* normally unused */ + dcon_set_source(dcon, DCON_SOURCE_DCON); + break; + default: + return -EINVAL; + } + + return count; +} + +static ssize_t dcon_resumeline_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned short rl; + int rc; + + rc = kstrtou16(buf, 10, &rl); + if (rc) + return rc; + + resumeline = rl; + dcon_write(dev_get_drvdata(dev), DCON_REG_SCAN_INT, resumeline); + + return count; +} + +static ssize_t dcon_sleep_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long output; + int ret; + + ret = kstrtoul(buf, 10, &output); + if (ret) + return ret; + + dcon_sleep(dev_get_drvdata(dev), output ? 
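The sysfs handlers above expose freeze, sleep, monochrome and resumeline as plain text files, so the DCON can be driven with ordinary file I/O and no custom tooling. A hypothetical user-space snippet that freezes the display; the exact device path is illustrative and depends on how the platform device is enumerated on a given system:

#include <stdio.h>

int main(void)
{
	/* path is illustrative; check /sys/devices/platform on the target */
	FILE *f = fopen("/sys/devices/platform/dcon/freeze", "w");

	if (!f)
		return 1;

	/* "1" maps to dcon_set_source_sync(dcon, DCON_SOURCE_DCON) above */
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}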
true : false); + return count; +} + +static struct device_attribute dcon_device_files[] = { + __ATTR(mode, 0444, dcon_mode_show, NULL), + __ATTR(sleep, 0644, dcon_sleep_show, dcon_sleep_store), + __ATTR(freeze, 0644, dcon_freeze_show, dcon_freeze_store), + __ATTR(monochrome, 0644, dcon_mono_show, dcon_mono_store), + __ATTR(resumeline, 0644, dcon_resumeline_show, dcon_resumeline_store), +}; + +static int dcon_bl_update(struct backlight_device *dev) +{ + struct dcon_priv *dcon = bl_get_data(dev); + u8 level = dev->props.brightness & 0x0F; + + if (dev->props.power != FB_BLANK_UNBLANK) + level = 0; + + if (level != dcon->bl_val) + dcon_set_backlight(dcon, level); + + /* power down the DCON when the screen is blanked */ + if (!dcon->ignore_fb_events) + dcon_sleep(dcon, !!(dev->props.state & BL_CORE_FBBLANK)); + + return 0; +} + +static int dcon_bl_get(struct backlight_device *dev) +{ + struct dcon_priv *dcon = bl_get_data(dev); + + return dcon->bl_val; +} + +static const struct backlight_ops dcon_bl_ops = { + .update_status = dcon_bl_update, + .get_brightness = dcon_bl_get, +}; + +static struct backlight_properties dcon_bl_props = { + .max_brightness = 15, + .type = BACKLIGHT_RAW, + .power = FB_BLANK_UNBLANK, +}; + +static int dcon_reboot_notify(struct notifier_block *nb, + unsigned long foo, void *bar) +{ + struct dcon_priv *dcon = container_of(nb, struct dcon_priv, reboot_nb); + + if (!dcon || !dcon->client) + return NOTIFY_DONE; + + /* Turn off the DCON. Entirely. */ + dcon_write(dcon, DCON_REG_MODE, 0x39); + dcon_write(dcon, DCON_REG_MODE, 0x32); + return NOTIFY_DONE; +} + +static int unfreeze_on_panic(struct notifier_block *nb, + unsigned long e, void *p) +{ + pdata->set_dconload(1); + return NOTIFY_DONE; +} + +static struct notifier_block dcon_panic_nb = { + .notifier_call = unfreeze_on_panic, +}; + +static int dcon_detect(struct i2c_client *client, struct i2c_board_info *info) +{ + strlcpy(info->type, "olpc_dcon", I2C_NAME_SIZE); + + return 0; +} + +static int dcon_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct dcon_priv *dcon; + int rc, i, j; + + if (!pdata) + return -ENXIO; + + dcon = kzalloc(sizeof(*dcon), GFP_KERNEL); + if (!dcon) + return -ENOMEM; + + dcon->client = client; + init_waitqueue_head(&dcon->waitq); + INIT_WORK(&dcon->switch_source, dcon_source_switch); + dcon->reboot_nb.notifier_call = dcon_reboot_notify; + dcon->reboot_nb.priority = -1; + + i2c_set_clientdata(client, dcon); + + if (num_registered_fb < 1) { + dev_err(&client->dev, "DCON driver requires a registered fb\n"); + rc = -EIO; + goto einit; + } + dcon->fbinfo = registered_fb[0]; + + rc = dcon_hw_init(dcon, 1); + if (rc) + goto einit; + + /* Add the DCON device */ + + dcon_device = platform_device_alloc("dcon", -1); + + if (!dcon_device) { + pr_err("Unable to create the DCON device\n"); + rc = -ENOMEM; + goto eirq; + } + rc = platform_device_add(dcon_device); + platform_set_drvdata(dcon_device, dcon); + + if (rc) { + pr_err("Unable to add the DCON device\n"); + goto edev; + } + + for (i = 0; i < ARRAY_SIZE(dcon_device_files); i++) { + rc = device_create_file(&dcon_device->dev, + &dcon_device_files[i]); + if (rc) { + dev_err(&dcon_device->dev, "Cannot create sysfs file\n"); + goto ecreate; + } + } + + dcon->bl_val = dcon_read(dcon, DCON_REG_BRIGHT) & 0x0F; + + /* Add the backlight device for the DCON */ + dcon_bl_props.brightness = dcon->bl_val; + dcon->bl_dev = backlight_device_register("dcon-bl", &dcon_device->dev, + dcon, &dcon_bl_ops, &dcon_bl_props); + if (IS_ERR(dcon->bl_dev)) { 
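The dcon_device_files[] array is registered with a manual device_create_file() loop in dcon_probe() below, with a matching unwind on failure. The same files could be declared through the attribute-group helpers, which create and remove everything in one step; a sketch of that alternative (it assumes the handlers are renamed to the <name>_show/<name>_store convention the macros expect, which is not what this patch does):

#include <linux/device.h>

static DEVICE_ATTR_RO(mode);
static DEVICE_ATTR_RW(sleep);
static DEVICE_ATTR_RW(freeze);
static DEVICE_ATTR_RW(monochrome);
static DEVICE_ATTR_RW(resumeline);

static struct attribute *dcon_attrs[] = {
	&dev_attr_mode.attr,
	&dev_attr_sleep.attr,
	&dev_attr_freeze.attr,
	&dev_attr_monochrome.attr,
	&dev_attr_resumeline.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dcon);

/* assign dcon_groups to the device's .groups before device_add();
 * the core then creates and removes all files, so the loop and the
 * ecreate unwind path go away */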
+ dev_err(&client->dev, "cannot register backlight dev (%ld)\n", + PTR_ERR(dcon->bl_dev)); + dcon->bl_dev = NULL; + } + + register_reboot_notifier(&dcon->reboot_nb); + atomic_notifier_chain_register(&panic_notifier_list, &dcon_panic_nb); + + return 0; + + ecreate: + for (j = 0; j < i; j++) + device_remove_file(&dcon_device->dev, &dcon_device_files[j]); + edev: + platform_device_unregister(dcon_device); + dcon_device = NULL; + eirq: + free_irq(DCON_IRQ, dcon); + einit: + kfree(dcon); + return rc; +} + +static int dcon_remove(struct i2c_client *client) +{ + struct dcon_priv *dcon = i2c_get_clientdata(client); + + unregister_reboot_notifier(&dcon->reboot_nb); + atomic_notifier_chain_unregister(&panic_notifier_list, &dcon_panic_nb); + + free_irq(DCON_IRQ, dcon); + + backlight_device_unregister(dcon->bl_dev); + + if (dcon_device) + platform_device_unregister(dcon_device); + cancel_work_sync(&dcon->switch_source); + + kfree(dcon); + + return 0; +} + +#ifdef CONFIG_PM +static int dcon_suspend(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct dcon_priv *dcon = i2c_get_clientdata(client); + + if (!dcon->asleep) { + /* Set up the DCON to have the source */ + dcon_set_source_sync(dcon, DCON_SOURCE_DCON); + } + + return 0; +} + +static int dcon_resume(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct dcon_priv *dcon = i2c_get_clientdata(client); + + if (!dcon->asleep) { + dcon_bus_stabilize(dcon, 0); + dcon_set_source(dcon, DCON_SOURCE_CPU); + } + + return 0; +} + +#else + +#define dcon_suspend NULL +#define dcon_resume NULL + +#endif /* CONFIG_PM */ + +irqreturn_t dcon_interrupt(int irq, void *id) +{ + struct dcon_priv *dcon = id; + u8 status; + + if (pdata->read_status(&status)) + return IRQ_NONE; + + switch (status & 3) { + case 3: + pr_debug("DCONLOAD_MISSED interrupt\n"); + break; + + case 2: /* switch to DCON mode */ + case 1: /* switch to CPU mode */ + dcon->switched = true; + dcon->irq_time = ktime_get(); + wake_up(&dcon->waitq); + break; + + case 0: + /* workaround resume case: the DCON (on 1.5) doesn't + * ever assert status 0x01 when switching to CPU mode + * during resume. this is because DCONLOAD is de-asserted + * _immediately_ upon exiting S3, so the actual release + * of the DCON happened long before this point. 
+ * see http://dev.laptop.org/ticket/9869 + */ + if (dcon->curr_src != dcon->pending_src && !dcon->switched) { + dcon->switched = true; + dcon->irq_time = ktime_get(); + wake_up(&dcon->waitq); + pr_debug("switching w/ status 0/0\n"); + } else { + pr_debug("scanline interrupt w/CPU\n"); + } + } + + return IRQ_HANDLED; +} + +static const struct dev_pm_ops dcon_pm_ops = { + .suspend = dcon_suspend, + .resume = dcon_resume, +}; + +static const struct i2c_device_id dcon_idtable[] = { + { "olpc_dcon", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, dcon_idtable); + +static struct i2c_driver dcon_driver = { + .driver = { + .name = "olpc_dcon", + .pm = &dcon_pm_ops, + }, + .class = I2C_CLASS_DDC | I2C_CLASS_HWMON, + .id_table = dcon_idtable, + .probe = dcon_probe, + .remove = dcon_remove, + .detect = dcon_detect, + .address_list = normal_i2c, +}; + +static int __init olpc_dcon_init(void) +{ +#ifdef CONFIG_FB_OLPC_DCON_1_5 + /* XO-1.5 */ + if (olpc_board_at_least(olpc_board(0xd0))) + pdata = &dcon_pdata_xo_1_5; +#endif +#ifdef CONFIG_FB_OLPC_DCON_1 + if (!pdata) + pdata = &dcon_pdata_xo_1; +#endif + + return i2c_add_driver(&dcon_driver); +} + +static void __exit olpc_dcon_exit(void) +{ + i2c_del_driver(&dcon_driver); +} + +module_init(olpc_dcon_init); +module_exit(olpc_dcon_exit); + +MODULE_LICENSE("GPL"); diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h new file mode 100644 index 000000000000..215e7ec4dea2 --- /dev/null +++ b/drivers/staging/olpc_dcon/olpc_dcon.h @@ -0,0 +1,111 @@ +#ifndef OLPC_DCON_H_ +#define OLPC_DCON_H_ + +#include <linux/notifier.h> +#include <linux/workqueue.h> + +/* DCON registers */ + +#define DCON_REG_ID 0 +#define DCON_REG_MODE 1 + +#define MODE_PASSTHRU (1<<0) +#define MODE_SLEEP (1<<1) +#define MODE_SLEEP_AUTO (1<<2) +#define MODE_BL_ENABLE (1<<3) +#define MODE_BLANK (1<<4) +#define MODE_CSWIZZLE (1<<5) +#define MODE_COL_AA (1<<6) +#define MODE_MONO_LUMA (1<<7) +#define MODE_SCAN_INT (1<<8) +#define MODE_CLOCKDIV (1<<9) +#define MODE_DEBUG (1<<14) +#define MODE_SELFTEST (1<<15) + +#define DCON_REG_HRES 0x2 +#define DCON_REG_HTOTAL 0x3 +#define DCON_REG_HSYNC_WIDTH 0x4 +#define DCON_REG_VRES 0x5 +#define DCON_REG_VTOTAL 0x6 +#define DCON_REG_VSYNC_WIDTH 0x7 +#define DCON_REG_TIMEOUT 0x8 +#define DCON_REG_SCAN_INT 0x9 +#define DCON_REG_BRIGHT 0xa +#define DCON_REG_MEM_OPT_A 0x41 +#define DCON_REG_MEM_OPT_B 0x42 + +/* Load Delay Locked Loop (DLL) settings for clock delay */ +#define MEM_DLL_CLOCK_DELAY (1<<0) +/* Memory controller power down function */ +#define MEM_POWER_DOWN (1<<8) +/* Memory controller software reset */ +#define MEM_SOFT_RESET (1<<0) + +/* Status values */ + +#define DCONSTAT_SCANINT 0 +#define DCONSTAT_SCANINT_DCON 1 +#define DCONSTAT_DISPLAYLOAD 2 +#define DCONSTAT_MISSED 3 + +/* Source values */ + +#define DCON_SOURCE_DCON 0 +#define DCON_SOURCE_CPU 1 + +/* Interrupt */ +#define DCON_IRQ 6 + +struct dcon_priv { + struct i2c_client *client; + struct fb_info *fbinfo; + struct backlight_device *bl_dev; + + wait_queue_head_t waitq; + struct work_struct switch_source; + struct notifier_block reboot_nb; + + /* Shadow register for the DCON_REG_MODE register */ + u8 disp_mode; + + /* The current backlight value - this saves us some smbus traffic */ + u8 bl_val; + + /* Current source, initialized at probe time */ + int curr_src; + + /* Desired source */ + int pending_src; + + /* Variables used during switches */ + bool switched; + ktime_t irq_time; + ktime_t load_time; + + /* Current output type; true == mono, false == 
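olpc_dcon_init() above has to pick a dcon_platform_data before registering, so it keeps explicit module_init()/module_exit() functions. As a point of comparison, a driver with no such setup step would normally collapse the pair with a helper:

/*
 * Equivalent boilerplate for an i2c driver with no extra init work:
 * expands to module_init()/module_exit() bodies that just call
 * i2c_add_driver() and i2c_del_driver().
 */
module_i2c_driver(dcon_driver);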
color */ + bool mono; + bool asleep; + /* This gets set while controlling fb blank state from the driver */ + bool ignore_fb_events; +}; + +struct dcon_platform_data { + int (*init)(struct dcon_priv *); + void (*bus_stabilize_wiggle)(void); + void (*set_dconload)(int); + int (*read_status)(u8 *); +}; + +#include <linux/interrupt.h> + +irqreturn_t dcon_interrupt(int irq, void *id); + +#ifdef CONFIG_FB_OLPC_DCON_1 +extern struct dcon_platform_data dcon_pdata_xo_1; +#endif + +#ifdef CONFIG_FB_OLPC_DCON_1_5 +extern struct dcon_platform_data dcon_pdata_xo_1_5; +#endif + +#endif diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c new file mode 100644 index 000000000000..0c5a10c69401 --- /dev/null +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c @@ -0,0 +1,205 @@ +/* + * Mainly by David Woodhouse, somewhat modified by Jordan Crouse + * + * Copyright © 2006-2007 Red Hat, Inc. + * Copyright © 2006-2007 Advanced Micro Devices, Inc. + * Copyright © 2009 VIA Technology, Inc. + * Copyright (c) 2010 Andres Salomon <dilinger@queued.net> + * + * This program is free software. You can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/cs5535.h> +#include <linux/gpio.h> +#include <linux/delay.h> +#include <asm/olpc.h> + +#include "olpc_dcon.h" + +static int dcon_init_xo_1(struct dcon_priv *dcon) +{ + unsigned char lob; + + if (gpio_request(OLPC_GPIO_DCON_STAT0, "OLPC-DCON")) { + pr_err("failed to request STAT0 GPIO\n"); + return -EIO; + } + if (gpio_request(OLPC_GPIO_DCON_STAT1, "OLPC-DCON")) { + pr_err("failed to request STAT1 GPIO\n"); + goto err_gp_stat1; + } + if (gpio_request(OLPC_GPIO_DCON_IRQ, "OLPC-DCON")) { + pr_err("failed to request IRQ GPIO\n"); + goto err_gp_irq; + } + if (gpio_request(OLPC_GPIO_DCON_LOAD, "OLPC-DCON")) { + pr_err("failed to request LOAD GPIO\n"); + goto err_gp_load; + } + if (gpio_request(OLPC_GPIO_DCON_BLANK, "OLPC-DCON")) { + pr_err("failed to request BLANK GPIO\n"); + goto err_gp_blank; + } + + /* Turn off the event enable for GPIO7 just to be safe */ + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); + + /* + * Determine the current state by reading the GPIO bit; earlier + * stages of the boot process have established the state. + * + * Note that we read GPIO_OUTPUT_VAL rather than GPIO_READ_BACK here; + * this is because OFW will disable input for the pin and set a value.. + * READ_BACK will only contain a valid value if input is enabled and + * then a value is set. So, future readings of the pin can use + * READ_BACK, but the first one cannot. Awesome, huh? + */ + dcon->curr_src = cs5535_gpio_isset(OLPC_GPIO_DCON_LOAD, GPIO_OUTPUT_VAL) + ? 
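struct dcon_platform_data above is the seam between this board-independent core and the XO-1/XO-1.5 files that follow: four ops cover one-time init, the smbus recovery wiggle, DCONLOAD control, and status readout. Supporting a hypothetical new board would mean filling in just this table; everything below is illustrative stubs, not real board code:

#include "olpc_dcon.h"

/* Hypothetical board support, for illustration only */
static int dcon_init_xo_x(struct dcon_priv *dcon) { return 0; }
static void dcon_wiggle_xo_x(void) { }
static void dcon_set_dconload_xo_x(int val) { }
static int dcon_read_status_xo_x(u8 *status)
{
	*status = 0;
	return 0;
}

struct dcon_platform_data dcon_pdata_xo_x = {
	.init			= dcon_init_xo_x,
	.bus_stabilize_wiggle	= dcon_wiggle_xo_x,
	.set_dconload		= dcon_set_dconload_xo_x,
	.read_status		= dcon_read_status_xo_x,
};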
DCON_SOURCE_CPU + : DCON_SOURCE_DCON; + dcon->pending_src = dcon->curr_src; + + /* Set the directions for the GPIO pins */ + gpio_direction_input(OLPC_GPIO_DCON_STAT0); + gpio_direction_input(OLPC_GPIO_DCON_STAT1); + gpio_direction_input(OLPC_GPIO_DCON_IRQ); + gpio_direction_input(OLPC_GPIO_DCON_BLANK); + gpio_direction_output(OLPC_GPIO_DCON_LOAD, + dcon->curr_src == DCON_SOURCE_CPU); + + /* Set up the interrupt mappings */ + + /* Set the IRQ to pair 2 */ + cs5535_gpio_setup_event(OLPC_GPIO_DCON_IRQ, 2, 0); + + /* Enable group 2 to trigger the DCON interrupt */ + cs5535_gpio_set_irq(2, DCON_IRQ); + + /* Select edge level for interrupt (in PIC) */ + lob = inb(0x4d0); + lob &= ~(1 << DCON_IRQ); + outb(lob, 0x4d0); + + /* Register the interrupt handler */ + if (request_irq(DCON_IRQ, &dcon_interrupt, 0, "DCON", dcon)) { + pr_err("failed to request DCON's irq\n"); + goto err_req_irq; + } + + /* Clear INV_EN for GPIO7 (DCONIRQ) */ + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_INVERT); + + /* Enable filter for GPIO12 (DCONBLANK) */ + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_FILTER); + + /* Disable filter for GPIO7 */ + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_FILTER); + + /* Disable event counter for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ + cs5535_gpio_clear(OLPC_GPIO_DCON_IRQ, GPIO_INPUT_EVENT_COUNT); + cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_INPUT_EVENT_COUNT); + + /* Add GPIO12 to the Filter Event Pair #7 */ + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_FE7_SEL); + + /* Turn off negative Edge Enable for GPIO12 */ + cs5535_gpio_clear(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_EN); + + /* Enable negative Edge Enable for GPIO7 */ + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_EN); + + /* Zero the filter amount for Filter Event Pair #7 */ + cs5535_gpio_set(0, GPIO_FLTR7_AMOUNT); + + /* Clear the negative edge status for GPIO7 and GPIO12 */ + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_NEGATIVE_EDGE_STS); + + /* FIXME: Clear the positive status as well, just to be sure */ + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_POSITIVE_EDGE_STS); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_POSITIVE_EDGE_STS); + + /* Enable events for GPIO7 (DCONIRQ) and GPIO12 (DCONBLANK) */ + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_EVENTS_ENABLE); + cs5535_gpio_set(OLPC_GPIO_DCON_BLANK, GPIO_EVENTS_ENABLE); + + return 0; + +err_req_irq: + gpio_free(OLPC_GPIO_DCON_BLANK); +err_gp_blank: + gpio_free(OLPC_GPIO_DCON_LOAD); +err_gp_load: + gpio_free(OLPC_GPIO_DCON_IRQ); +err_gp_irq: + gpio_free(OLPC_GPIO_DCON_STAT1); +err_gp_stat1: + gpio_free(OLPC_GPIO_DCON_STAT0); + return -EIO; +} + +static void dcon_wiggle_xo_1(void) +{ + int x; + + /* + * According to HiMax, when powering the DCON up we should hold + * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON + * state machine to reset to a (sane) initial state. Mitch Bradley + * did some testing and discovered that holding for 16 SMB_CLK cycles + * worked a lot more reliably, so that's what we do here. + * + * According to the cs5536 spec, to set GPIO14 to SMB_CLK we must + * simultaneously set AUX1 IN/OUT to GPIO14; ditto for SMB_DATA and + * GPIO15. 
+ */ + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_VAL); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_ENABLE); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_ENABLE); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); + cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX2); + cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX2); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); + cs5535_gpio_clear(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); + + for (x = 0; x < 16; x++) { + udelay(5); + cs5535_gpio_clear(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); + udelay(5); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_VAL); + } + udelay(5); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_OUTPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_OUTPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_CLK, GPIO_INPUT_AUX1); + cs5535_gpio_set(OLPC_GPIO_SMB_DATA, GPIO_INPUT_AUX1); +} + +static void dcon_set_dconload_1(int val) +{ + gpio_set_value(OLPC_GPIO_DCON_LOAD, val); +} + +static int dcon_read_status_xo_1(u8 *status) +{ + *status = gpio_get_value(OLPC_GPIO_DCON_STAT0); + *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1; + + /* Clear the negative edge status for GPIO7 */ + cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS); + + return 0; +} + +struct dcon_platform_data dcon_pdata_xo_1 = { + .init = dcon_init_xo_1, + .bus_stabilize_wiggle = dcon_wiggle_xo_1, + .set_dconload = dcon_set_dconload_1, + .read_status = dcon_read_status_xo_1, +}; diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c new file mode 100644 index 000000000000..6a4d379c16a3 --- /dev/null +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2009,2010 One Laptop per Child + * + * This program is free software. You can redistribute it and/or + * modify it under the terms of version 2 of the GNU General Public + * License as published by the Free Software Foundation. 
+ */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/delay.h> +#include <linux/gpio.h> +#include <asm/olpc.h> + +/* TODO: this eventually belongs in linux/vx855.h */ +#define NR_VX855_GPI 14 +#define NR_VX855_GPO 13 +#define NR_VX855_GPIO 15 + +#define VX855_GPI(n) (n) +#define VX855_GPO(n) (NR_VX855_GPI + (n)) +#define VX855_GPIO(n) (NR_VX855_GPI + NR_VX855_GPO + (n)) + +#include "olpc_dcon.h" + +/* Hardware setup on the XO 1.5: + * DCONLOAD connects to VX855_GPIO1 (not SMBCK2) + * DCONBLANK connects to VX855_GPIO8 (not SSPICLK) unused in driver + * DCONSTAT0 connects to VX855_GPI10 (not SSPISDI) + * DCONSTAT1 connects to VX855_GPI11 (not nSSPISS) + * DCONIRQ connects to VX855_GPIO12 + * DCONSMBDATA connects to VX855 graphics CRTSPD + * DCONSMBCLK connects to VX855 graphics CRTSPCLK + */ + +#define VX855_GENL_PURPOSE_OUTPUT 0x44c /* PMIO_Rx4c-4f */ +#define VX855_GPI_STATUS_CHG 0x450 /* PMIO_Rx50 */ +#define VX855_GPI_SCI_SMI 0x452 /* PMIO_Rx52 */ +#define BIT_GPIO12 0x40 + +#define PREFIX "OLPC DCON:" + +static void dcon_clear_irq(void) +{ + /* irq status will appear in PMIO_Rx50[6] (RW1C) on gpio12 */ + outb(BIT_GPIO12, VX855_GPI_STATUS_CHG); +} + +static int dcon_was_irq(void) +{ + u8 tmp; + + /* irq status will appear in PMIO_Rx50[6] on gpio12 */ + tmp = inb(VX855_GPI_STATUS_CHG); + return !!(tmp & BIT_GPIO12); +} + +static int dcon_init_xo_1_5(struct dcon_priv *dcon) +{ + unsigned int irq; + + dcon_clear_irq(); + + /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */ + outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI); + + /* Determine the current state of DCONLOAD, likely set by firmware */ + /* GPIO1 */ + dcon->curr_src = (inl(VX855_GENL_PURPOSE_OUTPUT) & 0x1000) ? + DCON_SOURCE_CPU : DCON_SOURCE_DCON; + dcon->pending_src = dcon->curr_src; + + /* we're sharing the IRQ with ACPI */ + irq = acpi_gbl_FADT.sci_interrupt; + if (request_irq(irq, &dcon_interrupt, IRQF_SHARED, "DCON", dcon)) { + pr_err("DCON (IRQ%d) allocation failed\n", irq); + return 1; + } + + return 0; +} + +static void set_i2c_line(int sda, int scl) +{ + unsigned char tmp; + unsigned int port = 0x26; + + /* FIXME: This directly accesses the CRT GPIO controller !!! */ + outb(port, 0x3c4); + tmp = inb(0x3c5); + + if (scl) + tmp |= 0x20; + else + tmp &= ~0x20; + + if (sda) + tmp |= 0x10; + else + tmp &= ~0x10; + + tmp |= 0x01; + + outb(port, 0x3c4); + outb(tmp, 0x3c5); +} + + +static void dcon_wiggle_xo_1_5(void) +{ + int x; + + /* + * According to HiMax, when powering the DCON up we should hold + * SMB_DATA high for 8 SMB_CLK cycles. This will force the DCON + * state machine to reset to a (sane) initial state. Mitch Bradley + * did some testing and discovered that holding for 16 SMB_CLK cycles + * worked a lot more reliably, so that's what we do here. 
+ */ + set_i2c_line(1, 1); + + for (x = 0; x < 16; x++) { + udelay(5); + set_i2c_line(1, 0); + udelay(5); + set_i2c_line(1, 1); + } + udelay(5); + + /* set PMIO_Rx52[6] to enable SCI/SMI on gpio12 */ + outb(inb(VX855_GPI_SCI_SMI)|BIT_GPIO12, VX855_GPI_SCI_SMI); +} + +static void dcon_set_dconload_xo_1_5(int val) +{ + gpio_set_value(VX855_GPIO(1), val); +} + +static int dcon_read_status_xo_1_5(u8 *status) +{ + if (!dcon_was_irq()) + return -1; + + /* i believe this is the same as "inb(0x44b) & 3" */ + *status = gpio_get_value(VX855_GPI(10)); + *status |= gpio_get_value(VX855_GPI(11)) << 1; + + dcon_clear_irq(); + + return 0; +} + +struct dcon_platform_data dcon_pdata_xo_1_5 = { + .init = dcon_init_xo_1_5, + .bus_stabilize_wiggle = dcon_wiggle_xo_1_5, + .set_dconload = dcon_set_dconload_xo_1_5, + .read_status = dcon_read_status_xo_1_5, +}; diff --git a/drivers/staging/rdma/hfi1/Kconfig b/drivers/staging/rdma/hfi1/Kconfig index 3e668d852f03..a925fb0db706 100644 --- a/drivers/staging/rdma/hfi1/Kconfig +++ b/drivers/staging/rdma/hfi1/Kconfig @@ -2,6 +2,7 @@ config INFINIBAND_HFI1 tristate "Intel OPA Gen1 support" depends on X86_64 && INFINIBAND_RDMAVT select MMU_NOTIFIER + select CRC32 default m ---help--- This is a low-level driver for Intel OPA Gen1 adapter. diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 8d26ed79bb4c..9b04d72e752e 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2049,14 +2049,13 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode, if (tty) { mutex_unlock(&tty_mutex); retval = tty_lock_interruptible(tty); + tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */ if (retval) { if (retval == -EINTR) retval = -ERESTARTSYS; tty = ERR_PTR(retval); goto out; } - /* safe to drop the kref from tty_driver_lookup_tty() */ - tty_kref_put(tty); retval = tty_reopen(tty); if (retval < 0) { tty_unlock(tty); @@ -2158,7 +2157,7 @@ retry_open: read_lock(&tasklist_lock); spin_lock_irq(¤t->sighand->siglock); noctty = (filp->f_flags & O_NOCTTY) || - device == MKDEV(TTY_MAJOR, 0) || + (IS_ENABLED(CONFIG_VT) && device == MKDEV(TTY_MAJOR, 0)) || device == MKDEV(TTYAUX_MAJOR, 1) || (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 5eb1a87228b4..31ccdccd7a04 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -75,8 +75,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, * be the first thing immediately following the endpoint descriptor. 
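The usb/core/config.c hunk above is an ordering fix: the parser used to advance buffer and size before validating the descriptor, so the early-return path had already consumed bytes it never actually parsed. The corrected validate-then-consume shape, as a standalone sketch:

#include <stddef.h>

struct desc_hdr {
	unsigned char bLength;
	unsigned char bDescriptorType;
};

/*
 * Validate first, then consume: on any mismatch the cursor is left
 * untouched, so the caller still points at the unparsed descriptor.
 */
static int parse_one(const unsigned char **buf, size_t *size,
		     unsigned char want_type, size_t min_len)
{
	const struct desc_hdr *h = (const struct desc_hdr *)*buf;

	if (*size < sizeof(*h) || h->bDescriptorType != want_type ||
	    h->bLength < min_len || h->bLength > *size)
		return -1;	/* nothing consumed */

	*buf += h->bLength;
	*size -= h->bLength;
	return 0;
}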
*/ desc = (struct usb_ss_ep_comp_descriptor *) buffer; - buffer += desc->bLength; - size -= desc->bLength; if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || size < USB_DT_SS_EP_COMP_SIZE) { @@ -100,7 +98,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, ep->desc.wMaxPacketSize; return; } - + buffer += desc->bLength; + size -= desc->bLength; memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); /* Check the various values */ @@ -146,12 +145,6 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, ep->ss_ep_comp.bmAttributes = 2; } - /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */ - if (usb_endpoint_xfer_isoc(&ep->desc) && - USB_SS_SSP_ISOC_COMP(desc->bmAttributes)) - usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum, - ep, buffer, size); - if (usb_endpoint_xfer_isoc(&ep->desc)) max_tx = (desc->bMaxBurst + 1) * (USB_SS_MULT(desc->bmAttributes)) * @@ -171,6 +164,11 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, max_tx); ep->ss_ep_comp.wBytesPerInterval = cpu_to_le16(max_tx); } + /* Parse a possible SuperSpeedPlus isoc ep companion descriptor */ + if (usb_endpoint_xfer_isoc(&ep->desc) && + USB_SS_SSP_ISOC_COMP(desc->bmAttributes)) + usb_parse_ssp_isoc_endpoint_companion(ddev, cfgno, inum, asnum, + ep, buffer, size); } static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index e9940dd004e4..818f158232bb 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2254,6 +2254,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, { u32 intmsk; u32 val; + u32 usbcfg; /* Kill any ep0 requests as controller will be reinitialized */ kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET); @@ -2267,10 +2268,16 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, * set configuration. */ + /* keep other bits untouched (so e.g. forced modes are not lost) */ + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | + GUSBCFG_HNPCAP); + /* set the PLL on, remove the HNP/SRP and set the PHY */ val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5; - dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | - (val << GUSBCFG_USBTRDTIM_SHIFT), hsotg->regs + GUSBCFG); + usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | + (val << GUSBCFG_USBTRDTIM_SHIFT); + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); dwc2_hsotg_init_fifo(hsotg); @@ -3031,6 +3038,7 @@ static struct usb_ep_ops dwc2_hsotg_ep_ops = { static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) { u32 trdtim; + u32 usbcfg; /* unmask subset of endpoint interrupts */ dwc2_writel(DIEPMSK_TIMEOUTMSK | DIEPMSK_AHBERRMSK | @@ -3054,11 +3062,16 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg) dwc2_hsotg_init_fifo(hsotg); + /* keep other bits untouched (so e.g. forced modes are not lost) */ + usbcfg = dwc2_readl(hsotg->regs + GUSBCFG); + usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP | + GUSBCFG_HNPCAP); + /* set the PLL on, remove the HNP/SRP and set the PHY */ trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 
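Both dwc2 hunks make the same change: GUSBCFG is now read back and only the relevant fields are cleared before new values are ORed in, so unrelated bits such as the forced-mode flags survive reinitialization. The read-modify-write idiom reduced to its core (plain readl/writel here are illustrative; the driver uses its own dwc2_readl()/dwc2_writel() accessors):

#include <linux/io.h>

/* Update only the bits covered by @mask; everything else is preserved */
static void demo_update_bits(void __iomem *reg, u32 mask, u32 val)
{
	u32 tmp = readl(reg);

	tmp &= ~mask;
	tmp |= val & mask;
	writel(tmp, reg);
}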
9 : 5; - dwc2_writel(hsotg->phyif | GUSBCFG_TOUTCAL(7) | - (trdtim << GUSBCFG_USBTRDTIM_SHIFT), - hsotg->regs + GUSBCFG); + usbcfg |= hsotg->phyif | GUSBCFG_TOUTCAL(7) | + (trdtim << GUSBCFG_USBTRDTIM_SHIFT); + dwc2_writel(usbcfg, hsotg->regs + GUSBCFG); if (using_dma(hsotg)) __orr32(hsotg->regs + GAHBCFG, GAHBCFG_DMA_EN); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 17fd81447c9f..fa20f5a99d12 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -67,23 +67,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode) static int dwc3_core_soft_reset(struct dwc3 *dwc) { u32 reg; + int retries = 1000; int ret; - /* Before Resetting PHY, put Core in Reset */ - reg = dwc3_readl(dwc->regs, DWC3_GCTL); - reg |= DWC3_GCTL_CORESOFTRESET; - dwc3_writel(dwc->regs, DWC3_GCTL, reg); - - /* Assert USB3 PHY reset */ - reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); - reg |= DWC3_GUSB3PIPECTL_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); - - /* Assert USB2 PHY reset */ - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - reg |= DWC3_GUSB2PHYCFG_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); - usb_phy_init(dwc->usb2_phy); usb_phy_init(dwc->usb3_phy); ret = phy_init(dwc->usb2_generic_phy); @@ -95,26 +81,28 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc) phy_exit(dwc->usb2_generic_phy); return ret; } - mdelay(100); - /* Clear USB3 PHY reset */ - reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)); - reg &= ~DWC3_GUSB3PIPECTL_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg); + /* + * We're resetting only the device side because, if we're in host mode, + * XHCI driver will reset the host block. If dwc3 was configured for + * host-only mode, then we can return early. + */ + if (dwc->dr_mode == USB_DR_MODE_HOST) + return 0; - /* Clear USB2 PHY reset */ - reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); - reg &= ~DWC3_GUSB2PHYCFG_PHYSOFTRST; - dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg); + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + reg |= DWC3_DCTL_CSFTRST; + dwc3_writel(dwc->regs, DWC3_DCTL, reg); - mdelay(100); + do { + reg = dwc3_readl(dwc->regs, DWC3_DCTL); + if (!(reg & DWC3_DCTL_CSFTRST)) + return 0; - /* After PHYs are stable we can take Core out of reset state */ - reg = dwc3_readl(dwc->regs, DWC3_GCTL); - reg &= ~DWC3_GCTL_CORESOFTRESET; - dwc3_writel(dwc->regs, DWC3_GCTL, reg); + udelay(1); + } while (--retries); - return 0; + return -ETIMEDOUT; } /** diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c index 2be268d2423d..72664700b8a2 100644 --- a/drivers/usb/dwc3/dwc3-keystone.c +++ b/drivers/usb/dwc3/dwc3-keystone.c @@ -39,8 +39,6 @@ #define USBSS_IRQ_COREIRQ_EN BIT(0) #define USBSS_IRQ_COREIRQ_CLR BIT(0) -static u64 kdwc3_dma_mask; - struct dwc3_keystone { struct device *dev; struct clk *clk; @@ -108,9 +106,6 @@ static int kdwc3_probe(struct platform_device *pdev) if (IS_ERR(kdwc->usbss)) return PTR_ERR(kdwc->usbss); - kdwc3_dma_mask = dma_get_mask(dev); - dev->dma_mask = &kdwc3_dma_mask; - kdwc->clk = devm_clk_get(kdwc->dev, "usb"); error = clk_prepare_enable(kdwc->clk); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 009d83048c8c..adc1e8a624cb 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -35,6 +35,7 @@ #define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 #define PCI_DEVICE_ID_INTEL_SPTH 0xa130 #define PCI_DEVICE_ID_INTEL_BXT 0x0aaa +#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa #define PCI_DEVICE_ID_INTEL_APL 0x5aaa static const 
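The rewritten dwc3_core_soft_reset() above sets the self-clearing DCTL.CSFTRST bit and polls it back down, bounded at 1000 iterations of udelay(1). The same wait as a self-contained helper; a newer kernel could also express this with the read*_poll_timeout() macros:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Poll a self-clearing bit down, ~1 ms worst case as in the hunk above */
static int demo_wait_bit_clear(void __iomem *reg, u32 bit)
{
	int retries = 1000;

	do {
		if (!(readl(reg) & bit))
			return 0;
		udelay(1);
	} while (--retries);

	return -ETIMEDOUT;
}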
struct acpi_gpio_params reset_gpios = { 0, 0, false }; @@ -213,6 +214,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_APL), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 3ac170f9d94d..d54a028cdfeb 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -568,7 +568,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, dwc3_writel(dwc->regs, DWC3_DALEPENA, reg); if (!usb_endpoint_xfer_isoc(desc)) - return 0; + goto out; /* Link TRB for ISOC. The HWO bit is never reset */ trb_st_hw = &dep->trb_pool[0]; @@ -582,9 +582,10 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, trb_link->ctrl |= DWC3_TRB_CTRL_HWO; } +out: switch (usb_endpoint_type(desc)) { case USB_ENDPOINT_XFER_CONTROL: - strlcat(dep->name, "-control", sizeof(dep->name)); + /* don't change name */ break; case USB_ENDPOINT_XFER_ISOC: strlcat(dep->name, "-isoc", sizeof(dep->name)); @@ -2487,7 +2488,11 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) * implemented. */ - dwc->gadget_driver->resume(&dwc->gadget); + if (dwc->gadget_driver && dwc->gadget_driver->resume) { + spin_unlock(&dwc->lock); + dwc->gadget_driver->resume(&dwc->gadget); + spin_lock(&dwc->lock); + } } static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc, diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index a5c62093c26c..de9ffd60fcfa 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -656,7 +656,8 @@ static int bos_desc(struct usb_composite_dev *cdev) ssp_cap->bmAttributes = cpu_to_le32(1); /* Min RX/TX Lane Count = 1 */ - ssp_cap->wFunctionalitySupport = (1 << 8) | (1 << 12); + ssp_cap->wFunctionalitySupport = + cpu_to_le16((1 << 8) | (1 << 12)); /* * bmSublinkSpeedAttr[0]: @@ -666,7 +667,7 @@ static int bos_desc(struct usb_composite_dev *cdev) * LSM = 10 (10 Gbps) */ ssp_cap->bmSublinkSpeedAttr[0] = - (3 << 4) | (1 << 14) | (0xa << 16); + cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16)); /* * bmSublinkSpeedAttr[1] = * ST = Symmetric, TX @@ -675,7 +676,8 @@ static int bos_desc(struct usb_composite_dev *cdev) * LSM = 10 (10 Gbps) */ ssp_cap->bmSublinkSpeedAttr[1] = - (3 << 4) | (1 << 14) | (0xa << 16) | (1 << 7); + cpu_to_le32((3 << 4) | (1 << 14) | + (0xa << 16) | (1 << 7)); } return le16_to_cpu(bos->wTotalLength); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 8cfce105c7ee..e21ca2bd6839 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1147,8 +1147,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent) ffs->sb = sb; data->ffs_data = NULL; sb->s_fs_info = ffs; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = FUNCTIONFS_MAGIC; sb->s_op = &ffs_sb_operations; sb->s_time_gran = 1; diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 84c0ee5ebd1e..58fc199a18ec 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ 
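The dwc3_gadget_wakeup_interrupt() hunk below adds two guards: a NULL check on the ->resume hook and a lock drop around the call, because the gadget driver's callback may re-enter the UDC and try to take dwc->lock again. The pattern in isolation (a sketch; the caller is assumed to hold the lock):

#include <linux/spinlock.h>

/*
 * Invoke an external callback without holding our lock; the callee may
 * call back into us. Any state derived before the unlock must be
 * re-validated after relocking.
 */
static void demo_notify(spinlock_t *lock, void (*cb)(void *), void *ctx)
{
	if (!cb)
		return;

	spin_unlock(lock);
	cb(ctx);
	spin_lock(lock);
}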
-24,6 +24,7 @@ #include <linux/slab.h> #include <linux/device.h> #include <linux/kfifo.h> +#include <linux/spinlock.h> #include <sound/core.h> #include <sound/initval.h> @@ -89,6 +90,7 @@ struct f_midi { unsigned int buflen, qlen; /* This fifo is used as a buffer ring for pre-allocated IN usb_requests */ DECLARE_KFIFO_PTR(in_req_fifo, struct usb_request *); + spinlock_t transmit_lock; unsigned int in_last_port; struct gmidi_in_port in_ports_array[/* in_ports */]; @@ -358,7 +360,9 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt) /* allocate a bunch of read buffers and queue them all at once. */ for (i = 0; i < midi->qlen && err == 0; i++) { struct usb_request *req = - midi_alloc_ep_req(midi->out_ep, midi->buflen); + midi_alloc_ep_req(midi->out_ep, + max_t(unsigned, midi->buflen, + bulk_out_desc.wMaxPacketSize)); if (req == NULL) return -ENOMEM; @@ -597,17 +601,24 @@ static void f_midi_transmit(struct f_midi *midi) { struct usb_ep *ep = midi->in_ep; int ret; + unsigned long flags; /* We only care about USB requests if IN endpoint is enabled */ if (!ep || !ep->enabled) goto drop_out; + spin_lock_irqsave(&midi->transmit_lock, flags); + do { ret = f_midi_do_transmit(midi, ep); - if (ret < 0) + if (ret < 0) { + spin_unlock_irqrestore(&midi->transmit_lock, flags); goto drop_out; + } } while (ret); + spin_unlock_irqrestore(&midi->transmit_lock, flags); + return; drop_out: @@ -1201,6 +1212,8 @@ static struct usb_function *f_midi_alloc(struct usb_function_instance *fi) if (status) goto setup_fail; + spin_lock_init(&midi->transmit_lock); + ++opts->refcnt; mutex_unlock(&opts->lock); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index 5cdaf0150a4e..e64479f882a5 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -1954,8 +1954,8 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) return -ENODEV; /* superblock */ - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = GADGETFS_MAGIC; sb->s_op = &gadget_fs_operations; sb->s_time_gran = 1; diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index 81d42cce885a..18569de06b04 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1045,20 +1045,6 @@ static void reset_all_endpoints(struct usba_udc *udc) list_del_init(&req->queue); request_complete(ep, req, -ECONNRESET); } - - /* NOTE: normally, the next call to the gadget driver is in - * charge of disabling endpoints... usually disconnect(). - * The exception would be entering a high speed test mode. - * - * FIXME remove this code ... and retest thoroughly. 
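The composite.c hunks above are endianness fixes: USB descriptor fields are little-endian on the wire, so multi-byte values must pass through cpu_to_le16()/cpu_to_le32(). On x86 the conversion is a no-op, which is exactly why such bugs survive until the gadget stack runs on a big-endian host. A minimal illustration with a hypothetical descriptor:

#include <asm/byteorder.h>
#include <linux/types.h>

struct demo_ssp_cap {
	__le16 wFunctionalitySupport;
	__le32 bmSublinkSpeedAttr0;
};

static void demo_fill(struct demo_ssp_cap *cap)
{
	/* store wire-format (LE) values, never raw host-order integers */
	cap->wFunctionalitySupport = cpu_to_le16((1 << 8) | (1 << 12));
	cap->bmSublinkSpeedAttr0 =
		cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
}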
- */ - list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) { - if (ep->ep.desc) { - spin_unlock(&udc->lock); - usba_ep_disable(&ep->ep); - spin_lock(&udc->lock); - } - } } static struct usba_ep *get_ep_by_addr(struct usba_udc *udc, u16 wIndex) diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index 4151597e9d28..e4e70e11d0f6 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c @@ -371,12 +371,6 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, INIT_WORK(&gadget->work, usb_gadget_state_work); gadget->dev.parent = parent; -#ifdef CONFIG_HAS_DMA - dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask); - gadget->dev.dma_parms = parent->dma_parms; - gadget->dev.dma_mask = parent->dma_mask; -#endif - if (release) gadget->dev.release = release; else diff --git a/drivers/usb/phy/phy-qcom-8x16-usb.c b/drivers/usb/phy/phy-qcom-8x16-usb.c index 579587d97217..3d7af85aecb9 100644 --- a/drivers/usb/phy/phy-qcom-8x16-usb.c +++ b/drivers/usb/phy/phy-qcom-8x16-usb.c @@ -65,9 +65,7 @@ struct phy_8x16 { void __iomem *regs; struct clk *core_clk; struct clk *iface_clk; - struct regulator *v3p3; - struct regulator *v1p8; - struct regulator *vdd; + struct regulator_bulk_data regulator[3]; struct reset_control *phy_reset; @@ -78,51 +76,6 @@ struct phy_8x16 { struct notifier_block reboot_notify; }; -static int phy_8x16_regulators_enable(struct phy_8x16 *qphy) -{ - int ret; - - ret = regulator_set_voltage(qphy->vdd, HSPHY_VDD_MIN, HSPHY_VDD_MAX); - if (ret) - return ret; - - ret = regulator_enable(qphy->vdd); - if (ret) - return ret; - - ret = regulator_set_voltage(qphy->v3p3, HSPHY_3P3_MIN, HSPHY_3P3_MAX); - if (ret) - goto off_vdd; - - ret = regulator_enable(qphy->v3p3); - if (ret) - goto off_vdd; - - ret = regulator_set_voltage(qphy->v1p8, HSPHY_1P8_MIN, HSPHY_1P8_MAX); - if (ret) - goto off_3p3; - - ret = regulator_enable(qphy->v1p8); - if (ret) - goto off_3p3; - - return 0; - -off_3p3: - regulator_disable(qphy->v3p3); -off_vdd: - regulator_disable(qphy->vdd); - - return ret; -} - -static void phy_8x16_regulators_disable(struct phy_8x16 *qphy) -{ - regulator_disable(qphy->v1p8); - regulator_disable(qphy->v3p3); - regulator_disable(qphy->vdd); -} - static int phy_8x16_notify_connect(struct usb_phy *phy, enum usb_device_speed speed) { @@ -261,7 +214,6 @@ static void phy_8x16_shutdown(struct usb_phy *phy) static int phy_8x16_read_devicetree(struct phy_8x16 *qphy) { - struct regulator_bulk_data regs[3]; struct device *dev = qphy->phy.dev; int ret; @@ -273,18 +225,15 @@ static int phy_8x16_read_devicetree(struct phy_8x16 *qphy) if (IS_ERR(qphy->iface_clk)) return PTR_ERR(qphy->iface_clk); - regs[0].supply = "v3p3"; - regs[1].supply = "v1p8"; - regs[2].supply = "vddcx"; + qphy->regulator[0].supply = "v3p3"; + qphy->regulator[1].supply = "v1p8"; + qphy->regulator[2].supply = "vddcx"; - ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(regs), regs); + ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(qphy->regulator), + qphy->regulator); if (ret) return ret; - qphy->v3p3 = regs[0].consumer; - qphy->v1p8 = regs[1].consumer; - qphy->vdd = regs[2].consumer; - qphy->phy_reset = devm_reset_control_get(dev, "phy"); if (IS_ERR(qphy->phy_reset)) return PTR_ERR(qphy->phy_reset); @@ -364,8 +313,9 @@ static int phy_8x16_probe(struct platform_device *pdev) if (ret < 0) goto off_core; - ret = phy_8x16_regulators_enable(qphy); - if (0 && ret) + ret = regulator_bulk_enable(ARRAY_SIZE(qphy->regulator), + qphy->regulator); + if 
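The phy-qcom-8x16 conversion here replaces three individually managed regulators with a regulator_bulk_data array kept in the device struct, deleting the hand-rolled enable/unwind chain. The bulk API in minimal form; the supply names follow the driver, the rest is illustrative:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data demo_supplies[] = {
	{ .supply = "v3p3" },
	{ .supply = "v1p8" },
	{ .supply = "vddcx" },
};

/*
 * One call fetches all consumers; enable then operates on the whole
 * set, and a partial failure is unwound inside the regulator core.
 */
static int demo_power_on(struct device *dev)
{
	int ret;

	ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(demo_supplies),
				      demo_supplies);
	if (ret)
		return ret;

	return regulator_bulk_enable(ARRAY_SIZE(demo_supplies),
				     demo_supplies);
}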
(WARN_ON(ret)) goto off_clks; qphy->vbus_notify.notifier_call = phy_8x16_vbus_notify; @@ -387,7 +337,7 @@ off_extcon: extcon_unregister_notifier(qphy->vbus_edev, EXTCON_USB, &qphy->vbus_notify); off_power: - phy_8x16_regulators_disable(qphy); + regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator); off_clks: clk_disable_unprepare(qphy->iface_clk); off_core: @@ -413,7 +363,7 @@ static int phy_8x16_remove(struct platform_device *pdev) clk_disable_unprepare(qphy->iface_clk); clk_disable_unprepare(qphy->core_clk); - phy_8x16_regulators_disable(qphy); + regulator_bulk_disable(ARRAY_SIZE(qphy->regulator), qphy->regulator); return 0; } diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index b4de70ee16d3..000f9750149f 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -190,7 +190,8 @@ static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type) goto __usbhs_pkt_handler_end; } - ret = func(pkt, &is_done); + if (likely(func)) + ret = func(pkt, &is_done); if (is_done) __usbhsf_pkt_del(pkt); @@ -889,6 +890,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) pkt->trans = len; + usbhsf_tx_irq_ctrl(pipe, 0); INIT_WORK(&pkt->work, xfer_work); schedule_work(&pkt->work); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 664b263e4b20..53d104b56ef1 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -158,10 +158,14 @@ static void usbhsg_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt) struct usbhs_pipe *pipe = pkt->pipe; struct usbhsg_uep *uep = usbhsg_pipe_to_uep(pipe); struct usbhsg_request *ureq = usbhsg_pkt_to_ureq(pkt); + unsigned long flags; ureq->req.actual = pkt->actual; - usbhsg_queue_pop(uep, ureq, 0); + usbhs_lock(priv, flags); + if (uep) + __usbhsg_queue_pop(uep, ureq, 0); + usbhs_unlock(priv, flags); } static void usbhsg_queue_push(struct usbhsg_uep *uep, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fbfe761c7fba..dd47823bb014 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -165,6 +165,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ + { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index b283eb8b86d6..bbeeb2bd55a8 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -447,6 +447,11 @@ static int cypress_generic_port_probe(struct usb_serial_port *port) struct usb_serial *serial = port->serial; struct cypress_private *priv; + if (!port->interrupt_out_urb || !port->interrupt_in_urb) { + dev_err(&port->dev, "required endpoint is missing\n"); + return -ENODEV; + } + priv = kzalloc(sizeof(struct cypress_private), GFP_KERNEL); if (!priv) return -ENOMEM; @@ -606,12 +611,6 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port) cypress_set_termios(tty, port, &priv->tmp_termios); /* setup the port and start reading from the device */ - if (!port->interrupt_in_urb) { - 
dev_err(&port->dev, "%s - interrupt_in_urb is empty!\n", - __func__); - return -1; - } - usb_fill_int_urb(port->interrupt_in_urb, serial->dev, usb_rcvintpipe(serial->dev, port->interrupt_in_endpointAddress), port->interrupt_in_urb->transfer_buffer, diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index 010a42a92688..16e8e37b3b36 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c @@ -1251,8 +1251,27 @@ static int digi_port_init(struct usb_serial_port *port, unsigned port_num) static int digi_startup(struct usb_serial *serial) { + struct device *dev = &serial->interface->dev; struct digi_serial *serial_priv; int ret; + int i; + + /* check whether the device has the expected number of endpoints */ + if (serial->num_port_pointers < serial->type->num_ports + 1) { + dev_err(dev, "OOB endpoints missing\n"); + return -ENODEV; + } + + for (i = 0; i < serial->type->num_ports + 1 ; i++) { + if (!serial->port[i]->read_urb) { + dev_err(dev, "bulk-in endpoint missing\n"); + return -ENODEV; + } + if (!serial->port[i]->write_urb) { + dev_err(dev, "bulk-out endpoint missing\n"); + return -ENODEV; + } + } serial_priv = kzalloc(sizeof(*serial_priv), GFP_KERNEL); if (!serial_priv) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 427ae43ee898..3a814e802dee 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -1004,6 +1004,10 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, + /* ICP DAS I-756xU devices */ + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, { } /* Terminating entry */ }; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index a84df2513994..c5d6c1e73e8e 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -872,6 +872,14 @@ #define NOVITUS_BONO_E_PID 0x6010 /* + * ICPDAS I-756*U devices + */ +#define ICPDAS_VID 0x1b5c +#define ICPDAS_I7560U_PID 0x0103 +#define ICPDAS_I7561U_PID 0x0104 +#define ICPDAS_I7563U_PID 0x0105 + +/* * RT Systems programming cables for various ham radios */ #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c index 4446b8d70ac2..885655315de1 100644 --- a/drivers/usb/serial/mct_u232.c +++ b/drivers/usb/serial/mct_u232.c @@ -376,14 +376,21 @@ static void mct_u232_msr_to_state(struct usb_serial_port *port, static int mct_u232_port_probe(struct usb_serial_port *port) { + struct usb_serial *serial = port->serial; struct mct_u232_private *priv; + /* check first to simplify error handling */ + if (!serial->port[1] || !serial->port[1]->interrupt_in_urb) { + dev_err(&port->dev, "expected endpoint missing\n"); + return -ENODEV; + } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* Use second interrupt-in endpoint for reading. 
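The cypress_m8, digi_acceleport and mct_u232 hunks all move endpoint validation to probe/attach time: a malformed or malicious USB device may simply lack the endpoints the driver assumes, and dereferencing a missing urb later means a NULL-pointer crash. The shape of the probe-time check, sketched:

#include <linux/usb/serial.h>

/* Refuse the device up front if a required endpoint is absent */
static int demo_port_probe(struct usb_serial_port *port)
{
	if (!port->interrupt_in_urb || !port->interrupt_out_urb) {
		dev_err(&port->dev, "required endpoint is missing\n");
		return -ENODEV;
	}

	return 0;
}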
*/ - priv->read_urb = port->serial->port[1]->interrupt_in_urb; + priv->read_urb = serial->port[1]->interrupt_in_urb; priv->read_urb->context = port; spin_lock_init(&priv->lock); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 348e19834b83..c6f497f16526 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1818,6 +1818,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index dba51362d2e2..90901861bfc0 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -123,7 +123,7 @@ static int slave_configure(struct scsi_device *sdev) unsigned int max_sectors = 64; if (us->fflags & US_FL_MAX_SECTORS_MIN) - max_sectors = PAGE_CACHE_SIZE >> 9; + max_sectors = PAGE_SIZE >> 9; if (queue_max_hw_sectors(sdev->request_queue) > max_sectors) blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index facaaf003f19..e40da7759a0e 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb) if (!(size > 0)) return 0; + if (size > urb->transfer_buffer_length) { + /* should not happen, probably malicious packet */ + if (ud->side == USBIP_STUB) { + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP); + return 0; + } else { + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP); + return -EPIPE; + } + } + ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size); if (ret != size) { dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret); diff --git a/drivers/video/fbdev/pvr2fb.c b/drivers/video/fbdev/pvr2fb.c index 71a923e53f93..3b1ca4411073 100644 --- a/drivers/video/fbdev/pvr2fb.c +++ b/drivers/video/fbdev/pvr2fb.c @@ -735,7 +735,7 @@ out: out_unmap: for (i = 0; i < nr_pages; i++) - page_cache_release(pages[i]); + put_page(pages[i]); kfree(pages); diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index f6f28cc7eb45..e76bd91a29da 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -17,6 +17,7 @@ * */ +#include <linux/delay.h> #define VIRTIO_PCI_NO_LEGACY #include "virtio_pci_common.h" @@ -271,9 +272,13 @@ static void vp_reset(struct virtio_device *vdev) struct virtio_pci_device *vp_dev = to_vp_device(vdev); /* 0 status means a reset. */ vp_iowrite8(0, &vp_dev->common->device_status); - /* Flush out the status write, and flush in device writes, - * including MSI-X interrupts, if any. */ - vp_ioread8(&vp_dev->common->device_status); + /* After writing 0 to device_status, the driver MUST wait for a read of + * device_status to return 0 before reinitializing the device. + * This will flush out the status write, and flush in device writes, + * including MSI-X interrupts, if any. 
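The usbip hunk applies the same distrust at the network boundary: the length field arrives from the peer, so it is checked against the local urb's transfer_buffer_length before any data is received into the buffer. Generic form of the check, with hypothetical names:

#include <linux/errno.h>
#include <linux/types.h>

/*
 * Never trust a peer-supplied length: reject anything larger than the
 * buffer that was actually allocated before receiving into it.
 */
static int demo_recv_checked(int (*recv)(void *buf, size_t len),
			     void *buf, size_t buf_len, size_t claimed)
{
	if (claimed > buf_len)
		return -EPIPE;	/* probably a malicious packet */

	return recv(buf, claimed);
}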
+ */ + while (vp_ioread8(&vp_dev->common->device_status)) + msleep(1); /* Flush pending VQ/configuration callbacks. */ vp_synchronize_vectors(vdev); } diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 488017a0806a..cb7138c97c69 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -484,9 +484,19 @@ static void eoi_pirq(struct irq_data *data) struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) }; int rc = 0; - irq_move_irq(data); + if (!VALID_EVTCHN(evtchn)) + return; - if (VALID_EVTCHN(evtchn)) + if (unlikely(irqd_is_setaffinity_pending(data))) { + int masked = test_and_set_mask(evtchn); + + clear_evtchn(evtchn); + + irq_move_masked_irq(data); + + if (!masked) + unmask_evtchn(evtchn); + } else clear_evtchn(evtchn); if (pirq_needs_eoi(data->irq)) { @@ -1357,9 +1367,19 @@ static void ack_dynirq(struct irq_data *data) { int evtchn = evtchn_from_irq(data->irq); - irq_move_irq(data); + if (!VALID_EVTCHN(evtchn)) + return; - if (VALID_EVTCHN(evtchn)) + if (unlikely(irqd_is_setaffinity_pending(data))) { + int masked = test_and_set_mask(evtchn); + + clear_evtchn(evtchn); + + irq_move_masked_irq(data); + + if (!masked) + unmask_evtchn(evtchn); + } else clear_evtchn(evtchn); } diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index e9e04376c52c..ac9225e86bf3 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -153,7 +153,7 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset, * If called with zero offset, we should release * the private state assocated with the page */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0 && length == PAGE_SIZE) v9fs_fscache_invalidate_page(page); } @@ -166,10 +166,10 @@ static int v9fs_vfs_writepage_locked(struct page *page) struct bio_vec bvec; int err, len; - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; bvec.bv_page = page; bvec.bv_offset = 0; @@ -271,7 +271,7 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping, int retval = 0; struct page *page; struct v9fs_inode *v9inode; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct inode *inode = mapping->host; @@ -288,11 +288,11 @@ start: if (PageUptodate(page)) goto out; - if (len == PAGE_CACHE_SIZE) + if (len == PAGE_SIZE) goto out; retval = v9fs_fid_readpage(v9inode->writeback_fid, page); - page_cache_release(page); + put_page(page); if (!retval) goto start; out: @@ -313,7 +313,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, /* * zero out the rest of the area */ - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); zero_user(page, from + copied, len - copied); flush_dcache_page(page); @@ -331,7 +331,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, } set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index eadc894faea2..b84c291ba1eb 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -421,8 +421,8 @@ v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); loff_t i_size; unsigned long pg_start, pg_end; - pg_start = origin >> PAGE_CACHE_SHIFT; - pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; + pg_start = origin >> PAGE_SHIFT; + pg_end = (origin 
+ retval - 1) >> PAGE_SHIFT; if (inode->i_mapping && inode->i_mapping->nrpages) invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c index bf495cedec26..de3ed8629196 100644 --- a/fs/9p/vfs_super.c +++ b/fs/9p/vfs_super.c @@ -87,7 +87,7 @@ v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, sb->s_op = &v9fs_super_ops; sb->s_bdi = &v9ses->bdi; if (v9ses->cache) - sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; + sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE; sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; if (!v9ses->cache) diff --git a/fs/affs/file.c b/fs/affs/file.c index 22fc7c802d69..0cde550050e8 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -510,9 +510,9 @@ affs_do_readpage_ofs(struct page *page, unsigned to) pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino, page->index, to); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(to > PAGE_SIZE); bsize = AFFS_SB(sb)->s_data_blksize; - tmp = page->index << PAGE_CACHE_SHIFT; + tmp = page->index << PAGE_SHIFT; bidx = tmp / bsize; boff = tmp % bsize; @@ -613,10 +613,10 @@ affs_readpage_ofs(struct file *file, struct page *page) int err; pr_debug("%s(%lu, %ld)\n", __func__, inode->i_ino, page->index); - to = PAGE_CACHE_SIZE; - if (((page->index + 1) << PAGE_CACHE_SHIFT) > inode->i_size) { - to = inode->i_size & ~PAGE_CACHE_MASK; - memset(page_address(page) + to, 0, PAGE_CACHE_SIZE - to); + to = PAGE_SIZE; + if (((page->index + 1) << PAGE_SHIFT) > inode->i_size) { + to = inode->i_size & ~PAGE_MASK; + memset(page_address(page) + to, 0, PAGE_SIZE - to); } err = affs_do_readpage_ofs(page, to); @@ -646,7 +646,7 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping return err; } - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; @@ -656,10 +656,10 @@ static int affs_write_begin_ofs(struct file *file, struct address_space *mapping return 0; /* XXX: inefficient but safe in the face of short writes */ - err = affs_do_readpage_ofs(page, PAGE_CACHE_SIZE); + err = affs_do_readpage_ofs(page, PAGE_SIZE); if (err) { unlock_page(page); - page_cache_release(page); + put_page(page); } return err; } @@ -677,7 +677,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, u32 tmp; int written; - from = pos & (PAGE_CACHE_SIZE - 1); + from = pos & (PAGE_SIZE - 1); to = pos + len; /* * XXX: not sure if this can handle short copies (len < copied), but @@ -692,7 +692,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, bh = NULL; written = 0; - tmp = (page->index << PAGE_CACHE_SHIFT) + from; + tmp = (page->index << PAGE_SHIFT) + from; bidx = tmp / bsize; boff = tmp % bsize; if (boff) { @@ -788,13 +788,13 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, done: affs_brelse(bh); - tmp = (page->index << PAGE_CACHE_SHIFT) + from; + tmp = (page->index << PAGE_SHIFT) + from; if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; err_first_bh: unlock_page(page); - page_cache_release(page); + put_page(page); return written; diff --git a/fs/afs/dir.c b/fs/afs/dir.c index e10e17788f06..5fda2bc53cd7 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -181,7 +181,7 @@ error: static inline void afs_dir_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* diff --git a/fs/afs/file.c 
b/fs/afs/file.c index 999bc3caec92..6344aee4ac4b 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -164,7 +164,7 @@ int afs_page_filler(void *data, struct page *page) _debug("cache said ENOBUFS"); default: go_on: - offset = page->index << PAGE_CACHE_SHIFT; + offset = page->index << PAGE_SHIFT; len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE); /* read the contents of the file from the server into the @@ -319,7 +319,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset, BUG_ON(!PageLocked(page)); /* we clean up only if the entire page is being invalidated */ - if (offset == 0 && length == PAGE_CACHE_SIZE) { + if (offset == 0 && length == PAGE_SIZE) { #ifdef CONFIG_AFS_FSCACHE if (PageFsCache(page)) { struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index ccd0b212e82a..81dd075356b9 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c @@ -93,7 +93,7 @@ int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) kunmap(page); out_free: - page_cache_release(page); + put_page(page); out: _leave(" = %d", ret); return ret; @@ -189,7 +189,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) buf = kmap_atomic(page); memcpy(devname, buf, size); kunmap_atomic(buf); - page_cache_release(page); + put_page(page); page = NULL; } @@ -211,7 +211,7 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt) return mnt; error: - page_cache_release(page); + put_page(page); error_no_page: free_page((unsigned long) options); error_no_options: diff --git a/fs/afs/super.c b/fs/afs/super.c index 81afefe7d8a6..fbdb022b75a2 100644 --- a/fs/afs/super.c +++ b/fs/afs/super.c @@ -315,8 +315,8 @@ static int afs_fill_super(struct super_block *sb, _enter(""); /* fill in the superblock */ - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = AFS_FS_MAGIC; sb->s_op = &afs_super_ops; sb->s_bdi = &as->volume->bdi; diff --git a/fs/afs/write.c b/fs/afs/write.c index dfef94f70667..65de439bdc4f 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -93,10 +93,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, _enter(",,%llu", (unsigned long long)pos); i_size = i_size_read(&vnode->vfs_inode); - if (pos + PAGE_CACHE_SIZE > i_size) + if (pos + PAGE_SIZE > i_size) len = i_size - pos; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; ret = afs_vnode_fetch_data(vnode, key, pos, len, page); if (ret < 0) { @@ -123,9 +123,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping, struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); struct page *page; struct key *key = file->private_data; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; int ret; _enter("{%x:%u},{%lx},%u,%u", @@ -151,8 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping, *pagep = page; /* page won't leak in error case: it eventually gets cleaned off LRU */ - if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) { - ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page); + if (!PageUptodate(page) && len != PAGE_SIZE) { + ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page); if (ret < 0) { kfree(candidate); _leave(" = %d [prep]", ret); @@ -266,7 +266,7 @@ int afs_write_end(struct file *file, struct address_space *mapping, if 
(PageDirty(page)) _debug("dirtied"); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -480,7 +480,7 @@ static int afs_writepages_region(struct address_space *mapping, if (page->index > end) { *_next = index; - page_cache_release(page); + put_page(page); _leave(" = 0 [%lx]", *_next); return 0; } @@ -494,7 +494,7 @@ static int afs_writepages_region(struct address_space *mapping, if (page->mapping != mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); continue; } @@ -515,7 +515,7 @@ static int afs_writepages_region(struct address_space *mapping, ret = afs_write_back_from_locked_page(wb, page); unlock_page(page); - page_cache_release(page); + put_page(page); if (ret < 0) { _leave(" = %d", ret); return ret; @@ -551,13 +551,13 @@ int afs_writepages(struct address_space *mapping, &next); mapping->writeback_index = next; } else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) { - end = (pgoff_t)(LLONG_MAX >> PAGE_CACHE_SHIFT); + end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT); ret = afs_writepages_region(mapping, wbc, 0, end, &next); if (wbc->nr_to_write > 0) mapping->writeback_index = next; } else { - start = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + start = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; ret = afs_writepages_region(mapping, wbc, start, end, &next); } diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 7d914c67a9d0..81381cc0dd17 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -2292,7 +2292,7 @@ static int elf_core_dump(struct coredump_params *cprm) void *kaddr = kmap(page); stop = !dump_emit(cprm, kaddr, PAGE_SIZE); kunmap(page); - page_cache_release(page); + put_page(page); } else stop = !dump_skip(cprm, PAGE_SIZE); if (stop) diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index b1adb92e69de..083ea2bc60ab 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1533,7 +1533,7 @@ static bool elf_fdpic_dump_segments(struct coredump_params *cprm) void *kaddr = kmap(page); res = dump_emit(cprm, kaddr, PAGE_SIZE); kunmap(page); - page_cache_release(page); + put_page(page); } else { res = dump_skip(cprm, PAGE_SIZE); } diff --git a/fs/block_dev.c b/fs/block_dev.c index 3172c4e2f502..20a2c02b77c4 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -331,7 +331,7 @@ static int blkdev_write_end(struct file *file, struct address_space *mapping, ret = block_write_end(file, mapping, pos, len, copied, page, fsdata); unlock_page(page); - page_cache_release(page); + put_page(page); return ret; } @@ -1149,7 +1149,7 @@ void bd_set_size(struct block_device *bdev, loff_t size) inode_lock(bdev->bd_inode); i_size_write(bdev->bd_inode, size); inode_unlock(bdev->bd_inode); - while (bsize < PAGE_CACHE_SIZE) { + while (bsize < PAGE_SIZE) { if (size & bsize) break; bsize <<= 1; diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index e34a71b3e225..516e19d1d202 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -757,7 +757,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state, BUG_ON(NULL == l); ret = btrfsic_read_block(state, &tmp_next_block_ctx); - if (ret < (int)PAGE_CACHE_SIZE) { + if (ret < (int)PAGE_SIZE) { printk(KERN_INFO "btrfsic: read @logical %llu failed!\n", tmp_next_block_ctx.start); @@ -1231,15 +1231,15 @@ static void btrfsic_read_from_block_data( size_t offset_in_page; char *kaddr; char *dst = (char *)dstv; - size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1); - 
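
From fs/9p onward, nearly every filesystem hunk in this series is the same mechanical substitution: PAGE_CACHE_SIZE/SHIFT/MASK become PAGE_SIZE/SHIFT/MASK, and page_cache_get()/page_cache_release() become get_page()/put_page(). The rename is behavior-preserving because, before 4.6, the PAGE_CACHE_* names were plain aliases in include/linux/pagemap.h; from memory of the old header (a sketch, not quoted verbatim):

	#define PAGE_CACHE_SHIFT	PAGE_SHIFT
	#define PAGE_CACHE_SIZE		PAGE_SIZE
	#define PAGE_CACHE_MASK		PAGE_MASK
	#define page_cache_get(page)	get_page(page)
	#define page_cache_release(page)	put_page(page)

Since page cache pages were always allocated at base-page granularity, the two sets of names never diverged, which is why the conversion can be purely textual.
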
unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT; + size_t start_offset = block_ctx->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + offset) >> PAGE_SHIFT; WARN_ON(offset + len > block_ctx->len); - offset_in_page = (start_offset + offset) & (PAGE_CACHE_SIZE - 1); + offset_in_page = (start_offset + offset) & (PAGE_SIZE - 1); while (len > 0) { - cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page)); - BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_CACHE_SIZE)); + cur = min(len, ((size_t)PAGE_SIZE - offset_in_page)); + BUG_ON(i >= DIV_ROUND_UP(block_ctx->len, PAGE_SIZE)); kaddr = block_ctx->datav[i]; memcpy(dst, kaddr + offset_in_page, cur); @@ -1605,8 +1605,8 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx) BUG_ON(!block_ctx->datav); BUG_ON(!block_ctx->pagev); - num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> + PAGE_SHIFT; while (num_pages > 0) { num_pages--; if (block_ctx->datav[num_pages]) { @@ -1637,15 +1637,15 @@ static int btrfsic_read_block(struct btrfsic_state *state, BUG_ON(block_ctx->datav); BUG_ON(block_ctx->pagev); BUG_ON(block_ctx->mem_to_free); - if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) { + if (block_ctx->dev_bytenr & ((u64)PAGE_SIZE - 1)) { printk(KERN_INFO "btrfsic: read_block() with unaligned bytenr %llu\n", block_ctx->dev_bytenr); return -1; } - num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + num_pages = (block_ctx->len + (u64)PAGE_SIZE - 1) >> + PAGE_SHIFT; block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) + sizeof(*block_ctx->pagev)) * num_pages, GFP_NOFS); @@ -1676,8 +1676,8 @@ static int btrfsic_read_block(struct btrfsic_state *state, for (j = i; j < num_pages; j++) { ret = bio_add_page(bio, block_ctx->pagev[j], - PAGE_CACHE_SIZE, 0); - if (PAGE_CACHE_SIZE != ret) + PAGE_SIZE, 0); + if (PAGE_SIZE != ret) break; } if (j == i) { @@ -1693,7 +1693,7 @@ static int btrfsic_read_block(struct btrfsic_state *state, return -1; } bio_put(bio); - dev_bytenr += (j - i) * PAGE_CACHE_SIZE; + dev_bytenr += (j - i) * PAGE_SIZE; i = j; } for (i = 0; i < num_pages; i++) { @@ -1769,9 +1769,9 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state, u32 crc = ~(u32)0; unsigned int i; - if (num_pages * PAGE_CACHE_SIZE < state->metablock_size) + if (num_pages * PAGE_SIZE < state->metablock_size) return 1; /* not metadata */ - num_pages = state->metablock_size >> PAGE_CACHE_SHIFT; + num_pages = state->metablock_size >> PAGE_SHIFT; h = (struct btrfs_header *)datav[0]; if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE)) @@ -1779,8 +1779,8 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state, for (i = 0; i < num_pages; i++) { u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE); - size_t sublen = i ? PAGE_CACHE_SIZE : - (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE); + size_t sublen = i ? 
PAGE_SIZE : + (PAGE_SIZE - BTRFS_CSUM_SIZE); crc = btrfs_crc32c(crc, data, sublen); } @@ -1826,14 +1826,14 @@ again: if (block->is_superblock) { bytenr = btrfs_super_bytenr((struct btrfs_super_block *) mapped_datav[0]); - if (num_pages * PAGE_CACHE_SIZE < + if (num_pages * PAGE_SIZE < BTRFS_SUPER_INFO_SIZE) { printk(KERN_INFO "btrfsic: cannot work with too short bios!\n"); return; } is_metadata = 1; - BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1)); + BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_SIZE - 1)); processed_len = BTRFS_SUPER_INFO_SIZE; if (state->print_mask & BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { @@ -1844,7 +1844,7 @@ again: } if (is_metadata) { if (!block->is_superblock) { - if (num_pages * PAGE_CACHE_SIZE < + if (num_pages * PAGE_SIZE < state->metablock_size) { printk(KERN_INFO "btrfsic: cannot work with too short bios!\n"); @@ -1880,7 +1880,7 @@ again: } block->logical_bytenr = bytenr; } else { - if (num_pages * PAGE_CACHE_SIZE < + if (num_pages * PAGE_SIZE < state->datablock_size) { printk(KERN_INFO "btrfsic: cannot work with too short bios!\n"); @@ -2013,7 +2013,7 @@ again: block->logical_bytenr = bytenr; block->is_metadata = 1; if (block->is_superblock) { - BUG_ON(PAGE_CACHE_SIZE != + BUG_ON(PAGE_SIZE != BTRFS_SUPER_INFO_SIZE); ret = btrfsic_process_written_superblock( state, @@ -2172,8 +2172,8 @@ again: continue_loop: BUG_ON(!processed_len); dev_bytenr += processed_len; - mapped_datav += processed_len >> PAGE_CACHE_SHIFT; - num_pages -= processed_len >> PAGE_CACHE_SHIFT; + mapped_datav += processed_len >> PAGE_SHIFT; + num_pages -= processed_len >> PAGE_SHIFT; goto again; } @@ -2954,7 +2954,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio) goto leave; cur_bytenr = dev_bytenr; for (i = 0; i < bio->bi_vcnt; i++) { - BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE); + BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE); mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page); if (!mapped_datav[i]) { while (i > 0) { @@ -3037,16 +3037,16 @@ int btrfsic_mount(struct btrfs_root *root, struct list_head *dev_head = &fs_devices->devices; struct btrfs_device *device; - if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) { + if (root->nodesize & ((u64)PAGE_SIZE - 1)) { printk(KERN_INFO - "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", - root->nodesize, PAGE_CACHE_SIZE); + "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_SIZE %ld!\n", + root->nodesize, PAGE_SIZE); return -1; } - if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) { + if (root->sectorsize & ((u64)PAGE_SIZE - 1)) { printk(KERN_INFO - "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n", - root->sectorsize, PAGE_CACHE_SIZE); + "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_SIZE %ld!\n", + root->sectorsize, PAGE_SIZE); return -1; } state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 3346cd8f9910..ff61a41ac90b 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -119,7 +119,7 @@ static int check_compressed_csum(struct inode *inode, csum = ~(u32)0; kaddr = kmap_atomic(page); - csum = btrfs_csum_data(kaddr, csum, PAGE_CACHE_SIZE); + csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE); btrfs_csum_final(csum, (char *)&csum); kunmap_atomic(kaddr); @@ -190,7 +190,7 @@ csum_failed: for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; - page_cache_release(page); + 
put_page(page); } /* do io completion on the original bio */ @@ -224,8 +224,8 @@ out: static noinline void end_compressed_writeback(struct inode *inode, const struct compressed_bio *cb) { - unsigned long index = cb->start >> PAGE_CACHE_SHIFT; - unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT; + unsigned long index = cb->start >> PAGE_SHIFT; + unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT; struct page *pages[16]; unsigned long nr_pages = end_index - index + 1; int i; @@ -247,7 +247,7 @@ static noinline void end_compressed_writeback(struct inode *inode, if (cb->errors) SetPageError(pages[i]); end_page_writeback(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -304,7 +304,7 @@ static void end_compressed_bio_write(struct bio *bio) for (index = 0; index < cb->nr_pages; index++) { page = cb->compressed_pages[index]; page->mapping = NULL; - page_cache_release(page); + put_page(page); } /* finally free the cb struct */ @@ -341,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, int ret; int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - WARN_ON(start & ((u64)PAGE_CACHE_SIZE - 1)); + WARN_ON(start & ((u64)PAGE_SIZE - 1)); cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS); if (!cb) return -ENOMEM; @@ -374,14 +374,14 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, page->mapping = inode->i_mapping; if (bio->bi_iter.bi_size) ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, - PAGE_CACHE_SIZE, + PAGE_SIZE, bio, 0); else ret = 0; page->mapping = NULL; - if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < - PAGE_CACHE_SIZE) { + if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) < + PAGE_SIZE) { bio_get(bio); /* @@ -410,15 +410,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, BUG_ON(!bio); bio->bi_private = cb; bio->bi_end_io = end_compressed_bio_write; - bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + bio_add_page(bio, page, PAGE_SIZE, 0); } - if (bytes_left < PAGE_CACHE_SIZE) { + if (bytes_left < PAGE_SIZE) { btrfs_info(BTRFS_I(inode)->root->fs_info, "bytes left %lu compress len %lu nr %lu", bytes_left, cb->compressed_len, cb->nr_pages); } - bytes_left -= PAGE_CACHE_SIZE; - first_byte += PAGE_CACHE_SIZE; + bytes_left -= PAGE_SIZE; + first_byte += PAGE_SIZE; cond_resched(); } bio_get(bio); @@ -457,17 +457,17 @@ static noinline int add_ra_bio_pages(struct inode *inode, int misses = 0; page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page; - last_offset = (page_offset(page) + PAGE_CACHE_SIZE); + last_offset = (page_offset(page) + PAGE_SIZE); em_tree = &BTRFS_I(inode)->extent_tree; tree = &BTRFS_I(inode)->io_tree; if (isize == 0) return 0; - end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; while (last_offset < compressed_end) { - pg_index = last_offset >> PAGE_CACHE_SHIFT; + pg_index = last_offset >> PAGE_SHIFT; if (pg_index > end_index) break; @@ -488,11 +488,11 @@ static noinline int add_ra_bio_pages(struct inode *inode, break; if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) { - page_cache_release(page); + put_page(page); goto next; } - end = last_offset + PAGE_CACHE_SIZE - 1; + end = last_offset + PAGE_SIZE - 1; /* * at this point, we have a locked page in the page cache * for these bytes in the file. 
But, we have to make @@ -502,27 +502,27 @@ static noinline int add_ra_bio_pages(struct inode *inode, lock_extent(tree, last_offset, end); read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, last_offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); read_unlock(&em_tree->lock); if (!em || last_offset < em->start || - (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || + (last_offset + PAGE_SIZE > extent_map_end(em)) || (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) { free_extent_map(em); unlock_extent(tree, last_offset, end); unlock_page(page); - page_cache_release(page); + put_page(page); break; } free_extent_map(em); if (page->index == end_index) { char *userpage; - size_t zero_offset = isize & (PAGE_CACHE_SIZE - 1); + size_t zero_offset = isize & (PAGE_SIZE - 1); if (zero_offset) { int zeros; - zeros = PAGE_CACHE_SIZE - zero_offset; + zeros = PAGE_SIZE - zero_offset; userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, zeros); flush_dcache_page(page); @@ -531,19 +531,19 @@ static noinline int add_ra_bio_pages(struct inode *inode, } ret = bio_add_page(cb->orig_bio, page, - PAGE_CACHE_SIZE, 0); + PAGE_SIZE, 0); - if (ret == PAGE_CACHE_SIZE) { + if (ret == PAGE_SIZE) { nr_pages++; - page_cache_release(page); + put_page(page); } else { unlock_extent(tree, last_offset, end); unlock_page(page); - page_cache_release(page); + put_page(page); break; } next: - last_offset += PAGE_CACHE_SIZE; + last_offset += PAGE_SIZE; } return 0; } @@ -567,7 +567,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, struct extent_map_tree *em_tree; struct compressed_bio *cb; struct btrfs_root *root = BTRFS_I(inode)->root; - unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE; unsigned long compressed_len; unsigned long nr_pages; unsigned long pg_index; @@ -589,7 +589,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, read_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, page_offset(bio->bi_io_vec->bv_page), - PAGE_CACHE_SIZE); + PAGE_SIZE); read_unlock(&em_tree->lock); if (!em) return -EIO; @@ -617,7 +617,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, cb->compress_type = extent_compress_type(bio_flags); cb->orig_bio = bio; - nr_pages = DIV_ROUND_UP(compressed_len, PAGE_CACHE_SIZE); + nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE); cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); if (!cb->compressed_pages) @@ -640,7 +640,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, add_ra_bio_pages(inode, em_start + em_len, cb); /* include any pages we added in add_ra-bio_pages */ - uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; + uncompressed_len = bio->bi_vcnt * PAGE_SIZE; cb->len = uncompressed_len; comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); @@ -653,18 +653,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, for (pg_index = 0; pg_index < nr_pages; pg_index++) { page = cb->compressed_pages[pg_index]; page->mapping = inode->i_mapping; - page->index = em_start >> PAGE_CACHE_SHIFT; + page->index = em_start >> PAGE_SHIFT; if (comp_bio->bi_iter.bi_size) ret = tree->ops->merge_bio_hook(READ, page, 0, - PAGE_CACHE_SIZE, + PAGE_SIZE, comp_bio, 0); else ret = 0; page->mapping = NULL; - if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) < - PAGE_CACHE_SIZE) { + if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) < + PAGE_SIZE) { 
bio_get(comp_bio); ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio, @@ -702,9 +702,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, comp_bio->bi_private = cb; comp_bio->bi_end_io = end_compressed_bio_read; - bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0); + bio_add_page(comp_bio, page, PAGE_SIZE, 0); } - cur_disk_byte += PAGE_CACHE_SIZE; + cur_disk_byte += PAGE_SIZE; } bio_get(comp_bio); @@ -1013,8 +1013,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, /* copy bytes from the working buffer into the pages */ while (working_bytes > 0) { - bytes = min(PAGE_CACHE_SIZE - *pg_offset, - PAGE_CACHE_SIZE - buf_offset); + bytes = min(PAGE_SIZE - *pg_offset, + PAGE_SIZE - buf_offset); bytes = min(bytes, working_bytes); kaddr = kmap_atomic(page_out); memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); @@ -1027,7 +1027,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, current_buf_start += bytes; /* check if we need to pick another page */ - if (*pg_offset == PAGE_CACHE_SIZE) { + if (*pg_offset == PAGE_SIZE) { (*pg_index)++; if (*pg_index >= vcnt) return 0; diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 77592931ab4f..ec7928a27aaa 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -19,6 +19,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/rbtree.h> +#include <linux/vmalloc.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -5361,10 +5362,13 @@ int btrfs_compare_trees(struct btrfs_root *left_root, goto out; } - tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL); + tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL | __GFP_NOWARN); if (!tmp_buf) { - ret = -ENOMEM; - goto out; + tmp_buf = vmalloc(left_root->nodesize); + if (!tmp_buf) { + ret = -ENOMEM; + goto out; + } } left_path->search_commit_root = 1; @@ -5565,7 +5569,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root, out: btrfs_free_path(left_path); btrfs_free_path(right_path); - kfree(tmp_buf); + kvfree(tmp_buf); return ret; } diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index a1d6652e0c47..26bcb487f958 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -394,6 +394,8 @@ int btrfs_dev_replace_start(struct btrfs_root *root, dev_replace->cursor_right = 0; dev_replace->is_valid = 1; dev_replace->item_needs_writeback = 1; + atomic64_set(&dev_replace->num_write_errors, 0); + atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0); args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; btrfs_dev_replace_unlock(dev_replace, 1); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d01f89d130e0..4e47849d7427 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1062,7 +1062,7 @@ static void btree_invalidatepage(struct page *page, unsigned int offset, (unsigned long long)page_offset(page)); ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } } @@ -1764,7 +1764,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi) if (err) return err; - bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; + bdi->ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_SIZE; bdi->congested_fn = btrfs_congested_fn; bdi->congested_data = info; bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; @@ -2542,7 +2542,7 @@ int open_ctree(struct super_block *sb, err = ret; goto fail_bdi; } - fs_info->dirty_metadata_batch = PAGE_CACHE_SIZE * + fs_info->dirty_metadata_batch = PAGE_SIZE * (1 + ilog2(nr_cpu_ids)); ret = 
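
The btrfs_compare_trees() hunk in ctree.c above open-codes a pattern that later kernels wrapped up as kvmalloc(): attempt a physically contiguous kmalloc() with the allocation-failure warning suppressed, fall back to vmalloc(), and release with kvfree(), which accepts either kind of pointer. A minimal sketch of the shape, with an illustrative helper name that is not part of the patch:

	/* Illustrative only: same try-kmalloc-then-vmalloc shape as above. */
	static void *alloc_node_buf(size_t size)
	{
		void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (!buf)
			buf = vmalloc(size);	/* virtually contiguous fallback */
		return buf;			/* free with kvfree() either way */
	}
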
percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); @@ -2787,7 +2787,7 @@ int open_ctree(struct super_block *sb, * flag our filesystem as having big metadata blocks if * they are bigger than the page size */ - if (btrfs_super_nodesize(disk_super) > PAGE_CACHE_SIZE) { + if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; @@ -2837,7 +2837,7 @@ int open_ctree(struct super_block *sb, fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, - SZ_4M / PAGE_CACHE_SIZE); + SZ_4M / PAGE_SIZE); tree_root->nodesize = nodesize; tree_root->sectorsize = sectorsize; @@ -4076,9 +4076,9 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, ret = -EINVAL; } /* Only PAGE SIZE is supported yet */ - if (sectorsize != PAGE_CACHE_SIZE) { + if (sectorsize != PAGE_SIZE) { printk(KERN_ERR "BTRFS: sectorsize %llu not supported yet, only support %lu\n", - sectorsize, PAGE_CACHE_SIZE); + sectorsize, PAGE_SIZE); ret = -EINVAL; } if (!is_power_of_2(nodesize) || nodesize < sectorsize || diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 53e12977bfd0..84e060eb0de8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3452,7 +3452,7 @@ again: num_pages = 1; num_pages *= 16; - num_pages *= PAGE_CACHE_SIZE; + num_pages *= PAGE_SIZE; ret = btrfs_check_data_free_space(inode, 0, num_pages); if (ret) @@ -4639,7 +4639,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, loops = 0; while (delalloc_bytes && loops < 3) { max_reclaim = min(delalloc_bytes, to_reclaim); - nr_pages = max_reclaim >> PAGE_CACHE_SHIFT; + nr_pages = max_reclaim >> PAGE_SHIFT; btrfs_writeback_inodes_sb_nr(root, nr_pages, items); /* * We need to wait for the async pages to actually start before @@ -9386,15 +9386,23 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) u64 dev_min = 1; u64 dev_nr = 0; u64 target; + int debug; int index; int full = 0; int ret = 0; + debug = btrfs_test_opt(root, ENOSPC_DEBUG); + block_group = btrfs_lookup_block_group(root->fs_info, bytenr); /* odd, couldn't find the block group, leave it alone */ - if (!block_group) + if (!block_group) { + if (debug) + btrfs_warn(root->fs_info, + "can't find block group for bytenr %llu", + bytenr); return -1; + } min_free = btrfs_block_group_used(&block_group->item); @@ -9448,8 +9456,13 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) * this is just a balance, so if we were marked as full * we know there is no space for a new chunk */ - if (full) + if (full) { + if (debug) + btrfs_warn(root->fs_info, + "no space to alloc new chunk for block group %llu", + block_group->key.objectid); goto out; + } index = get_block_group_index(block_group); } @@ -9496,6 +9509,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr) ret = -1; } } + if (debug && ret == -1) + btrfs_warn(root->fs_info, + "no space to allocate a new chunk for block group %llu", + block_group->key.objectid); mutex_unlock(&root->fs_info->chunk_mutex); btrfs_end_transaction(trans, root); out: diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 76a0c8597d98..d247fc0eea19 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1363,23 +1363,23 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end) void extent_range_clear_dirty_for_io(struct inode *inode, u64 
start, u64 end) { - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; struct page *page; while (index <= end_index) { page = find_get_page(inode->i_mapping, index); BUG_ON(!page); /* Pages should be in the extent_io_tree */ clear_page_dirty_for_io(page); - page_cache_release(page); + put_page(page); index++; } } void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) { - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; struct page *page; while (index <= end_index) { @@ -1387,7 +1387,7 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) BUG_ON(!page); /* Pages should be in the extent_io_tree */ __set_page_dirty_nobuffers(page); account_page_redirty(page); - page_cache_release(page); + put_page(page); index++; } } @@ -1397,15 +1397,15 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end) */ static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) { - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; struct page *page; while (index <= end_index) { page = find_get_page(tree->mapping, index); BUG_ON(!page); /* Pages should be in the extent_io_tree */ set_page_writeback(page); - page_cache_release(page); + put_page(page); index++; } } @@ -1556,8 +1556,8 @@ static noinline void __unlock_for_delalloc(struct inode *inode, { int ret; struct page *pages[16]; - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; @@ -1571,7 +1571,7 @@ static noinline void __unlock_for_delalloc(struct inode *inode, for (i = 0; i < ret; i++) { if (pages[i] != locked_page) unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -1584,9 +1584,9 @@ static noinline int lock_delalloc_pages(struct inode *inode, u64 delalloc_start, u64 delalloc_end) { - unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT; + unsigned long index = delalloc_start >> PAGE_SHIFT; unsigned long start_index = index; - unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT; + unsigned long end_index = delalloc_end >> PAGE_SHIFT; unsigned long pages_locked = 0; struct page *pages[16]; unsigned long nrpages; @@ -1619,11 +1619,11 @@ static noinline int lock_delalloc_pages(struct inode *inode, pages[i]->mapping != inode->i_mapping) { ret = -EAGAIN; unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); goto done; } } - page_cache_release(pages[i]); + put_page(pages[i]); pages_locked++; } nrpages -= ret; @@ -1636,7 +1636,7 @@ done: __unlock_for_delalloc(inode, locked_page, delalloc_start, ((u64)(start_index + pages_locked - 1)) << - PAGE_CACHE_SHIFT); + PAGE_SHIFT); } return ret; } @@ -1696,7 +1696,7 @@ again: free_extent_state(cached_state); cached_state = NULL; if (!loops) { - max_bytes = PAGE_CACHE_SIZE; + max_bytes = PAGE_SIZE; loops = 1; goto again; } else { @@ -1735,8 +1735,8 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, struct 
extent_io_tree *tree = &BTRFS_I(inode)->io_tree; int ret; struct page *pages[16]; - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; @@ -1757,7 +1757,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, SetPagePrivate2(pages[i]); if (pages[i] == locked_page) { - page_cache_release(pages[i]); + put_page(pages[i]); continue; } if (page_ops & PAGE_CLEAR_DIRTY) @@ -1770,7 +1770,7 @@ void extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, end_page_writeback(pages[i]); if (page_ops & PAGE_UNLOCK) unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -1961,7 +1961,7 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) { u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL)) SetPageUptodate(page); } @@ -2071,11 +2071,11 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb, struct page *p = eb->pages[i]; ret = repair_io_failure(root->fs_info->btree_inode, start, - PAGE_CACHE_SIZE, start, p, + PAGE_SIZE, start, p, start - page_offset(p), mirror_num); if (ret) break; - start += PAGE_CACHE_SIZE; + start += PAGE_SIZE; } return ret; @@ -2466,8 +2466,8 @@ static void end_bio_extent_writepage(struct bio *bio) * advance bv_offset and adjust bv_len to compensate. * Print a warning for nonzero offsets, and an error * if they don't add up to a full page. */ - if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { - if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) + if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { + if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, "partial page write in btrfs with offset %u and length %u", bvec->bv_offset, bvec->bv_len); @@ -2541,8 +2541,8 @@ static void end_bio_extent_readpage(struct bio *bio) * advance bv_offset and adjust bv_len to compensate. * Print a warning for nonzero offsets, and an error * if they don't add up to a full page. 
*/ - if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE) { - if (bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE) + if (bvec->bv_offset || bvec->bv_len != PAGE_SIZE) { + if (bvec->bv_offset + bvec->bv_len != PAGE_SIZE) btrfs_err(BTRFS_I(page->mapping->host)->root->fs_info, "partial page read in btrfs with offset %u and length %u", bvec->bv_offset, bvec->bv_len); @@ -2598,13 +2598,13 @@ static void end_bio_extent_readpage(struct bio *bio) readpage_ok: if (likely(uptodate)) { loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned off; /* Zero out the end if this page straddles i_size */ - off = i_size & (PAGE_CACHE_SIZE-1); + off = i_size & (PAGE_SIZE-1); if (page->index == end_index && off) - zero_user_segment(page, off, PAGE_CACHE_SIZE); + zero_user_segment(page, off, PAGE_SIZE); SetPageUptodate(page); } else { ClearPageUptodate(page); @@ -2768,7 +2768,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, struct bio *bio; int contig = 0; int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; - size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); + size_t page_size = min_t(size_t, size, PAGE_SIZE); if (bio_ret && *bio_ret) { bio = *bio_ret; @@ -2821,7 +2821,7 @@ static void attach_extent_buffer_page(struct extent_buffer *eb, { if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long)eb); } else { WARN_ON(page->private != (unsigned long)eb); @@ -2832,7 +2832,7 @@ void set_page_extent_mapped(struct page *page) { if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, EXTENT_PAGE_PRIVATE); } } @@ -2880,7 +2880,7 @@ static int __do_readpage(struct extent_io_tree *tree, { struct inode *inode = page->mapping->host; u64 start = page_offset(page); - u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 page_end = start + PAGE_SIZE - 1; u64 end; u64 cur = start; u64 extent_offset; @@ -2909,12 +2909,12 @@ static int __do_readpage(struct extent_io_tree *tree, } } - if (page->index == last_byte >> PAGE_CACHE_SHIFT) { + if (page->index == last_byte >> PAGE_SHIFT) { char *userpage; - size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1); + size_t zero_offset = last_byte & (PAGE_SIZE - 1); if (zero_offset) { - iosize = PAGE_CACHE_SIZE - zero_offset; + iosize = PAGE_SIZE - zero_offset; userpage = kmap_atomic(page); memset(userpage + zero_offset, 0, iosize); flush_dcache_page(page); @@ -2922,14 +2922,14 @@ static int __do_readpage(struct extent_io_tree *tree, } } while (cur <= end) { - unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; + unsigned long pnr = (last_byte >> PAGE_SHIFT) + 1; bool force_bio_submit = false; if (cur >= last_byte) { char *userpage; struct extent_state *cached = NULL; - iosize = PAGE_CACHE_SIZE - pg_offset; + iosize = PAGE_SIZE - pg_offset; userpage = kmap_atomic(page); memset(userpage + pg_offset, 0, iosize); flush_dcache_page(page); @@ -3112,7 +3112,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, for (index = 0; index < nr_pages; index++) { __do_readpage(tree, pages[index], get_extent, em_cached, bio, mirror_num, bio_flags, rw, prev_em_start); - page_cache_release(pages[index]); + put_page(pages[index]); } } @@ -3134,10 +3134,10 @@ static void __extent_readpages(struct extent_io_tree *tree, page_start = page_offset(pages[index]); if (!end) { start = page_start; - end = start + PAGE_CACHE_SIZE - 1; + end = 
start + PAGE_SIZE - 1; first_index = index; } else if (end + 1 == page_start) { - end += PAGE_CACHE_SIZE; + end += PAGE_SIZE; } else { __do_contiguous_readpages(tree, &pages[first_index], index - first_index, start, @@ -3145,7 +3145,7 @@ static void __extent_readpages(struct extent_io_tree *tree, bio, mirror_num, bio_flags, rw, prev_em_start); start = page_start; - end = start + PAGE_CACHE_SIZE - 1; + end = start + PAGE_SIZE - 1; first_index = index; } } @@ -3167,13 +3167,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree, struct inode *inode = page->mapping->host; struct btrfs_ordered_extent *ordered; u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; int ret; while (1) { lock_extent(tree, start, end); ordered = btrfs_lookup_ordered_range(inode, start, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (!ordered) break; unlock_extent(tree, start, end); @@ -3227,7 +3227,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, unsigned long *nr_written) { struct extent_io_tree *tree = epd->tree; - u64 page_end = delalloc_start + PAGE_CACHE_SIZE - 1; + u64 page_end = delalloc_start + PAGE_SIZE - 1; u64 nr_delalloc; u64 delalloc_to_write = 0; u64 delalloc_end = 0; @@ -3264,13 +3264,11 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, goto done; } /* - * delalloc_end is already one less than the total - * length, so we don't subtract one from - * PAGE_CACHE_SIZE + * delalloc_end is already one less than the total length, so + * we don't subtract one from PAGE_SIZE */ delalloc_to_write += (delalloc_end - delalloc_start + - PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + PAGE_SIZE) >> PAGE_SHIFT; delalloc_start = delalloc_end + 1; } if (wbc->nr_to_write < delalloc_to_write) { @@ -3319,7 +3317,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, { struct extent_io_tree *tree = epd->tree; u64 start = page_offset(page); - u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 page_end = start + PAGE_SIZE - 1; u64 end; u64 cur = start; u64 extent_offset; @@ -3434,7 +3432,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, if (ret) { SetPageError(page); } else { - unsigned long max_nr = (i_size >> PAGE_CACHE_SHIFT) + 1; + unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1; set_range_writeback(tree, cur, cur + iosize - 1); if (!PageWriteback(page)) { @@ -3477,12 +3475,12 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, struct inode *inode = page->mapping->host; struct extent_page_data *epd = data; u64 start = page_offset(page); - u64 page_end = start + PAGE_CACHE_SIZE - 1; + u64 page_end = start + PAGE_SIZE - 1; int ret; int nr = 0; size_t pg_offset = 0; loff_t i_size = i_size_read(inode); - unsigned long end_index = i_size >> PAGE_CACHE_SHIFT; + unsigned long end_index = i_size >> PAGE_SHIFT; int write_flags; unsigned long nr_written = 0; @@ -3497,10 +3495,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, ClearPageError(page); - pg_offset = i_size & (PAGE_CACHE_SIZE - 1); + pg_offset = i_size & (PAGE_SIZE - 1); if (page->index > end_index || (page->index == end_index && !pg_offset)) { - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return 0; } @@ -3510,7 +3508,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, userpage = kmap_atomic(page); memset(userpage + 
pg_offset, 0, - PAGE_CACHE_SIZE - pg_offset); + PAGE_SIZE - pg_offset); kunmap_atomic(userpage); flush_dcache_page(page); } @@ -3748,7 +3746,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, clear_page_dirty_for_io(p); set_page_writeback(p); ret = submit_extent_page(rw, tree, wbc, p, offset >> 9, - PAGE_CACHE_SIZE, 0, bdev, &epd->bio, + PAGE_SIZE, 0, bdev, &epd->bio, -1, end_bio_extent_buffer_writepage, 0, epd->bio_flags, bio_flags, false); epd->bio_flags = bio_flags; @@ -3760,7 +3758,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb, ret = -EIO; break; } - offset += PAGE_CACHE_SIZE; + offset += PAGE_SIZE; update_nr_written(p, wbc, 1); unlock_page(p); } @@ -3804,8 +3802,8 @@ int btree_write_cache_pages(struct address_space *mapping, index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) @@ -3948,8 +3946,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) @@ -4083,8 +4081,8 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, int ret = 0; struct address_space *mapping = inode->i_mapping; struct page *page; - unsigned long nr_pages = (end - start + PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + unsigned long nr_pages = (end - start + PAGE_SIZE) >> + PAGE_SHIFT; struct extent_page_data epd = { .bio = NULL, @@ -4102,18 +4100,18 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode, }; while (start <= end) { - page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + page = find_get_page(mapping, start >> PAGE_SHIFT); if (clear_page_dirty_for_io(page)) ret = __extent_writepage(page, &wbc_writepages, &epd); else { if (tree->ops && tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, start, - start + PAGE_CACHE_SIZE - 1, + start + PAGE_SIZE - 1, NULL, 1); unlock_page(page); } - page_cache_release(page); - start += PAGE_CACHE_SIZE; + put_page(page); + start += PAGE_SIZE; } flush_epd_write_bio(&epd); @@ -4163,7 +4161,7 @@ int extent_readpages(struct extent_io_tree *tree, list_del(&page->lru); if (add_to_page_cache_lru(page, mapping, page->index, GFP_NOFS)) { - page_cache_release(page); + put_page(page); continue; } @@ -4197,7 +4195,7 @@ int extent_invalidatepage(struct extent_io_tree *tree, { struct extent_state *cached_state = NULL; u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; size_t blocksize = page->mapping->host->i_sb->s_blocksize; start += ALIGN(offset, blocksize); @@ -4223,7 +4221,7 @@ static int try_release_extent_state(struct extent_map_tree *map, struct page *page, gfp_t mask) { u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; int ret = 1; if (test_range_bit(tree, start, end, @@ -4262,7 +4260,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, { struct extent_map *em; u64 start = page_offset(page); - u64 end = start + PAGE_CACHE_SIZE - 1; + u64 end = start + PAGE_SIZE - 1; if 
(gfpflags_allow_blocking(mask) && page->mapping->host->i_size > SZ_16M) { @@ -4587,14 +4585,14 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb) ClearPagePrivate(page); set_page_private(page, 0); /* One for the page private */ - page_cache_release(page); + put_page(page); } if (mapped) spin_unlock(&page->mapping->private_lock); /* One for when we alloced the page */ - page_cache_release(page); + put_page(page); } while (index != 0); } @@ -4779,7 +4777,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, rcu_read_lock(); eb = radix_tree_lookup(&fs_info->buffer_radix, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); if (eb && atomic_inc_not_zero(&eb->refs)) { rcu_read_unlock(); /* @@ -4829,7 +4827,7 @@ again: goto free_eb; spin_lock(&fs_info->buffer_lock); ret = radix_tree_insert(&fs_info->buffer_radix, - start >> PAGE_CACHE_SHIFT, eb); + start >> PAGE_SHIFT, eb); spin_unlock(&fs_info->buffer_lock); radix_tree_preload_end(); if (ret == -EEXIST) { @@ -4862,7 +4860,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, unsigned long len = fs_info->tree_root->nodesize; unsigned long num_pages = num_extent_pages(start, len); unsigned long i; - unsigned long index = start >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; struct extent_buffer *eb; struct extent_buffer *exists = NULL; struct page *p; @@ -4896,7 +4894,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, if (atomic_inc_not_zero(&exists->refs)) { spin_unlock(&mapping->private_lock); unlock_page(p); - page_cache_release(p); + put_page(p); mark_extent_buffer_accessed(exists, p); goto free_eb; } @@ -4908,7 +4906,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, */ ClearPagePrivate(p); WARN_ON(PageDirty(p)); - page_cache_release(p); + put_page(p); } attach_extent_buffer_page(eb, p); spin_unlock(&mapping->private_lock); @@ -4931,7 +4929,7 @@ again: spin_lock(&fs_info->buffer_lock); ret = radix_tree_insert(&fs_info->buffer_radix, - start >> PAGE_CACHE_SHIFT, eb); + start >> PAGE_SHIFT, eb); spin_unlock(&fs_info->buffer_lock); radix_tree_preload_end(); if (ret == -EEXIST) { @@ -4994,7 +4992,7 @@ static int release_extent_buffer(struct extent_buffer *eb) spin_lock(&fs_info->buffer_lock); radix_tree_delete(&fs_info->buffer_radix, - eb->start >> PAGE_CACHE_SHIFT); + eb->start >> PAGE_SHIFT); spin_unlock(&fs_info->buffer_lock); } else { spin_unlock(&eb->refs_lock); @@ -5168,8 +5166,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, if (start) { WARN_ON(start < eb->start); - start_i = (start >> PAGE_CACHE_SHIFT) - - (eb->start >> PAGE_CACHE_SHIFT); + start_i = (start >> PAGE_SHIFT) - + (eb->start >> PAGE_SHIFT); } else { start_i = 0; } @@ -5252,18 +5250,18 @@ void read_extent_buffer(struct extent_buffer *eb, void *dstv, struct page *page; char *kaddr; char *dst = (char *)dstv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; - cur = min(len, (PAGE_CACHE_SIZE - offset)); + cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); memcpy(dst, kaddr + offset, cur); @@ -5283,19 
+5281,19 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv, struct page *page; char *kaddr; char __user *dst = (char __user *)dstv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; - cur = min(len, (PAGE_CACHE_SIZE - offset)); + cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); if (copy_to_user(dst, kaddr + offset, cur)) { ret = -EFAULT; @@ -5316,13 +5314,13 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, unsigned long *map_start, unsigned long *map_len) { - size_t offset = start & (PAGE_CACHE_SIZE - 1); + size_t offset = start & (PAGE_SIZE - 1); char *kaddr; struct page *p; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; unsigned long end_i = (start_offset + start + min_len - 1) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if (i != end_i) return -EINVAL; @@ -5332,7 +5330,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, *map_start = 0; } else { offset = 0; - *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset; + *map_start = ((u64)i << PAGE_SHIFT) - start_offset; } if (start + min_len > eb->len) { @@ -5345,7 +5343,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start, p = eb->pages[i]; kaddr = page_address(p); *map = kaddr + offset; - *map_len = PAGE_CACHE_SIZE - offset; + *map_len = PAGE_SIZE - offset; return 0; } @@ -5358,19 +5356,19 @@ int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv, struct page *page; char *kaddr; char *ptr = (char *)ptrv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; int ret = 0; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; - cur = min(len, (PAGE_CACHE_SIZE - offset)); + cur = min(len, (PAGE_SIZE - offset)); kaddr = page_address(page); ret = memcmp(ptr, kaddr + offset, cur); @@ -5393,19 +5391,19 @@ void write_extent_buffer(struct extent_buffer *eb, const void *srcv, struct page *page; char *kaddr; char *src = (char *)srcv; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; WARN_ON(!PageUptodate(page)); - cur = min(len, PAGE_CACHE_SIZE - offset); + cur = min(len, PAGE_SIZE - offset); kaddr = page_address(page); memcpy(kaddr + offset, 
src, cur); @@ -5423,19 +5421,19 @@ void memset_extent_buffer(struct extent_buffer *eb, char c, size_t offset; struct page *page; char *kaddr; - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT; + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + start) >> PAGE_SHIFT; WARN_ON(start > eb->len); WARN_ON(start + len > eb->start + eb->len); - offset = (start_offset + start) & (PAGE_CACHE_SIZE - 1); + offset = (start_offset + start) & (PAGE_SIZE - 1); while (len > 0) { page = eb->pages[i]; WARN_ON(!PageUptodate(page)); - cur = min(len, PAGE_CACHE_SIZE - offset); + cur = min(len, PAGE_SIZE - offset); kaddr = page_address(page); memset(kaddr + offset, c, cur); @@ -5454,19 +5452,19 @@ void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src, size_t offset; struct page *page; char *kaddr; - size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); - unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; + size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1); + unsigned long i = (start_offset + dst_offset) >> PAGE_SHIFT; WARN_ON(src->len != dst_len); offset = (start_offset + dst_offset) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); while (len > 0) { page = dst->pages[i]; WARN_ON(!PageUptodate(page)); - cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset)); + cur = min(len, (unsigned long)(PAGE_SIZE - offset)); kaddr = page_address(page); read_extent_buffer(src, kaddr + offset, src_offset, cur); @@ -5508,7 +5506,7 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, unsigned long *page_index, size_t *page_offset) { - size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1); + size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1); size_t byte_offset = BIT_BYTE(nr); size_t offset; @@ -5519,8 +5517,8 @@ static inline void eb_bitmap_offset(struct extent_buffer *eb, */ offset = start_offset + start + byte_offset; - *page_index = offset >> PAGE_CACHE_SHIFT; - *page_offset = offset & (PAGE_CACHE_SIZE - 1); + *page_index = offset >> PAGE_SHIFT; + *page_offset = offset & (PAGE_SIZE - 1); } /** @@ -5572,7 +5570,7 @@ void extent_buffer_bitmap_set(struct extent_buffer *eb, unsigned long start, len -= bits_to_set; bits_to_set = BITS_PER_BYTE; mask_to_set = ~0U; - if (++offset >= PAGE_CACHE_SIZE && len > 0) { + if (++offset >= PAGE_SIZE && len > 0) { offset = 0; page = eb->pages[++i]; WARN_ON(!PageUptodate(page)); @@ -5614,7 +5612,7 @@ void extent_buffer_bitmap_clear(struct extent_buffer *eb, unsigned long start, len -= bits_to_clear; bits_to_clear = BITS_PER_BYTE; mask_to_clear = ~0U; - if (++offset >= PAGE_CACHE_SIZE && len > 0) { + if (++offset >= PAGE_SIZE && len > 0) { offset = 0; page = eb->pages[++i]; WARN_ON(!PageUptodate(page)); @@ -5661,7 +5659,7 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, size_t cur; size_t dst_off_in_page; size_t src_off_in_page; - size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1); unsigned long dst_i; unsigned long src_i; @@ -5680,17 +5678,17 @@ void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, while (len > 0) { dst_off_in_page = (start_offset + dst_offset) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); src_off_in_page = (start_offset + src_offset) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); - dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT; - src_i = (start_offset 
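
The extent-buffer string helpers above (read_extent_buffer(), write_extent_buffer(), memcmp_extent_buffer(), memset_extent_buffer(), and the copy/move variants) all locate bytes the same way: an extent buffer may begin part-way into its first page, so a logical offset is biased by that in-page start and then split into a page index and an in-page offset, clamped at each page edge. Restated in the new units (variable names as in the diff):

	size_t start_offset = eb->start & ((u64)PAGE_SIZE - 1);	/* eb may start mid-page */
	unsigned long i = (start_offset + start) >> PAGE_SHIFT;	/* index into eb->pages[] */
	size_t offset = (start_offset + start) & (PAGE_SIZE - 1);	/* byte within that page */
	size_t cur = min(len, PAGE_SIZE - offset);			/* run until the page edge */
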
+ src_offset) >> PAGE_CACHE_SHIFT; + dst_i = (start_offset + dst_offset) >> PAGE_SHIFT; + src_i = (start_offset + src_offset) >> PAGE_SHIFT; - cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - + cur = min(len, (unsigned long)(PAGE_SIZE - src_off_in_page)); cur = min_t(unsigned long, cur, - (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page)); + (unsigned long)(PAGE_SIZE - dst_off_in_page)); copy_pages(dst->pages[dst_i], dst->pages[src_i], dst_off_in_page, src_off_in_page, cur); @@ -5709,7 +5707,7 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, size_t src_off_in_page; unsigned long dst_end = dst_offset + len - 1; unsigned long src_end = src_offset + len - 1; - size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1); + size_t start_offset = dst->start & ((u64)PAGE_SIZE - 1); unsigned long dst_i; unsigned long src_i; @@ -5728,13 +5726,13 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, return; } while (len > 0) { - dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT; - src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT; + dst_i = (start_offset + dst_end) >> PAGE_SHIFT; + src_i = (start_offset + src_end) >> PAGE_SHIFT; dst_off_in_page = (start_offset + dst_end) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); src_off_in_page = (start_offset + src_end) & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); cur = min_t(unsigned long, len, src_off_in_page + 1); cur = min(cur, dst_off_in_page + 1); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 5dbf92e68fbd..b5e0ade90e88 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -120,7 +120,7 @@ struct extent_state { }; #define INLINE_EXTENT_BUFFER_PAGES 16 -#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_CACHE_SIZE) +#define MAX_INLINE_EXTENT_BUFFER_SIZE (INLINE_EXTENT_BUFFER_PAGES * PAGE_SIZE) struct extent_buffer { u64 start; unsigned long len; @@ -365,8 +365,8 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb); static inline unsigned long num_extent_pages(u64 start, u64 len) { - return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - - (start >> PAGE_CACHE_SHIFT); + return ((start + len + PAGE_SIZE - 1) >> PAGE_SHIFT) - + (start >> PAGE_SHIFT); } static inline void extent_buffer_get(struct extent_buffer *eb) diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index b5baf5bdc8e1..7a7d6e253cfc 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -32,7 +32,7 @@ size) - 1)) #define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \ - PAGE_CACHE_SIZE)) + PAGE_SIZE)) #define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \ sizeof(struct btrfs_ordered_sum)) / \ @@ -203,7 +203,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, csum = (u8 *)dst; } - if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8) + if (bio->bi_iter.bi_size > PAGE_SIZE * 8) path->reada = READA_FORWARD; WARN_ON(bio->bi_vcnt <= 0); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 15a09cb156ce..8d7b5a45c005 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -414,11 +414,11 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes, size_t copied = 0; size_t total_copied = 0; int pg = 0; - int offset = pos & (PAGE_CACHE_SIZE - 1); + int offset = pos & (PAGE_SIZE - 1); while (write_bytes > 0) { size_t count = min_t(size_t, - PAGE_CACHE_SIZE - offset, write_bytes); + PAGE_SIZE - offset, write_bytes); struct page *page = prepared_pages[pg]; /* * Copy data from userspace to the current page @@ -448,7 
+448,7 @@ static noinline int btrfs_copy_from_user(loff_t pos, size_t write_bytes, if (unlikely(copied == 0)) break; - if (copied < PAGE_CACHE_SIZE - offset) { + if (copied < PAGE_SIZE - offset) { offset += copied; } else { pg++; @@ -473,7 +473,7 @@ static void btrfs_drop_pages(struct page **pages, size_t num_pages) */ ClearPageChecked(pages[i]); unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } } @@ -1297,7 +1297,7 @@ static int prepare_uptodate_page(struct inode *inode, { int ret = 0; - if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && + if (((pos & (PAGE_SIZE - 1)) || force_uptodate) && !PageUptodate(page)) { ret = btrfs_readpage(NULL, page); if (ret) @@ -1323,7 +1323,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages, size_t write_bytes, bool force_uptodate) { int i; - unsigned long index = pos >> PAGE_CACHE_SHIFT; + unsigned long index = pos >> PAGE_SHIFT; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); int err = 0; int faili; @@ -1345,7 +1345,7 @@ again: err = prepare_uptodate_page(inode, pages[i], pos + write_bytes, false); if (err) { - page_cache_release(pages[i]); + put_page(pages[i]); if (err == -EAGAIN) { err = 0; goto again; @@ -1360,7 +1360,7 @@ again: fail: while (faili >= 0) { unlock_page(pages[faili]); - page_cache_release(pages[faili]); + put_page(pages[faili]); faili--; } return err; @@ -1408,7 +1408,7 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); @@ -1497,8 +1497,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, bool force_page_uptodate = false; bool need_unlock; - nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_CACHE_SIZE), - PAGE_CACHE_SIZE / (sizeof(struct page *))); + nrptrs = min(DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE), + PAGE_SIZE / (sizeof(struct page *))); nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied); nrptrs = max(nrptrs, 8); pages = kmalloc_array(nrptrs, sizeof(struct page *), GFP_KERNEL); @@ -1506,13 +1506,13 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, return -ENOMEM; while (iov_iter_count(i) > 0) { - size_t offset = pos & (PAGE_CACHE_SIZE - 1); + size_t offset = pos & (PAGE_SIZE - 1); size_t sector_offset; size_t write_bytes = min(iov_iter_count(i), - nrptrs * (size_t)PAGE_CACHE_SIZE - + nrptrs * (size_t)PAGE_SIZE - offset); size_t num_pages = DIV_ROUND_UP(write_bytes + offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); size_t reserve_bytes; size_t dirty_pages; size_t copied; @@ -1547,7 +1547,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, * write_bytes, so scale down. 
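 *
 * Editorial sketch, not part of the patch: span_num_pages() below is a
 * hypothetical helper that only illustrates the arithmetic left behind
 * once the PAGE_CACHE_SIZE alias is gone.
 */
static inline size_t span_num_pages(size_t offset, size_t write_bytes)
{
	/* hypothetical helper: a 5000-byte write starting 3000 bytes into
	 * its first 4 KiB page gives DIV_ROUND_UP(8000, 4096) == 2 pages */
	return DIV_ROUND_UP(write_bytes + offset, PAGE_SIZE);
}
/* The hunk below performs exactly this calculation: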
*/ num_pages = DIV_ROUND_UP(write_bytes + offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); reserve_bytes = round_up(write_bytes + sector_offset, root->sectorsize); goto reserve_metadata; @@ -1609,7 +1609,7 @@ again: } else { force_page_uptodate = false; dirty_pages = DIV_ROUND_UP(copied + offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); } /* @@ -1641,7 +1641,7 @@ again: u64 __pos; __pos = round_down(pos, root->sectorsize) + - (dirty_pages << PAGE_CACHE_SHIFT); + (dirty_pages << PAGE_SHIFT); btrfs_delalloc_release_space(inode, __pos, release_bytes); } @@ -1682,7 +1682,7 @@ again: cond_resched(); balance_dirty_pages_ratelimited(inode->i_mapping); - if (dirty_pages < (root->nodesize >> PAGE_CACHE_SHIFT) + 1) + if (dirty_pages < (root->nodesize >> PAGE_SHIFT) + 1) btrfs_btree_balance_dirty(root); pos += copied; @@ -1738,8 +1738,8 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb, goto out; written += written_buffered; iocb->ki_pos = pos + written_buffered; - invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + invalidate_mapping_pages(file->f_mapping, pos >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); out: return written ? written : err; } @@ -1905,7 +1905,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end) */ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_trans_handle *trans; @@ -2682,9 +2682,12 @@ static long btrfs_fallocate(struct file *file, int mode, return ret; inode_lock(inode); - ret = inode_newsize_ok(inode, alloc_end); - if (ret) - goto out; + + if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size) { + ret = inode_newsize_ok(inode, offset + len); + if (ret) + goto out; + } /* * TODO: Move these two operations after we have checked diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 8f835bfa1bdd..5e6062c26129 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -29,7 +29,7 @@ #include "inode-map.h" #include "volumes.h" -#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define BITS_PER_BITMAP (PAGE_SIZE * 8) #define MAX_CACHE_BYTES_PER_GIG SZ_32K struct btrfs_trim_range { @@ -295,7 +295,7 @@ static int readahead_cache(struct inode *inode) return -ENOMEM; file_ra_state_init(ra, inode->i_mapping); - last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; + last_index = (i_size_read(inode) - 1) >> PAGE_SHIFT; page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index); @@ -310,14 +310,14 @@ static int io_ctl_init(struct btrfs_io_ctl *io_ctl, struct inode *inode, int num_pages; int check_crcs = 0; - num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); + num_pages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID) check_crcs = 1; /* Make sure we can fit our crcs into the first page */ if (write && check_crcs && - (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) + (num_pages * sizeof(u32)) >= PAGE_SIZE) return -ENOSPC; memset(io_ctl, 0, sizeof(struct btrfs_io_ctl)); @@ -354,9 +354,9 @@ static void io_ctl_map_page(struct btrfs_io_ctl *io_ctl, int clear) io_ctl->page = io_ctl->pages[io_ctl->index++]; io_ctl->cur = page_address(io_ctl->page); io_ctl->orig = io_ctl->cur; - io_ctl->size = PAGE_CACHE_SIZE; + io_ctl->size = PAGE_SIZE; if (clear) - memset(io_ctl->cur, 0, PAGE_CACHE_SIZE); + 
memset(io_ctl->cur, 0, PAGE_SIZE); } static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) @@ -369,7 +369,7 @@ static void io_ctl_drop_pages(struct btrfs_io_ctl *io_ctl) if (io_ctl->pages[i]) { ClearPageChecked(io_ctl->pages[i]); unlock_page(io_ctl->pages[i]); - page_cache_release(io_ctl->pages[i]); + put_page(io_ctl->pages[i]); } } } @@ -475,7 +475,7 @@ static void io_ctl_set_crc(struct btrfs_io_ctl *io_ctl, int index) offset = sizeof(u32) * io_ctl->num_pages; crc = btrfs_csum_data(io_ctl->orig + offset, crc, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); btrfs_csum_final(crc, (char *)&crc); io_ctl_unmap_page(io_ctl); tmp = page_address(io_ctl->pages[0]); @@ -503,7 +503,7 @@ static int io_ctl_check_crc(struct btrfs_io_ctl *io_ctl, int index) io_ctl_map_page(io_ctl, 0); crc = btrfs_csum_data(io_ctl->orig + offset, crc, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); btrfs_csum_final(crc, (char *)&crc); if (val != crc) { btrfs_err_rl(io_ctl->root->fs_info, @@ -561,7 +561,7 @@ static int io_ctl_add_bitmap(struct btrfs_io_ctl *io_ctl, void *bitmap) io_ctl_map_page(io_ctl, 0); } - memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE); + memcpy(io_ctl->cur, bitmap, PAGE_SIZE); io_ctl_set_crc(io_ctl, io_ctl->index - 1); if (io_ctl->index < io_ctl->num_pages) io_ctl_map_page(io_ctl, 0); @@ -621,7 +621,7 @@ static int io_ctl_read_bitmap(struct btrfs_io_ctl *io_ctl, if (ret) return ret; - memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE); + memcpy(entry->bitmap, io_ctl->cur, PAGE_SIZE); io_ctl_unmap_page(io_ctl); return 0; @@ -775,7 +775,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, } else { ASSERT(num_bitmaps); num_bitmaps--; - e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); if (!e->bitmap) { kmem_cache_free( btrfs_free_space_cachep, e); @@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as * we add more bitmaps. 
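 *
 * (Editorial worked example, not from the patch: each bitmap occupies
 * exactly one page, so with 4 KiB pages a bitmap covers
 * BITS_PER_BITMAP = 4096 * 8 = 32768 blocks, and the "+ 1" below
 * reserves room for the bitmap about to be added.)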
*/ - bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; + bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE; if (bitmap_bytes >= max_bytes) { ctl->extents_thresh = 0; @@ -2111,7 +2111,7 @@ new_bitmap: } /* allocate the bitmap */ - info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS); spin_lock(&ctl->tree_lock); if (!info->bitmap) { ret = -ENOMEM; @@ -3580,7 +3580,7 @@ again: } if (!map) { - map = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); + map = kzalloc(PAGE_SIZE, GFP_NOFS); if (!map) { kmem_cache_free(btrfs_free_space_cachep, info); return -ENOMEM; diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 1f0ec19b23f6..70107f7c9307 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -283,7 +283,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root) } #define INIT_THRESHOLD ((SZ_32K / 2) / sizeof(struct btrfs_free_space)) -#define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define INODES_PER_BITMAP (PAGE_SIZE * 8) /* * The goal is to keep the memory used by the free_ino tree won't @@ -317,7 +317,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) } ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * - PAGE_CACHE_SIZE / sizeof(*info); + PAGE_SIZE / sizeof(*info); } /* @@ -481,12 +481,12 @@ again: spin_lock(&ctl->tree_lock); prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; - prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); - prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; + prealloc = ALIGN(prealloc, PAGE_SIZE); + prealloc += ctl->total_bitmaps * PAGE_SIZE; spin_unlock(&ctl->tree_lock); /* Just to make sure we have enough space */ - prealloc += 8 * PAGE_CACHE_SIZE; + prealloc += 8 * PAGE_SIZE; ret = btrfs_delalloc_reserve_space(inode, 0, prealloc); if (ret) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 41a5688ffdfe..2aaba58b4856 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -194,7 +194,7 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans, while (compressed_size > 0) { cpage = compressed_pages[i]; cur_size = min_t(unsigned long, compressed_size, - PAGE_CACHE_SIZE); + PAGE_SIZE); kaddr = kmap_atomic(cpage); write_extent_buffer(leaf, kaddr, ptr, cur_size); @@ -208,13 +208,13 @@ static int insert_inline_extent(struct btrfs_trans_handle *trans, compress_type); } else { page = find_get_page(inode->i_mapping, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); btrfs_set_file_extent_compression(leaf, ei, 0); kaddr = kmap_atomic(page); - offset = start & (PAGE_CACHE_SIZE - 1); + offset = start & (PAGE_SIZE - 1); write_extent_buffer(leaf, kaddr + offset, ptr, size); kunmap_atomic(kaddr); - page_cache_release(page); + put_page(page); } btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -322,7 +322,7 @@ out: * And at reserve time, it's always aligned to page size, so * just free one page here. 
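 *
 * (Editorial note, not from the patch: qgroup data reservations are
 * rounded up to page granularity and inline data by definition fits in
 * one page, which is why the error path releases exactly PAGE_SIZE.)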
*/ - btrfs_qgroup_free_data(inode, 0, PAGE_CACHE_SIZE); + btrfs_qgroup_free_data(inode, 0, PAGE_SIZE); btrfs_free_path(path); btrfs_end_transaction(trans, root); return ret; @@ -435,8 +435,8 @@ static noinline void compress_file_range(struct inode *inode, actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; - nr_pages = (end >> PAGE_CACHE_SHIFT) - (start >> PAGE_CACHE_SHIFT) + 1; - nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_CACHE_SIZE); + nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; + nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE); /* * we don't want to send crud past the end of i_size through @@ -514,7 +514,7 @@ again: if (!ret) { unsigned long offset = total_compressed & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); struct page *page = pages[nr_pages_ret - 1]; char *kaddr; @@ -524,7 +524,7 @@ again: if (offset) { kaddr = kmap_atomic(page); memset(kaddr + offset, 0, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); kunmap_atomic(kaddr); } will_compress = 1; @@ -580,7 +580,7 @@ cont: * one last check to make sure the compression is really a * win, compare the page count read with the blocks on disk */ - total_in = ALIGN(total_in, PAGE_CACHE_SIZE); + total_in = ALIGN(total_in, PAGE_SIZE); if (total_compressed >= total_in) { will_compress = 0; } else { @@ -594,7 +594,7 @@ cont: */ for (i = 0; i < nr_pages_ret; i++) { WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); + put_page(pages[i]); } kfree(pages); pages = NULL; @@ -650,7 +650,7 @@ cleanup_and_bail_uncompressed: free_pages_out: for (i = 0; i < nr_pages_ret; i++) { WARN_ON(pages[i]->mapping); - page_cache_release(pages[i]); + put_page(pages[i]); } kfree(pages); } @@ -664,7 +664,7 @@ static void free_async_extent_pages(struct async_extent *async_extent) for (i = 0; i < async_extent->nr_pages; i++) { WARN_ON(async_extent->pages[i]->mapping); - page_cache_release(async_extent->pages[i]); + put_page(async_extent->pages[i]); } kfree(async_extent->pages); async_extent->nr_pages = 0; @@ -966,7 +966,7 @@ static noinline int cow_file_range(struct inode *inode, PAGE_END_WRITEBACK); *nr_written = *nr_written + - (end - start + PAGE_CACHE_SIZE) / PAGE_CACHE_SIZE; + (end - start + PAGE_SIZE) / PAGE_SIZE; *page_started = 1; goto out; } else if (ret < 0) { @@ -1106,8 +1106,8 @@ static noinline void async_cow_submit(struct btrfs_work *work) async_cow = container_of(work, struct async_cow, work); root = async_cow->root; - nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >> + PAGE_SHIFT; /* * atomic_sub_return implies a barrier for waitqueue_active @@ -1164,8 +1164,8 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, async_cow_start, async_cow_submit, async_cow_free); - nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> - PAGE_CACHE_SHIFT; + nr_pages = (cur_end - start + PAGE_SIZE) >> + PAGE_SHIFT; atomic_add(nr_pages, &root->fs_info->async_delalloc_pages); btrfs_queue_work(root->fs_info->delalloc_workers, @@ -1960,7 +1960,7 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans, int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, struct extent_state **cached_state) { - WARN_ON((end & (PAGE_CACHE_SIZE - 1)) == 0); + WARN_ON((end & (PAGE_SIZE - 1)) == 0); return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end, cached_state, GFP_NOFS); } @@ -1993,7 +1993,7 @@ again: inode = page->mapping->host; page_start = 
page_offset(page); - page_end = page_offset(page) + PAGE_CACHE_SIZE - 1; + page_end = page_offset(page) + PAGE_SIZE - 1; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state); @@ -2003,7 +2003,7 @@ again: goto out; ordered = btrfs_lookup_ordered_range(inode, page_start, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ordered) { unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end, &cached_state, GFP_NOFS); @@ -2014,7 +2014,7 @@ again: } ret = btrfs_delalloc_reserve_space(inode, page_start, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ret) { mapping_set_error(page->mapping, ret); end_extent_writepage(page, ret, page_start, page_end); @@ -2030,7 +2030,7 @@ out: &cached_state, GFP_NOFS); out_page: unlock_page(page); - page_cache_release(page); + put_page(page); kfree(fixup); } @@ -2063,7 +2063,7 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) return -EAGAIN; SetPageChecked(page); - page_cache_get(page); + get_page(page); btrfs_init_work(&fixup->work, btrfs_fixup_helper, btrfs_writepage_fixup_worker, NULL, NULL); fixup->page = page; @@ -4247,7 +4247,7 @@ static int truncate_inline_extent(struct inode *inode, if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) { loff_t offset = new_size; - loff_t page_end = ALIGN(offset, PAGE_CACHE_SIZE); + loff_t page_end = ALIGN(offset, PAGE_SIZE); /* * Zero out the remaining of the last page of our inline extent, @@ -4633,7 +4633,7 @@ int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len, struct extent_state *cached_state = NULL; char *kaddr; u32 blocksize = root->sectorsize; - pgoff_t index = from >> PAGE_CACHE_SHIFT; + pgoff_t index = from >> PAGE_SHIFT; unsigned offset = from & (blocksize - 1); struct page *page; gfp_t mask = btrfs_alloc_write_mask(mapping); @@ -4668,7 +4668,7 @@ again: lock_page(page); if (page->mapping != mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } if (!PageUptodate(page)) { @@ -4686,7 +4686,7 @@ again: unlock_extent_cached(io_tree, block_start, block_end, &cached_state, GFP_NOFS); unlock_page(page); - page_cache_release(page); + put_page(page); btrfs_start_ordered_extent(inode, ordered, 1); btrfs_put_ordered_extent(ordered); goto again; @@ -4728,7 +4728,7 @@ out_unlock: btrfs_delalloc_release_space(inode, block_start, blocksize); unlock_page(page); - page_cache_release(page); + put_page(page); out: return ret; } @@ -6717,7 +6717,7 @@ static noinline int uncompress_inline(struct btrfs_path *path, read_extent_buffer(leaf, tmp, ptr, inline_size); - max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size); + max_size = min_t(unsigned long, PAGE_SIZE, max_size); ret = btrfs_decompress(compress_type, tmp, page, extent_offset, inline_size, max_size); kfree(tmp); @@ -6879,8 +6879,8 @@ next: size = btrfs_file_extent_inline_len(leaf, path->slots[0], item); extent_offset = page_offset(page) + pg_offset - extent_start; - copy_size = min_t(u64, PAGE_CACHE_SIZE - pg_offset, - size - extent_offset); + copy_size = min_t(u64, PAGE_SIZE - pg_offset, + size - extent_offset); em->start = extent_start + extent_offset; em->len = ALIGN(copy_size, root->sectorsize); em->orig_block_len = em->len; @@ -6899,9 +6899,9 @@ next: map = kmap(page); read_extent_buffer(leaf, map + pg_offset, ptr, copy_size); - if (pg_offset + copy_size < PAGE_CACHE_SIZE) { + if (pg_offset + copy_size < PAGE_SIZE) { memset(map + pg_offset + copy_size, 0, - PAGE_CACHE_SIZE - pg_offset - + PAGE_SIZE - pg_offset - copy_size); } kunmap(page); @@ -7336,12 
+7336,12 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) int start_idx; int end_idx; - start_idx = start >> PAGE_CACHE_SHIFT; + start_idx = start >> PAGE_SHIFT; /* * end is the last byte in the last page. end == start is legal */ - end_idx = end >> PAGE_CACHE_SHIFT; + end_idx = end >> PAGE_SHIFT; rcu_read_lock(); @@ -7382,7 +7382,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) * include/linux/pagemap.h for details. */ if (unlikely(page != *pagep)) { - page_cache_release(page); + put_page(page); page = NULL; } } @@ -7390,7 +7390,7 @@ bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end) if (page) { if (page->index <= end_idx) found = true; - page_cache_release(page); + put_page(page); } rcu_read_unlock(); @@ -8719,7 +8719,7 @@ static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags) if (ret == 1) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } return ret; } @@ -8739,7 +8739,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, struct btrfs_ordered_extent *ordered; struct extent_state *cached_state = NULL; u64 page_start = page_offset(page); - u64 page_end = page_start + PAGE_CACHE_SIZE - 1; + u64 page_end = page_start + PAGE_SIZE - 1; u64 start; u64 end; int inode_evicting = inode->i_state & I_FREEING; @@ -8822,7 +8822,7 @@ again: * 2) Not written to disk * This means the reserved space should be freed here. */ - btrfs_qgroup_free_data(inode, page_start, PAGE_CACHE_SIZE); + btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE); if (!inode_evicting) { clear_extent_bit(tree, page_start, page_end, EXTENT_LOCKED | EXTENT_DIRTY | @@ -8837,7 +8837,7 @@ again: if (PagePrivate(page)) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } } @@ -8874,11 +8874,11 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) u64 page_end; u64 end; - reserved_space = PAGE_CACHE_SIZE; + reserved_space = PAGE_SIZE; sb_start_pagefault(inode->i_sb); page_start = page_offset(page); - page_end = page_start + PAGE_CACHE_SIZE - 1; + page_end = page_start + PAGE_SIZE - 1; end = page_end; /* @@ -8934,15 +8934,15 @@ again: goto again; } - if (page->index == ((size - 1) >> PAGE_CACHE_SHIFT)) { + if (page->index == ((size - 1) >> PAGE_SHIFT)) { reserved_space = round_up(size - page_start, root->sectorsize); - if (reserved_space < PAGE_CACHE_SIZE) { + if (reserved_space < PAGE_SIZE) { end = page_start + reserved_space - 1; spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, page_start, - PAGE_CACHE_SIZE - reserved_space); + PAGE_SIZE - reserved_space); } } @@ -8969,14 +8969,14 @@ again: ret = 0; /* page is wholly or partially inside EOF */ - if (page_start + PAGE_CACHE_SIZE > size) - zero_start = size & ~PAGE_CACHE_MASK; + if (page_start + PAGE_SIZE > size) + zero_start = size & ~PAGE_MASK; else - zero_start = PAGE_CACHE_SIZE; + zero_start = PAGE_SIZE; - if (zero_start != PAGE_CACHE_SIZE) { + if (zero_start != PAGE_SIZE) { kaddr = kmap(page); - memset(kaddr + zero_start, 0, PAGE_CACHE_SIZE - zero_start); + memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start); flush_dcache_page(page); kunmap(page); } diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 053e677839fe..5a23806ae418 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -898,7 +898,7 @@ static int 
check_defrag_in_cache(struct inode *inode, u64 offset, u32 thresh) u64 end; read_lock(&em_tree->lock); - em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); + em = lookup_extent_mapping(em_tree, offset, PAGE_SIZE); read_unlock(&em_tree->lock); if (em) { @@ -988,7 +988,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; struct extent_map *em; - u64 len = PAGE_CACHE_SIZE; + u64 len = PAGE_SIZE; /* * hopefully we have this extent in the tree already, try without @@ -1124,15 +1124,15 @@ static int cluster_pages_for_defrag(struct inode *inode, struct extent_io_tree *tree; gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping); - file_end = (isize - 1) >> PAGE_CACHE_SHIFT; + file_end = (isize - 1) >> PAGE_SHIFT; if (!isize || start_index > file_end) return 0; page_cnt = min_t(u64, (u64)num_pages, (u64)file_end - start_index + 1); ret = btrfs_delalloc_reserve_space(inode, - start_index << PAGE_CACHE_SHIFT, - page_cnt << PAGE_CACHE_SHIFT); + start_index << PAGE_SHIFT, + page_cnt << PAGE_SHIFT); if (ret) return ret; i_done = 0; @@ -1148,7 +1148,7 @@ again: break; page_start = page_offset(page); - page_end = page_start + PAGE_CACHE_SIZE - 1; + page_end = page_start + PAGE_SIZE - 1; while (1) { lock_extent_bits(tree, page_start, page_end, &cached_state); @@ -1169,7 +1169,7 @@ again: */ if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } } @@ -1179,7 +1179,7 @@ again: lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); ret = -EIO; break; } @@ -1187,7 +1187,7 @@ again: if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } @@ -1208,7 +1208,7 @@ again: wait_on_page_writeback(pages[i]); page_start = page_offset(pages[0]); - page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; + page_end = page_offset(pages[i_done - 1]) + PAGE_SIZE; lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end - 1, &cached_state); @@ -1222,8 +1222,8 @@ again: BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); btrfs_delalloc_release_space(inode, - start_index << PAGE_CACHE_SHIFT, - (page_cnt - i_done) << PAGE_CACHE_SHIFT); + start_index << PAGE_SHIFT, + (page_cnt - i_done) << PAGE_SHIFT); } @@ -1240,17 +1240,17 @@ again: set_page_extent_mapped(pages[i]); set_page_dirty(pages[i]); unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } return i_done; out: for (i = 0; i < i_done; i++) { unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } btrfs_delalloc_release_space(inode, - start_index << PAGE_CACHE_SHIFT, - page_cnt << PAGE_CACHE_SHIFT); + start_index << PAGE_SHIFT, + page_cnt << PAGE_SHIFT); return ret; } @@ -1273,7 +1273,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, int defrag_count = 0; int compress_type = BTRFS_COMPRESS_ZLIB; u32 extent_thresh = range->extent_thresh; - unsigned long max_cluster = SZ_256K >> PAGE_CACHE_SHIFT; + unsigned long max_cluster = SZ_256K >> PAGE_SHIFT; unsigned long cluster = max_cluster; u64 new_align = ~((u64)SZ_128K - 1); struct page **pages = NULL; @@ -1317,9 +1317,9 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, /* find the last page to defrag */ if (range->start + range->len > range->start) { last_index = min_t(u64, 
isize - 1, - range->start + range->len - 1) >> PAGE_CACHE_SHIFT; + range->start + range->len - 1) >> PAGE_SHIFT; } else { - last_index = (isize - 1) >> PAGE_CACHE_SHIFT; + last_index = (isize - 1) >> PAGE_SHIFT; } if (newer_than) { @@ -1331,11 +1331,11 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, * we always align our defrag to help keep * the extents in the file evenly spaced */ - i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + i = (newer_off & new_align) >> PAGE_SHIFT; } else goto out_ra; } else { - i = range->start >> PAGE_CACHE_SHIFT; + i = range->start >> PAGE_SHIFT; } if (!max_to_defrag) max_to_defrag = last_index - i + 1; @@ -1348,7 +1348,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, inode->i_mapping->writeback_index = i; while (i <= last_index && defrag_count < max_to_defrag && - (i < DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE))) { + (i < DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE))) { /* * make sure we stop running if someone unmounts * the FS @@ -1362,7 +1362,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, break; } - if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, + if (!should_defrag_range(inode, (u64)i << PAGE_SHIFT, extent_thresh, &last_len, &skip, &defrag_end, range->flags & BTRFS_DEFRAG_RANGE_COMPRESS)) { @@ -1371,14 +1371,14 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, * the should_defrag function tells us how much to skip * bump our counter by the suggested amount */ - next = DIV_ROUND_UP(skip, PAGE_CACHE_SIZE); + next = DIV_ROUND_UP(skip, PAGE_SIZE); i = max(i + 1, next); continue; } if (!newer_than) { - cluster = (PAGE_CACHE_ALIGN(defrag_end) >> - PAGE_CACHE_SHIFT) - i; + cluster = (PAGE_ALIGN(defrag_end) >> + PAGE_SHIFT) - i; cluster = min(cluster, max_cluster); } else { cluster = max_cluster; @@ -1412,20 +1412,20 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, i += ret; newer_off = max(newer_off + 1, - (u64)i << PAGE_CACHE_SHIFT); + (u64)i << PAGE_SHIFT); ret = find_new_extents(root, inode, newer_than, &newer_off, SZ_64K); if (!ret) { range->start = newer_off; - i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; + i = (newer_off & new_align) >> PAGE_SHIFT; } else { break; } } else { if (ret > 0) { i += ret; - last_len += ret << PAGE_CACHE_SHIFT; + last_len += ret << PAGE_SHIFT; } else { i++; last_len = 0; @@ -1654,7 +1654,7 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file, src_inode = file_inode(src.file); if (src_inode->i_sb != file_inode(file)->i_sb) { - btrfs_info(BTRFS_I(src_inode)->root->fs_info, + btrfs_info(BTRFS_I(file_inode(file))->root->fs_info, "Snapshot src from another FS"); ret = -EXDEV; } else if (!inode_owner_or_capable(src_inode)) { @@ -1722,7 +1722,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, if (vol_args->flags & BTRFS_SUBVOL_RDONLY) readonly = true; if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { - if (vol_args->size > PAGE_CACHE_SIZE) { + if (vol_args->size > PAGE_SIZE) { ret = -EINVAL; goto free_args; } @@ -2806,12 +2806,12 @@ static struct page *extent_same_get_page(struct inode *inode, pgoff_t index) lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); return ERR_PTR(-EIO); } if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); return ERR_PTR(-EAGAIN); } } @@ -2823,7 +2823,7 @@ static int gather_extent_pages(struct inode *inode, struct page **pages, int num_pages, u64 
off) { int i; - pgoff_t index = off >> PAGE_CACHE_SHIFT; + pgoff_t index = off >> PAGE_SHIFT; for (i = 0; i < num_pages; i++) { again: @@ -2932,12 +2932,12 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp) pg = cmp->src_pages[i]; if (pg) { unlock_page(pg); - page_cache_release(pg); + put_page(pg); } pg = cmp->dst_pages[i]; if (pg) { unlock_page(pg); - page_cache_release(pg); + put_page(pg); } } kfree(cmp->src_pages); @@ -2949,7 +2949,7 @@ static int btrfs_cmp_data_prepare(struct inode *src, u64 loff, u64 len, struct cmp_pages *cmp) { int ret; - int num_pages = PAGE_CACHE_ALIGN(len) >> PAGE_CACHE_SHIFT; + int num_pages = PAGE_ALIGN(len) >> PAGE_SHIFT; struct page **src_pgarr, **dst_pgarr; /* @@ -2987,12 +2987,12 @@ static int btrfs_cmp_data(struct inode *src, u64 loff, struct inode *dst, int ret = 0; int i; struct page *src_page, *dst_page; - unsigned int cmp_len = PAGE_CACHE_SIZE; + unsigned int cmp_len = PAGE_SIZE; void *addr, *dst_addr; i = 0; while (len) { - if (len < PAGE_CACHE_SIZE) + if (len < PAGE_SIZE) cmp_len = len; BUG_ON(i >= cmp->num_pages); @@ -3191,7 +3191,7 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, if (olen > BTRFS_MAX_DEDUPE_LEN) olen = BTRFS_MAX_DEDUPE_LEN; - if (WARN_ON_ONCE(bs < PAGE_CACHE_SIZE)) { + if (WARN_ON_ONCE(bs < PAGE_SIZE)) { /* * Btrfs does not support blocksize < page_size. As a * result, btrfs_cmp_data() won't correctly handle @@ -3891,8 +3891,8 @@ static noinline int btrfs_clone_files(struct file *file, struct file *file_src, * data immediately and not the previous data. */ truncate_inode_pages_range(&inode->i_data, - round_down(destoff, PAGE_CACHE_SIZE), - round_up(destoff + len, PAGE_CACHE_SIZE) - 1); + round_down(destoff, PAGE_SIZE), + round_up(destoff + len, PAGE_SIZE) - 1); out_unlock: if (!same_inode) btrfs_double_inode_unlock(src, inode); @@ -4124,7 +4124,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) /* we generally have at most 6 or so space infos, one for each raid * level. 
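 * (Editorial arithmetic, not from the patch: each entry is a
 * struct btrfs_ioctl_space_info of three u64 fields, 24 bytes, so a
 * single 4 KiB page holds roughly 170 of them.)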
So, a whole page should be more than enough for everyone */ - if (alloc_size > PAGE_CACHE_SIZE) + if (alloc_size > PAGE_SIZE) return -ENOMEM; space_args.total_spaces = 0; diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index a2f051347731..1adfbe7be6b8 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -55,8 +55,8 @@ static struct list_head *lzo_alloc_workspace(void) return ERR_PTR(-ENOMEM); workspace->mem = vmalloc(LZO1X_MEM_COMPRESS); - workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); - workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_CACHE_SIZE)); + workspace->buf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); + workspace->cbuf = vmalloc(lzo1x_worst_compress(PAGE_SIZE)); if (!workspace->mem || !workspace->buf || !workspace->cbuf) goto fail; @@ -116,7 +116,7 @@ static int lzo_compress_pages(struct list_head *ws, *total_out = 0; *total_in = 0; - in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + in_page = find_get_page(mapping, start >> PAGE_SHIFT); data_in = kmap(in_page); /* @@ -133,10 +133,10 @@ static int lzo_compress_pages(struct list_head *ws, tot_out = LZO_LEN; pages[0] = out_page; nr_pages = 1; - pg_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + pg_bytes_left = PAGE_SIZE - LZO_LEN; /* compress at most one page of data each time */ - in_len = min(len, PAGE_CACHE_SIZE); + in_len = min(len, PAGE_SIZE); while (tot_in < len) { ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, &out_len, workspace->mem); @@ -201,7 +201,7 @@ static int lzo_compress_pages(struct list_head *ws, cpage_out = kmap(out_page); pages[nr_pages++] = out_page; - pg_bytes_left = PAGE_CACHE_SIZE; + pg_bytes_left = PAGE_SIZE; out_offset = 0; } } @@ -221,12 +221,12 @@ static int lzo_compress_pages(struct list_head *ws, bytes_left = len - tot_in; kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); - start += PAGE_CACHE_SIZE; - in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + start += PAGE_SIZE; + in_page = find_get_page(mapping, start >> PAGE_SHIFT); data_in = kmap(in_page); - in_len = min(bytes_left, PAGE_CACHE_SIZE); + in_len = min(bytes_left, PAGE_SIZE); } if (tot_out > tot_in) @@ -248,7 +248,7 @@ out: if (in_page) { kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); } return ret; @@ -266,7 +266,7 @@ static int lzo_decompress_biovec(struct list_head *ws, char *data_in; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); + unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); unsigned long buf_start; unsigned long buf_offset = 0; unsigned long bytes; @@ -289,7 +289,7 @@ static int lzo_decompress_biovec(struct list_head *ws, tot_in = LZO_LEN; in_offset = LZO_LEN; tot_len = min_t(size_t, srclen, tot_len); - in_page_bytes_left = PAGE_CACHE_SIZE - LZO_LEN; + in_page_bytes_left = PAGE_SIZE - LZO_LEN; tot_out = 0; pg_offset = 0; @@ -345,12 +345,12 @@ cont: data_in = kmap(pages_in[++page_in_index]); - in_page_bytes_left = PAGE_CACHE_SIZE; + in_page_bytes_left = PAGE_SIZE; in_offset = 0; } } - out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); + out_len = lzo1x_worst_compress(PAGE_SIZE); ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, &out_len); if (need_unmap) @@ -399,7 +399,7 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in, in_len = read_compress_length(data_in); data_in += LZO_LEN; - out_len = PAGE_CACHE_SIZE; + out_len = PAGE_SIZE; ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len); if (ret != 
LZO_E_OK) { printk(KERN_WARNING "BTRFS: decompress failed!\n"); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 5279fdae7142..9e119552ed32 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1463,6 +1463,7 @@ struct btrfs_qgroup_extent_record u64 bytenr = record->bytenr; assert_spin_locked(&delayed_refs->lock); + trace_btrfs_qgroup_insert_dirty_extent(record); while (*p) { parent_node = *p; @@ -1594,6 +1595,9 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info, cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq); cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq); + trace_qgroup_update_counters(qg->qgroupid, cur_old_count, + cur_new_count); + /* Rfer update part */ if (cur_old_count == 0 && cur_new_count > 0) { qg->rfer += num_bytes; @@ -1683,6 +1687,9 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans, goto out_free; BUG_ON(!fs_info->quota_root); + trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots, + nr_new_roots); + qgroups = ulist_alloc(GFP_NOFS); if (!qgroups) { ret = -ENOMEM; @@ -1752,6 +1759,8 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans, record = rb_entry(node, struct btrfs_qgroup_extent_record, node); + trace_btrfs_qgroup_account_extents(record); + if (!ret) { /* * Use (u64)-1 as time_seq to do special search, which @@ -1842,8 +1851,10 @@ out: } /* - * copy the acounting information between qgroups. This is necessary when a - * snapshot or a subvolume is created + * Copy the accounting information between qgroups. This is necessary + * when a snapshot or a subvolume is created. Throwing an error will + * cause a transaction abort so we take extra care here to only error + * when a readonly fs is a reasonable outcome. */ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info, u64 srcid, u64 objectid, @@ -1873,15 +1884,15 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, 2 * inherit->num_excl_copies; for (i = 0; i < nums; ++i) { srcgroup = find_qgroup_rb(fs_info, *i_qgroups); - if (!srcgroup) { - ret = -EINVAL; - goto out; - } - if ((srcgroup->qgroupid >> 48) <= (objectid >> 48)) { - ret = -EINVAL; - goto out; - } + /* + * Zero out invalid groups so we can ignore + * them later. 
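 *
 * (Editorial note, not from the patch: a qgroupid keeps the qgroup
 * level in its upper 16 bits, so the ">> 48" comparison below rejects
 * any source group whose level is not above that of the new
 * subvolume's qgroup.)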
+ */ + if (!srcgroup || + ((srcgroup->qgroupid >> 48) <= (objectid >> 48))) + *i_qgroups = 0ULL; + ++i_qgroups; } } @@ -1916,17 +1927,19 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, */ if (inherit) { i_qgroups = (u64 *)(inherit + 1); - for (i = 0; i < inherit->num_qgroups; ++i) { + for (i = 0; i < inherit->num_qgroups; ++i, ++i_qgroups) { + if (*i_qgroups == 0) + continue; ret = add_qgroup_relation_item(trans, quota_root, objectid, *i_qgroups); - if (ret) + if (ret && ret != -EEXIST) goto out; ret = add_qgroup_relation_item(trans, quota_root, *i_qgroups, objectid); - if (ret) + if (ret && ret != -EEXIST) goto out; - ++i_qgroups; } + ret = 0; } @@ -1987,17 +2000,22 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, i_qgroups = (u64 *)(inherit + 1); for (i = 0; i < inherit->num_qgroups; ++i) { - ret = add_relation_rb(quota_root->fs_info, objectid, - *i_qgroups); - if (ret) - goto unlock; + if (*i_qgroups) { + ret = add_relation_rb(quota_root->fs_info, objectid, + *i_qgroups); + if (ret) + goto unlock; + } ++i_qgroups; } - for (i = 0; i < inherit->num_ref_copies; ++i) { + for (i = 0; i < inherit->num_ref_copies; ++i, i_qgroups += 2) { struct btrfs_qgroup *src; struct btrfs_qgroup *dst; + if (!i_qgroups[0] || !i_qgroups[1]) + continue; + src = find_qgroup_rb(fs_info, i_qgroups[0]); dst = find_qgroup_rb(fs_info, i_qgroups[1]); @@ -2008,12 +2026,14 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, dst->rfer = src->rfer - level_size; dst->rfer_cmpr = src->rfer_cmpr - level_size; - i_qgroups += 2; } - for (i = 0; i < inherit->num_excl_copies; ++i) { + for (i = 0; i < inherit->num_excl_copies; ++i, i_qgroups += 2) { struct btrfs_qgroup *src; struct btrfs_qgroup *dst; + if (!i_qgroups[0] || !i_qgroups[1]) + continue; + src = find_qgroup_rb(fs_info, i_qgroups[0]); dst = find_qgroup_rb(fs_info, i_qgroups[1]); @@ -2024,7 +2044,6 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, dst->excl = src->excl + level_size; dst->excl_cmpr = src->excl_cmpr + level_size; - i_qgroups += 2; } unlock: diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 55161369fab1..0b7792e02dd5 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -270,7 +270,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) s = kmap(rbio->bio_pages[i]); d = kmap(rbio->stripe_pages[i]); - memcpy(d, s, PAGE_CACHE_SIZE); + memcpy(d, s, PAGE_SIZE); kunmap(rbio->bio_pages[i]); kunmap(rbio->stripe_pages[i]); @@ -962,7 +962,7 @@ static struct page *page_in_rbio(struct btrfs_raid_bio *rbio, */ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) { - return DIV_ROUND_UP(stripe_len, PAGE_CACHE_SIZE) * nr_stripes; + return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes; } /* @@ -1078,7 +1078,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, u64 disk_start; stripe = &rbio->bbio->stripes[stripe_nr]; - disk_start = stripe->physical + (page_index << PAGE_CACHE_SHIFT); + disk_start = stripe->physical + (page_index << PAGE_SHIFT); /* if the device is missing, just fail this stripe */ if (!stripe->dev->bdev) @@ -1096,8 +1096,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, if (last_end == disk_start && stripe->dev->bdev && !last->bi_error && last->bi_bdev == stripe->dev->bdev) { - ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); - if (ret == PAGE_CACHE_SIZE) + ret = bio_add_page(last, page, PAGE_SIZE, 0); + if (ret == PAGE_SIZE) return 0; } } @@ -1111,7 +1111,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, bio->bi_bdev = 
stripe->dev->bdev; bio->bi_iter.bi_sector = disk_start >> 9; - bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); + bio_add_page(bio, page, PAGE_SIZE, 0); bio_list_add(bio_list, bio); return 0; } @@ -1154,7 +1154,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) bio_list_for_each(bio, &rbio->bio_list) { start = (u64)bio->bi_iter.bi_sector << 9; stripe_offset = start - rbio->bbio->raid_map[0]; - page_index = stripe_offset >> PAGE_CACHE_SHIFT; + page_index = stripe_offset >> PAGE_SHIFT; for (i = 0; i < bio->bi_vcnt; i++) { p = bio->bi_io_vec[i].bv_page; @@ -1253,7 +1253,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio) } else { /* raid5 */ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); - run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); + run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); } @@ -1914,7 +1914,7 @@ pstripe: /* Copy parity block into failed block to start with */ memcpy(pointers[faila], pointers[rbio->nr_data], - PAGE_CACHE_SIZE); + PAGE_SIZE); /* rearrange the pointer array */ p = pointers[faila]; @@ -1923,7 +1923,7 @@ pstripe: pointers[rbio->nr_data - 1] = p; /* xor in the rest */ - run_xor(pointers, rbio->nr_data - 1, PAGE_CACHE_SIZE); + run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE); } /* if we're doing this rebuild as part of an rmw, go through * and set all of our private rbio pages in the @@ -2250,7 +2250,7 @@ void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + rbio->stripe_len * rbio->nr_data); stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); - index = stripe_offset >> PAGE_CACHE_SHIFT; + index = stripe_offset >> PAGE_SHIFT; rbio->bio_pages[index] = page; } @@ -2365,14 +2365,14 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, } else { /* raid5 */ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE); - run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE); + run_xor(pointers + 1, nr_data - 1, PAGE_SIZE); } /* Check scrubbing pairty and repair it */ p = rbio_stripe_page(rbio, rbio->scrubp, pagenr); parity = kmap(p); - if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE)) - memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE); + if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE)) + memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE); else /* Parity is right, needn't writeback */ bitmap_clear(rbio->dbitmap, pagenr, 1); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index b892914968c1..298631eaee78 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -226,7 +226,7 @@ int btree_readahead_hook(struct btrfs_fs_info *fs_info, /* find extent */ spin_lock(&fs_info->reada_lock); re = radix_tree_lookup(&fs_info->reada_tree, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); if (re) re->refcnt++; spin_unlock(&fs_info->reada_lock); @@ -257,7 +257,7 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info, zone = NULL; spin_lock(&fs_info->reada_lock); ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, - logical >> PAGE_CACHE_SHIFT, 1); + logical >> PAGE_SHIFT, 1); if (ret == 1 && logical >= zone->start && logical <= zone->end) { kref_get(&zone->refcnt); spin_unlock(&fs_info->reada_lock); @@ -294,13 +294,13 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info, spin_lock(&fs_info->reada_lock); ret = radix_tree_insert(&dev->reada_zones, - (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), + (unsigned long)(zone->end >> PAGE_SHIFT), zone); if (ret == -EEXIST) { kfree(zone); ret = 
radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, - logical >> PAGE_CACHE_SHIFT, 1); + logical >> PAGE_SHIFT, 1); if (ret == 1 && logical >= zone->start && logical <= zone->end) kref_get(&zone->refcnt); else @@ -326,7 +326,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, u64 length; int real_stripes; int nzones = 0; - unsigned long index = logical >> PAGE_CACHE_SHIFT; + unsigned long index = logical >> PAGE_SHIFT; int dev_replace_is_ongoing; int have_zone = 0; @@ -495,7 +495,7 @@ static void reada_extent_put(struct btrfs_fs_info *fs_info, struct reada_extent *re) { int i; - unsigned long index = re->logical >> PAGE_CACHE_SHIFT; + unsigned long index = re->logical >> PAGE_SHIFT; spin_lock(&fs_info->reada_lock); if (--re->refcnt) { @@ -538,7 +538,7 @@ static void reada_zone_release(struct kref *kref) struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt); radix_tree_delete(&zone->device->reada_zones, - zone->end >> PAGE_CACHE_SHIFT); + zone->end >> PAGE_SHIFT); kfree(zone); } @@ -587,7 +587,7 @@ static int reada_add_block(struct reada_control *rc, u64 logical, static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock) { int i; - unsigned long index = zone->end >> PAGE_CACHE_SHIFT; + unsigned long index = zone->end >> PAGE_SHIFT; for (i = 0; i < zone->ndevs; ++i) { struct reada_zone *peer; @@ -622,7 +622,7 @@ static int reada_pick_zone(struct btrfs_device *dev) (void **)&zone, index, 1); if (ret == 0) break; - index = (zone->end >> PAGE_CACHE_SHIFT) + 1; + index = (zone->end >> PAGE_SHIFT) + 1; if (zone->locked) { if (zone->elems > top_locked_elems) { top_locked_elems = zone->elems; @@ -673,7 +673,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, * plugging to speed things up */ ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, - dev->reada_next >> PAGE_CACHE_SHIFT, 1); + dev->reada_next >> PAGE_SHIFT, 1); if (ret == 0 || re->logical > dev->reada_curr_zone->end) { ret = reada_pick_zone(dev); if (!ret) { @@ -682,7 +682,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, } re = NULL; ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re, - dev->reada_next >> PAGE_CACHE_SHIFT, 1); + dev->reada_next >> PAGE_SHIFT, 1); } if (ret == 0) { spin_unlock(&fs_info->reada_lock); @@ -838,7 +838,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) printk(KERN_CONT " curr off %llu", device->reada_next - zone->start); printk(KERN_CONT "\n"); - index = (zone->end >> PAGE_CACHE_SHIFT) + 1; + index = (zone->end >> PAGE_SHIFT) + 1; } cnt = 0; index = 0; @@ -864,7 +864,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) } } printk(KERN_CONT "\n"); - index = (re->logical >> PAGE_CACHE_SHIFT) + 1; + index = (re->logical >> PAGE_SHIFT) + 1; if (++cnt > 15) break; } @@ -880,7 +880,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) if (ret == 0) break; if (!re->scheduled) { - index = (re->logical >> PAGE_CACHE_SHIFT) + 1; + index = (re->logical >> PAGE_SHIFT) + 1; continue; } printk(KERN_DEBUG @@ -897,7 +897,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) } } printk(KERN_CONT "\n"); - index = (re->logical >> PAGE_CACHE_SHIFT) + 1; + index = (re->logical >> PAGE_SHIFT) + 1; } spin_unlock(&fs_info->reada_lock); } diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 2bd0011450df..08ef890deca6 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1850,6 +1850,7 @@ again: eb = read_tree_block(dest, old_bytenr, 
old_ptr_gen); if (IS_ERR(eb)) { ret = PTR_ERR(eb); + break; } else if (!extent_buffer_uptodate(eb)) { ret = -EIO; free_extent_buffer(eb); @@ -3129,10 +3130,10 @@ static int relocate_file_extent_cluster(struct inode *inode, if (ret) goto out; - index = (cluster->start - offset) >> PAGE_CACHE_SHIFT; - last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT; + index = (cluster->start - offset) >> PAGE_SHIFT; + last_index = (cluster->end - offset) >> PAGE_SHIFT; while (index <= last_index) { - ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE); + ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE); if (ret) goto out; @@ -3145,7 +3146,7 @@ static int relocate_file_extent_cluster(struct inode *inode, mask); if (!page) { btrfs_delalloc_release_metadata(inode, - PAGE_CACHE_SIZE); + PAGE_SIZE); ret = -ENOMEM; goto out; } @@ -3162,16 +3163,16 @@ static int relocate_file_extent_cluster(struct inode *inode, lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); btrfs_delalloc_release_metadata(inode, - PAGE_CACHE_SIZE); + PAGE_SIZE); ret = -EIO; goto out; } } page_start = page_offset(page); - page_end = page_start + PAGE_CACHE_SIZE - 1; + page_end = page_start + PAGE_SIZE - 1; lock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); @@ -3191,7 +3192,7 @@ static int relocate_file_extent_cluster(struct inode *inode, unlock_extent(&BTRFS_I(inode)->io_tree, page_start, page_end); unlock_page(page); - page_cache_release(page); + put_page(page); index++; balance_dirty_pages_ratelimited(inode->i_mapping); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 39dbdcbf4d13..4678f03e878e 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -703,7 +703,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) if (IS_ERR(inode)) return PTR_ERR(inode); - index = offset >> PAGE_CACHE_SHIFT; + index = offset >> PAGE_SHIFT; page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); if (!page) { @@ -1636,7 +1636,7 @@ static int scrub_write_page_to_dev_replace(struct scrub_block *sblock, if (spage->io_error) { void *mapped_buffer = kmap_atomic(spage->page); - memset(mapped_buffer, 0, PAGE_CACHE_SIZE); + memset(mapped_buffer, 0, PAGE_SIZE); flush_dcache_page(spage->page); kunmap_atomic(mapped_buffer); } @@ -4294,8 +4294,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, goto out; } - while (len >= PAGE_CACHE_SIZE) { - index = offset >> PAGE_CACHE_SHIFT; + while (len >= PAGE_SIZE) { + index = offset >> PAGE_SHIFT; again: page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); if (!page) { @@ -4326,7 +4326,7 @@ again: */ if (page->mapping != inode->i_mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto again; } if (!PageUptodate(page)) { @@ -4348,15 +4348,15 @@ again: ret = err; next_page: unlock_page(page); - page_cache_release(page); + put_page(page); if (ret) break; - offset += PAGE_CACHE_SIZE; - physical_for_dev_replace += PAGE_CACHE_SIZE; - nocow_ctx_logical += PAGE_CACHE_SIZE; - len -= PAGE_CACHE_SIZE; + offset += PAGE_SIZE; + physical_for_dev_replace += PAGE_SIZE; + nocow_ctx_logical += PAGE_SIZE; + len -= PAGE_SIZE; } ret = COPY_COMPLETE; out: @@ -4390,8 +4390,8 @@ static int write_page_nocow(struct scrub_ctx *sctx, bio->bi_iter.bi_size = 0; bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; bio->bi_bdev = dev->bdev; - ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); - if (ret != PAGE_CACHE_SIZE) { + ret = bio_add_page(bio, page, PAGE_SIZE, 
0); + if (ret != PAGE_SIZE) { leave_with_eio: bio_put(bio); btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 19b7bf4284ee..8d358c547c59 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -4449,9 +4449,9 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) struct page *page; char *addr; struct btrfs_key key; - pgoff_t index = offset >> PAGE_CACHE_SHIFT; + pgoff_t index = offset >> PAGE_SHIFT; pgoff_t last_index; - unsigned pg_offset = offset & ~PAGE_CACHE_MASK; + unsigned pg_offset = offset & ~PAGE_MASK; ssize_t ret = 0; key.objectid = sctx->cur_ino; @@ -4471,7 +4471,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) if (len == 0) goto out; - last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; + last_index = (offset + len - 1) >> PAGE_SHIFT; /* initial readahead */ memset(&sctx->ra, 0, sizeof(struct file_ra_state)); @@ -4481,7 +4481,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) while (index <= last_index) { unsigned cur_len = min_t(unsigned, len, - PAGE_CACHE_SIZE - pg_offset); + PAGE_SIZE - pg_offset); page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) { ret = -ENOMEM; @@ -4493,7 +4493,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) lock_page(page); if (!PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); ret = -EIO; break; } @@ -4503,7 +4503,7 @@ static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); kunmap(page); unlock_page(page); - page_cache_release(page); + put_page(page); index++; pg_offset = 0; len -= cur_len; @@ -4804,7 +4804,7 @@ static int clone_range(struct send_ctx *sctx, type = btrfs_file_extent_type(leaf, ei); if (type == BTRFS_FILE_EXTENT_INLINE) { ext_len = btrfs_file_extent_inline_len(leaf, slot, ei); - ext_len = PAGE_CACHE_ALIGN(ext_len); + ext_len = PAGE_ALIGN(ext_len); } else { ext_len = btrfs_file_extent_num_bytes(leaf, ei); } @@ -4886,7 +4886,7 @@ static int send_write_or_clone(struct send_ctx *sctx, * but there may be items after this page. 
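 * (Editorial aside, not part of the patch: PAGE_ALIGN(len), used below
 * in place of the old PAGE_CACHE_ALIGN spelling, rounds len up to the
 * next page boundary, e.g. PAGE_ALIGN(5) == 4096 with 4 KiB pages.)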
Make * sure to send the whole thing */ - len = PAGE_CACHE_ALIGN(len); + len = PAGE_ALIGN(len); } else { len = btrfs_file_extent_num_bytes(path->nodes[0], ei); } diff --git a/fs/btrfs/struct-funcs.c b/fs/btrfs/struct-funcs.c index b976597b0721..e05619f241be 100644 --- a/fs/btrfs/struct-funcs.c +++ b/fs/btrfs/struct-funcs.c @@ -66,7 +66,7 @@ u##bits btrfs_get_token_##bits(struct extent_buffer *eb, void *ptr, \ \ if (token && token->kaddr && token->offset <= offset && \ token->eb == eb && \ - (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ + (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ p = kaddr + part_offset - token->offset; \ res = get_unaligned_le##bits(p + off); \ @@ -104,7 +104,7 @@ void btrfs_set_token_##bits(struct extent_buffer *eb, \ \ if (token && token->kaddr && token->offset <= offset && \ token->eb == eb && \ - (token->offset + PAGE_CACHE_SIZE >= offset + size)) { \ + (token->offset + PAGE_SIZE >= offset + size)) { \ kaddr = token->kaddr; \ p = kaddr + part_offset - token->offset; \ put_unaligned_le##bits(val, p + off); \ diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 669b58201e36..70948b13bc81 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -32,8 +32,8 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, { int ret; struct page *pages[16]; - unsigned long index = start >> PAGE_CACHE_SHIFT; - unsigned long end_index = end >> PAGE_CACHE_SHIFT; + unsigned long index = start >> PAGE_SHIFT; + unsigned long end_index = end >> PAGE_SHIFT; unsigned long nr_pages = end_index - index + 1; int i; int count = 0; @@ -49,9 +49,9 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, count++; if (flags & PROCESS_UNLOCK && PageLocked(pages[i])) unlock_page(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); if (flags & PROCESS_RELEASE) - page_cache_release(pages[i]); + put_page(pages[i]); } nr_pages -= ret; index += ret; @@ -93,7 +93,7 @@ static int test_find_delalloc(void) * everything to make sure our pages don't get evicted and screw up our * test. 
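 *
 * Editorial sketch, not from the patch: the wrappers this series
 * retires were plain aliases in the old pagemap.h, roughly
 *
 *	#define page_cache_get(page)		get_page(page)
 *	#define page_cache_release(page)	put_page(page)
 *
 * which is why each conversion below is a one-for-one rename.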
*/ - for (index = 0; index < (total_dirty >> PAGE_CACHE_SHIFT); index++) { + for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) { page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); if (!page) { test_msg("Failed to allocate test page\n"); @@ -104,7 +104,7 @@ static int test_find_delalloc(void) if (index) { unlock_page(page); } else { - page_cache_get(page); + get_page(page); locked_page = page; } } @@ -129,7 +129,7 @@ static int test_find_delalloc(void) } unlock_extent(&tmp, start, end); unlock_page(locked_page); - page_cache_release(locked_page); + put_page(locked_page); /* * Test this scenario @@ -139,7 +139,7 @@ static int test_find_delalloc(void) */ test_start = SZ_64M; locked_page = find_lock_page(inode->i_mapping, - test_start >> PAGE_CACHE_SHIFT); + test_start >> PAGE_SHIFT); if (!locked_page) { test_msg("Couldn't find the locked page\n"); goto out_bits; } @@ -165,7 +165,7 @@ static int test_find_delalloc(void) } unlock_extent(&tmp, start, end); /* locked_page was unlocked above */ - page_cache_release(locked_page); + put_page(locked_page); /* * Test this scenario @@ -174,7 +174,7 @@ static int test_find_delalloc(void) */ test_start = max_bytes + 4096; locked_page = find_lock_page(inode->i_mapping, test_start >> - PAGE_CACHE_SHIFT); + PAGE_SHIFT); if (!locked_page) { test_msg("Couldn't find the locked page\n"); goto out_bits; } @@ -225,13 +225,13 @@ static int test_find_delalloc(void) * range we want to find. */ page = find_get_page(inode->i_mapping, - (max_bytes + SZ_1M) >> PAGE_CACHE_SHIFT); + (max_bytes + SZ_1M) >> PAGE_SHIFT); if (!page) { test_msg("Couldn't find our page\n"); goto out_bits; } ClearPageDirty(page); - page_cache_release(page); + put_page(page); /* We unlocked it in the previous test */ lock_page(locked_page); @@ -239,7 +239,7 @@ static int test_find_delalloc(void) end = 0; /* * Currently if we fail to find dirty pages in the delalloc range we - * will adjust max_bytes down to PAGE_CACHE_SIZE and then re-search. If + * will adjust max_bytes down to PAGE_SIZE and then re-search. If * this changes at any point in the future we will need to fix this * test's expected behavior.
*/ @@ -249,9 +249,9 @@ static int test_find_delalloc(void) test_msg("Didn't find our range\n"); goto out_bits; } - if (start != test_start && end != test_start + PAGE_CACHE_SIZE - 1) { + if (start != test_start && end != test_start + PAGE_SIZE - 1) { test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n", - test_start, test_start + PAGE_CACHE_SIZE - 1, start, + test_start, test_start + PAGE_SIZE - 1, start, end); goto out_bits; } @@ -265,7 +265,7 @@ out_bits: clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_KERNEL); out: if (locked_page) - page_cache_release(locked_page); + put_page(locked_page); process_page_range(inode, 0, total_dirty - 1, PROCESS_UNLOCK | PROCESS_RELEASE); iput(inode); @@ -298,9 +298,9 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, return -EINVAL; } - bitmap_set(bitmap, (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, + bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, sizeof(long) * BITS_PER_BYTE); - extent_buffer_bitmap_set(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, + extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, sizeof(long) * BITS_PER_BYTE); if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { test_msg("Setting straddling pages failed\n"); @@ -309,10 +309,10 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, bitmap_set(bitmap, 0, len * BITS_PER_BYTE); bitmap_clear(bitmap, - (PAGE_CACHE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, + (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, sizeof(long) * BITS_PER_BYTE); extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); - extent_buffer_bitmap_clear(eb, PAGE_CACHE_SIZE - sizeof(long) / 2, 0, + extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, sizeof(long) * BITS_PER_BYTE); if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { test_msg("Clearing straddling pages failed\n"); @@ -353,7 +353,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, static int test_eb_bitmaps(void) { - unsigned long len = PAGE_CACHE_SIZE * 4; + unsigned long len = PAGE_SIZE * 4; unsigned long *bitmap; struct extent_buffer *eb; int ret; @@ -379,7 +379,7 @@ static int test_eb_bitmaps(void) /* Do it over again with an extent buffer which isn't page-aligned. */ free_extent_buffer(eb); - eb = __alloc_dummy_extent_buffer(NULL, PAGE_CACHE_SIZE / 2, len); + eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len); if (!eb) { test_msg("Couldn't allocate test extent buffer\n"); kfree(bitmap); diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index c9ad97b1e690..514247515312 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c @@ -22,7 +22,7 @@ #include "../disk-io.h" #include "../free-space-cache.h" -#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) +#define BITS_PER_BITMAP (PAGE_SIZE * 8) /* * This test just does basic sanity checking, making sure we can add an extent
If we don't do this, then at + * log replay time we can lose inode Y (and all its files if it's a directory): + * + * mkdir /mnt/x + * echo "hello world" > /mnt/x/foobar + * sync + * mv /mnt/x /mnt/y + * mkdir /mnt/x # or touch /mnt/x + * xfs_io -c fsync /mnt/x + * <power fail> + * mount fs, trigger log replay + * + * After the log replay procedure, we would lose the first directory and all its + * files (file foobar). + * For the case where inode Y is not a directory we simply end up losing it: + * + * echo "123" > /mnt/foo + * sync + * mv /mnt/foo /mnt/bar + * echo "abc" > /mnt/foo + * xfs_io -c fsync /mnt/foo + * <power fail> + * + * We also need this for cases where a snapshot entry is replaced by some other + * entry (file or directory) otherwise we end up with an unreplayable log due to + * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as + * if it were a regular entry: + * + * mkdir /mnt/x + * btrfs subvolume snapshot /mnt /mnt/x/snap + * btrfs subvolume delete /mnt/x/snap + * rmdir /mnt/x + * mkdir /mnt/x + * fsync /mnt/x or fsync some new file inside it + * <power fail> + * + * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in + * the same transaction. + */ +static int btrfs_check_ref_name_override(struct extent_buffer *eb, + const int slot, + const struct btrfs_key *key, + struct inode *inode) +{ + int ret; + struct btrfs_path *search_path; + char *name = NULL; + u32 name_len = 0; + u32 item_size = btrfs_item_size_nr(eb, slot); + u32 cur_offset = 0; + unsigned long ptr = btrfs_item_ptr_offset(eb, slot); + + search_path = btrfs_alloc_path(); + if (!search_path) + return -ENOMEM; + search_path->search_commit_root = 1; + search_path->skip_locking = 1; + + while (cur_offset < item_size) { + u64 parent; + u32 this_name_len; + u32 this_len; + unsigned long name_ptr; + struct btrfs_dir_item *di; + + if (key->type == BTRFS_INODE_REF_KEY) { + struct btrfs_inode_ref *iref; + + iref = (struct btrfs_inode_ref *)(ptr + cur_offset); + parent = key->offset; + this_name_len = btrfs_inode_ref_name_len(eb, iref); + name_ptr = (unsigned long)(iref + 1); + this_len = sizeof(*iref) + this_name_len; + } else { + struct btrfs_inode_extref *extref; + + extref = (struct btrfs_inode_extref *)(ptr + + cur_offset); + parent = btrfs_inode_extref_parent(eb, extref); + this_name_len = btrfs_inode_extref_name_len(eb, extref); + name_ptr = (unsigned long)&extref->name; + this_len = sizeof(*extref) + this_name_len; + } + + if (this_name_len > name_len) { + char *new_name; + + new_name = krealloc(name, this_name_len, GFP_NOFS); + if (!new_name) { + ret = -ENOMEM; + goto out; + } + name_len = this_name_len; + name = new_name; + } + + read_extent_buffer(eb, name, name_ptr, this_name_len); + di = btrfs_lookup_dir_item(NULL, BTRFS_I(inode)->root, + search_path, parent, + name, this_name_len, 0); + if (di && !IS_ERR(di)) { + ret = 1; + goto out; + } else if (IS_ERR(di)) { + ret = PTR_ERR(di); + goto out; + } + btrfs_release_path(search_path); + + cur_offset += this_len; + } + ret = 0; +out: + btrfs_free_path(search_path); + kfree(name); + return ret; +} + /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. 
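As an aside on the new helper above: btrfs packs inode refs as variable-length records laid out back to back inside a single item, which is why the loop advances by sizeof(*iref) (or sizeof(*extref)) plus the inline name length rather than by a fixed stride, and why the name is copied out with read_extent_buffer() instead of being dereferenced in place. A minimal userspace sketch of the same walk, with a simplified two-byte header standing in for the real structures (the layout here is illustrative, not the on-disk format):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Toy stand-in for struct btrfs_inode_ref: a two-byte header followed
     * immediately by name_len bytes of name, records packed back to back.
     * Illustrative only; this is not the real on-disk layout.
     */
    struct ref_hdr {
    	uint16_t name_len;
    };

    static void walk_refs(const uint8_t *item, uint32_t item_size)
    {
    	uint32_t cur_offset = 0;

    	while (cur_offset < item_size) {
    		struct ref_hdr hdr;

    		/*
    		 * Copy the header out rather than dereferencing in place,
    		 * in the same spirit as read_extent_buffer(): the record
    		 * may be unaligned (in the kernel it may even straddle
    		 * pages of the extent buffer).
    		 */
    		memcpy(&hdr, item + cur_offset, sizeof(hdr));
    		printf("ref name: %.*s\n", hdr.name_len,
    		       (const char *)(item + cur_offset + sizeof(hdr)));

    		/* advance by header plus inline name, the cur_offset += this_len step */
    		cur_offset += sizeof(hdr) + hdr.name_len;
    	}
    }

    int main(void)
    {
    	uint8_t buf[64];
    	uint32_t off = 0;
    	const char *names[] = { "x", "foobar" };

    	for (int i = 0; i < 2; i++) {
    		struct ref_hdr hdr = { .name_len = (uint16_t)strlen(names[i]) };

    		memcpy(buf + off, &hdr, sizeof(hdr));
    		memcpy(buf + off + sizeof(hdr), names[i], hdr.name_len);
    		off += sizeof(hdr) + hdr.name_len;
    	}
    	walk_refs(buf, off);
    	return 0;
    }

The krealloc() of the name buffer in the patch serves the same purpose as the fixed buf[] here: the walk cannot know the longest name up front, so the scratch buffer grows as longer names are encountered.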
@@ -4602,6 +4723,22 @@ again: if (min_key.type == BTRFS_INODE_ITEM_KEY) need_log_inode_item = false; + if ((min_key.type == BTRFS_INODE_REF_KEY || + min_key.type == BTRFS_INODE_EXTREF_KEY) && + BTRFS_I(inode)->generation == trans->transid) { + ret = btrfs_check_ref_name_override(path->nodes[0], + path->slots[0], + &min_key, inode); + if (ret < 0) { + err = ret; + goto out_unlock; + } else if (ret > 0) { + err = 1; + btrfs_set_log_full_commit(root->fs_info, trans); + goto out_unlock; + } + } + /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ if (min_key.type == BTRFS_XATTR_ITEM_KEY) { if (ins_nr == 0) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index e2b54d546b7c..bd0f45fb38c4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1025,16 +1025,16 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, } /* make sure our super fits in the device */ - if (bytenr + PAGE_CACHE_SIZE >= i_size_read(bdev->bd_inode)) + if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) goto error_bdev_put; /* make sure our super fits in the page */ - if (sizeof(*disk_super) > PAGE_CACHE_SIZE) + if (sizeof(*disk_super) > PAGE_SIZE) goto error_bdev_put; /* make sure our super doesn't straddle pages on disk */ - index = bytenr >> PAGE_CACHE_SHIFT; - if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_CACHE_SHIFT != index) + index = bytenr >> PAGE_SHIFT; + if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) goto error_bdev_put; /* pull in the page with our super */ @@ -1047,7 +1047,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, p = kmap(page); /* align our pointer to the offset of the super block */ - disk_super = p + (bytenr & ~PAGE_CACHE_MASK); + disk_super = p + (bytenr & ~PAGE_MASK); if (btrfs_super_bytenr(disk_super) != bytenr || btrfs_super_magic(disk_super) != BTRFS_MAGIC) @@ -1075,7 +1075,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, error_unmap: kunmap(page); - page_cache_release(page); + put_page(page); error_bdev_put: blkdev_put(bdev, flags); @@ -6527,7 +6527,7 @@ int btrfs_read_sys_array(struct btrfs_root *root) * but sb spans only this function. Add an explicit SetPageUptodate call * to silence the warning eg. on PowerPC 64. 
*/ - if (PAGE_CACHE_SIZE > BTRFS_SUPER_INFO_SIZE) + if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE) SetPageUptodate(sb->pages[0]); write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 82990b8f872b..88d274e8ecf2 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -59,7 +59,7 @@ static struct list_head *zlib_alloc_workspace(void) workspacesize = max(zlib_deflate_workspacesize(MAX_WBITS, MAX_MEM_LEVEL), zlib_inflate_workspacesize()); workspace->strm.workspace = vmalloc(workspacesize); - workspace->buf = kmalloc(PAGE_CACHE_SIZE, GFP_NOFS); + workspace->buf = kmalloc(PAGE_SIZE, GFP_NOFS); if (!workspace->strm.workspace || !workspace->buf) goto fail; @@ -103,7 +103,7 @@ static int zlib_compress_pages(struct list_head *ws, workspace->strm.total_in = 0; workspace->strm.total_out = 0; - in_page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); + in_page = find_get_page(mapping, start >> PAGE_SHIFT); data_in = kmap(in_page); out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); @@ -117,8 +117,8 @@ static int zlib_compress_pages(struct list_head *ws, workspace->strm.next_in = data_in; workspace->strm.next_out = cpage_out; - workspace->strm.avail_out = PAGE_CACHE_SIZE; - workspace->strm.avail_in = min(len, PAGE_CACHE_SIZE); + workspace->strm.avail_out = PAGE_SIZE; + workspace->strm.avail_in = min(len, PAGE_SIZE); while (workspace->strm.total_in < len) { ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH); @@ -156,7 +156,7 @@ static int zlib_compress_pages(struct list_head *ws, cpage_out = kmap(out_page); pages[nr_pages] = out_page; nr_pages++; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; workspace->strm.next_out = cpage_out; } /* we're all done */ @@ -170,14 +170,14 @@ static int zlib_compress_pages(struct list_head *ws, bytes_left = len - workspace->strm.total_in; kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); - start += PAGE_CACHE_SIZE; + start += PAGE_SIZE; in_page = find_get_page(mapping, - start >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT); data_in = kmap(in_page); workspace->strm.avail_in = min(bytes_left, - PAGE_CACHE_SIZE); + PAGE_SIZE); workspace->strm.next_in = data_in; } } @@ -205,7 +205,7 @@ out: if (in_page) { kunmap(in_page); - page_cache_release(in_page); + put_page(in_page); } return ret; } @@ -223,18 +223,18 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, size_t total_out = 0; unsigned long page_in_index = 0; unsigned long page_out_index = 0; - unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_CACHE_SIZE); + unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE); unsigned long buf_start; unsigned long pg_offset; data_in = kmap(pages_in[page_in_index]); workspace->strm.next_in = data_in; - workspace->strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE); + workspace->strm.avail_in = min_t(size_t, srclen, PAGE_SIZE); workspace->strm.total_in = 0; workspace->strm.total_out = 0; workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; pg_offset = 0; /* If it's deflate, and it's got no preset dictionary, then @@ -274,7 +274,7 @@ static int zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, } workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; if (workspace->strm.avail_in == 0) { unsigned long tmp; @@ -288,7 +288,7 @@ static int 
zlib_decompress_biovec(struct list_head *ws, struct page **pages_in, workspace->strm.next_in = data_in; tmp = srclen - workspace->strm.total_in; workspace->strm.avail_in = min(tmp, - PAGE_CACHE_SIZE); + PAGE_SIZE); } } if (ret != Z_STREAM_END) @@ -325,7 +325,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, workspace->strm.total_in = 0; workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; workspace->strm.total_out = 0; /* If it's deflate, and it's got no preset dictionary, then we can tell zlib to skip the adler32 check. */ @@ -368,8 +368,8 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, else buf_offset = 0; - bytes = min(PAGE_CACHE_SIZE - pg_offset, - PAGE_CACHE_SIZE - buf_offset); + bytes = min(PAGE_SIZE - pg_offset, + PAGE_SIZE - buf_offset); bytes = min(bytes, bytes_left); kaddr = kmap_atomic(dest_page); @@ -380,7 +380,7 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in, bytes_left -= bytes; next: workspace->strm.next_out = workspace->buf; - workspace->strm.avail_out = PAGE_CACHE_SIZE; + workspace->strm.avail_out = PAGE_SIZE; } if (ret != Z_STREAM_END && bytes_left != 0) diff --git a/fs/buffer.c b/fs/buffer.c index 33be29675358..af0d9a82a8ed 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -129,7 +129,7 @@ __clear_page_buffers(struct page *page) { ClearPagePrivate(page); set_page_private(page, 0); - page_cache_release(page); + put_page(page); } static void buffer_io_error(struct buffer_head *bh, char *msg) @@ -207,7 +207,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) struct page *page; int all_mapped = 1; - index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits); + index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); if (!page) goto out; @@ -245,7 +245,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) } out_unlock: spin_unlock(&bd_mapping->private_lock); - page_cache_release(page); + put_page(page); out: return ret; } @@ -1040,7 +1040,7 @@ done: ret = (block < end_block) ? 
1 : -ENXIO; failed: unlock_page(page); - page_cache_release(page); + put_page(page); return ret; } @@ -1533,7 +1533,7 @@ void block_invalidatepage(struct page *page, unsigned int offset, /* * Check for overflow */ - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + BUG_ON(stop > PAGE_SIZE || stop < length); head = page_buffers(page); bh = head; @@ -1716,7 +1716,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, blocksize = bh->b_size; bbits = block_size_bits(blocksize); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + block = (sector_t)page->index << (PAGE_SHIFT - bbits); last_block = (i_size_read(inode) - 1) >> bbits; /* @@ -1894,7 +1894,7 @@ EXPORT_SYMBOL(page_zero_new_buffers); int __block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; @@ -1904,15 +1904,15 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len, struct buffer_head *bh, *head, *wait[2], **wait_bh=wait; BUG_ON(!PageLocked(page)); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > PAGE_SIZE); + BUG_ON(to > PAGE_SIZE); BUG_ON(from > to); head = create_page_buffers(page, inode, 0); blocksize = head->b_size; bbits = block_size_bits(blocksize); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + block = (sector_t)page->index << (PAGE_SHIFT - bbits); for(bh = head, block_start = 0; bh != head || !block_start; block++, block_start=block_end, bh = bh->b_this_page) { @@ -2020,7 +2020,7 @@ static int __block_commit_write(struct inode *inode, struct page *page, int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, get_block_t *get_block) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int status; @@ -2031,7 +2031,7 @@ int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, status = __block_write_begin(page, pos, len, get_block); if (unlikely(status)) { unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; } @@ -2047,7 +2047,7 @@ int block_write_end(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; unsigned start; - start = pos & (PAGE_CACHE_SIZE - 1); + start = pos & (PAGE_SIZE - 1); if (unlikely(copied < len)) { /* @@ -2099,7 +2099,7 @@ int generic_write_end(struct file *file, struct address_space *mapping, } unlock_page(page); - page_cache_release(page); + put_page(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); @@ -2136,9 +2136,9 @@ int block_is_partially_uptodate(struct page *page, unsigned long from, head = page_buffers(page); blocksize = head->b_size; - to = min_t(unsigned, PAGE_CACHE_SIZE - from, count); + to = min_t(unsigned, PAGE_SIZE - from, count); to = from + to; - if (from < blocksize && to > PAGE_CACHE_SIZE - blocksize) + if (from < blocksize && to > PAGE_SIZE - blocksize) return 0; bh = head; @@ -2181,7 +2181,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) blocksize = head->b_size; bbits = block_size_bits(blocksize); - iblock = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + iblock = (sector_t)page->index << (PAGE_SHIFT - bbits); lblock = (i_size_read(inode)+blocksize-1) >> bbits; bh = head; nr = 0; @@ -2295,16 +2295,16 @@ static int 
cont_expand_zero(struct file *file, struct address_space *mapping, unsigned zerofrom, offset, len; int err = 0; - index = pos >> PAGE_CACHE_SHIFT; - offset = pos & ~PAGE_CACHE_MASK; + index = pos >> PAGE_SHIFT; + offset = pos & ~PAGE_MASK; - while (index > (curidx = (curpos = *bytes)>>PAGE_CACHE_SHIFT)) { - zerofrom = curpos & ~PAGE_CACHE_MASK; + while (index > (curidx = (curpos = *bytes)>>PAGE_SHIFT)) { + zerofrom = curpos & ~PAGE_MASK; if (zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; } - len = PAGE_CACHE_SIZE - zerofrom; + len = PAGE_SIZE - zerofrom; err = pagecache_write_begin(file, mapping, curpos, len, AOP_FLAG_UNINTERRUPTIBLE, @@ -2329,7 +2329,7 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, /* page covers the boundary, find the boundary offset */ if (index == curidx) { - zerofrom = curpos & ~PAGE_CACHE_MASK; + zerofrom = curpos & ~PAGE_MASK; /* if we will expand the thing last block will be filled */ if (offset <= zerofrom) { goto out; @@ -2375,7 +2375,7 @@ int cont_write_begin(struct file *file, struct address_space *mapping, if (err) return err; - zerofrom = *bytes & ~PAGE_CACHE_MASK; + zerofrom = *bytes & ~PAGE_MASK; if (pos+len > *bytes && zerofrom & (blocksize-1)) { *bytes |= (blocksize-1); (*bytes)++; @@ -2430,10 +2430,10 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, } /* page is wholly or partially inside EOF */ - if (((page->index + 1) << PAGE_CACHE_SHIFT) > size) - end = size & ~PAGE_CACHE_MASK; + if (((page->index + 1) << PAGE_SHIFT) > size) + end = size & ~PAGE_MASK; else - end = PAGE_CACHE_SIZE; + end = PAGE_SIZE; ret = __block_write_begin(page, 0, end, get_block); if (!ret) @@ -2508,8 +2508,8 @@ int nobh_write_begin(struct address_space *mapping, int ret = 0; int is_mapped_to_disk = 1; - index = pos >> PAGE_CACHE_SHIFT; - from = pos & (PAGE_CACHE_SIZE - 1); + index = pos >> PAGE_SHIFT; + from = pos & (PAGE_SIZE - 1); to = from + len; page = grab_cache_page_write_begin(mapping, index, flags); @@ -2543,7 +2543,7 @@ int nobh_write_begin(struct address_space *mapping, goto out_release; } - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); /* * We loop across all blocks in the page, whether or not they are @@ -2551,7 +2551,7 @@ int nobh_write_begin(struct address_space *mapping, * page is fully mapped-to-disk. */ for (block_start = 0, block_in_page = 0, bh = head; - block_start < PAGE_CACHE_SIZE; + block_start < PAGE_SIZE; block_in_page++, block_start += blocksize, bh = bh->b_this_page) { int create; @@ -2623,7 +2623,7 @@ failed: out_release: unlock_page(page); - page_cache_release(page); + put_page(page); *pagep = NULL; return ret; @@ -2653,7 +2653,7 @@ int nobh_write_end(struct file *file, struct address_space *mapping, } unlock_page(page); - page_cache_release(page); + put_page(page); while (head) { bh = head; @@ -2675,7 +2675,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, { struct inode * const inode = page->mapping->host; loff_t i_size = i_size_read(inode); - const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + const pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; int ret; @@ -2684,7 +2684,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, goto out; /* Is the page fully outside i_size? 
(truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (page->index >= end_index+1 || !offset) { /* * The page may have dirty, unmapped buffers. For example, @@ -2707,7 +2707,7 @@ int nobh_writepage(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); out: ret = mpage_writepage(page, get_block, wbc); if (ret == -EAGAIN) @@ -2720,8 +2720,8 @@ EXPORT_SYMBOL(nobh_writepage); int nobh_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block) { - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize; sector_t iblock; unsigned length, pos; @@ -2738,7 +2738,7 @@ int nobh_truncate_page(struct address_space *mapping, return 0; length = blocksize - length; - iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); page = grab_cache_page(mapping, index); err = -ENOMEM; @@ -2748,7 +2748,7 @@ int nobh_truncate_page(struct address_space *mapping, if (page_has_buffers(page)) { has_buffers: unlock_page(page); - page_cache_release(page); + put_page(page); return block_truncate_page(mapping, from, get_block); } @@ -2772,7 +2772,7 @@ has_buffers: if (!PageUptodate(page)) { err = mapping->a_ops->readpage(NULL, page); if (err) { - page_cache_release(page); + put_page(page); goto out; } lock_page(page); @@ -2789,7 +2789,7 @@ has_buffers: unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out: return err; } @@ -2798,8 +2798,8 @@ EXPORT_SYMBOL(nobh_truncate_page); int block_truncate_page(struct address_space *mapping, loff_t from, get_block_t *get_block) { - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize; sector_t iblock; unsigned length, pos; @@ -2816,7 +2816,7 @@ int block_truncate_page(struct address_space *mapping, return 0; length = blocksize - length; - iblock = (sector_t)index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + iblock = (sector_t)index << (PAGE_SHIFT - inode->i_blkbits); page = grab_cache_page(mapping, index); err = -ENOMEM; @@ -2865,7 +2865,7 @@ int block_truncate_page(struct address_space *mapping, unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out: return err; } @@ -2879,7 +2879,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, { struct inode * const inode = page->mapping->host; loff_t i_size = i_size_read(inode); - const pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + const pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; /* Is the page fully inside i_size? */ @@ -2888,14 +2888,14 @@ int block_write_full_page(struct page *page, get_block_t *get_block, end_buffer_async_write); /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (page->index >= end_index+1 || !offset) { /* * The page may have dirty, unmapped buffers. For example, * they may have been added in ext3_writepage(). Make them * freeable here, so the page does not leak. 
*/ - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); return 0; /* don't care */ } @@ -2907,7 +2907,7 @@ int block_write_full_page(struct page *page, get_block_t *get_block, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); return __block_write_full_page(inode, page, get_block, wbc, end_buffer_async_write); } diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index c0f3da3926a0..afbdc418966d 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -194,10 +194,10 @@ static void cachefiles_read_copier(struct fscache_operation *_op) error = -EIO; } - page_cache_release(monitor->back_page); + put_page(monitor->back_page); fscache_end_io(op, monitor->netfs_page, error); - page_cache_release(monitor->netfs_page); + put_page(monitor->netfs_page); fscache_retrieval_complete(op, 1); fscache_put_retrieval(op); kfree(monitor); @@ -288,8 +288,8 @@ monitor_backing_page: _debug("- monitor add"); /* install the monitor */ - page_cache_get(monitor->netfs_page); - page_cache_get(backpage); + get_page(monitor->netfs_page); + get_page(backpage); monitor->back_page = backpage; monitor->monitor.private = backpage; add_page_wait_queue(backpage, &monitor->monitor); @@ -310,7 +310,7 @@ backing_page_already_present: _debug("- present"); if (newpage) { - page_cache_release(newpage); + put_page(newpage); newpage = NULL; } @@ -342,7 +342,7 @@ success: out: if (backpage) - page_cache_release(backpage); + put_page(backpage); if (monitor) { fscache_put_retrieval(monitor->op); kfree(monitor); @@ -363,7 +363,7 @@ io_error: goto out; nomem_page: - page_cache_release(newpage); + put_page(newpage); nomem_monitor: fscache_put_retrieval(monitor->op); kfree(monitor); @@ -530,7 +530,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, netpage->index, cachefiles_gfp); if (ret < 0) { if (ret == -EEXIST) { - page_cache_release(netpage); + put_page(netpage); fscache_retrieval_complete(op, 1); continue; } @@ -538,10 +538,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, } /* install a monitor */ - page_cache_get(netpage); + get_page(netpage); monitor->netfs_page = netpage; - page_cache_get(backpage); + get_page(backpage); monitor->back_page = backpage; monitor->monitor.private = backpage; add_page_wait_queue(backpage, &monitor->monitor); @@ -555,10 +555,10 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, unlock_page(backpage); } - page_cache_release(backpage); + put_page(backpage); backpage = NULL; - page_cache_release(netpage); + put_page(netpage); netpage = NULL; continue; @@ -603,7 +603,7 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, netpage->index, cachefiles_gfp); if (ret < 0) { if (ret == -EEXIST) { - page_cache_release(netpage); + put_page(netpage); fscache_retrieval_complete(op, 1); continue; } @@ -612,14 +612,14 @@ static int cachefiles_read_backing_file(struct cachefiles_object *object, copy_highpage(netpage, backpage); - page_cache_release(backpage); + put_page(backpage); backpage = NULL; fscache_mark_page_cached(op, netpage); /* the netpage is unlocked and marked up to date here */ fscache_end_io(op, netpage, 0); - page_cache_release(netpage); + put_page(netpage); netpage = NULL; fscache_retrieval_complete(op, 1); continue; @@ -632,11 +632,11 @@ static int 
cachefiles_read_backing_file(struct cachefiles_object *object, out: /* tidy up */ if (newpage) - page_cache_release(newpage); + put_page(newpage); if (netpage) - page_cache_release(netpage); + put_page(netpage); if (backpage) - page_cache_release(backpage); + put_page(backpage); if (monitor) { fscache_put_retrieval(op); kfree(monitor); @@ -644,7 +644,7 @@ out: list_for_each_entry_safe(netpage, _n, list, lru) { list_del(&netpage->lru); - page_cache_release(netpage); + put_page(netpage); fscache_retrieval_complete(op, 1); } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index fc5cae2a0db2..4801571f51cb 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -143,7 +143,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset, inode = page->mapping->host; ci = ceph_inode(inode); - if (offset != 0 || length != PAGE_CACHE_SIZE) { + if (offset != 0 || length != PAGE_SIZE) { dout("%p invalidatepage %p idx %lu partial dirty page %u~%u\n", inode, page, page->index, offset, length); return; @@ -197,10 +197,10 @@ static int readpage_nounlock(struct file *filp, struct page *page) &ceph_inode_to_client(inode)->client->osdc; int err = 0; u64 off = page_offset(page); - u64 len = PAGE_CACHE_SIZE; + u64 len = PAGE_SIZE; if (off >= i_size_read(inode)) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); return 0; } @@ -212,7 +212,7 @@ static int readpage_nounlock(struct file *filp, struct page *page) */ if (off == 0) return -EINVAL; - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); return 0; } @@ -234,9 +234,9 @@ static int readpage_nounlock(struct file *filp, struct page *page) ceph_fscache_readpage_cancel(inode, page); goto out; } - if (err < PAGE_CACHE_SIZE) + if (err < PAGE_SIZE) /* zero fill remainder of page */ - zero_user_segment(page, err, PAGE_CACHE_SIZE); + zero_user_segment(page, err, PAGE_SIZE); else flush_dcache_page(page); @@ -278,10 +278,10 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) if (rc < 0 && rc != -ENOENT) goto unlock; - if (bytes < (int)PAGE_CACHE_SIZE) { + if (bytes < (int)PAGE_SIZE) { /* zero (remainder of) page */ int s = bytes < 0 ? 
0 : bytes; - zero_user_segment(page, s, PAGE_CACHE_SIZE); + zero_user_segment(page, s, PAGE_SIZE); } dout("finish_read %p uptodate %p idx %lu\n", inode, page, page->index); @@ -290,8 +290,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) ceph_readpage_to_fscache(inode, page); unlock: unlock_page(page); - page_cache_release(page); - bytes -= PAGE_CACHE_SIZE; + put_page(page); + bytes -= PAGE_SIZE; } kfree(osd_data->pages); } @@ -336,7 +336,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) if (max && nr_pages == max) break; } - len = nr_pages << PAGE_CACHE_SHIFT; + len = nr_pages << PAGE_SHIFT; dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, off, len); vino = ceph_vino(inode); @@ -364,7 +364,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) if (add_to_page_cache_lru(page, &inode->i_data, page->index, GFP_KERNEL)) { ceph_fscache_uncache_page(inode, page); - page_cache_release(page); + put_page(page); dout("start_read %p add_to_page_cache failed %p\n", inode, page); nr_pages = i; @@ -415,8 +415,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping, if (rc == 0) goto out; - if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE) - max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) + if (fsc->mount_options->rsize >= PAGE_SIZE) + max = (fsc->mount_options->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT; dout("readpages %p file %p nr_pages %d max %d\n", inode, @@ -484,7 +484,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc) long writeback_stat; u64 truncate_size; u32 truncate_seq; - int err = 0, len = PAGE_CACHE_SIZE; + int err = 0, len = PAGE_SIZE; dout("writepage %p idx %lu\n", page, page->index); @@ -725,9 +725,9 @@ static int ceph_writepages_start(struct address_space *mapping, } if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize) wsize = fsc->mount_options->wsize; - if (wsize < PAGE_CACHE_SIZE) - wsize = PAGE_CACHE_SIZE; - max_pages_ever = wsize >> PAGE_CACHE_SHIFT; + if (wsize < PAGE_SIZE) + wsize = PAGE_SIZE; + max_pages_ever = wsize >> PAGE_SHIFT; pagevec_init(&pvec, 0); @@ -737,8 +737,8 @@ static int ceph_writepages_start(struct address_space *mapping, end = -1; dout(" cyclic, start at %lu\n", start); } else { - start = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + start = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; should_loop = 0; @@ -887,7 +887,7 @@ get_more_pages: num_ops = 1 + do_sync; strip_unit_end = page->index + - ((len - 1) >> PAGE_CACHE_SHIFT); + ((len - 1) >> PAGE_SHIFT); BUG_ON(pages); max_pages = calc_pages_for(0, (u64)len); @@ -901,7 +901,7 @@ get_more_pages: len = 0; } else if (page->index != - (offset + len) >> PAGE_CACHE_SHIFT) { + (offset + len) >> PAGE_SHIFT) { if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : CEPH_OSD_MAX_OPS)) { redirty_page_for_writepage(wbc, page); @@ -929,7 +929,7 @@ get_more_pages: pages[locked_pages] = page; locked_pages++; - len += PAGE_CACHE_SIZE; + len += PAGE_SIZE; } /* did we get anything? 
*/ @@ -981,7 +981,7 @@ new_request: BUG_ON(IS_ERR(req)); } BUG_ON(len < page_offset(pages[locked_pages - 1]) + - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); req->r_callback = writepages_finish; req->r_inode = inode; @@ -1011,7 +1011,7 @@ new_request: } set_page_writeback(pages[i]); - len += PAGE_CACHE_SIZE; + len += PAGE_SIZE; } if (snap_size != -1) { @@ -1020,7 +1020,7 @@ new_request: /* writepages_finish() clears writeback pages * according to the data length, so make sure * data length covers all locked pages */ - u64 min_len = len + 1 - PAGE_CACHE_SIZE; + u64 min_len = len + 1 - PAGE_SIZE; len = min(len, (u64)i_size_read(inode) - offset); len = max(len, min_len); } @@ -1135,8 +1135,8 @@ static int ceph_update_writeable_page(struct file *file, { struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); - loff_t page_off = pos & PAGE_CACHE_MASK; - int pos_in_page = pos & ~PAGE_CACHE_MASK; + loff_t page_off = pos & PAGE_MASK; + int pos_in_page = pos & ~PAGE_MASK; int end_in_page = pos_in_page + len; loff_t i_size; int r; @@ -1191,7 +1191,7 @@ retry_locked: } /* full page? */ - if (pos_in_page == 0 && len == PAGE_CACHE_SIZE) + if (pos_in_page == 0 && len == PAGE_SIZE) return 0; /* past end of file? */ @@ -1199,12 +1199,12 @@ retry_locked: if (page_off >= i_size || (pos_in_page == 0 && (pos+len) >= i_size && - end_in_page - pos_in_page != PAGE_CACHE_SIZE)) { + end_in_page - pos_in_page != PAGE_SIZE)) { dout(" zeroing %p 0 - %d and %d - %d\n", - page, pos_in_page, end_in_page, (int)PAGE_CACHE_SIZE); + page, pos_in_page, end_in_page, (int)PAGE_SIZE); zero_user_segments(page, 0, pos_in_page, - end_in_page, PAGE_CACHE_SIZE); + end_in_page, PAGE_SIZE); return 0; } @@ -1228,7 +1228,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, { struct inode *inode = file_inode(file); struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; int r; do { @@ -1242,7 +1242,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, r = ceph_update_writeable_page(file, pos, len, page); if (r < 0) - page_cache_release(page); + put_page(page); else *pagep = page; } while (r == -EAGAIN); @@ -1259,7 +1259,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = file_inode(file); - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int check_cap = 0; dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, @@ -1279,7 +1279,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (check_cap) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); @@ -1322,11 +1322,11 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_file_info *fi = vma->vm_file->private_data; struct page *pinned_page = NULL; - loff_t off = vmf->pgoff << PAGE_CACHE_SHIFT; + loff_t off = vmf->pgoff << PAGE_SHIFT; int want, got, ret; dout("filemap_fault %p %llx.%llx %llu~%zd trying to get caps\n", - inode, ceph_vinop(inode), off, (size_t)PAGE_CACHE_SIZE); + inode, ceph_vinop(inode), off, (size_t)PAGE_SIZE); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else @@ -1343,7 +1343,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault 
*vmf) } } dout("filemap_fault %p %llu~%zd got cap refs on %s\n", - inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got)); + inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) || ci->i_inline_version == CEPH_INLINE_NONE) @@ -1352,16 +1352,16 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ret = -EAGAIN; dout("filemap_fault %p %llu~%zd dropping cap refs on %s ret %d\n", - inode, off, (size_t)PAGE_CACHE_SIZE, ceph_cap_string(got), ret); + inode, off, (size_t)PAGE_SIZE, ceph_cap_string(got), ret); if (pinned_page) - page_cache_release(pinned_page); + put_page(pinned_page); ceph_put_cap_refs(ci, got); if (ret != -EAGAIN) return ret; /* read inline data */ - if (off >= PAGE_CACHE_SIZE) { + if (off >= PAGE_SIZE) { /* does not support inline data > PAGE_SIZE */ ret = VM_FAULT_SIGBUS; } else { @@ -1378,12 +1378,12 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) CEPH_STAT_CAP_INLINE_DATA, true); if (ret1 < 0 || off >= i_size_read(inode)) { unlock_page(page); - page_cache_release(page); + put_page(page); ret = VM_FAULT_SIGBUS; goto out; } - if (ret1 < PAGE_CACHE_SIZE) - zero_user_segment(page, ret1, PAGE_CACHE_SIZE); + if (ret1 < PAGE_SIZE) + zero_user_segment(page, ret1, PAGE_SIZE); else flush_dcache_page(page); SetPageUptodate(page); @@ -1392,7 +1392,7 @@ static int ceph_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) } out: dout("filemap_fault %p %llu~%zd read inline data ret %d\n", - inode, off, (size_t)PAGE_CACHE_SIZE, ret); + inode, off, (size_t)PAGE_SIZE, ret); return ret; } @@ -1430,10 +1430,10 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) } } - if (off + PAGE_CACHE_SIZE <= size) - len = PAGE_CACHE_SIZE; + if (off + PAGE_SIZE <= size) + len = PAGE_SIZE; else - len = size & ~PAGE_CACHE_MASK; + len = size & ~PAGE_MASK; dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n", inode, ceph_vinop(inode), off, len, size); @@ -1519,7 +1519,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, return; if (PageUptodate(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); return; } } @@ -1534,14 +1534,14 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, } if (page != locked_page) { - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); else flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); } } @@ -1578,7 +1578,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) from_pagecache = true; lock_page(page); } else { - page_cache_release(page); + put_page(page); page = NULL; } } @@ -1586,8 +1586,8 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page) if (page) { len = i_size_read(inode); - if (len > PAGE_CACHE_SIZE) - len = PAGE_CACHE_SIZE; + if (len > PAGE_SIZE) + len = PAGE_SIZE; } else { page = __page_cache_alloc(GFP_NOFS); if (!page) { @@ -1670,7 +1670,7 @@ out: if (page && page != locked_page) { if (from_pagecache) { unlock_page(page); - page_cache_release(page); + put_page(page); } else __free_pages(page, 0); } diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index de17bb232ff8..cfaeef18cbca 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2510,7 +2510,7 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, *pinned_page = page; break; } 
- page_cache_release(page); + put_page(page); } /* * drop cap refs first because getattr while diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index fadc243dfb28..4fb2bbc2a272 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -129,7 +129,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, struct inode *dir = d_inode(parent); struct dentry *dentry, *last = NULL; struct ceph_dentry_info *di; - unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *); + unsigned nsize = PAGE_SIZE / sizeof(struct dentry *); int err = 0; loff_t ptr_pos = 0; struct ceph_readdir_cache_control cache_ctl = {}; @@ -154,7 +154,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, } err = -EAGAIN; - pgoff = ptr_pos >> PAGE_CACHE_SHIFT; + pgoff = ptr_pos >> PAGE_SHIFT; if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) { ceph_readdir_cache_release(&cache_ctl); cache_ctl.page = find_lock_page(&dir->i_data, pgoff); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ef38f01c1795..a79f9269831e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -466,7 +466,7 @@ more: ret += zlen; } - didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; + didpages = (page_align + ret) >> PAGE_SHIFT; pos += ret; read = pos - off; left -= ret; @@ -806,8 +806,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, if (write) { ret = invalidate_inode_pages2_range(inode->i_mapping, - pos >> PAGE_CACHE_SHIFT, - (pos + count) >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + (pos + count) >> PAGE_SHIFT); if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); @@ -872,7 +872,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter, * may block. */ truncate_inode_pages_range(inode->i_mapping, pos, - (pos+len) | (PAGE_CACHE_SIZE - 1)); + (pos+len) | (PAGE_SIZE - 1)); osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0); } @@ -1006,8 +1006,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, return ret; ret = invalidate_inode_pages2_range(inode->i_mapping, - pos >> PAGE_CACHE_SHIFT, - (pos + count) >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + (pos + count) >> PAGE_SHIFT); if (ret < 0) dout("invalidate_inode_pages2_range returned %d\n", ret); @@ -1036,7 +1036,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos, * write from beginning of first page, * regardless of io alignment */ - num_pages = (len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) { @@ -1159,7 +1159,7 @@ again: dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n", inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret); if (pinned_page) { - page_cache_release(pinned_page); + put_page(pinned_page); pinned_page = NULL; } ceph_put_cap_refs(ci, got); @@ -1188,10 +1188,10 @@ again: if (retry_op == READ_INLINE) { BUG_ON(ret > 0 || read > 0); if (iocb->ki_pos < i_size && - iocb->ki_pos < PAGE_CACHE_SIZE) { + iocb->ki_pos < PAGE_SIZE) { loff_t end = min_t(loff_t, i_size, iocb->ki_pos + len); - end = min_t(loff_t, end, PAGE_CACHE_SIZE); + end = min_t(loff_t, end, PAGE_SIZE); if (statret < end) zero_user_segment(page, statret, end); ret = copy_page_to_iter(page, @@ -1463,21 +1463,21 @@ static inline void ceph_zero_partial_page( struct inode *inode, loff_t offset, unsigned size) { struct page *page; - pgoff_t index = offset >> PAGE_CACHE_SHIFT; + pgoff_t index = offset >> PAGE_SHIFT; page = find_lock_page(inode->i_mapping, index); if (page) 
{ wait_on_page_writeback(page); - zero_user(page, offset & (PAGE_CACHE_SIZE - 1), size); + zero_user(page, offset & (PAGE_SIZE - 1), size); unlock_page(page); - page_cache_release(page); + put_page(page); } } static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, loff_t length) { - loff_t nearly = round_up(offset, PAGE_CACHE_SIZE); + loff_t nearly = round_up(offset, PAGE_SIZE); if (offset < nearly) { loff_t size = nearly - offset; if (length < size) @@ -1486,8 +1486,8 @@ static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset, offset += size; length -= size; } - if (length >= PAGE_CACHE_SIZE) { - loff_t size = round_down(length, PAGE_CACHE_SIZE); + if (length >= PAGE_SIZE) { + loff_t size = round_down(length, PAGE_SIZE); truncate_pagecache_range(inode, offset, offset + size - 1); offset += size; length -= size; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index ed58b168904a..edfade037738 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1338,7 +1338,7 @@ void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl) { if (ctl->page) { kunmap(ctl->page); - page_cache_release(ctl->page); + put_page(ctl->page); ctl->page = NULL; } } @@ -1348,7 +1348,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn, struct ceph_mds_request *req) { struct ceph_inode_info *ci = ceph_inode(dir); - unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry*); + unsigned nsize = PAGE_SIZE / sizeof(struct dentry*); unsigned idx = ctl->index % nsize; pgoff_t pgoff = ctl->index / nsize; @@ -1367,7 +1367,7 @@ static int fill_readdir_cache(struct inode *dir, struct dentry *dn, unlock_page(ctl->page); ctl->dentries = kmap(ctl->page); if (idx == 0) - memset(ctl->dentries, 0, PAGE_CACHE_SIZE); + memset(ctl->dentries, 0, PAGE_SIZE); } if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 44852c3ae531..541ead4d8965 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1610,7 +1610,7 @@ again: while (!list_empty(&tmp_list)) { if (!msg) { msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, - PAGE_CACHE_SIZE, GFP_NOFS, false); + PAGE_SIZE, GFP_NOFS, false); if (!msg) goto out_err; head = msg->front.iov_base; diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 37712ccffcc6..ee69a537dba5 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -97,7 +97,7 @@ struct ceph_mds_reply_info_parsed { /* * cap releases are batched and sent to the MDS en masse. */ -#define CEPH_CAPS_PER_RELEASE ((PAGE_CACHE_SIZE - \ +#define CEPH_CAPS_PER_RELEASE ((PAGE_SIZE - \ sizeof(struct ceph_mds_cap_release)) / \ sizeof(struct ceph_mds_cap_item)) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c973043deb0e..f12d5e2955c2 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -560,7 +560,7 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, /* set up mempools */ err = -ENOMEM; - page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; + page_count = fsc->mount_options->wsize >> PAGE_SHIFT; size = sizeof (struct page *) * (page_count ? page_count : 1); fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); if (!fsc->wb_pagevec_pool) @@ -912,13 +912,13 @@ static int ceph_register_bdi(struct super_block *sb, int err; /* set ra_pages based on rasize mount option? 
*/ - if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE) + if (fsc->mount_options->rasize >= PAGE_SIZE) fsc->backing_dev_info.ra_pages = - (fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1) + (fsc->mount_options->rasize + PAGE_SIZE - 1) >> PAGE_SHIFT; else fsc->backing_dev_info.ra_pages = - VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE; + VM_MAX_READAHEAD * 1024 / PAGE_SIZE; err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld", atomic_long_inc_return(&bdi_seq)); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 1d86fc620e5c..89201564c346 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -962,7 +962,7 @@ static int cifs_clone_file_range(struct file *src_file, loff_t off, cifs_dbg(FYI, "about to flush pages\n"); /* should we flush first and last page first */ truncate_inode_pages_range(&target_inode->i_data, destoff, - PAGE_CACHE_ALIGN(destoff + len)-1); + PAGE_ALIGN(destoff + len)-1); if (target_tcon->ses->server->ops->duplicate_extents) rc = target_tcon->ses->server->ops->duplicate_extents(xid, diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index d21da9f05bae..f2cc0b3d1af7 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -714,7 +714,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb) * * Note that this might make for "interesting" allocation problems during * writeback however as we have to allocate an array of pointers for the - * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. + * pages. A 16M write means ~32kb page array with PAGE_SIZE == 4096. * * For reads, there is a similar problem as we need to allocate an array * of kvecs to handle the receive, though that should only need to be done @@ -733,7 +733,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb) /* * The default wsize is 1M. find_get_pages seems to return a maximum of 256 - * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill + * pages in a single call. With PAGE_SIZE == 4k, this means we can fill * a single wsize request with a single call. 
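A quick sanity check on the numbers in that comment: 256 pages * 4096 bytes per page is 1,048,576 bytes, exactly the 1M default (CIFS_DEFAULT_IOSIZE just below is defined as 1024 * 1024), so one find_get_pages call can fill an entire default-sized write request; only larger, non-default wsize values need the repeated calls handled in wdata_alloc_and_fillpages() further down.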
*/ #define CIFS_DEFAULT_IOSIZE (1024 * 1024) diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 76fcb50295a3..a894bf809ff7 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1929,17 +1929,17 @@ cifs_writev_requeue(struct cifs_writedata *wdata) wsize = server->ops->wp_retry_size(inode); if (wsize < rest_len) { - nr_pages = wsize / PAGE_CACHE_SIZE; + nr_pages = wsize / PAGE_SIZE; if (!nr_pages) { rc = -ENOTSUPP; break; } - cur_len = nr_pages * PAGE_CACHE_SIZE; - tailsz = PAGE_CACHE_SIZE; + cur_len = nr_pages * PAGE_SIZE; + tailsz = PAGE_SIZE; } else { - nr_pages = DIV_ROUND_UP(rest_len, PAGE_CACHE_SIZE); + nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); cur_len = rest_len; - tailsz = rest_len - (nr_pages - 1) * PAGE_CACHE_SIZE; + tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; } wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); @@ -1957,7 +1957,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) wdata2->sync_mode = wdata->sync_mode; wdata2->nr_pages = nr_pages; wdata2->offset = page_offset(wdata2->pages[0]); - wdata2->pagesz = PAGE_CACHE_SIZE; + wdata2->pagesz = PAGE_SIZE; wdata2->tailsz = tailsz; wdata2->bytes = cur_len; @@ -1975,7 +1975,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata) if (rc != 0 && rc != -EAGAIN) { SetPageError(wdata2->pages[j]); end_page_writeback(wdata2->pages[j]); - page_cache_release(wdata2->pages[j]); + put_page(wdata2->pages[j]); } } @@ -2018,7 +2018,7 @@ cifs_writev_complete(struct work_struct *work) else if (wdata->result < 0) SetPageError(page); end_page_writeback(page); - page_cache_release(page); + put_page(page); } if (wdata->result != -EAGAIN) mapping_set_error(inode->i_mapping, wdata->result); diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a763cd3d9e7c..6f62ac821a84 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -3630,7 +3630,7 @@ try_mount_again: cifs_sb->rsize = server->ops->negotiate_rsize(tcon, volume_info); /* tune readahead according to rsize */ - cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; + cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_SIZE; remote_path_check: #ifdef CONFIG_CIFS_DFS_UPCALL diff --git a/fs/cifs/file.c b/fs/cifs/file.c index ff882aeaccc6..c03d0744648b 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1833,7 +1833,7 @@ refind_writable: static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; - loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; + loff_t offset = (loff_t)page->index << PAGE_SHIFT; char *write_data; int rc = -EFAULT; int bytes_written = 0; @@ -1849,7 +1849,7 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) write_data = kmap(page); write_data += from; - if ((to > PAGE_CACHE_SIZE) || (from > to)) { + if ((to > PAGE_SIZE) || (from > to)) { kunmap(page); return -EIO; } @@ -1902,7 +1902,7 @@ wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping, * find_get_pages_tag seems to return a max of 256 on each * iteration, so we must call it several times in order to * fill the array or the wsize is effectively limited to - * 256 * PAGE_CACHE_SIZE. + * 256 * PAGE_SIZE. 
*/ *found_pages = 0; pages = wdata->pages; @@ -1991,7 +1991,7 @@ wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages, /* put any pages we aren't going to use */ for (i = nr_pages; i < found_pages; i++) { - page_cache_release(wdata->pages[i]); + put_page(wdata->pages[i]); wdata->pages[i] = NULL; } @@ -2009,11 +2009,11 @@ wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages, wdata->sync_mode = wbc->sync_mode; wdata->nr_pages = nr_pages; wdata->offset = page_offset(wdata->pages[0]); - wdata->pagesz = PAGE_CACHE_SIZE; + wdata->pagesz = PAGE_SIZE; wdata->tailsz = min(i_size_read(mapping->host) - page_offset(wdata->pages[nr_pages - 1]), - (loff_t)PAGE_CACHE_SIZE); - wdata->bytes = ((nr_pages - 1) * PAGE_CACHE_SIZE) + wdata->tailsz; + (loff_t)PAGE_SIZE); + wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz; if (wdata->cfile != NULL) cifsFileInfo_put(wdata->cfile); @@ -2047,15 +2047,15 @@ static int cifs_writepages(struct address_space *mapping, * If wsize is smaller than the page cache size, default to writing * one page at a time via cifs_writepage */ - if (cifs_sb->wsize < PAGE_CACHE_SIZE) + if (cifs_sb->wsize < PAGE_SIZE) return generic_writepages(mapping, wbc); if (wbc->range_cyclic) { index = mapping->writeback_index; /* Start from prev offset */ end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = true; scanned = true; @@ -2071,7 +2071,7 @@ retry: if (rc) break; - tofind = min((wsize / PAGE_CACHE_SIZE) - 1, end - index) + 1; + tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1; wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index, &found_pages); @@ -2111,7 +2111,7 @@ retry: else SetPageError(wdata->pages[i]); end_page_writeback(wdata->pages[i]); - page_cache_release(wdata->pages[i]); + put_page(wdata->pages[i]); } if (rc != -EAGAIN) mapping_set_error(mapping, rc); @@ -2154,7 +2154,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc) xid = get_xid(); /* BB add check for wbc flags */ - page_cache_get(page); + get_page(page); if (!PageUptodate(page)) cifs_dbg(FYI, "ppw - page not up to date\n"); @@ -2170,7 +2170,7 @@ cifs_writepage_locked(struct page *page, struct writeback_control *wbc) */ set_page_writeback(page); retry_write: - rc = cifs_partialpagewrite(page, 0, PAGE_CACHE_SIZE); + rc = cifs_partialpagewrite(page, 0, PAGE_SIZE); if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL) goto retry_write; else if (rc == -EAGAIN) @@ -2180,7 +2180,7 @@ retry_write: else SetPageUptodate(page); end_page_writeback(page); - page_cache_release(page); + put_page(page); free_xid(xid); return rc; } @@ -2214,12 +2214,12 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, if (copied == len) SetPageUptodate(page); ClearPageChecked(page); - } else if (!PageUptodate(page) && copied == PAGE_CACHE_SIZE) + } else if (!PageUptodate(page) && copied == PAGE_SIZE) SetPageUptodate(page); if (!PageUptodate(page)) { char *page_data; - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned offset = pos & (PAGE_SIZE - 1); unsigned int xid; xid = get_xid(); @@ -2248,7 +2248,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, } unlock_page(page); - page_cache_release(page); + put_page(page); return rc; } @@ -3286,9 +3286,9 @@ cifs_readv_complete(struct work_struct *work) 
(rdata->result == -EAGAIN && got_bytes)) cifs_readpage_to_fscache(rdata->mapping->host, page); - got_bytes -= min_t(unsigned int, PAGE_CACHE_SIZE, got_bytes); + got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes); - page_cache_release(page); + put_page(page); rdata->pages[i] = NULL; } kref_put(&rdata->refcount, cifs_readdata_release); @@ -3307,21 +3307,21 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, /* determine the eof that the server (probably) has */ eof = CIFS_I(rdata->mapping->host)->server_eof; - eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0; + eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0; cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index); rdata->got_bytes = 0; - rdata->tailsz = PAGE_CACHE_SIZE; + rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; - if (len >= PAGE_CACHE_SIZE) { + if (len >= PAGE_SIZE) { /* enough data to fill the page */ iov.iov_base = kmap(page); - iov.iov_len = PAGE_CACHE_SIZE; + iov.iov_len = PAGE_SIZE; cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", i, page->index, iov.iov_base, iov.iov_len); - len -= PAGE_CACHE_SIZE; + len -= PAGE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ iov.iov_base = kmap(page); @@ -3329,7 +3329,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", i, page->index, iov.iov_base, iov.iov_len); memset(iov.iov_base + len, - '\0', PAGE_CACHE_SIZE - len); + '\0', PAGE_SIZE - len); rdata->tailsz = len; len = 0; } else if (page->index > eof_index) { @@ -3341,12 +3341,12 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, * to prevent the VFS from repeatedly attempting to * fill them until the writes are flushed. */ - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); lru_cache_add_file(page); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); rdata->pages[i] = NULL; rdata->nr_pages--; continue; @@ -3354,7 +3354,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, /* no need to hold page hostage */ lru_cache_add_file(page); unlock_page(page); - page_cache_release(page); + put_page(page); rdata->pages[i] = NULL; rdata->nr_pages--; continue; @@ -3402,8 +3402,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, } /* move first page to the tmplist */ - *offset = (loff_t)page->index << PAGE_CACHE_SHIFT; - *bytes = PAGE_CACHE_SIZE; + *offset = (loff_t)page->index << PAGE_SHIFT; + *bytes = PAGE_SIZE; *nr_pages = 1; list_move_tail(&page->lru, tmplist); @@ -3415,7 +3415,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; /* would this page push the read over the rsize? */ - if (*bytes + PAGE_CACHE_SIZE > rsize) + if (*bytes + PAGE_SIZE > rsize) break; __SetPageLocked(page); @@ -3424,7 +3424,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list, break; } list_move_tail(&page->lru, tmplist); - (*bytes) += PAGE_CACHE_SIZE; + (*bytes) += PAGE_SIZE; expected_index++; (*nr_pages)++; } @@ -3493,7 +3493,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * reach this point however since we set ra_pages to 0 when the * rsize is smaller than a cache page. 
*/ - if (unlikely(rsize < PAGE_CACHE_SIZE)) { + if (unlikely(rsize < PAGE_SIZE)) { add_credits_and_wake_if(server, credits, 0); return 0; } @@ -3512,7 +3512,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, list_del(&page->lru); lru_cache_add_file(page); unlock_page(page); - page_cache_release(page); + put_page(page); } rc = -ENOMEM; add_credits_and_wake_if(server, credits, 0); @@ -3524,7 +3524,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, rdata->offset = offset; rdata->bytes = bytes; rdata->pid = pid; - rdata->pagesz = PAGE_CACHE_SIZE; + rdata->pagesz = PAGE_SIZE; rdata->read_into_pages = cifs_readpages_read_into_pages; rdata->credits = credits; @@ -3542,7 +3542,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, page = rdata->pages[i]; lru_cache_add_file(page); unlock_page(page); - page_cache_release(page); + put_page(page); } /* Fallback to the readpage in error/reconnect cases */ kref_put(&rdata->refcount, cifs_readdata_release); @@ -3577,7 +3577,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page, read_data = kmap(page); /* for reads over a certain size could initiate async read ahead */ - rc = cifs_read(file, read_data, PAGE_CACHE_SIZE, poffset); + rc = cifs_read(file, read_data, PAGE_SIZE, poffset); if (rc < 0) goto io_error; @@ -3587,8 +3587,8 @@ static int cifs_readpage_worker(struct file *file, struct page *page, file_inode(file)->i_atime = current_fs_time(file_inode(file)->i_sb); - if (PAGE_CACHE_SIZE > rc) - memset(read_data + rc, 0, PAGE_CACHE_SIZE - rc); + if (PAGE_SIZE > rc) + memset(read_data + rc, 0, PAGE_SIZE - rc); flush_dcache_page(page); SetPageUptodate(page); @@ -3608,7 +3608,7 @@ read_complete: static int cifs_readpage(struct file *file, struct page *page) { - loff_t offset = (loff_t)page->index << PAGE_CACHE_SHIFT; + loff_t offset = (loff_t)page->index << PAGE_SHIFT; int rc = -EACCES; unsigned int xid; @@ -3679,8 +3679,8 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { int oncethru = 0; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - loff_t offset = pos & (PAGE_CACHE_SIZE - 1); + pgoff_t index = pos >> PAGE_SHIFT; + loff_t offset = pos & (PAGE_SIZE - 1); loff_t page_start = pos & PAGE_MASK; loff_t i_size; struct page *page; @@ -3703,7 +3703,7 @@ start: * the server. If the write is short, we'll end up doing a sync write * instead. */ - if (len == PAGE_CACHE_SIZE) + if (len == PAGE_SIZE) goto out; /* @@ -3718,7 +3718,7 @@ start: (offset == 0 && (pos + len) >= i_size)) { zero_user_segments(page, 0, offset, offset + len, - PAGE_CACHE_SIZE); + PAGE_SIZE); /* * PageChecked means that the parts of the page * to which we're not writing are considered up @@ -3737,7 +3737,7 @@ start: * do a sync write instead since PG_uptodate isn't set. 
*/ cifs_readpage_worker(file, page, &page_start); - page_cache_release(page); + put_page(page); oncethru = 1; goto start; } else { @@ -3764,7 +3764,7 @@ static void cifs_invalidate_page(struct page *page, unsigned int offset, { struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host); - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0 && length == PAGE_SIZE) cifs_fscache_invalidate_page(page, &cifsi->vfs_inode); } @@ -3772,7 +3772,7 @@ static int cifs_launder_page(struct page *page) { int rc = 0; loff_t range_start = page_offset(page); - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index aeb26dbfa1bf..5f9ad5c42180 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -59,7 +59,7 @@ static void cifs_set_ops(struct inode *inode) /* check if server can support readpages */ if (cifs_sb_master_tcon(cifs_sb)->ses->server->maxBuf < - PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE) + PAGE_SIZE + MAX_CIFS_HDR_SIZE) inode->i_data.a_ops = &cifs_addr_ops_smallbuf; else inode->i_data.a_ops = &cifs_addr_ops; @@ -2019,8 +2019,8 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, static int cifs_truncate_page(struct address_space *mapping, loff_t from) { - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE - 1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE - 1); struct page *page; int rc = 0; @@ -2028,9 +2028,9 @@ static int cifs_truncate_page(struct address_space *mapping, loff_t from) if (!page) return -ENOMEM; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); unlock_page(page); - page_cache_release(page); + put_page(page); return rc; } diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c index a8f3b589a2df..cfd91320e869 100644 --- a/fs/configfs/mount.c +++ b/fs/configfs/mount.c @@ -71,8 +71,8 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent) struct inode *inode; struct dentry *root; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = CONFIGFS_MAGIC; sb->s_op = &configfs_ops; sb->s_time_gran = 1; diff --git a/fs/cramfs/README b/fs/cramfs/README index 445d1c2d7646..9d4e7ea311f4 100644 --- a/fs/cramfs/README +++ b/fs/cramfs/README @@ -86,26 +86,26 @@ Block Size (Block size in cramfs refers to the size of input data that is compressed at a time. It's intended to be somewhere around -PAGE_CACHE_SIZE for cramfs_readpage's convenience.) +PAGE_SIZE for cramfs_readpage's convenience.) The superblock ought to indicate the block size that the fs was written for, since comments in <linux/pagemap.h> indicate that -PAGE_CACHE_SIZE may grow in future (if I interpret the comment +PAGE_SIZE may grow in future (if I interpret the comment correctly). -Currently, mkcramfs #define's PAGE_CACHE_SIZE as 4096 and uses that -for blksize, whereas Linux-2.3.39 uses its PAGE_CACHE_SIZE, which in +Currently, mkcramfs #define's PAGE_SIZE as 4096 and uses that +for blksize, whereas Linux-2.3.39 uses its PAGE_SIZE, which in turn is defined as PAGE_SIZE (which can be as large as 32KB on arm). This discrepancy is a bug, though it's not clear which should be changed. 
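The "make cramfs_readpage read multiple blocks" idea the README goes on to describe is easy to picture. A minimal sketch (illustrative only, not part of this patch), assuming the on-disk block size divides PAGE_SIZE evenly and a hypothetical read_block() helper that decompresses one on-disk block into its destination:

#include <asm/page.h>	/* PAGE_SIZE */

/*
 * Hypothetical helper, not from this patch: fill one page cache page
 * from several smaller on-disk blocks. read_block() is assumed to
 * decompress block 'blk' into 'dst' and return 0 on success.
 */
static int fill_page_from_blocks(char *pgdata, unsigned int blksize,
				 unsigned int first_blk,
				 int (*read_block)(char *dst, unsigned int blk))
{
	unsigned int i, blocks_per_page = PAGE_SIZE / blksize;
	int err;

	for (i = 0; i < blocks_per_page; i++) {
		err = read_block(pgdata + i * blksize, first_blk + i);
		if (err)
			return err;
	}
	return 0;
}

With such a loop the kernel page size no longer has to match the on-disk block size, which is what makes options 2 and 3 below workable.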
-One option is to change mkcramfs to take its PAGE_CACHE_SIZE from +One option is to change mkcramfs to take its PAGE_SIZE from <asm/page.h>. Personally I don't like this option, but it does require the least amount of change: just change `#define -PAGE_CACHE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage +PAGE_SIZE (4096)' to `#include <asm/page.h>'. The disadvantage is that the generated cramfs cannot always be shared between different kernels, not even necessarily kernels of the same architecture if -PAGE_CACHE_SIZE is subject to change between kernel versions +PAGE_SIZE is subject to change between kernel versions (currently possible with arm and ia64). The remaining options try to make cramfs more sharable. @@ -126,22 +126,22 @@ size. The options are: 1. Always 4096 bytes. 2. Writer chooses blocksize; kernel adapts but rejects blocksize > - PAGE_CACHE_SIZE. + PAGE_SIZE. 3. Writer chooses blocksize; kernel adapts even to blocksize > - PAGE_CACHE_SIZE. + PAGE_SIZE. It's easy enough to change the kernel to use a smaller value than -PAGE_CACHE_SIZE: just make cramfs_readpage read multiple blocks. +PAGE_SIZE: just make cramfs_readpage read multiple blocks. -The cost of option 1 is that kernels with a larger PAGE_CACHE_SIZE +The cost of option 1 is that kernels with a larger PAGE_SIZE value don't get as good compression as they can. The cost of option 2 relative to option 1 is that the code uses variables instead of #define'd constants. The gain is that people -with kernels having larger PAGE_CACHE_SIZE can make use of that if +with kernels having larger PAGE_SIZE can make use of that if they don't mind their cramfs being inaccessible to kernels with -smaller PAGE_CACHE_SIZE values. +smaller PAGE_SIZE values. Option 3 is easy to implement if we don't mind being CPU-inefficient: e.g. get readpage to decompress to a buffer of size MAX_BLKSIZE (which diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index b862bc219cd7..3a32ddf98095 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -137,7 +137,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb, * page cache and dentry tree anyway.. * * This also acts as a way to guarantee contiguous areas of up to - * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to + * BLKS_PER_BUF*PAGE_SIZE, so that the caller doesn't need to * worry about end-of-buffer issues even when decompressing a full * page cache. */ @@ -152,7 +152,7 @@ static struct inode *get_cramfs_inode(struct super_block *sb, */ #define BLKS_PER_BUF_SHIFT (2) #define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT) -#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE) +#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_SIZE) static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE]; static unsigned buffer_blocknr[READ_BUFFERS]; @@ -173,8 +173,8 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i if (!len) return NULL; - blocknr = offset >> PAGE_CACHE_SHIFT; - offset &= PAGE_CACHE_SIZE - 1; + blocknr = offset >> PAGE_SHIFT; + offset &= PAGE_SIZE - 1; /* Check if an existing buffer already has the data.. 
*/ for (i = 0; i < READ_BUFFERS; i++) { @@ -184,14 +184,14 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i continue; if (blocknr < buffer_blocknr[i]) continue; - blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT; + blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_SHIFT; blk_offset += offset; if (blk_offset + len > BUFFER_SIZE) continue; return read_buffers[i] + blk_offset; } - devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT; + devsize = mapping->host->i_size >> PAGE_SHIFT; /* Ok, read in BLKS_PER_BUF pages completely first. */ for (i = 0; i < BLKS_PER_BUF; i++) { @@ -213,7 +213,7 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i wait_on_page_locked(page); if (!PageUptodate(page)) { /* asynchronous error */ - page_cache_release(page); + put_page(page); pages[i] = NULL; } } @@ -229,12 +229,12 @@ static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned i struct page *page = pages[i]; if (page) { - memcpy(data, kmap(page), PAGE_CACHE_SIZE); + memcpy(data, kmap(page), PAGE_SIZE); kunmap(page); - page_cache_release(page); + put_page(page); } else - memset(data, 0, PAGE_CACHE_SIZE); - data += PAGE_CACHE_SIZE; + memset(data, 0, PAGE_SIZE); + data += PAGE_SIZE; } return read_buffers[buffer] + offset; } @@ -353,7 +353,7 @@ static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf) u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = CRAMFS_MAGIC; - buf->f_bsize = PAGE_CACHE_SIZE; + buf->f_bsize = PAGE_SIZE; buf->f_blocks = CRAMFS_SB(sb)->blocks; buf->f_bfree = 0; buf->f_bavail = 0; @@ -496,7 +496,7 @@ static int cramfs_readpage(struct file *file, struct page *page) int bytes_filled; void *pgdata; - maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + maxblock = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; bytes_filled = 0; pgdata = kmap(page); @@ -516,14 +516,14 @@ static int cramfs_readpage(struct file *file, struct page *page) if (compr_len == 0) ; /* hole */ - else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) { + else if (unlikely(compr_len > (PAGE_SIZE << 1))) { pr_err("bad compressed blocksize %u\n", compr_len); goto err; } else { mutex_lock(&read_mutex); bytes_filled = cramfs_uncompress_block(pgdata, - PAGE_CACHE_SIZE, + PAGE_SIZE, cramfs_read(sb, start_offset, compr_len), compr_len); mutex_unlock(&read_mutex); @@ -532,7 +532,7 @@ static int cramfs_readpage(struct file *file, struct page *page) } } - memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled); + memset(pgdata + bytes_filled, 0, PAGE_SIZE - bytes_filled); flush_dcache_page(page); kunmap(page); SetPageUptodate(page); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 06cd1a22240b..7f5804537d30 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -175,10 +175,10 @@ static int do_page_crypto(struct inode *inode, FS_XTS_TWEAK_SIZE - sizeof(index)); sg_init_table(&dst, 1); - sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); + sg_set_page(&dst, dest_page, PAGE_SIZE, 0); sg_init_table(&src, 1); - sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); - skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, + sg_set_page(&src, src_page, PAGE_SIZE, 0); + skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, xts_tweak); if (rw == FS_DECRYPT) res = crypto_skcipher_decrypt(req); @@ -287,7 +287,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk, struct bio *bio; int ret, err = 0; - BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + 
BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); ctx = fscrypt_get_ctx(inode); if (IS_ERR(ctx)) @@ -323,7 +323,7 @@ static int dax_load_hole(struct address_space *mapping, struct page *page, size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; if (vmf->pgoff >= size) { unlock_page(page); - page_cache_release(page); + put_page(page); return VM_FAULT_SIGBUS; } @@ -351,7 +351,7 @@ static int copy_user_bh(struct page *to, struct inode *inode, } #define NO_SECTOR -1 -#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_CACHE_SHIFT)) +#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT)) static int dax_radix_entry(struct address_space *mapping, pgoff_t index, sector_t sector, bool pmd_entry, bool dirty) @@ -506,8 +506,8 @@ int dax_writeback_mapping_range(struct address_space *mapping, if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL) return 0; - start_index = wbc->range_start >> PAGE_CACHE_SHIFT; - end_index = wbc->range_end >> PAGE_CACHE_SHIFT; + start_index = wbc->range_start >> PAGE_SHIFT; + end_index = wbc->range_end >> PAGE_SHIFT; pmd_index = DAX_PMD_INDEX(start_index); rcu_read_lock(); @@ -642,12 +642,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, page = find_get_page(mapping, vmf->pgoff); if (page) { if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { - page_cache_release(page); + put_page(page); return VM_FAULT_RETRY; } if (unlikely(page->mapping != mapping)) { unlock_page(page); - page_cache_release(page); + put_page(page); goto repeat; } size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT; @@ -711,10 +711,10 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, if (page) { unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT, - PAGE_CACHE_SIZE, 0); + PAGE_SIZE, 0); delete_from_page_cache(page); unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; } @@ -747,7 +747,7 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, unlock_page: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } goto out; } @@ -1094,7 +1094,7 @@ EXPORT_SYMBOL_GPL(dax_pfn_mkwrite); * you are truncating a file, the helper function dax_truncate_page() may be * more convenient. * - * We work in terms of PAGE_CACHE_SIZE here for commonality with + * We work in terms of PAGE_SIZE here for commonality with * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem * took care of disposing of the unnecessary blocks. Even if the filesystem * block size is smaller than PAGE_SIZE, we have to zero the rest of the page @@ -1104,18 +1104,18 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, get_block_t get_block) { struct buffer_head bh; - pgoff_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + pgoff_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); int err; /* Block boundary? 
Nothing to do */ if (!length) return 0; - BUG_ON((offset + length) > PAGE_CACHE_SIZE); + BUG_ON((offset + length) > PAGE_SIZE); memset(&bh, 0, sizeof(bh)); bh.b_bdev = inode->i_sb->s_bdev; - bh.b_size = PAGE_CACHE_SIZE; + bh.b_size = PAGE_SIZE; err = get_block(inode, index, &bh, 0); if (err < 0) return err; @@ -1123,7 +1123,7 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length, struct block_device *bdev = bh.b_bdev; struct blk_dax_ctl dax = { .sector = to_sector(&bh, inode), - .size = PAGE_CACHE_SIZE, + .size = PAGE_SIZE, }; if (dax_map_atomic(bdev, &dax) < 0) @@ -1146,7 +1146,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range); * Similar to block_truncate_page(), this function can be called by a * filesystem when it is truncating a DAX file to handle the partial page. * - * We work in terms of PAGE_CACHE_SIZE here for commonality with + * We work in terms of PAGE_SIZE here for commonality with * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem * took care of disposing of the unnecessary blocks. Even if the filesystem * block size is smaller than PAGE_SIZE, we have to zero the rest of the page @@ -1154,7 +1154,7 @@ EXPORT_SYMBOL_GPL(dax_zero_page_range); */ int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block) { - unsigned length = PAGE_CACHE_ALIGN(from) - from; + unsigned length = PAGE_ALIGN(from) - from; return dax_zero_page_range(inode, from, length, get_block); } EXPORT_SYMBOL_GPL(dax_truncate_page); diff --git a/fs/dcache.c b/fs/dcache.c index 32ceae3e6112..d5ecc6e477da 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1667,7 +1667,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE | DCACHE_OP_DELETE | - DCACHE_OP_SELECT_INODE)); + DCACHE_OP_SELECT_INODE | + DCACHE_OP_REAL)); dentry->d_op = op; if (!op) return; @@ -1685,6 +1686,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) dentry->d_flags |= DCACHE_OP_PRUNE; if (op->d_select_inode) dentry->d_flags |= DCACHE_OP_SELECT_INODE; + if (op->d_real) + dentry->d_flags |= DCACHE_OP_REAL; } EXPORT_SYMBOL(d_set_d_op); diff --git a/fs/direct-io.c b/fs/direct-io.c index 476f1ecbd1f0..472037732daf 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -172,7 +172,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio) */ if (dio->page_errors == 0) dio->page_errors = ret; - page_cache_get(page); + get_page(page); dio->pages[0] = page; sdio->head = 0; sdio->tail = 1; @@ -424,7 +424,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio) static inline void dio_cleanup(struct dio *dio, struct dio_submit *sdio) { while (sdio->head < sdio->tail) - page_cache_release(dio->pages[sdio->head++]); + put_page(dio->pages[sdio->head++]); } /* @@ -487,7 +487,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) if (dio->rw == READ && !PageCompound(page) && dio->should_dirty) set_page_dirty_lock(page); - page_cache_release(page); + put_page(page); } err = bio->bi_error; bio_put(bio); @@ -696,7 +696,7 @@ static inline int dio_bio_add_page(struct dio_submit *sdio) */ if ((sdio->cur_page_len + sdio->cur_page_offset) == PAGE_SIZE) sdio->pages_in_io--; - page_cache_get(sdio->cur_page); + get_page(sdio->cur_page); sdio->final_block_in_bio = sdio->cur_page_block + (sdio->cur_page_len >> sdio->blkbits); ret = 0; @@ -810,13 +810,13 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page, */ if (sdio->cur_page) { 
ret = dio_send_cur_page(dio, sdio, map_bh); - page_cache_release(sdio->cur_page); + put_page(sdio->cur_page); sdio->cur_page = NULL; if (ret) return ret; } - page_cache_get(page); /* It is in dio */ + get_page(page); /* It is in dio */ sdio->cur_page = page; sdio->cur_page_offset = offset; sdio->cur_page_len = len; @@ -830,7 +830,7 @@ out: if (sdio->boundary) { ret = dio_send_cur_page(dio, sdio, map_bh); dio_bio_submit(dio, sdio); - page_cache_release(sdio->cur_page); + put_page(sdio->cur_page); sdio->cur_page = NULL; } return ret; @@ -947,7 +947,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio, ret = get_more_blocks(dio, sdio, map_bh); if (ret) { - page_cache_release(page); + put_page(page); goto out; } if (!buffer_mapped(map_bh)) @@ -988,7 +988,7 @@ do_holes: /* AKPM: eargh, -ENOTBLK is a hack */ if (dio->rw & WRITE) { - page_cache_release(page); + put_page(page); return -ENOTBLK; } @@ -1001,7 +1001,7 @@ do_holes: if (sdio->block_in_file >= i_size_aligned >> blkbits) { /* We hit eof */ - page_cache_release(page); + put_page(page); goto out; } zero_user(page, from, 1 << blkbits); @@ -1041,7 +1041,7 @@ do_holes: sdio->next_block_for_io, map_bh); if (ret) { - page_cache_release(page); + put_page(page); goto out; } sdio->next_block_for_io += this_chunk_blocks; @@ -1057,7 +1057,7 @@ next_block: } /* Drop the ref which was taken in get_user_pages() */ - page_cache_release(page); + put_page(page); } out: return ret; @@ -1281,7 +1281,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, ret2 = dio_send_cur_page(dio, &sdio, &map_bh); if (retval == 0) retval = ret2; - page_cache_release(sdio.cur_page); + put_page(sdio.cur_page); sdio.cur_page = NULL; } if (sdio.bio) diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 00640e70ed7a..1ab012a27d9f 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -640,7 +640,7 @@ static int receive_from_sock(struct connection *con) con->rx_page = alloc_page(GFP_ATOMIC); if (con->rx_page == NULL) goto out_resched; - cbuf_init(&con->cb, PAGE_CACHE_SIZE); + cbuf_init(&con->cb, PAGE_SIZE); } /* @@ -657,7 +657,7 @@ static int receive_from_sock(struct connection *con) * buffer and the start of the currently used section (cb.base) */ if (cbuf_data(&con->cb) >= con->cb.base) { - iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb); + iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb); iov[1].iov_len = con->cb.base; iov[1].iov_base = page_address(con->rx_page); nvec = 2; @@ -675,7 +675,7 @@ static int receive_from_sock(struct connection *con) ret = dlm_process_incoming_buffer(con->nodeid, page_address(con->rx_page), con->cb.base, con->cb.len, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (ret == -EBADMSG) { log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d", page_address(con->rx_page), con->cb.base, @@ -1416,7 +1416,7 @@ void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc) spin_lock(&con->writequeue_lock); e = list_entry(con->writequeue.prev, struct writequeue_entry, list); if ((&e->list == &con->writequeue) || - (PAGE_CACHE_SIZE - e->end < len)) { + (PAGE_SIZE - e->end < len)) { e = NULL; } else { offset = e->end; diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index 64026e53722a..d09cb4cdd09f 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -286,7 +286,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, pg = virt_to_page(addr); offset = offset_in_page(addr); sg_set_page(&sg[i], pg, 0, offset); - remainder_of_page = PAGE_CACHE_SIZE - offset; + 
remainder_of_page = PAGE_SIZE - offset; if (size >= remainder_of_page) { sg[i].length = remainder_of_page; addr += remainder_of_page; @@ -400,7 +400,7 @@ static loff_t lower_offset_for_page(struct ecryptfs_crypt_stat *crypt_stat, struct page *page) { return ecryptfs_lower_header_size(crypt_stat) + - ((loff_t)page->index << PAGE_CACHE_SHIFT); + ((loff_t)page->index << PAGE_SHIFT); } /** @@ -428,7 +428,7 @@ static int crypt_extent(struct ecryptfs_crypt_stat *crypt_stat, size_t extent_size = crypt_stat->extent_size; int rc; - extent_base = (((loff_t)page_index) * (PAGE_CACHE_SIZE / extent_size)); + extent_base = (((loff_t)page_index) * (PAGE_SIZE / extent_size)); rc = ecryptfs_derive_iv(extent_iv, crypt_stat, (extent_base + extent_offset)); if (rc) { @@ -498,7 +498,7 @@ int ecryptfs_encrypt_page(struct page *page) } for (extent_offset = 0; - extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); + extent_offset < (PAGE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, enc_extent_page, page, extent_offset, ENCRYPT); @@ -512,7 +512,7 @@ int ecryptfs_encrypt_page(struct page *page) lower_offset = lower_offset_for_page(crypt_stat, page); enc_extent_virt = kmap(enc_extent_page); rc = ecryptfs_write_lower(ecryptfs_inode, enc_extent_virt, lower_offset, - PAGE_CACHE_SIZE); + PAGE_SIZE); kunmap(enc_extent_page); if (rc < 0) { ecryptfs_printk(KERN_ERR, @@ -560,7 +560,7 @@ int ecryptfs_decrypt_page(struct page *page) lower_offset = lower_offset_for_page(crypt_stat, page); page_virt = kmap(page); - rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_CACHE_SIZE, + rc = ecryptfs_read_lower(page_virt, lower_offset, PAGE_SIZE, ecryptfs_inode); kunmap(page); if (rc < 0) { @@ -571,7 +571,7 @@ int ecryptfs_decrypt_page(struct page *page) } for (extent_offset = 0; - extent_offset < (PAGE_CACHE_SIZE / crypt_stat->extent_size); + extent_offset < (PAGE_SIZE / crypt_stat->extent_size); extent_offset++) { rc = crypt_extent(crypt_stat, page, page, extent_offset, DECRYPT); @@ -659,11 +659,11 @@ void ecryptfs_set_default_sizes(struct ecryptfs_crypt_stat *crypt_stat) if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else { - if (PAGE_CACHE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) + if (PAGE_SIZE <= ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE) crypt_stat->metadata_size = ECRYPTFS_MINIMUM_HEADER_EXTENT_SIZE; else - crypt_stat->metadata_size = PAGE_CACHE_SIZE; + crypt_stat->metadata_size = PAGE_SIZE; } } @@ -1442,7 +1442,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) ECRYPTFS_VALIDATE_HEADER_SIZE); if (rc) { /* metadata is not in the file header, so try xattrs */ - memset(page_virt, 0, PAGE_CACHE_SIZE); + memset(page_virt, 0, PAGE_SIZE); rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode); if (rc) { printk(KERN_DEBUG "Valid eCryptfs headers not found in " @@ -1475,7 +1475,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) } out: if (page_virt) { - memset(page_virt, 0, PAGE_CACHE_SIZE); + memset(page_virt, 0, PAGE_SIZE); kmem_cache_free(ecryptfs_header_cache, page_virt); } return rc; diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 121114e9a464..224b49e71aa4 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -763,10 +763,10 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia, } else { /* ia->ia_size < i_size_read(inode) */ /* We're chopping off all the pages down to the page * in which ia->ia_size is located. 
Fill in the end of - * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to - * PAGE_CACHE_SIZE with zeros. */ - size_t num_zeros = (PAGE_CACHE_SIZE - - (ia->ia_size & ~PAGE_CACHE_MASK)); + * that page from (ia->ia_size & ~PAGE_MASK) to + * PAGE_SIZE with zeros. */ + size_t num_zeros = (PAGE_SIZE + - (ia->ia_size & ~PAGE_MASK)); if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { truncate_setsize(inode, ia->ia_size); diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 9893d1538122..3cf1546dca82 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -1798,7 +1798,7 @@ int ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat, * added the our &auth_tok_list */ next_packet_is_auth_tok_packet = 1; while (next_packet_is_auth_tok_packet) { - size_t max_packet_size = ((PAGE_CACHE_SIZE - 8) - i); + size_t max_packet_size = ((PAGE_SIZE - 8) - i); switch (src[i]) { case ECRYPTFS_TAG_3_PACKET_TYPE: diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index 8b0b4a73116d..1698132d0e57 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c @@ -695,12 +695,12 @@ static struct ecryptfs_cache_info { { .cache = &ecryptfs_header_cache, .name = "ecryptfs_headers", - .size = PAGE_CACHE_SIZE, + .size = PAGE_SIZE, }, { .cache = &ecryptfs_xattr_cache, .name = "ecryptfs_xattr_cache", - .size = PAGE_CACHE_SIZE, + .size = PAGE_SIZE, }, { .cache = &ecryptfs_key_record_cache, @@ -818,7 +818,7 @@ static int __init ecryptfs_init(void) { int rc; - if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_CACHE_SIZE) { + if (ECRYPTFS_DEFAULT_EXTENT_SIZE > PAGE_SIZE) { rc = -EINVAL; ecryptfs_printk(KERN_ERR, "The eCryptfs extent size is " "larger than the host's page size, and so " @@ -826,7 +826,7 @@ static int __init ecryptfs_init(void) "default eCryptfs extent size is [%u] bytes; " "the page size is [%lu] bytes.\n", ECRYPTFS_DEFAULT_EXTENT_SIZE, - (unsigned long)PAGE_CACHE_SIZE); + (unsigned long)PAGE_SIZE); goto out; } rc = ecryptfs_init_kmem_caches(); diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 1f5865263b3e..e6b1d80952b9 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -122,7 +122,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, struct ecryptfs_crypt_stat *crypt_stat) { loff_t extent_num_in_page = 0; - loff_t num_extents_per_page = (PAGE_CACHE_SIZE + loff_t num_extents_per_page = (PAGE_SIZE / crypt_stat->extent_size); int rc = 0; @@ -138,7 +138,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, char *page_virt; page_virt = kmap_atomic(page); - memset(page_virt, 0, PAGE_CACHE_SIZE); + memset(page_virt, 0, PAGE_SIZE); /* TODO: Support more than one header extent */ if (view_extent_num == 0) { size_t written; @@ -164,8 +164,8 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, - crypt_stat->metadata_size); rc = ecryptfs_read_lower_page_segment( - page, (lower_offset >> PAGE_CACHE_SHIFT), - (lower_offset & ~PAGE_CACHE_MASK), + page, (lower_offset >> PAGE_SHIFT), + (lower_offset & ~PAGE_MASK), crypt_stat->extent_size, page->mapping->host); if (rc) { printk(KERN_ERR "%s: Error attempting to read " @@ -198,7 +198,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page) if (!crypt_stat || !(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { rc = ecryptfs_read_lower_page_segment(page, page->index, 0, - PAGE_CACHE_SIZE, + PAGE_SIZE, page->mapping->host); } else if (crypt_stat->flags & ECRYPTFS_VIEW_AS_ENCRYPTED) { if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) { @@ -215,7 +215,7 @@ static int ecryptfs_readpage(struct file *file, struct 
page *page) } else { rc = ecryptfs_read_lower_page_segment( - page, page->index, 0, PAGE_CACHE_SIZE, + page, page->index, 0, PAGE_SIZE, page->mapping->host); if (rc) { printk(KERN_ERR "Error reading page; rc = " @@ -250,12 +250,12 @@ static int fill_zeros_to_end_of_page(struct page *page, unsigned int to) struct inode *inode = page->mapping->host; int end_byte_in_page; - if ((i_size_read(inode) / PAGE_CACHE_SIZE) != page->index) + if ((i_size_read(inode) / PAGE_SIZE) != page->index) goto out; - end_byte_in_page = i_size_read(inode) % PAGE_CACHE_SIZE; + end_byte_in_page = i_size_read(inode) % PAGE_SIZE; if (to > end_byte_in_page) end_byte_in_page = to; - zero_user_segment(page, end_byte_in_page, PAGE_CACHE_SIZE); + zero_user_segment(page, end_byte_in_page, PAGE_SIZE); out: return 0; } @@ -279,7 +279,7 @@ static int ecryptfs_write_begin(struct file *file, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; loff_t prev_page_end_size; int rc = 0; @@ -289,14 +289,14 @@ static int ecryptfs_write_begin(struct file *file, return -ENOMEM; *pagep = page; - prev_page_end_size = ((loff_t)index << PAGE_CACHE_SHIFT); + prev_page_end_size = ((loff_t)index << PAGE_SHIFT); if (!PageUptodate(page)) { struct ecryptfs_crypt_stat *crypt_stat = &ecryptfs_inode_to_private(mapping->host)->crypt_stat; if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) { rc = ecryptfs_read_lower_page_segment( - page, index, 0, PAGE_CACHE_SIZE, mapping->host); + page, index, 0, PAGE_SIZE, mapping->host); if (rc) { printk(KERN_ERR "%s: Error attempting to read " "lower page segment; rc = [%d]\n", @@ -322,7 +322,7 @@ static int ecryptfs_write_begin(struct file *file, SetPageUptodate(page); } else { rc = ecryptfs_read_lower_page_segment( - page, index, 0, PAGE_CACHE_SIZE, + page, index, 0, PAGE_SIZE, mapping->host); if (rc) { printk(KERN_ERR "%s: Error reading " @@ -336,9 +336,9 @@ static int ecryptfs_write_begin(struct file *file, } else { if (prev_page_end_size >= i_size_read(page->mapping->host)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); - } else if (len < PAGE_CACHE_SIZE) { + } else if (len < PAGE_SIZE) { rc = ecryptfs_decrypt_page(page); if (rc) { printk(KERN_ERR "%s: Error decrypting " @@ -371,11 +371,11 @@ static int ecryptfs_write_begin(struct file *file, * of page? Zero it out. 
*/ if ((i_size_read(mapping->host) == prev_page_end_size) && (pos != 0)) - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); out: if (unlikely(rc)) { unlock_page(page); - page_cache_release(page); + put_page(page); *pagep = NULL; } return rc; @@ -437,7 +437,7 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode) } inode_lock(lower_inode); size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME, - xattr_virt, PAGE_CACHE_SIZE); + xattr_virt, PAGE_SIZE); if (size < 0) size = 8; put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt); @@ -479,8 +479,8 @@ static int ecryptfs_write_end(struct file *file, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + pgoff_t index = pos >> PAGE_SHIFT; + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + copied; struct inode *ecryptfs_inode = mapping->host; struct ecryptfs_crypt_stat *crypt_stat = @@ -500,7 +500,7 @@ static int ecryptfs_write_end(struct file *file, goto out; } if (!PageUptodate(page)) { - if (copied < PAGE_CACHE_SIZE) { + if (copied < PAGE_SIZE) { rc = 0; goto out; } @@ -533,7 +533,7 @@ static int ecryptfs_write_end(struct file *file, rc = copied; out: unlock_page(page); - page_cache_release(page); + put_page(page); return rc; } diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 09fe622274e4..158a3a39f82d 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c @@ -74,7 +74,7 @@ int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode, loff_t offset; int rc; - offset = ((((loff_t)page_for_lower->index) << PAGE_CACHE_SHIFT) + offset = ((((loff_t)page_for_lower->index) << PAGE_SHIFT) + offset_in_page); virt = kmap(page_for_lower); rc = ecryptfs_write_lower(ecryptfs_inode, virt, offset, size); @@ -123,9 +123,9 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, else pos = offset; while (pos < (offset + size)) { - pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT); - size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK); - size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page); + pgoff_t ecryptfs_page_idx = (pos >> PAGE_SHIFT); + size_t start_offset_in_page = (pos & ~PAGE_MASK); + size_t num_bytes = (PAGE_SIZE - start_offset_in_page); loff_t total_remaining_bytes = ((offset + size) - pos); if (fatal_signal_pending(current)) { @@ -165,7 +165,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, * Fill in zero values to the end of the page */ memset(((char *)ecryptfs_page_virt + start_offset_in_page), 0, - PAGE_CACHE_SIZE - start_offset_in_page); + PAGE_SIZE - start_offset_in_page); } /* pos >= offset, we are now writing the data request */ @@ -186,7 +186,7 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset, ecryptfs_page, start_offset_in_page, data_offset); - page_cache_release(ecryptfs_page); + put_page(ecryptfs_page); if (rc) { printk(KERN_ERR "%s: Error encrypting " "page; rc = [%d]\n", __func__, rc); @@ -262,7 +262,7 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, loff_t offset; int rc; - offset = ((((loff_t)page_index) << PAGE_CACHE_SHIFT) + offset_in_page); + offset = ((((loff_t)page_index) << PAGE_SHIFT) + offset_in_page); virt = kmap(page_for_ecryptfs); rc = ecryptfs_read_lower(virt, offset, size, ecryptfs_inode); if (rc > 0) diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 
dd029d13ea61..553c5d2db4a4 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -197,8 +197,8 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) efivarfs_sb = sb; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = EFIVARFS_MAGIC; sb->s_op = &efivarfs_ops; sb->s_d_op = &efivarfs_d_ops; diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index e5bb2abf77f9..547b93cbea63 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -41,16 +41,16 @@ static inline unsigned exofs_chunk_size(struct inode *inode) static inline void exofs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr) { loff_t last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -85,13 +85,13 @@ static void exofs_check_page(struct page *page) unsigned chunk_size = exofs_chunk_size(dir); char *kaddr = page_address(page); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; struct exofs_dir_entry *p; char *error; /* if the page is the last one in the directory */ - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) @@ -138,7 +138,7 @@ bad_entry: EXOFS_ERR( "ERROR [exofs_check_page]: bad entry in directory(0x%lx): %s - " "offset=%lu, inode=0x%llu, rec_len=%d, name_len=%d\n", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, _LLU(le64_to_cpu(p->inode_no)), rec_len, p->name_len); goto fail; @@ -147,7 +147,7 @@ Eend: EXOFS_ERR("ERROR [exofs_check_page]: " "entry in directory(0x%lx) spans the page boundary" "offset=%lu, inode=0x%llx\n", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, (page->index<<PAGE_SHIFT)+offs, _LLU(le64_to_cpu(p->inode_no))); fail: SetPageChecked(page); @@ -237,8 +237,8 @@ exofs_readdir(struct file *file, struct dir_context *ctx) { loff_t pos = ctx->pos; struct inode *inode = file_inode(file); - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(exofs_chunk_size(inode)-1); int need_revalidate = (file->f_version != inode->i_version); @@ -254,7 +254,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { EXOFS_ERR("ERROR: bad page in directory(0x%lx)\n", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); @@ -262,7 +262,7 @@ exofs_readdir(struct file *file, struct dir_context *ctx) if (offset) { offset = exofs_validate_entry(kaddr, offset, chunk_mask); - ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; @@ -449,7 +449,7 @@ int exofs_add_link(struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + exofs_last_byte(dir, 
n); de = (struct exofs_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { name_len = 0; @@ -602,7 +602,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent) kunmap_atomic(kaddr); err = exofs_commit_chunk(page, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 9eaf595aeaf8..49e1bd00b4ec 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -317,7 +317,7 @@ static int read_exec(struct page_collect *pcol) if (!pcol->ios) { int ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, true, - pcol->pg_first << PAGE_CACHE_SHIFT, + pcol->pg_first << PAGE_SHIFT, pcol->length, &pcol->ios); if (ret) @@ -383,7 +383,7 @@ static int readpage_strip(void *data, struct page *page) struct inode *inode = pcol->inode; struct exofs_i_info *oi = exofs_i(inode); loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; size_t len; int ret; @@ -397,9 +397,9 @@ static int readpage_strip(void *data, struct page *page) pcol->that_locked_page = page; if (page->index < end_index) - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; else if (page->index == end_index) - len = i_size & ~PAGE_CACHE_MASK; + len = i_size & ~PAGE_MASK; else len = 0; @@ -442,8 +442,8 @@ try_again: goto fail; } - if (len != PAGE_CACHE_SIZE) - zero_user(page, len, PAGE_CACHE_SIZE - len); + if (len != PAGE_SIZE) + zero_user(page, len, PAGE_SIZE - len); EXOFS_DBGMSG2(" readpage_strip(0x%lx, 0x%lx) len=0x%zx\n", inode->i_ino, page->index, len); @@ -609,7 +609,7 @@ static void __r4w_put_page(void *priv, struct page *page) if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) { EXOFS_DBGMSG2("index=0x%lx\n", page->index); - page_cache_release(page); + put_page(page); return; } EXOFS_DBGMSG2("that_locked_page index=0x%lx\n", @@ -633,7 +633,7 @@ static int write_exec(struct page_collect *pcol) BUG_ON(pcol->ios); ret = ore_get_rw_state(&pcol->sbi->layout, &oi->oc, false, - pcol->pg_first << PAGE_CACHE_SHIFT, + pcol->pg_first << PAGE_SHIFT, pcol->length, &pcol->ios); if (unlikely(ret)) goto err; @@ -696,7 +696,7 @@ static int writepage_strip(struct page *page, struct inode *inode = pcol->inode; struct exofs_i_info *oi = exofs_i(inode); loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; size_t len; int ret; @@ -708,9 +708,9 @@ static int writepage_strip(struct page *page, if (page->index < end_index) /* in this case, the page is within the limits of the file */ - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; else { - len = i_size & ~PAGE_CACHE_MASK; + len = i_size & ~PAGE_MASK; if (page->index > end_index || !len) { /* in this case, the page is outside the limits @@ -790,10 +790,10 @@ static int exofs_writepages(struct address_space *mapping, long start, end, expected_pages; int ret; - start = wbc->range_start >> PAGE_CACHE_SHIFT; + start = wbc->range_start >> PAGE_SHIFT; end = (wbc->range_end == LLONG_MAX) ? 
start + mapping->nrpages : - wbc->range_end >> PAGE_CACHE_SHIFT; + wbc->range_end >> PAGE_SHIFT; if (start || end) expected_pages = end - start + 1; @@ -881,15 +881,15 @@ int exofs_write_begin(struct file *file, struct address_space *mapping, } /* read modify write */ - if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { + if (!PageUptodate(page) && (len != PAGE_SIZE)) { loff_t i_size = i_size_read(mapping->host); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; size_t rlen; if (page->index < end_index) - rlen = PAGE_CACHE_SIZE; + rlen = PAGE_SIZE; else if (page->index == end_index) - rlen = i_size & ~PAGE_CACHE_MASK; + rlen = i_size & ~PAGE_MASK; else rlen = 0; diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index c20d77df2679..622a686bb08b 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c @@ -292,11 +292,11 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 0c6638b40f21..7ff6fcfa685d 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -37,7 +37,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen) { unsigned len = le16_to_cpu(dlen); -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len == EXT2_MAX_REC_LEN) return 1 << 16; #endif @@ -46,7 +46,7 @@ static inline unsigned ext2_rec_len_from_disk(__le16 dlen) static inline __le16 ext2_rec_len_to_disk(unsigned len) { -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len == (1 << 16)) return cpu_to_le16(EXT2_MAX_REC_LEN); else @@ -67,7 +67,7 @@ static inline unsigned ext2_chunk_size(struct inode *inode) static inline void ext2_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* @@ -79,9 +79,9 @@ ext2_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -118,12 +118,12 @@ static void ext2_check_page(struct page *page, int quiet) char *kaddr = page_address(page); u32 max_inumber = le32_to_cpu(EXT2_SB(sb)->s_es->s_inodes_count); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; ext2_dirent *p; char *error; - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) @@ -176,7 +176,7 @@ bad_entry: if (!quiet) ext2_error(sb, __func__, "bad entry in directory #%lu: : %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le32_to_cpu(p->inode), rec_len, p->name_len); goto fail; @@ -186,7 +186,7 @@ Eend: ext2_error(sb, "ext2_check_page", "entry in directory #%lu spans the page boundary" "offset=%lu, inode=%lu", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le32_to_cpu(p->inode)); } fail: @@ -287,8 +287,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx) loff_t pos = ctx->pos; 
struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(ext2_chunk_size(inode)-1); unsigned char *types = NULL; @@ -309,14 +309,14 @@ ext2_readdir(struct file *file, struct dir_context *ctx) ext2_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return PTR_ERR(page); } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ext2_validate_entry(kaddr, offset, chunk_mask); - ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; @@ -406,7 +406,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir, if (++n >= npages) n = 0; /* next page is past the blocks we've got */ - if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { + if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { ext2_error(dir->i_sb, __func__, "dir %lu size %lld exceeds block count %llu", dir->i_ino, dir->i_size, @@ -511,7 +511,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + ext2_last_byte(dir, n); de = (ext2_dirent *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ @@ -655,7 +655,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent) kunmap_atomic(kaddr); err = ext2_commit_chunk(page, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 7a2be8f7f3c3..d34843925b23 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -398,7 +398,7 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); else { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } inode_dec_link_count(old_dir); } @@ -408,11 +408,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c index edc053a81914..db9ae6e18154 100644 --- a/fs/ext4/crypto.c +++ b/fs/ext4/crypto.c @@ -91,7 +91,8 @@ void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx) * Return: An allocated and initialized encryption context on success; error * value or NULL otherwise. 
*/ -struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) +struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode, + gfp_t gfp_flags) { struct ext4_crypto_ctx *ctx = NULL; int res = 0; @@ -118,7 +119,7 @@ struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode) list_del(&ctx->free_list); spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags); if (!ctx) { - ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS); + ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags); if (!ctx) { res = -ENOMEM; goto out; @@ -255,7 +256,8 @@ static int ext4_page_crypto(struct inode *inode, ext4_direction_t rw, pgoff_t index, struct page *src_page, - struct page *dest_page) + struct page *dest_page, + gfp_t gfp_flags) { u8 xts_tweak[EXT4_XTS_TWEAK_SIZE]; @@ -266,7 +268,7 @@ static int ext4_page_crypto(struct inode *inode, struct crypto_skcipher *tfm = ci->ci_ctfm; int res = 0; - req = skcipher_request_alloc(tfm, GFP_NOFS); + req = skcipher_request_alloc(tfm, gfp_flags); if (!req) { printk_ratelimited(KERN_ERR "%s: crypto_request_alloc() failed\n", @@ -283,10 +285,10 @@ static int ext4_page_crypto(struct inode *inode, EXT4_XTS_TWEAK_SIZE - sizeof(index)); sg_init_table(&dst, 1); - sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0); + sg_set_page(&dst, dest_page, PAGE_SIZE, 0); sg_init_table(&src, 1); - sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0); - skcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE, + sg_set_page(&src, src_page, PAGE_SIZE, 0); + skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, xts_tweak); if (rw == EXT4_DECRYPT) res = crypto_skcipher_decrypt(req); @@ -307,9 +309,10 @@ static int ext4_page_crypto(struct inode *inode, return 0; } -static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx) +static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx, + gfp_t gfp_flags) { - ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, GFP_NOWAIT); + ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags); if (ctx->w.bounce_page == NULL) return ERR_PTR(-ENOMEM); ctx->flags |= EXT4_WRITE_PATH_FL; @@ -332,7 +335,8 @@ static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx) * error value or NULL. */ struct page *ext4_encrypt(struct inode *inode, - struct page *plaintext_page) + struct page *plaintext_page, + gfp_t gfp_flags) { struct ext4_crypto_ctx *ctx; struct page *ciphertext_page = NULL; @@ -340,17 +344,17 @@ struct page *ext4_encrypt(struct inode *inode, BUG_ON(!PageLocked(plaintext_page)); - ctx = ext4_get_crypto_ctx(inode); + ctx = ext4_get_crypto_ctx(inode, gfp_flags); if (IS_ERR(ctx)) return (struct page *) ctx; /* The encryption operation will require a bounce page. 
*/ - ciphertext_page = alloc_bounce_page(ctx); + ciphertext_page = alloc_bounce_page(ctx, gfp_flags); if (IS_ERR(ciphertext_page)) goto errout; ctx->w.control_page = plaintext_page; err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index, - plaintext_page, ciphertext_page); + plaintext_page, ciphertext_page, gfp_flags); if (err) { ciphertext_page = ERR_PTR(err); errout: @@ -378,8 +382,8 @@ int ext4_decrypt(struct page *page) { BUG_ON(!PageLocked(page)); - return ext4_page_crypto(page->mapping->host, - EXT4_DECRYPT, page->index, page, page); + return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT, + page->index, page, page, GFP_NOFS); } int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, @@ -396,13 +400,13 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, (unsigned long) inode->i_ino, lblk, len); #endif - BUG_ON(inode->i_sb->s_blocksize != PAGE_CACHE_SIZE); + BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE); - ctx = ext4_get_crypto_ctx(inode); + ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); if (IS_ERR(ctx)) return PTR_ERR(ctx); - ciphertext_page = alloc_bounce_page(ctx); + ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT); if (IS_ERR(ciphertext_page)) { err = PTR_ERR(ciphertext_page); goto errout; @@ -410,11 +414,12 @@ int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, while (len--) { err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk, - ZERO_PAGE(0), ciphertext_page); + ZERO_PAGE(0), ciphertext_page, + GFP_NOFS); if (err) goto errout; - bio = bio_alloc(GFP_KERNEL, 1); + bio = bio_alloc(GFP_NOWAIT, 1); if (!bio) { err = -ENOMEM; goto errout; @@ -473,13 +478,16 @@ uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size) */ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags) { - struct inode *dir = d_inode(dentry->d_parent); - struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info; + struct dentry *dir; + struct ext4_crypt_info *ci; int dir_has_key, cached_with_key; - if (!ext4_encrypted_inode(dir)) + dir = dget_parent(dentry); + if (!ext4_encrypted_inode(d_inode(dir))) { + dput(dir); return 0; - + } + ci = EXT4_I(d_inode(dir))->i_crypt_info; if (ci && ci->ci_keyring_key && (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | (1 << KEY_FLAG_REVOKED) | @@ -489,6 +497,7 @@ static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags) /* this should eventually be an flag in d_flags */ cached_with_key = dentry->d_fsdata != NULL; dir_has_key = (ci != NULL); + dput(dir); /* * If the dentry was cached without the key, and it is a diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 50ba27cbed03..561d7308b393 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -155,13 +155,13 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) err = ext4_map_blocks(NULL, inode, &map, 0); if (err > 0) { pgoff_t index = map.m_pblk >> - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); if (!ra_has_index(&file->f_ra, index)) page_cache_sync_readahead( sb->s_bdev->bd_inode->i_mapping, &file->f_ra, file, index, 1); - file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + file->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT; bh = ext4_bread(NULL, inode, map.m_lblk, 0); if (IS_ERR(bh)) { err = PTR_ERR(bh); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index c04743519865..349afebe21ee 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -912,6 +912,29 @@ do { \ #include "extents_status.h" /* + * Lock subclasses for i_data_sem in the ext4_inode_info structure. 
+ * + * These are needed to avoid lockdep false positives when we need to + * allocate blocks to the quota inode during ext4_map_blocks(), while + * holding i_data_sem for a normal (non-quota) inode. Since we don't + * do quota tracking for the quota inode, this avoids deadlock (as + * well as infinite recursion, since it isn't turtles all the way + * down...) + * + * I_DATA_SEM_NORMAL - Used for most inodes + * I_DATA_SEM_OTHER - Used by move_extent.c for the second normal inode + * where the second inode has larger inode number + * than the first + * I_DATA_SEM_QUOTA - Used for quota inodes only + */ +enum { + I_DATA_SEM_NORMAL = 0, + I_DATA_SEM_OTHER, + I_DATA_SEM_QUOTA, +}; + + +/* * fourth extended file system inode data in memory */ struct ext4_inode_info { @@ -1961,7 +1984,7 @@ ext4_rec_len_from_disk(__le16 dlen, unsigned blocksize) { unsigned len = le16_to_cpu(dlen); -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len == EXT4_MAX_REC_LEN || len == 0) return blocksize; return (len & 65532) | ((len & 3) << 16); @@ -1974,7 +1997,7 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize) { if ((len > blocksize) || (blocksize > (1 << 18)) || (len & 3)) BUG(); -#if (PAGE_CACHE_SIZE >= 65536) +#if (PAGE_SIZE >= 65536) if (len < 65536) return cpu_to_le16(len); if (len == blocksize) { @@ -2282,11 +2305,13 @@ extern struct kmem_cache *ext4_crypt_info_cachep; bool ext4_valid_contents_enc_mode(uint32_t mode); uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size); extern struct workqueue_struct *ext4_read_workqueue; -struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode); +struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode, + gfp_t gfp_flags); void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx); void ext4_restore_control_page(struct page *data_page); struct page *ext4_encrypt(struct inode *inode, - struct page *plaintext_page); + struct page *plaintext_page, + gfp_t gfp_flags); int ext4_decrypt(struct page *page); int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk, ext4_lblk_t len); diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 6659e216385e..fa2208bae2e1 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -329,7 +329,7 @@ static int ext4_file_open(struct inode * inode, struct file * filp) struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct vfsmount *mnt = filp->f_path.mnt; - struct inode *dir = filp->f_path.dentry->d_parent->d_inode; + struct dentry *dir; struct path path; char buf[64], *cp; int ret; @@ -373,14 +373,18 @@ static int ext4_file_open(struct inode * inode, struct file * filp) if (ext4_encryption_info(inode) == NULL) return -ENOKEY; } - if (ext4_encrypted_inode(dir) && - !ext4_is_child_context_consistent_with_parent(dir, inode)) { + + dir = dget_parent(file_dentry(filp)); + if (ext4_encrypted_inode(d_inode(dir)) && + !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) { ext4_warning(inode->i_sb, "Inconsistent encryption contexts: %lu/%lu\n", - (unsigned long) dir->i_ino, + (unsigned long) d_inode(dir)->i_ino, (unsigned long) inode->i_ino); + dput(dir); return -EPERM; } + dput(dir); /* * Set up the jbd2_inode if we are opening the inode for * writing and the journal is present @@ -428,8 +432,8 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, lastoff = startoff; endoff = (loff_t)end_blk << blkbits; - index = startoff >> PAGE_CACHE_SHIFT; - end = endoff >> PAGE_CACHE_SHIFT; + index = startoff >> 
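The two rec_len helpers above only swap PAGE_CACHE_SIZE for PAGE_SIZE; the encoding itself is unchanged. On configurations with pages (and hence possibly blocks) of 64KB or more, a directory entry length no longer fits the on-disk 16-bit field, so the low two stored bits carry bits 16-17 of the real length, while 0 and EXT4_MAX_REC_LEN mean "the whole block". A self-contained userspace round-trip of that scheme, assuming a hypothetical 128KB blocksize:

#include <assert.h>
#include <stdint.h>

#define EXT4_MAX_REC_LEN ((1 << 16) - 1)	/* 65535 */

/* Mirrors the on-disk encoding used when PAGE_SIZE >= 65536. */
static uint16_t rec_len_to_disk(unsigned len, unsigned blocksize)
{
	if (len < 65536)
		return len;
	if (len == blocksize)
		return blocksize == 65536 ? EXT4_MAX_REC_LEN : 0;
	return (len & 65532) | ((len >> 16) & 3);
}

static unsigned rec_len_from_disk(uint16_t dlen, unsigned blocksize)
{
	unsigned len = dlen;

	if (len == EXT4_MAX_REC_LEN || len == 0)
		return blocksize;
	return (len & 65532) | ((len & 3) << 16);
}

int main(void)
{
	unsigned blocksize = 131072;	/* assumed 128KB block */

	/* 70000 is a 4-byte-aligned length too large for 16 bits. */
	assert(rec_len_from_disk(rec_len_to_disk(70000, blocksize),
				 blocksize) == 70000);
	/* A record covering the whole (non-64KB) block is stored as 0. */
	assert(rec_len_from_disk(rec_len_to_disk(blocksize, blocksize),
				 blocksize) == blocksize);
	return 0;
}
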
PAGE_SHIFT; + end = endoff >> PAGE_SHIFT; pagevec_init(&pvec, 0); do { diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 7cbdd3752ba5..7bc6c855cc18 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -482,7 +482,7 @@ static int ext4_read_inline_page(struct inode *inode, struct page *page) ret = ext4_read_inline_data(inode, kaddr, len, &iloc); flush_dcache_page(page); kunmap_atomic(kaddr); - zero_user_segment(page, len, PAGE_CACHE_SIZE); + zero_user_segment(page, len, PAGE_SIZE); SetPageUptodate(page); brelse(iloc.bh); @@ -507,7 +507,7 @@ int ext4_readpage_inline(struct inode *inode, struct page *page) if (!page->index) ret = ext4_read_inline_page(inode, page); else if (!PageUptodate(page)) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); } @@ -595,7 +595,7 @@ retry: if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; ext4_orphan_add(handle, inode); up_write(&EXT4_I(inode)->xattr_sem); @@ -621,7 +621,7 @@ retry: out: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } if (sem_held) up_write(&EXT4_I(inode)->xattr_sem); @@ -690,7 +690,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, if (!ext4_has_inline_data(inode)) { ret = 0; unlock_page(page); - page_cache_release(page); + put_page(page); goto out_up_read; } @@ -815,7 +815,7 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping, if (ret) { up_read(&EXT4_I(inode)->xattr_sem); unlock_page(page); - page_cache_release(page); + put_page(page); ext4_truncate_failed_write(inode); return ret; } @@ -829,7 +829,7 @@ out: up_read(&EXT4_I(inode)->xattr_sem); if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } return ret; } @@ -919,7 +919,7 @@ retry_journal: out_release_page: up_read(&EXT4_I(inode)->xattr_sem); unlock_page(page); - page_cache_release(page); + put_page(page); out_journal: ext4_journal_stop(handle); out: @@ -947,7 +947,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, i_size_changed = 1; } unlock_page(page); - page_cache_release(page); + put_page(page); /* * Don't mark the inode dirty under page lock. First, it unnecessarily diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index dab84a2530ff..981a1fc30eaa 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -763,39 +763,47 @@ int ext4_get_block_unwritten(struct inode *inode, sector_t iblock, /* Maximum number of blocks we map for direct IO at once. */ #define DIO_MAX_BLOCKS 4096 -static handle_t *start_dio_trans(struct inode *inode, - struct buffer_head *bh_result) +/* + * Get blocks function for the cases that need to start a transaction - + * generally different cases of direct IO and DAX IO. It also handles retries + * in case of ENOSPC.
+ */ +static int ext4_get_block_trans(struct inode *inode, sector_t iblock, + struct buffer_head *bh_result, int flags) { int dio_credits; + handle_t *handle; + int retries = 0; + int ret; /* Trim mapping request to maximum we can map at once for DIO */ if (bh_result->b_size >> inode->i_blkbits > DIO_MAX_BLOCKS) bh_result->b_size = DIO_MAX_BLOCKS << inode->i_blkbits; dio_credits = ext4_chunk_trans_blocks(inode, bh_result->b_size >> inode->i_blkbits); - return ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); +retry: + handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, dio_credits); + if (IS_ERR(handle)) + return PTR_ERR(handle); + + ret = _ext4_get_block(inode, iblock, bh_result, flags); + ext4_journal_stop(handle); + + if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) + goto retry; + return ret; } /* Get block function for DIO reads and writes to inodes without extents */ int ext4_dio_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh, int create) { - handle_t *handle; - int ret; - /* We don't expect handle for direct IO */ WARN_ON_ONCE(ext4_journal_current_handle()); - if (create) { - handle = start_dio_trans(inode, bh); - if (IS_ERR(handle)) - return PTR_ERR(handle); - } - ret = _ext4_get_block(inode, iblock, bh, - create ? EXT4_GET_BLOCKS_CREATE : 0); - if (create) - ext4_journal_stop(handle); - return ret; + if (!create) + return _ext4_get_block(inode, iblock, bh, 0); + return ext4_get_block_trans(inode, iblock, bh, EXT4_GET_BLOCKS_CREATE); } /* @@ -806,18 +814,13 @@ int ext4_dio_get_block(struct inode *inode, sector_t iblock, static int ext4_dio_get_block_unwritten_async(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - handle_t *handle; int ret; /* We don't expect handle for direct IO */ WARN_ON_ONCE(ext4_journal_current_handle()); - handle = start_dio_trans(inode, bh_result); - if (IS_ERR(handle)) - return PTR_ERR(handle); - ret = _ext4_get_block(inode, iblock, bh_result, - EXT4_GET_BLOCKS_IO_CREATE_EXT); - ext4_journal_stop(handle); + ret = ext4_get_block_trans(inode, iblock, bh_result, + EXT4_GET_BLOCKS_IO_CREATE_EXT); /* * When doing DIO using unwritten extents, we need io_end to convert @@ -850,18 +853,13 @@ static int ext4_dio_get_block_unwritten_async(struct inode *inode, static int ext4_dio_get_block_unwritten_sync(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { - handle_t *handle; int ret; /* We don't expect handle for direct IO */ WARN_ON_ONCE(ext4_journal_current_handle()); - handle = start_dio_trans(inode, bh_result); - if (IS_ERR(handle)) - return PTR_ERR(handle); - ret = _ext4_get_block(inode, iblock, bh_result, - EXT4_GET_BLOCKS_IO_CREATE_EXT); - ext4_journal_stop(handle); + ret = ext4_get_block_trans(inode, iblock, bh_result, + EXT4_GET_BLOCKS_IO_CREATE_EXT); /* * Mark inode as having pending DIO writes to unwritten extents. 
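The consolidation into ext4_get_block_trans() also changes behaviour: the old start_dio_trans() callers gave up on the first ENOSPC, while the new helper retries after ext4_should_retry_alloc() has had a chance to commit the running transaction and free blocks. The idiom, as a hedged sketch inside ext4 context (do_mapping() is a hypothetical stand-in for _ext4_get_block()):

/* Hypothetical mapping step; stands in for _ext4_get_block(). */
int do_mapping(struct inode *inode);

static int run_with_retries(struct inode *inode, int credits)
{
	int retries = 0;
	handle_t *handle;
	int ret;

retry:
	handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
	ret = do_mapping(inode);
	/* Stop the handle before retrying: freeing space may require
	 * committing the running transaction, which cannot complete
	 * while this handle is still open. */
	ext4_journal_stop(handle);
	if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
	return ret;
}
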
@@ -1057,7 +1055,7 @@ int do_journal_get_write_access(handle_t *handle, static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, get_block_t *get_block) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); unsigned to = from + len; struct inode *inode = page->mapping->host; unsigned block_start, block_end; @@ -1069,15 +1067,15 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, bool decrypt = false; BUG_ON(!PageLocked(page)); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > PAGE_SIZE); + BUG_ON(to > PAGE_SIZE); BUG_ON(from > to); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); bbits = ilog2(blocksize); - block = (sector_t)page->index << (PAGE_CACHE_SHIFT - bbits); + block = (sector_t)page->index << (PAGE_SHIFT - bbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { @@ -1159,8 +1157,8 @@ static int ext4_write_begin(struct file *file, struct address_space *mapping, * we allocate blocks but write fails for some reason */ needed_blocks = ext4_writepage_trans_blocks(inode) + 1; - index = pos >> PAGE_CACHE_SHIFT; - from = pos & (PAGE_CACHE_SIZE - 1); + index = pos >> PAGE_SHIFT; + from = pos & (PAGE_SIZE - 1); to = from + len; if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { @@ -1188,7 +1186,7 @@ retry_grab: retry_journal: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks); if (IS_ERR(handle)) { - page_cache_release(page); + put_page(page); return PTR_ERR(handle); } @@ -1196,7 +1194,7 @@ retry_journal: if (page->mapping != mapping) { /* The page got truncated from under us */ unlock_page(page); - page_cache_release(page); + put_page(page); ext4_journal_stop(handle); goto retry_grab; } @@ -1252,7 +1250,7 @@ retry_journal: if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; - page_cache_release(page); + put_page(page); return ret; } *pagep = page; @@ -1295,7 +1293,7 @@ static int ext4_write_end(struct file *file, ret = ext4_jbd2_file_inode(handle, inode); if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); goto errout; } } @@ -1315,7 +1313,7 @@ static int ext4_write_end(struct file *file, */ i_size_changed = ext4_update_inode_size(inode, pos + copied); unlock_page(page); - page_cache_release(page); + put_page(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); @@ -1399,7 +1397,7 @@ static int ext4_journalled_write_end(struct file *file, int size_changed = 0; trace_ext4_journalled_write_end(inode, pos, len, copied); - from = pos & (PAGE_CACHE_SIZE - 1); + from = pos & (PAGE_SIZE - 1); to = from + len; BUG_ON(!ext4_handle_valid(handle)); @@ -1423,7 +1421,7 @@ static int ext4_journalled_write_end(struct file *file, ext4_set_inode_state(inode, EXT4_STATE_JDATA); EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; unlock_page(page); - page_cache_release(page); + put_page(page); if (old_size < pos) pagecache_isize_extended(inode, old_size, pos); @@ -1537,7 +1535,7 @@ static void ext4_da_page_release_reservation(struct page *page, int num_clusters; ext4_fsblk_t lblk; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + BUG_ON(stop > PAGE_SIZE || stop < length); head = page_buffers(page); bh = head; @@ -1553,7 +1551,7 @@ static void ext4_da_page_release_reservation(struct page *page, clear_buffer_delay(bh); } else if (contiguous_blks) { 
lblk = page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ext4_es_remove_extent(inode, lblk, contiguous_blks); @@ -1563,7 +1561,7 @@ static void ext4_da_page_release_reservation(struct page *page, } while ((bh = bh->b_this_page) != head); if (contiguous_blks) { - lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + lblk = page->index << (PAGE_SHIFT - inode->i_blkbits); lblk += (curr_off >> inode->i_blkbits) - contiguous_blks; ext4_es_remove_extent(inode, lblk, contiguous_blks); } @@ -1572,7 +1570,7 @@ static void ext4_da_page_release_reservation(struct page *page, * need to release the reserved space for that cluster. */ num_clusters = EXT4_NUM_B2C(sbi, to_release); while (num_clusters > 0) { - lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) + + lblk = (page->index << (PAGE_SHIFT - inode->i_blkbits)) + ((num_clusters - 1) << sbi->s_cluster_bits); if (sbi->s_cluster_ratio == 1 || !ext4_find_delalloc_cluster(inode, lblk)) @@ -1619,8 +1617,8 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, end = mpd->next_page - 1; if (invalidate) { ext4_lblk_t start, last; - start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits); - last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits); + start = index << (PAGE_SHIFT - inode->i_blkbits); + last = end << (PAGE_SHIFT - inode->i_blkbits); ext4_es_remove_extent(inode, start, last - start + 1); } @@ -1636,7 +1634,7 @@ static void mpage_release_unused_pages(struct mpage_da_data *mpd, BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); if (invalidate) { - block_invalidatepage(page, 0, PAGE_CACHE_SIZE); + block_invalidatepage(page, 0, PAGE_SIZE); ClearPageUptodate(page); } unlock_page(page); @@ -2007,10 +2005,10 @@ static int ext4_writepage(struct page *page, trace_ext4_writepage(page); size = i_size_read(inode); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; page_bufs = page_buffers(page); /* @@ -2034,7 +2032,7 @@ static int ext4_writepage(struct page *page, ext4_bh_delay_or_unwritten)) { redirty_page_for_writepage(wbc, page); if ((current->flags & PF_MEMALLOC) || - (inode->i_sb->s_blocksize == PAGE_CACHE_SIZE)) { + (inode->i_sb->s_blocksize == PAGE_SIZE)) { /* * For memory cleaning there's no point in writing only * some buffers. So just bail out. Warn if we came here @@ -2076,10 +2074,10 @@ static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page) int err; BUG_ON(page->index != mpd->first_page); - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; clear_page_dirty_for_io(page); err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc, false); if (!err) @@ -2213,7 +2211,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) int nr_pages, i; struct inode *inode = mpd->inode; struct buffer_head *head, *bh; - int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits; + int bpp_bits = PAGE_SHIFT - inode->i_blkbits; pgoff_t start, end; ext4_lblk_t lblk; sector_t pblock; @@ -2274,7 +2272,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd) * supports blocksize < pagesize as we will try to * convert potentially unmapped parts of inode. 
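Most hunks in this file are the mechanical PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE/PAGE_CACHE_MASK renames; since the PAGE_CACHE_* macros were already defined as their PAGE_* counterparts, the arithmetic is untouched. For reference, the index/offset split everything here relies on, as a tiny standalone check assuming 4KB pages:

#include <stdint.h>

#define PAGE_SHIFT 12			/* assumed 4KB pages */
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t pos = 123456789;			/* arbitrary file position */
	uint64_t index = pos >> PAGE_SHIFT;		/* page cache index: 30140 */
	uint64_t offset = pos & (PAGE_SIZE - 1);	/* offset in page: 3349 */
	uint64_t start = pos & PAGE_MASK;		/* page-aligned position */

	/* The invariants the converted code depends on: */
	return ((index << PAGE_SHIFT) + offset == pos &&
		start == index << PAGE_SHIFT) ? 0 : 1;
}
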
*/ - mpd->io_submit.io_end->size += PAGE_CACHE_SIZE; + mpd->io_submit.io_end->size += PAGE_SIZE; /* Page fully mapped - let IO run! */ err = mpage_submit_page(mpd, page); if (err < 0) { @@ -2426,7 +2424,7 @@ update_disksize: * Update on-disk size after IO is submitted. Races with * truncate are avoided by checking i_size under i_data_sem. */ - disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT; + disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT; if (disksize > EXT4_I(inode)->i_disksize) { int err2; loff_t i_size; @@ -2562,7 +2560,7 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) mpd->next_page = page->index + 1; /* Add all dirty buffers to mpd */ lblk = ((ext4_lblk_t)page->index) << - (PAGE_CACHE_SHIFT - blkbits); + (PAGE_SHIFT - blkbits); head = page_buffers(page); err = mpage_process_page_bufs(mpd, head, head, lblk); if (err <= 0) @@ -2647,7 +2645,7 @@ static int ext4_writepages(struct address_space *mapping, * We may need to convert up to one extent per block in * the page and we may dirty the inode. */ - rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits); + rsv_blocks = 1 + (PAGE_SIZE >> inode->i_blkbits); } /* @@ -2678,8 +2676,8 @@ static int ext4_writepages(struct address_space *mapping, mpd.first_page = writeback_index; mpd.last_page = -1; } else { - mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT; - mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT; + mpd.first_page = wbc->range_start >> PAGE_SHIFT; + mpd.last_page = wbc->range_end >> PAGE_SHIFT; } mpd.inode = inode; @@ -2838,7 +2836,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; handle_t *handle; - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; if (ext4_nonda_switch(inode->i_sb)) { *fsdata = (void *)FALL_BACK_TO_NONDELALLOC; @@ -2881,7 +2879,7 @@ retry_journal: handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ext4_da_write_credits(inode, pos, len)); if (IS_ERR(handle)) { - page_cache_release(page); + put_page(page); return PTR_ERR(handle); } @@ -2889,7 +2887,7 @@ retry_journal: if (page->mapping != mapping) { /* The page got truncated from under us */ unlock_page(page); - page_cache_release(page); + put_page(page); ext4_journal_stop(handle); goto retry_grab; } @@ -2917,7 +2915,7 @@ retry_journal: ext4_should_retry_alloc(inode->i_sb, &retries)) goto retry_journal; - page_cache_release(page); + put_page(page); return ret; } @@ -2965,7 +2963,7 @@ static int ext4_da_write_end(struct file *file, len, copied, page, fsdata); trace_ext4_da_write_end(inode, pos, len, copied); - start = pos & (PAGE_CACHE_SIZE - 1); + start = pos & (PAGE_SIZE - 1); end = start + copied - 1; /* @@ -3187,7 +3185,7 @@ static int __ext4_journalled_invalidatepage(struct page *page, /* * If it's a full truncate we just forget about the pending dirtying */ - if (offset == 0 && length == PAGE_CACHE_SIZE) + if (offset == 0 && length == PAGE_SIZE) ClearPageChecked(page); return jbd2_journal_invalidatepage(journal, page, offset, length); @@ -3556,8 +3554,8 @@ void ext4_set_aops(struct inode *inode) static int __ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { - ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + ext4_fsblk_t index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize, pos; ext4_lblk_t iblock; struct inode *inode = mapping->host; @@ -3565,14 +3563,14 @@ static int 
__ext4_block_zero_page_range(handle_t *handle, struct page *page; int err = 0; - page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT, + page = find_or_create_page(mapping, from >> PAGE_SHIFT, mapping_gfp_constraint(mapping, ~__GFP_FS)); if (!page) return -ENOMEM; blocksize = inode->i_sb->s_blocksize; - iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); + iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); @@ -3614,7 +3612,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, ext4_encrypted_inode(inode)) { /* We expect the key to be set. */ BUG_ON(!ext4_has_encryption_key(inode)); - BUG_ON(blocksize != PAGE_CACHE_SIZE); + BUG_ON(blocksize != PAGE_SIZE); WARN_ON_ONCE(ext4_decrypt(page)); } } @@ -3638,7 +3636,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, unlock: unlock_page(page); - page_cache_release(page); + put_page(page); return err; } @@ -3653,7 +3651,7 @@ static int ext4_block_zero_page_range(handle_t *handle, struct address_space *mapping, loff_t from, loff_t length) { struct inode *inode = mapping->host; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize = inode->i_sb->s_blocksize; unsigned max = blocksize - (offset & (blocksize - 1)); @@ -3678,7 +3676,7 @@ static int ext4_block_zero_page_range(handle_t *handle, static int ext4_block_truncate_page(handle_t *handle, struct address_space *mapping, loff_t from) { - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned offset = from & (PAGE_SIZE-1); unsigned length; unsigned blocksize; struct inode *inode = mapping->host; @@ -3816,7 +3814,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) */ if (offset + length > inode->i_size) { length = inode->i_size + - PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) - + PAGE_SIZE - (inode->i_size & (PAGE_SIZE - 1)) - offset; } @@ -4891,23 +4889,23 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode) tid_t commit_tid = 0; int ret; - offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + offset = inode->i_size & (PAGE_SIZE - 1); /* * All buffers in the last page remain valid? Then there's nothing to - * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE == + * do. We do the check mainly to optimize the common PAGE_SIZE == * blocksize case */ - if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits)) + if (offset > PAGE_SIZE - (1 << inode->i_blkbits)) return; while (1) { page = find_lock_page(inode->i_mapping, - inode->i_size >> PAGE_CACHE_SHIFT); + inode->i_size >> PAGE_SHIFT); if (!page) return; ret = __ext4_journalled_invalidatepage(page, offset, - PAGE_CACHE_SIZE - offset); + PAGE_SIZE - offset); unlock_page(page); - page_cache_release(page); + put_page(page); if (ret != -EBUSY) return; commit_tid = 0; @@ -5546,10 +5544,10 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) goto out; } - if (page->index == size >> PAGE_CACHE_SHIFT) - len = size & ~PAGE_CACHE_MASK; + if (page->index == size >> PAGE_SHIFT) + len = size & ~PAGE_MASK; else - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; /* * Return if we have all the buffers mapped. 
This avoids the need to do * journal_start/journal_stop which can block and take a long time @@ -5580,7 +5578,7 @@ retry_alloc: ret = block_page_mkwrite(vma, vmf, get_block); if (!ret && ext4_should_journal_data(inode)) { if (ext4_walk_page_buffers(handle, page_buffers(page), 0, - PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) { + PAGE_SIZE, NULL, do_journal_get_write_access)) { unlock_page(page); ret = VM_FAULT_SIGBUS; ext4_journal_stop(handle); diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 50e05df28f66..eeeade76012e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -119,7 +119,7 @@ MODULE_PARM_DESC(mballoc_debug, "Debugging level for ext4's mballoc"); * * * one block each for bitmap and buddy information. So for each group we - * take up 2 blocks. A page can contain blocks_per_page (PAGE_CACHE_SIZE / + * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE / * blocksize) blocks. So it can have information regarding groups_per_page * which is blocks_per_page/2 * @@ -807,7 +807,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b) * * one block each for bitmap and buddy information. * So for each group we take up 2 blocks. A page can - * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks. + * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. * So it can have information regarding groups_per_page which * is blocks_per_page/2 * @@ -839,7 +839,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) sb = inode->i_sb; ngroups = ext4_get_groups_count(sb); blocksize = 1 << inode->i_blkbits; - blocks_per_page = PAGE_CACHE_SIZE / blocksize; + blocks_per_page = PAGE_SIZE / blocksize; groups_per_page = blocks_per_page >> 1; if (groups_per_page == 0) @@ -993,7 +993,7 @@ static int ext4_mb_get_buddy_page_lock(struct super_block *sb, e4b->bd_buddy_page = NULL; e4b->bd_bitmap_page = NULL; - blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; + blocks_per_page = PAGE_SIZE / sb->s_blocksize; /* * the buddy cache inode stores the block bitmap * and buddy information in consecutive blocks. @@ -1028,11 +1028,11 @@ static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) { unlock_page(e4b->bd_bitmap_page); - page_cache_release(e4b->bd_bitmap_page); + put_page(e4b->bd_bitmap_page); } if (e4b->bd_buddy_page) { unlock_page(e4b->bd_buddy_page); - page_cache_release(e4b->bd_buddy_page); + put_page(e4b->bd_buddy_page); } } @@ -1125,7 +1125,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, might_sleep(); mb_debug(1, "load group %u\n", group); - blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; + blocks_per_page = PAGE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); e4b->bd_blkbits = sb->s_blocksize_bits; @@ -1167,7 +1167,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, * is yet to initialize the same. So * wait for it to initialize. 
*/ - page_cache_release(page); + put_page(page); page = find_or_create_page(inode->i_mapping, pnum, gfp); if (page) { BUG_ON(page->mapping != inode->i_mapping); @@ -1203,7 +1203,7 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); if (page == NULL || !PageUptodate(page)) { if (page) - page_cache_release(page); + put_page(page); page = find_or_create_page(inode->i_mapping, pnum, gfp); if (page) { BUG_ON(page->mapping != inode->i_mapping); @@ -1238,11 +1238,11 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, err: if (page) - page_cache_release(page); + put_page(page); if (e4b->bd_bitmap_page) - page_cache_release(e4b->bd_bitmap_page); + put_page(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) - page_cache_release(e4b->bd_buddy_page); + put_page(e4b->bd_buddy_page); e4b->bd_buddy = NULL; e4b->bd_bitmap = NULL; return ret; @@ -1257,9 +1257,9 @@ static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) { if (e4b->bd_bitmap_page) - page_cache_release(e4b->bd_bitmap_page); + put_page(e4b->bd_bitmap_page); if (e4b->bd_buddy_page) - page_cache_release(e4b->bd_buddy_page); + put_page(e4b->bd_buddy_page); } @@ -2833,8 +2833,8 @@ static void ext4_free_data_callback(struct super_block *sb, /* No more items in the per group rb tree * balance refcounts from ext4_mb_free_metadata() */ - page_cache_release(e4b.bd_buddy_page); - page_cache_release(e4b.bd_bitmap_page); + put_page(e4b.bd_buddy_page); + put_page(e4b.bd_bitmap_page); } ext4_unlock_group(sb, entry->efd_group); kmem_cache_free(ext4_free_data_cachep, entry); @@ -4385,9 +4385,9 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) - page_cache_release(ac->ac_bitmap_page); + put_page(ac->ac_bitmap_page); if (ac->ac_buddy_page) - page_cache_release(ac->ac_buddy_page); + put_page(ac->ac_buddy_page); if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); @@ -4599,8 +4599,8 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, * otherwise we'll refresh it from * on-disk bitmap and lose not-yet-available * blocks */ - page_cache_get(e4b->bd_buddy_page); - page_cache_get(e4b->bd_bitmap_page); + get_page(e4b->bd_buddy_page); + get_page(e4b->bd_bitmap_page); } while (*n) { parent = *n; diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 4098acc701c3..325cef48b39a 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -60,10 +60,10 @@ ext4_double_down_write_data_sem(struct inode *first, struct inode *second) { if (first < second) { down_write(&EXT4_I(first)->i_data_sem); - down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); + down_write_nested(&EXT4_I(second)->i_data_sem, I_DATA_SEM_OTHER); } else { down_write(&EXT4_I(second)->i_data_sem); - down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING); + down_write_nested(&EXT4_I(first)->i_data_sem, I_DATA_SEM_OTHER); } } @@ -156,7 +156,7 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2, page[1] = grab_cache_page_write_begin(mapping[1], index2, fl); if (!page[1]) { unlock_page(page[0]); - page_cache_release(page[0]); + put_page(page[0]); return -ENOMEM; } /* @@ -192,7 +192,7 @@ mext_page_mkuptodate(struct page *page, unsigned from, unsigned to) create_empty_buffers(page, blocksize, 0); head = page_buffers(page); - block = 
(sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + block = (sector_t)page->index << (PAGE_SHIFT - inode->i_blkbits); for (bh = head, block_start = 0; bh != head || !block_start; block++, block_start = block_end, bh = bh->b_this_page) { block_end = block_start + blocksize; @@ -268,7 +268,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, int i, err2, jblocks, retries = 0; int replaced_count = 0; int from = data_offset_in_page << orig_inode->i_blkbits; - int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits; struct super_block *sb = orig_inode->i_sb; struct buffer_head *bh = NULL; @@ -404,9 +404,9 @@ data_copy: unlock_pages: unlock_page(pagep[0]); - page_cache_release(pagep[0]); + put_page(pagep[0]); unlock_page(pagep[1]); - page_cache_release(pagep[1]); + put_page(pagep[1]); stop_journal: ext4_journal_stop(handle); if (*err == -ENOSPC && @@ -484,6 +484,13 @@ mext_check_arguments(struct inode *orig_inode, return -EBUSY; } + if (IS_NOQUOTA(orig_inode) || IS_NOQUOTA(donor_inode)) { + ext4_debug("ext4 move extent: The argument files should " + "not be quota files [ino:orig %lu, donor %lu]\n", + orig_inode->i_ino, donor_inode->i_ino); + return -EBUSY; + } + /* Ext4 move extent supports only extent based file */ if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) { ext4_debug("ext4 move extent: orig file is not extents " @@ -554,7 +561,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, struct inode *orig_inode = file_inode(o_filp); struct inode *donor_inode = file_inode(d_filp); struct ext4_ext_path *path = NULL; - int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> orig_inode->i_blkbits; ext4_lblk_t o_end, o_start = orig_blk; ext4_lblk_t d_start = donor_blk; int ret; @@ -648,9 +655,9 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, if (o_end - o_start < cur_len) cur_len = o_end - o_start; - orig_page_index = o_start >> (PAGE_CACHE_SHIFT - + orig_page_index = o_start >> (PAGE_SHIFT - orig_inode->i_blkbits); - donor_page_index = d_start >> (PAGE_CACHE_SHIFT - + donor_page_index = d_start >> (PAGE_SHIFT - donor_inode->i_blkbits); offset_in_page = o_start % blocks_per_page; if (cur_len > blocks_per_page- offset_in_page) diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index d77d15f4b674..e4fc8ea45d78 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -23,6 +23,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/backing-dev.h> #include "ext4_jbd2.h" #include "xattr.h" @@ -432,8 +433,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io, * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); /* * In the first loop we prepare and mark buffers to submit. 
We have to * mark all buffers in the page before submitting so that @@ -470,9 +471,20 @@ int ext4_bio_write_page(struct ext4_io_submit *io, if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode) && nr_to_submit) { - data_page = ext4_encrypt(inode, page); + gfp_t gfp_flags = GFP_NOFS; + + retry_encrypt: + data_page = ext4_encrypt(inode, page, gfp_flags); if (IS_ERR(data_page)) { ret = PTR_ERR(data_page); + if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) { + if (io->io_bio) { + ext4_io_submit(io); + congestion_wait(BLK_RW_ASYNC, HZ/50); + } + gfp_flags |= __GFP_NOFAIL; + goto retry_encrypt; + } data_page = NULL; goto out; } diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 5dc5e95063de..dc54a4b60eba 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -23,7 +23,7 @@ * * then this code just gives up and calls the buffer_head-based read function. * It does handle a page which has holes at the end - that is a common case: - * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. + * the end-of-file on blocksize < PAGE_SIZE setups. * */ @@ -140,7 +140,7 @@ int ext4_mpage_readpages(struct address_space *mapping, struct inode *inode = mapping->host; const unsigned blkbits = inode->i_blkbits; - const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; + const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; @@ -173,7 +173,7 @@ int ext4_mpage_readpages(struct address_space *mapping, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) @@ -217,7 +217,7 @@ int ext4_mpage_readpages(struct address_space *mapping, set_error_page: SetPageError(page); zero_user_segment(page, 0, - PAGE_CACHE_SIZE); + PAGE_SIZE); unlock_page(page); goto next_page; } @@ -250,7 +250,7 @@ int ext4_mpage_readpages(struct address_space *mapping, } if (first_hole != blocks_per_page) { zero_user_segment(page, first_hole << blkbits, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (first_hole == 0) { SetPageUptodate(page); unlock_page(page); @@ -279,7 +279,7 @@ int ext4_mpage_readpages(struct address_space *mapping, if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { - ctx = ext4_get_crypto_ctx(inode); + ctx = ext4_get_crypto_ctx(inode, GFP_NOFS); if (IS_ERR(ctx)) goto set_error_page; } @@ -319,7 +319,7 @@ int ext4_mpage_readpages(struct address_space *mapping, unlock_page(page); next_page: if (pages) - page_cache_release(page); + put_page(page); } BUG_ON(pages && !list_empty(pages)); if (bio) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 539297515896..304c712dbe12 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1113,6 +1113,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, static int ext4_quota_enable(struct super_block *sb, int type, int format_id, unsigned int flags); static int ext4_enable_quotas(struct super_block *sb); +static int ext4_get_next_id(struct super_block *sb, struct kqid *qid); static struct dquot **ext4_get_dquots(struct inode *inode) { @@ -1129,7 +1130,7 @@ static const struct dquot_operations ext4_quota_operations = { .alloc_dquot = dquot_alloc, .destroy_dquot = dquot_destroy, .get_projid = ext4_get_projid, - .get_next_id = dquot_get_next_id, + .get_next_id = ext4_get_next_id, }; 
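The retry_encrypt logic in the page-io.c hunk above deserves a restatement: WB_SYNC_ALL writeback must not drop a dirty page, so on ENOMEM the code first submits the bio it has built so far (so already-attached pages make progress), throttles on congestion, and then retries with __GFP_NOFAIL. A generic sketch of that shape, with try_alloc() as a hypothetical allocation step standing in for ext4_encrypt():

#include <linux/backing-dev.h>
#include <linux/gfp.h>

/* Hypothetical allocation step; sketch only. */
void *try_alloc(gfp_t gfp);

static void *alloc_for_data_integrity_writeback(void)
{
	gfp_t gfp = GFP_NOFS;
	void *p;

	while (!(p = try_alloc(gfp))) {
		/* Give writeback a chance to drain, then insist:
		 * __GFP_NOFAIL guarantees eventual success. */
		congestion_wait(BLK_RW_ASYNC, HZ / 50);
		gfp |= __GFP_NOFAIL;
	}
	return p;
}
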
static const struct quotactl_ops ext4_qctl_operations = { @@ -1323,9 +1324,9 @@ static int set_qf_name(struct super_block *sb, int qtype, substring_t *args) return -1; } if (ext4_has_feature_quota(sb)) { - ext4_msg(sb, KERN_ERR, "Cannot set journaled quota options " - "when QUOTA feature is enabled"); - return -1; + ext4_msg(sb, KERN_INFO, "Journaled quota options " + "ignored when QUOTA feature is enabled"); + return 1; } qname = match_strdup(args); if (!qname) { @@ -1688,10 +1689,10 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, return -1; } if (ext4_has_feature_quota(sb)) { - ext4_msg(sb, KERN_ERR, - "Cannot set journaled quota options " + ext4_msg(sb, KERN_INFO, + "Quota format mount options ignored " "when QUOTA feature is enabled"); - return -1; + return 1; } sbi->s_jquota_fmt = m->mount_opt; #endif @@ -1756,11 +1757,11 @@ static int parse_options(char *options, struct super_block *sb, #ifdef CONFIG_QUOTA if (ext4_has_feature_quota(sb) && (test_opt(sb, USRQUOTA) || test_opt(sb, GRPQUOTA))) { - ext4_msg(sb, KERN_ERR, "Cannot set quota options when QUOTA " - "feature is enabled"); - return 0; - } - if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { + ext4_msg(sb, KERN_INFO, "Quota feature enabled, usrquota and grpquota " + "mount options ignored."); + clear_opt(sb, USRQUOTA); + clear_opt(sb, GRPQUOTA); + } else if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) { if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA]) clear_opt(sb, USRQUOTA); @@ -1784,7 +1785,7 @@ static int parse_options(char *options, struct super_block *sb, int blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size); - if (blocksize < PAGE_CACHE_SIZE) { + if (blocksize < PAGE_SIZE) { ext4_msg(sb, KERN_ERR, "can't mount with " "dioread_nolock if block size != PAGE_SIZE"); return 0; @@ -3808,7 +3809,7 @@ no_journal: } if ((DUMMY_ENCRYPTION_ENABLED(sbi) || ext4_has_feature_encrypt(sb)) && - (blocksize != PAGE_CACHE_SIZE)) { + (blocksize != PAGE_SIZE)) { ext4_msg(sb, KERN_ERR, "Unsupported blocksize for fs encryption"); goto failed_mount_wq; @@ -5028,6 +5029,20 @@ static int ext4_quota_on_mount(struct super_block *sb, int type) EXT4_SB(sb)->s_jquota_fmt, type); } +static void lockdep_set_quota_inode(struct inode *inode, int subclass) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + + /* The first argument of lockdep_set_subclass has to be + * *exactly* the same as the argument to init_rwsem() --- in + * this case, in init_once() --- or lockdep gets unhappy + * because the name of the lock is set using the + * stringification of the argument to init_rwsem(). 
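The lockdep_set_quota_inode() helper being defined above exists because lockdep keys a semaphore by its init site, so every i_data_sem looks identical to it; reclassifying the quota inode's semaphore into I_DATA_SEM_QUOTA makes the "i_data_sem inside i_data_sem" recursion caused by quota allocation look like an ordered pair instead of a self-deadlock. A minimal sketch of the mechanism with made-up names (both semaphores assumed initialized from the same init_rwsem() call site):

#include <linux/lockdep.h>
#include <linux/rwsem.h>

enum { SUB_NORMAL = 0, SUB_SPECIAL };	/* illustrative subclasses */

struct obj {
	struct rw_semaphore sem;
};

static void demo(struct obj *normal, struct obj *special)
{
	/* Plain down_write() on 'special' now registers under
	 * SUB_SPECIAL, so taking it while holding 'normal' is a
	 * legal ordered nesting in lockdep's eyes. */
	lockdep_set_subclass(&special->sem, SUB_SPECIAL);

	down_write(&normal->sem);
	down_write(&special->sem);
	up_write(&special->sem);
	up_write(&normal->sem);
}
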
+ */ + (void) ei; /* shut up clang warning if !CONFIG_LOCKDEP */ + lockdep_set_subclass(&ei->i_data_sem, subclass); +} + /* * Standard function to be called on quota_on */ @@ -5067,8 +5082,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id, if (err) return err; } - - return dquot_quota_on(sb, type, format_id, path); + lockdep_set_quota_inode(path->dentry->d_inode, I_DATA_SEM_QUOTA); + err = dquot_quota_on(sb, type, format_id, path); + if (err) + lockdep_set_quota_inode(path->dentry->d_inode, + I_DATA_SEM_NORMAL); + return err; } static int ext4_quota_enable(struct super_block *sb, int type, int format_id, @@ -5095,8 +5114,11 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id, /* Don't account quota for quota files to avoid recursion */ qf_inode->i_flags |= S_NOQUOTA; + lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA); err = dquot_enable(qf_inode, type, format_id, flags); iput(qf_inode); + if (err) + lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL); return err; } @@ -5253,6 +5275,17 @@ out: return len; } +static int ext4_get_next_id(struct super_block *sb, struct kqid *qid) +{ + const struct quota_format_ops *ops; + + if (!sb_has_quota_loaded(sb, qid->type)) + return -ESRCH; + ops = sb_dqopt(sb)->ops[qid->type]; + if (!ops || !ops->get_next_id) + return -ENOSYS; + return dquot_get_next_id(sb, qid); +} #endif static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c index 6f7ee30a89ce..75ed5c2f0c16 100644 --- a/fs/ext4/symlink.c +++ b/fs/ext4/symlink.c @@ -80,12 +80,12 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry, if (res <= plen) paddr[res] = '\0'; if (cpage) - page_cache_release(cpage); + put_page(cpage); set_delayed_call(done, kfree_link, paddr); return paddr; errout: if (cpage) - page_cache_release(cpage); + put_page(cpage); kfree(paddr); return ERR_PTR(res); } diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 0441e055c8e8..e79bd32b9b79 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -230,6 +230,27 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) return error; } +static int +__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header, + void *end, const char *function, unsigned int line) +{ + struct ext4_xattr_entry *entry = IFIRST(header); + int error = -EFSCORRUPTED; + + if (((void *) header >= end) || + (header->h_magic != le32_to_cpu(EXT4_XATTR_MAGIC))) + goto errout; + error = ext4_xattr_check_names(entry, end, entry); +errout: + if (error) + __ext4_error_inode(inode, function, line, 0, + "corrupted in-inode xattr"); + return error; +} + +#define xattr_check_inode(inode, header, end) \ + __xattr_check_inode((inode), (header), (end), __func__, __LINE__) + static inline int ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size) { @@ -341,7 +362,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, header = IHDR(inode, raw_inode); entry = IFIRST(header); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - error = ext4_xattr_check_names(entry, end, entry); + error = xattr_check_inode(inode, header, end); if (error) goto cleanup; error = ext4_xattr_find_entry(&entry, name_index, name, @@ -477,7 +498,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) raw_inode = ext4_raw_inode(&iloc); header = IHDR(inode, raw_inode); end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; - error = 
ext4_xattr_check_names(IFIRST(header), end, IFIRST(header)); + error = xattr_check_inode(inode, header, end); if (error) goto cleanup; error = ext4_xattr_list_entries(dentry, IFIRST(header), @@ -1040,8 +1061,7 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, is->s.here = is->s.first; is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { - error = ext4_xattr_check_names(IFIRST(header), is->s.end, - IFIRST(header)); + error = xattr_check_inode(inode, header, is->s.end); if (error) return error; /* Find the named attribute. */ @@ -1356,6 +1376,10 @@ retry: last = entry; total_ino = sizeof(struct ext4_xattr_ibody_header); + error = xattr_check_inode(inode, header, end); + if (error) + goto cleanup; + free = ext4_xattr_free_space(last, &min_offs, base, &total_ino); if (free >= new_extra_isize) { entry = IFIRST(header); diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index e5c762b37239..53fec0872e60 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -223,7 +223,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw)); - if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; } @@ -265,8 +265,8 @@ alloc_new: bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; - if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) < - PAGE_CACHE_SIZE) { + if (bio_add_page(io->bio, bio_page, PAGE_SIZE, 0) < + PAGE_SIZE) { __submit_merged_bio(io); goto alloc_new; } @@ -406,7 +406,7 @@ got_it: * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata. */ if (dn.data_blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); unlock_page(page); return page; @@ -517,7 +517,7 @@ struct page *get_new_data_page(struct inode *inode, goto got_it; if (dn.data_blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); } else { f2fs_put_page(page, 1); @@ -530,8 +530,8 @@ struct page *get_new_data_page(struct inode *inode, } got_it: if (new_i_size && i_size_read(inode) < - ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) { - i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)); + ((loff_t)(index + 1) << PAGE_SHIFT)) { + i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT)); /* Only the directory inode sets new_i_size */ set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); } @@ -570,9 +570,9 @@ alloc: /* update i_size */ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) + dn->ofs_in_node; - if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)) + if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT)) i_size_write(dn->inode, - ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)); + ((loff_t)(fofs + 1) << PAGE_SHIFT)); return 0; } @@ -971,7 +971,7 @@ got_it: goto confused; } } else { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); SetPageUptodate(page); unlock_page(page); goto next_page; @@ -1021,7 +1021,7 @@ submit_and_realloc: goto next_page; set_error_page: SetPageError(page); - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); unlock_page(page); goto next_page; confused: @@ -1032,7 +1032,7 @@ confused: unlock_page(page); next_page: if (pages) - 
page_cache_release(page); + put_page(page); } BUG_ON(pages && !list_empty(pages)); if (bio) @@ -1136,7 +1136,7 @@ static int f2fs_write_data_page(struct page *page, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); loff_t i_size = i_size_read(inode); const pgoff_t end_index = ((unsigned long long) i_size) - >> PAGE_CACHE_SHIFT; + >> PAGE_SHIFT; unsigned offset = 0; bool need_balance_fs = false; int err = 0; @@ -1157,11 +1157,11 @@ static int f2fs_write_data_page(struct page *page, * If the offset is out-of-range of file size, * this page does not have to be written to disk. */ - offset = i_size & (PAGE_CACHE_SIZE - 1); + offset = i_size & (PAGE_SIZE - 1); if ((page->index >= end_index + 1) || !offset) goto out; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); write: if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) goto redirty_out; @@ -1267,8 +1267,8 @@ next: cycled = 0; end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ @@ -1448,11 +1448,11 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, * the block addresses when there is no need to fill the page. */ if (!f2fs_has_inline_data(inode) && !f2fs_encrypted_inode(inode) && - len == PAGE_CACHE_SIZE) + len == PAGE_SIZE) return 0; if (f2fs_has_inline_data(inode) || - (pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { + (pos & PAGE_MASK) >= i_size_read(inode)) { f2fs_lock_op(sbi); locked = true; } @@ -1513,7 +1513,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct page *page = NULL; - pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT; + pgoff_t index = ((unsigned long long) pos) >> PAGE_SHIFT; bool need_balance = false; block_t blkaddr = NULL_ADDR; int err = 0; @@ -1561,22 +1561,22 @@ repeat: if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); - if (len == PAGE_CACHE_SIZE) + if (len == PAGE_SIZE) goto out_update; if (PageUptodate(page)) goto out_clear; - if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + if ((pos & PAGE_MASK) >= i_size_read(inode)) { + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + len; /* Reading beyond i_size is simple: memset to zero */ - zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); + zero_user_segments(page, 0, start, end, PAGE_SIZE); goto out_update; } if (blkaddr == NEW_ADDR) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); } else { struct f2fs_io_info fio = { .sbi = sbi, @@ -1688,7 +1688,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset, struct f2fs_sb_info *sbi = F2FS_I_SB(inode); if (inode->i_ino >= F2FS_ROOT_INO(sbi) && - (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) + (offset % PAGE_SIZE || length != PAGE_SIZE)) return; if (PageDirty(page)) { diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 4fb6ef88a34f..f4a61a5ff79f 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -164,7 +164,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) /* build curseg */ si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE; - si->base_mem += PAGE_CACHE_SIZE * 
NR_CURSEG_TYPE; + si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE; /* build dirty segmap */ si->base_mem += sizeof(struct dirty_seglist_info); @@ -201,9 +201,9 @@ get_cache: si->page_mem = 0; npages = NODE_MAPPING(sbi)->nrpages; - si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; + si->page_mem += (unsigned long long)npages << PAGE_SHIFT; npages = META_MAPPING(sbi)->nrpages; - si->page_mem += (unsigned long long)npages << PAGE_CACHE_SHIFT; + si->page_mem += (unsigned long long)npages << PAGE_SHIFT; } static int stat_show(struct seq_file *s, void *v) diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 80641ad82745..af819571bce7 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -17,8 +17,8 @@ static unsigned long dir_blocks(struct inode *inode) { - return ((unsigned long long) (i_size_read(inode) + PAGE_CACHE_SIZE - 1)) - >> PAGE_CACHE_SHIFT; + return ((unsigned long long) (i_size_read(inode) + PAGE_SIZE - 1)) + >> PAGE_SHIFT; } static unsigned int dir_buckets(unsigned int level, int dir_level) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index bbe2cd1265d0..7a4558d17f36 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -1294,7 +1294,7 @@ static inline void f2fs_put_page(struct page *page, int unlock) f2fs_bug_on(F2FS_P_SB(page), !PageLocked(page)); unlock_page(page); } - page_cache_release(page); + put_page(page); } static inline void f2fs_put_dnode(struct dnode_of_data *dn) diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index b41c3579ea9e..443e07705c2a 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -74,11 +74,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, goto mapped; /* page is wholly or partially inside EOF */ - if (((loff_t)(page->index + 1) << PAGE_CACHE_SHIFT) > + if (((loff_t)(page->index + 1) << PAGE_SHIFT) > i_size_read(inode)) { unsigned offset; - offset = i_size_read(inode) & ~PAGE_CACHE_MASK; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + offset = i_size_read(inode) & ~PAGE_MASK; + zero_user_segment(page, offset, PAGE_SIZE); } set_page_dirty(page); SetPageUptodate(page); @@ -346,11 +346,11 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) goto found; } - pgofs = (pgoff_t)(offset >> PAGE_CACHE_SHIFT); + pgofs = (pgoff_t)(offset >> PAGE_SHIFT); dirty = __get_first_dirty_index(inode->i_mapping, pgofs, whence); - for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { + for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) { set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA); if (err && err != -ENOENT) { @@ -370,7 +370,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) /* find data/hole in dnode block */ for (; dn.ofs_in_node < end_offset; dn.ofs_in_node++, pgofs++, - data_ofs = (loff_t)pgofs << PAGE_CACHE_SHIFT) { + data_ofs = (loff_t)pgofs << PAGE_SHIFT) { block_t blkaddr; blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); @@ -508,8 +508,8 @@ void truncate_data_blocks(struct dnode_of_data *dn) static int truncate_partial_data_page(struct inode *inode, u64 from, bool cache_only) { - unsigned offset = from & (PAGE_CACHE_SIZE - 1); - pgoff_t index = from >> PAGE_CACHE_SHIFT; + unsigned offset = from & (PAGE_SIZE - 1); + pgoff_t index = from >> PAGE_SHIFT; struct address_space *mapping = inode->i_mapping; struct page *page; @@ -529,7 +529,7 @@ static int truncate_partial_data_page(struct inode *inode, u64 from, return 0; truncate_out: f2fs_wait_on_page_writeback(page, DATA, true); - zero_user(page, offset, 
PAGE_CACHE_SIZE - offset); + zero_user(page, offset, PAGE_SIZE - offset); if (!cache_only || !f2fs_encrypted_inode(inode) || !S_ISREG(inode->i_mode)) set_page_dirty(page); @@ -799,11 +799,11 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) if (ret) return ret; - pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - off_start = offset & (PAGE_CACHE_SIZE - 1); - off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + off_start = offset & (PAGE_SIZE - 1); + off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, @@ -813,7 +813,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, - PAGE_CACHE_SIZE - off_start); + PAGE_SIZE - off_start); if (ret) return ret; } @@ -830,8 +830,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) f2fs_balance_fs(sbi, true); - blk_start = (loff_t)pg_start << PAGE_CACHE_SHIFT; - blk_end = (loff_t)pg_end << PAGE_CACHE_SHIFT; + blk_start = (loff_t)pg_start << PAGE_SHIFT; + blk_end = (loff_t)pg_end << PAGE_SHIFT; truncate_inode_pages_range(mapping, blk_start, blk_end - 1); @@ -954,8 +954,8 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) if (ret) return ret; - pg_start = offset >> PAGE_CACHE_SHIFT; - pg_end = (offset + len) >> PAGE_CACHE_SHIFT; + pg_start = offset >> PAGE_SHIFT; + pg_end = (offset + len) >> PAGE_SHIFT; /* write out all dirty pages from offset */ ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); @@ -1006,11 +1006,11 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, truncate_pagecache_range(inode, offset, offset + len - 1); - pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - off_start = offset & (PAGE_CACHE_SIZE - 1); - off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + off_start = offset & (PAGE_SIZE - 1); + off_end = (offset + len) & (PAGE_SIZE - 1); if (pg_start == pg_end) { ret = fill_zero(inode, pg_start, off_start, @@ -1024,12 +1024,12 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, } else { if (off_start) { ret = fill_zero(inode, pg_start++, off_start, - PAGE_CACHE_SIZE - off_start); + PAGE_SIZE - off_start); if (ret) return ret; new_size = max_t(loff_t, new_size, - (loff_t)pg_start << PAGE_CACHE_SHIFT); + (loff_t)pg_start << PAGE_SHIFT); } for (index = pg_start; index < pg_end; index++) { @@ -1060,7 +1060,7 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, f2fs_unlock_op(sbi); new_size = max_t(loff_t, new_size, - (loff_t)(index + 1) << PAGE_CACHE_SHIFT); + (loff_t)(index + 1) << PAGE_SHIFT); } if (off_end) { @@ -1117,8 +1117,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) truncate_pagecache(inode, offset); - pg_start = offset >> PAGE_CACHE_SHIFT; - pg_end = (offset + len) >> PAGE_CACHE_SHIFT; + pg_start = offset >> PAGE_SHIFT; + pg_end = (offset + len) >> PAGE_SHIFT; delta = pg_end - pg_start; nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; @@ -1158,11 +1158,11 @@ static int 
expand_inode_data(struct inode *inode, loff_t offset, f2fs_balance_fs(sbi, true); - pg_start = ((unsigned long long) offset) >> PAGE_CACHE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_CACHE_SHIFT; + pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; + pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - off_start = offset & (PAGE_CACHE_SIZE - 1); - off_end = (offset + len) & (PAGE_CACHE_SIZE - 1); + off_start = offset & (PAGE_SIZE - 1); + off_end = (offset + len) & (PAGE_SIZE - 1); f2fs_lock_op(sbi); @@ -1180,12 +1180,12 @@ noalloc: if (pg_start == pg_end) new_size = offset + len; else if (index == pg_start && off_start) - new_size = (loff_t)(index + 1) << PAGE_CACHE_SHIFT; + new_size = (loff_t)(index + 1) << PAGE_SHIFT; else if (index == pg_end) - new_size = ((loff_t)index << PAGE_CACHE_SHIFT) + + new_size = ((loff_t)index << PAGE_SHIFT) + off_end; else - new_size += PAGE_CACHE_SIZE; + new_size += PAGE_SIZE; } if (!(mode & FALLOC_FL_KEEP_SIZE) && @@ -1652,8 +1652,8 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi, if (need_inplace_update(inode)) return -EINVAL; - pg_start = range->start >> PAGE_CACHE_SHIFT; - pg_end = (range->start + range->len) >> PAGE_CACHE_SHIFT; + pg_start = range->start >> PAGE_SHIFT; + pg_end = (range->start + range->len) >> PAGE_SHIFT; f2fs_balance_fs(sbi, true); @@ -1770,7 +1770,7 @@ clear_out: out: inode_unlock(inode); if (!err) - range->len = (u64)total << PAGE_CACHE_SHIFT; + range->len = (u64)total << PAGE_SHIFT; return err; } diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 358214e9f707..a2fbe6f427d3 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -51,7 +51,7 @@ void read_inline_data(struct page *page, struct page *ipage) f2fs_bug_on(F2FS_P_SB(page), page->index); - zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); + zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE); /* Copy the whole inline data block */ src_addr = inline_data_addr(ipage); @@ -93,7 +93,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page) } if (page->index) - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); else read_inline_data(page, ipage); @@ -375,7 +375,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, goto out; f2fs_wait_on_page_writeback(page, DATA, true); - zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE); + zero_user_segment(page, MAX_INLINE_DATA, PAGE_SIZE); dentry_blk = kmap_atomic(page); @@ -405,8 +405,8 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, stat_dec_inline_dir(dir); clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); - if (i_size_read(dir) < PAGE_CACHE_SIZE) { - i_size_write(dir, PAGE_CACHE_SIZE); + if (i_size_read(dir) < PAGE_SIZE) { + i_size_write(dir, PAGE_SIZE); set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 7876f1052101..013e57932d61 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -1027,12 +1027,6 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, goto errout; } - /* this is broken symlink case */ - if (unlikely(cstr.name[0] == 0)) { - res = -ENOENT; - goto errout; - } - if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) { /* Symlink data on the disk is corrupted */ res = -EIO; @@ -1046,17 +1040,23 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, if (res < 0) goto errout; + /* this is broken symlink case */ + if (unlikely(pstr.name[0] == 0)) { + res = -ENOENT; + goto 
errout; + } + paddr = pstr.name; /* Null-terminate the name */ paddr[res] = '\0'; - page_cache_release(cpage); + put_page(cpage); set_delayed_call(done, kfree_link, paddr); return paddr; errout: fscrypt_fname_free_buffer(&pstr); - page_cache_release(cpage); + put_page(cpage); return ERR_PTR(res); } diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 118321bd1a7f..1a33de9d84b1 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -46,11 +46,11 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) */ if (type == FREE_NIDS) { mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == NAT_ENTRIES) { mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2); } else if (type == DIRTY_DENTS) { if (sbi->sb->s_bdi->wb.dirty_exceeded) @@ -62,13 +62,13 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type) for (i = 0; i <= UPDATE_INO; i++) mem_size += (sbi->im[i].ino_num * - sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT; + sizeof(struct ino_entry)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else if (type == EXTENT_CACHE) { mem_size = (atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree) + atomic_read(&sbi->total_ext_node) * - sizeof(struct extent_node)) >> PAGE_CACHE_SHIFT; + sizeof(struct extent_node)) >> PAGE_SHIFT; res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1); } else { if (!sbi->sb->s_bdi->wb.dirty_exceeded) @@ -121,7 +121,7 @@ static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid) src_addr = page_address(src_page); dst_addr = page_address(dst_page); - memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 0b30cd2aeebd..011942f94d64 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -591,7 +591,7 @@ out: /* truncate meta pages to be used by the recovery */ truncate_inode_pages_range(META_MAPPING(sbi), - (loff_t)MAIN_BLKADDR(sbi) << PAGE_CACHE_SHIFT, -1); + (loff_t)MAIN_BLKADDR(sbi) << PAGE_SHIFT, -1); if (err) { truncate_inode_pages_final(NODE_MAPPING(sbi)); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 6f16b39f0b52..540669d6978e 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -885,12 +885,12 @@ int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra) } } - sum_in_page = (PAGE_CACHE_SIZE - 2 * SUM_JOURNAL_SIZE - + sum_in_page = (PAGE_SIZE - 2 * SUM_JOURNAL_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE; if (valid_sum_count <= sum_in_page) return 1; else if ((valid_sum_count - sum_in_page) <= - (PAGE_CACHE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) + (PAGE_SIZE - SUM_FOOTER_SIZE) / SUMMARY_SIZE) return 2; return 3; } @@ -909,9 +909,9 @@ void update_meta_page(struct f2fs_sb_info *sbi, void *src, block_t blk_addr) void *dst = page_address(page); if (src) - memcpy(dst, src, PAGE_CACHE_SIZE); + memcpy(dst, src, PAGE_SIZE); else - memset(dst, 0, PAGE_CACHE_SIZE); + memset(dst, 0, PAGE_SIZE); set_page_dirty(page); f2fs_put_page(page, 1); } @@ -1596,7 +1596,7 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi) s = (struct f2fs_summary *)(kaddr + offset); seg_i->sum_blk->entries[j] = *s; offset += SUMMARY_SIZE; - if (offset + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + if (offset + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) 
continue; @@ -1757,7 +1757,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr) *summary = seg_i->sum_blk->entries[j]; written_size += SUMMARY_SIZE; - if (written_size + SUMMARY_SIZE <= PAGE_CACHE_SIZE - + if (written_size + SUMMARY_SIZE <= PAGE_SIZE - SUM_FOOTER_SIZE) continue; @@ -1844,7 +1844,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi, src_addr = page_address(src_page); dst_addr = page_address(dst_page); - memcpy(dst_addr, src_addr, PAGE_CACHE_SIZE); + memcpy(dst_addr, src_addr, PAGE_SIZE); set_page_dirty(dst_page); f2fs_put_page(src_page, 1); @@ -2171,7 +2171,7 @@ static int build_curseg(struct f2fs_sb_info *sbi) for (i = 0; i < NR_CURSEG_TYPE; i++) { mutex_init(&array[i].curseg_mutex); - array[i].sum_blk = kzalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + array[i].sum_blk = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!array[i].sum_blk) return -ENOMEM; init_rwsem(&array[i].journal_rwsem); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 15bb81f8dac2..006f87d69921 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -984,9 +984,25 @@ static loff_t max_file_blocks(void) return result; } +static int __f2fs_commit_super(struct buffer_head *bh, + struct f2fs_super_block *super) +{ + lock_buffer(bh); + if (super) + memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); + set_buffer_uptodate(bh); + set_buffer_dirty(bh); + unlock_buffer(bh); + + /* it's rare case, we can do fua all the time */ + return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); +} + static inline bool sanity_check_area_boundary(struct super_block *sb, - struct f2fs_super_block *raw_super) + struct buffer_head *bh) { + struct f2fs_super_block *raw_super = (struct f2fs_super_block *) + (bh->b_data + F2FS_SUPER_OFFSET); u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); @@ -1000,6 +1016,10 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main); u32 segment_count = le32_to_cpu(raw_super->segment_count); u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); + u64 main_end_blkaddr = main_blkaddr + + (segment_count_main << log_blocks_per_seg); + u64 seg_end_blkaddr = segment0_blkaddr + + (segment_count << log_blocks_per_seg); if (segment0_blkaddr != cp_blkaddr) { f2fs_msg(sb, KERN_INFO, @@ -1044,22 +1064,45 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, return true; } - if (main_blkaddr + (segment_count_main << log_blocks_per_seg) != - segment0_blkaddr + (segment_count << log_blocks_per_seg)) { + if (main_end_blkaddr > seg_end_blkaddr) { f2fs_msg(sb, KERN_INFO, - "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)", + "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", main_blkaddr, - segment0_blkaddr + (segment_count << log_blocks_per_seg), + segment0_blkaddr + + (segment_count << log_blocks_per_seg), segment_count_main << log_blocks_per_seg); return true; + } else if (main_end_blkaddr < seg_end_blkaddr) { + int err = 0; + char *res; + + /* fix in-memory information all the time */ + raw_super->segment_count = cpu_to_le32((main_end_blkaddr - + segment0_blkaddr) >> log_blocks_per_seg); + + if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) { + res = "internally"; + } else { + err = __f2fs_commit_super(bh, NULL); + res = err ? 
"failed" : "done"; + } + f2fs_msg(sb, KERN_INFO, + "Fix alignment : %s, start(%u) end(%u) block(%u)", + res, main_blkaddr, + segment0_blkaddr + + (segment_count << log_blocks_per_seg), + segment_count_main << log_blocks_per_seg); + if (err) + return true; } - return false; } static int sanity_check_raw_super(struct super_block *sb, - struct f2fs_super_block *raw_super) + struct buffer_head *bh) { + struct f2fs_super_block *raw_super = (struct f2fs_super_block *) + (bh->b_data + F2FS_SUPER_OFFSET); unsigned int blocksize; if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { @@ -1070,10 +1113,10 @@ static int sanity_check_raw_super(struct super_block *sb, } /* Currently, support only 4KB page cache size */ - if (F2FS_BLKSIZE != PAGE_CACHE_SIZE) { + if (F2FS_BLKSIZE != PAGE_SIZE) { f2fs_msg(sb, KERN_INFO, "Invalid page_cache_size (%lu), supports only 4KB\n", - PAGE_CACHE_SIZE); + PAGE_SIZE); return 1; } @@ -1126,7 +1169,7 @@ static int sanity_check_raw_super(struct super_block *sb, } /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ - if (sanity_check_area_boundary(sb, raw_super)) + if (sanity_check_area_boundary(sb, bh)) return 1; return 0; @@ -1202,7 +1245,7 @@ static int read_raw_super_block(struct super_block *sb, { int block; struct buffer_head *bh; - struct f2fs_super_block *super, *buf; + struct f2fs_super_block *super; int err = 0; super = kzalloc(sizeof(struct f2fs_super_block), GFP_KERNEL); @@ -1218,11 +1261,8 @@ static int read_raw_super_block(struct super_block *sb, continue; } - buf = (struct f2fs_super_block *) - (bh->b_data + F2FS_SUPER_OFFSET); - /* sanity checking of raw super */ - if (sanity_check_raw_super(sb, buf)) { + if (sanity_check_raw_super(sb, bh)) { f2fs_msg(sb, KERN_ERR, "Can't find valid F2FS filesystem in %dth superblock", block + 1); @@ -1232,7 +1272,8 @@ static int read_raw_super_block(struct super_block *sb, } if (!*raw_super) { - memcpy(super, buf, sizeof(*super)); + memcpy(super, bh->b_data + F2FS_SUPER_OFFSET, + sizeof(*super)); *valid_super_block = block; *raw_super = super; } @@ -1252,42 +1293,29 @@ static int read_raw_super_block(struct super_block *sb, return err; } -static int __f2fs_commit_super(struct f2fs_sb_info *sbi, int block) +int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) { - struct f2fs_super_block *super = F2FS_RAW_SUPER(sbi); struct buffer_head *bh; int err; - bh = sb_getblk(sbi->sb, block); + /* write back-up superblock first */ + bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1); if (!bh) return -EIO; - - lock_buffer(bh); - memcpy(bh->b_data + F2FS_SUPER_OFFSET, super, sizeof(*super)); - set_buffer_uptodate(bh); - set_buffer_dirty(bh); - unlock_buffer(bh); - - /* it's rare case, we can do fua all the time */ - err = __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); + err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); brelse(bh); - return err; -} - -int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) -{ - int err; - - /* write back-up superblock first */ - err = __f2fs_commit_super(sbi, sbi->valid_super_block ? 
0 : 1); - /* if we are in recovery path, skip writing valid superblock */ if (recover || err) return err; /* write current valid superblock */ - return __f2fs_commit_super(sbi, sbi->valid_super_block); + bh = sb_getblk(sbi->sb, sbi->valid_super_block); + if (!bh) + return -EIO; + err = __f2fs_commit_super(bh, F2FS_RAW_SUPER(sbi)); + brelse(bh); + return err; } static int f2fs_fill_super(struct super_block *sb, void *data, int silent) @@ -1442,7 +1470,7 @@ try_onemore: seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE); if (__exist_node_summaries(sbi)) sbi->kbytes_written = - le64_to_cpu(seg_i->sum_blk->journal.info.kbytes_written); + le64_to_cpu(seg_i->journal->info.kbytes_written); build_gc_manager(sbi); diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c index cb84f0fcc72a..bfc780c682fb 100644 --- a/fs/freevxfs/vxfs_immed.c +++ b/fs/freevxfs/vxfs_immed.c @@ -66,11 +66,11 @@ static int vxfs_immed_readpage(struct file *fp, struct page *pp) { struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); - u_int64_t offset = (u_int64_t)pp->index << PAGE_CACHE_SHIFT; + u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT; caddr_t kaddr; kaddr = kmap(pp); - memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_CACHE_SIZE); + memcpy(kaddr, vip->vii_immed.vi_immed + offset, PAGE_SIZE); kunmap(pp); flush_dcache_page(pp); diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index 1cff72df0389..a49e0cfbb686 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c @@ -45,7 +45,7 @@ /* * Number of VxFS blocks per page. */ -#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_CACHE_SIZE / (sbp)->s_blocksize)) +#define VXFS_BLOCK_PER_PAGE(sbp) ((PAGE_SIZE / (sbp)->s_blocksize)) static struct dentry * vxfs_lookup(struct inode *, struct dentry *, unsigned int); @@ -175,7 +175,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp) if (de) { ino = de->d_ino; kunmap(pp); - page_cache_release(pp); + put_page(pp); } return (ino); @@ -255,8 +255,8 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) nblocks = dir_blocks(ip); pblocks = VXFS_BLOCK_PER_PAGE(sbp); - page = pos >> PAGE_CACHE_SHIFT; - offset = pos & ~PAGE_CACHE_MASK; + page = pos >> PAGE_SHIFT; + offset = pos & ~PAGE_MASK; block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks; for (; page < npages; page++, block = 0) { @@ -289,7 +289,7 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) continue; offset = (char *)de - kaddr; - ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; + ctx->pos = ((page << PAGE_SHIFT) | offset) + 2; if (!dir_emit(ctx, de->d_name, de->d_namelen, de->d_ino, DT_UNKNOWN)) { vxfs_put_page(pp); @@ -301,6 +301,6 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx) vxfs_put_page(pp); offset = 0; } - ctx->pos = ((page << PAGE_CACHE_SHIFT) | offset) + 2; + ctx->pos = ((page << PAGE_SHIFT) | offset) + 2; return 0; } diff --git a/fs/freevxfs/vxfs_subr.c b/fs/freevxfs/vxfs_subr.c index 5d318c44f855..e806694d4145 100644 --- a/fs/freevxfs/vxfs_subr.c +++ b/fs/freevxfs/vxfs_subr.c @@ -50,7 +50,7 @@ inline void vxfs_put_page(struct page *pp) { kunmap(pp); - page_cache_release(pp); + put_page(pp); } /** diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index fee81e8768c9..592cea54cea0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -33,7 +33,7 @@ /* * 4MB minimal write chunk size */ -#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_CACHE_SHIFT - 10)) +#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10)) struct wb_completion { atomic_t cnt; diff --git a/fs/fscache/page.c 
b/fs/fscache/page.c index 6b35fc4860a0..3078b679fcd1 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c @@ -113,7 +113,7 @@ try_again: wake_up_bit(&cookie->flags, 0); if (xpage) - page_cache_release(xpage); + put_page(xpage); __fscache_uncache_page(cookie, page); return true; @@ -164,7 +164,7 @@ static void fscache_end_page_write(struct fscache_object *object, } spin_unlock(&object->lock); if (xpage) - page_cache_release(xpage); + put_page(xpage); } /* @@ -884,7 +884,7 @@ void fscache_invalidate_writes(struct fscache_cookie *cookie) spin_unlock(&cookie->stores_lock); for (i = n - 1; i >= 0; i--) - page_cache_release(results[i]); + put_page(results[i]); } _leave(""); @@ -982,7 +982,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, radix_tree_tag_set(&cookie->stores, page->index, FSCACHE_COOKIE_PENDING_TAG); - page_cache_get(page); + get_page(page); /* we only want one writer at a time, but we do need to queue new * writers after exclusive ops */ @@ -1026,7 +1026,7 @@ submit_failed: radix_tree_delete(&cookie->stores, page->index); spin_unlock(&cookie->stores_lock); wake_cookie = __fscache_unuse_cookie(cookie); - page_cache_release(page); + put_page(page); ret = -ENOBUFS; goto nobufs; diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ebb5e37455a0..cbece1221417 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -897,7 +897,7 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) return err; } - page_cache_get(newpage); + get_page(newpage); if (!(buf->flags & PIPE_BUF_FLAG_LRU)) lru_cache_add_file(newpage); @@ -912,12 +912,12 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep) if (err) { unlock_page(newpage); - page_cache_release(newpage); + put_page(newpage); return err; } unlock_page(oldpage); - page_cache_release(oldpage); + put_page(oldpage); cs->len = 0; return 0; @@ -951,7 +951,7 @@ static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page, fuse_copy_finish(cs); buf = cs->pipebufs; - page_cache_get(page); + get_page(page); buf->page = page; buf->offset = offset; buf->len = count; @@ -1435,7 +1435,7 @@ out_unlock: out: for (; page_nr < cs.nr_segs; page_nr++) - page_cache_release(bufs[page_nr].page); + put_page(bufs[page_nr].page); kfree(bufs); return ret; @@ -1632,8 +1632,8 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, goto out_up_killsb; mapping = inode->i_mapping; - index = outarg.offset >> PAGE_CACHE_SHIFT; - offset = outarg.offset & ~PAGE_CACHE_MASK; + index = outarg.offset >> PAGE_SHIFT; + offset = outarg.offset & ~PAGE_MASK; file_size = i_size_read(inode); end = outarg.offset + outarg.size; if (end > file_size) { @@ -1652,13 +1652,13 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size, if (!page) goto out_iput; - this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); + this_num = min_t(unsigned, num, PAGE_SIZE - offset); err = fuse_copy_page(cs, &page, offset, this_num, 0); if (!err && offset == 0 && - (this_num == PAGE_CACHE_SIZE || file_size == end)) + (this_num == PAGE_SIZE || file_size == end)) SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (err) goto out_iput; @@ -1697,7 +1697,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, size_t total_len = 0; int num_pages; - offset = outarg->offset & ~PAGE_CACHE_MASK; + offset = outarg->offset & ~PAGE_MASK; file_size = i_size_read(inode); num = outarg->size; @@ -1720,7 +1720,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode 
*inode, req->page_descs[0].offset = offset; req->end = fuse_retrieve_end; - index = outarg->offset >> PAGE_CACHE_SHIFT; + index = outarg->offset >> PAGE_SHIFT; while (num && req->num_pages < num_pages) { struct page *page; @@ -1730,7 +1730,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, if (!page) break; - this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset); + this_num = min_t(unsigned, num, PAGE_SIZE - offset); req->pages[req->num_pages] = page; req->page_descs[req->num_pages].length = this_num; req->num_pages++; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 9dde38f12c07..719924d6c706 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -348,7 +348,7 @@ static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, pgoff_t curr_index; BUG_ON(req->inode != inode); - curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; + curr_index = req->misc.write.in.offset >> PAGE_SHIFT; if (idx_from < curr_index + req->num_pages && curr_index <= idx_to) { found = true; @@ -683,11 +683,11 @@ static void fuse_short_read(struct fuse_req *req, struct inode *inode, * present there. */ int i; - int start_idx = num_read >> PAGE_CACHE_SHIFT; - size_t off = num_read & (PAGE_CACHE_SIZE - 1); + int start_idx = num_read >> PAGE_SHIFT; + size_t off = num_read & (PAGE_SIZE - 1); for (i = start_idx; i < req->num_pages; i++) { - zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); + zero_user_segment(req->pages[i], off, PAGE_SIZE); off = 0; } } else { @@ -704,7 +704,7 @@ static int fuse_do_readpage(struct file *file, struct page *page) struct fuse_req *req; size_t num_read; loff_t pos = page_offset(page); - size_t count = PAGE_CACHE_SIZE; + size_t count = PAGE_SIZE; u64 attr_ver; int err; @@ -789,7 +789,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) else SetPageError(page); unlock_page(page); - page_cache_release(page); + put_page(page); } if (req->ff) fuse_file_put(req->ff, false); @@ -800,7 +800,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file) struct fuse_file *ff = file->private_data; struct fuse_conn *fc = ff->fc; loff_t pos = page_offset(req->pages[0]); - size_t count = req->num_pages << PAGE_CACHE_SHIFT; + size_t count = req->num_pages << PAGE_SHIFT; req->out.argpages = 1; req->out.page_zeroing = 1; @@ -836,7 +836,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) if (req->num_pages && (req->num_pages == FUSE_MAX_PAGES_PER_REQ || - (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || + (req->num_pages + 1) * PAGE_SIZE > fc->max_read || req->pages[req->num_pages - 1]->index + 1 != page->index)) { int nr_alloc = min_t(unsigned, data->nr_pages, FUSE_MAX_PAGES_PER_REQ); @@ -858,7 +858,7 @@ static int fuse_readpages_fill(void *_data, struct page *page) return -EIO; } - page_cache_get(page); + get_page(page); req->pages[req->num_pages] = page; req->page_descs[req->num_pages].length = PAGE_SIZE; req->num_pages++; @@ -1003,17 +1003,17 @@ static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, for (i = 0; i < req->num_pages; i++) { struct page *page = req->pages[i]; - if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) + if (!req->out.h.error && !offset && count >= PAGE_SIZE) SetPageUptodate(page); - if (count > PAGE_CACHE_SIZE - offset) - count -= PAGE_CACHE_SIZE - offset; + if (count > PAGE_SIZE - offset) + count -= PAGE_SIZE - offset; else count = 0; offset = 0; unlock_page(page); - page_cache_release(page); + put_page(page); } return res; @@ -1024,7 
+1024,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, struct iov_iter *ii, loff_t pos) { struct fuse_conn *fc = get_fuse_conn(mapping->host); - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned offset = pos & (PAGE_SIZE - 1); size_t count = 0; int err; @@ -1034,8 +1034,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, do { size_t tmp; struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset, + pgoff_t index = pos >> PAGE_SHIFT; + size_t bytes = min_t(size_t, PAGE_SIZE - offset, iov_iter_count(ii)); bytes = min_t(size_t, bytes, fc->max_write - count); @@ -1059,7 +1059,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, iov_iter_advance(ii, tmp); if (!tmp) { unlock_page(page); - page_cache_release(page); + put_page(page); bytes = min(bytes, iov_iter_single_seg_count(ii)); goto again; } @@ -1072,7 +1072,7 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, count += tmp; pos += tmp; offset += tmp; - if (offset == PAGE_CACHE_SIZE) + if (offset == PAGE_SIZE) offset = 0; if (!fc->big_writes) @@ -1086,8 +1086,8 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req, static inline unsigned fuse_wr_pages(loff_t pos, size_t len) { return min_t(unsigned, - ((pos + len - 1) >> PAGE_CACHE_SHIFT) - - (pos >> PAGE_CACHE_SHIFT) + 1, + ((pos + len - 1) >> PAGE_SHIFT) - + (pos >> PAGE_SHIFT) + 1, FUSE_MAX_PAGES_PER_REQ); } @@ -1205,8 +1205,8 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) goto out; invalidate_mapping_pages(file->f_mapping, - pos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); written += written_buffered; iocb->ki_pos = pos + written_buffered; @@ -1315,8 +1315,8 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, size_t nmax = write ? 
fc->max_write : fc->max_read; loff_t pos = *ppos; size_t count = iov_iter_count(iter); - pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT; - pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT; + pgoff_t idx_from = pos >> PAGE_SHIFT; + pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT; ssize_t res = 0; struct fuse_req *req; int err = 0; @@ -1466,7 +1466,7 @@ __acquires(fc->lock) { struct fuse_inode *fi = get_fuse_inode(req->inode); struct fuse_write_in *inarg = &req->misc.write.in; - __u64 data_size = req->num_pages * PAGE_CACHE_SIZE; + __u64 data_size = req->num_pages * PAGE_SIZE; if (!fc->connected) goto out_free; @@ -1727,7 +1727,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, list_del(&new_req->writepages_entry); list_for_each_entry(old_req, &fi->writepages, writepages_entry) { BUG_ON(old_req->inode != new_req->inode); - curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT; + curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT; if (curr_index <= page->index && page->index < curr_index + old_req->num_pages) { found = true; @@ -1742,7 +1742,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, new_req->num_pages = 1; for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { BUG_ON(tmp->inode != new_req->inode); - curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT; + curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT; if (tmp->num_pages == 1 && curr_index == page->index) { old_req = tmp; @@ -1799,7 +1799,7 @@ static int fuse_writepages_fill(struct page *page, if (req && req->num_pages && (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || - (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || + (req->num_pages + 1) * PAGE_SIZE > fc->max_write || data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { fuse_writepages_send(data); data->req = NULL; @@ -1924,7 +1924,7 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct fuse_conn *fc = get_fuse_conn(file_inode(file)); struct page *page; loff_t fsize; @@ -1938,15 +1938,15 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, fuse_wait_on_page_writeback(mapping->host, page->index); - if (PageUptodate(page) || len == PAGE_CACHE_SIZE) + if (PageUptodate(page) || len == PAGE_SIZE) goto success; /* * Check if the start this page comes after the end of file, in which * case the readpage can be optimized away. 
*/ fsize = i_size_read(mapping->host); - if (fsize <= (pos & PAGE_CACHE_MASK)) { - size_t off = pos & ~PAGE_CACHE_MASK; + if (fsize <= (pos & PAGE_MASK)) { + size_t off = pos & ~PAGE_MASK; if (off) zero_user_segment(page, 0, off); goto success; @@ -1960,7 +1960,7 @@ success: cleanup: unlock_page(page); - page_cache_release(page); + put_page(page); error: return err; } @@ -1973,16 +1973,16 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, if (!PageUptodate(page)) { /* Zero any unwritten bytes at the end of the page */ - size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK; + size_t endoff = (pos + copied) & ~PAGE_MASK; if (endoff) - zero_user_segment(page, endoff, PAGE_CACHE_SIZE); + zero_user_segment(page, endoff, PAGE_SIZE); SetPageUptodate(page); } fuse_write_update_size(inode, pos + copied); set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 4d69d5c0bedc..1ce67668a8e1 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -339,11 +339,11 @@ int fuse_reverse_inval_inode(struct super_block *sb, u64 nodeid, fuse_invalidate_attr(inode); if (offset >= 0) { - pg_start = offset >> PAGE_CACHE_SHIFT; + pg_start = offset >> PAGE_SHIFT; if (len <= 0) pg_end = -1; else - pg_end = (offset + len - 1) >> PAGE_CACHE_SHIFT; + pg_end = (offset + len - 1) >> PAGE_SHIFT; invalidate_inode_pages2_range(inode->i_mapping, pg_start, pg_end); } @@ -864,7 +864,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) process_init_limits(fc, arg); if (arg->minor >= 6) { - ra_pages = arg->max_readahead / PAGE_CACHE_SIZE; + ra_pages = arg->max_readahead / PAGE_SIZE; if (arg->flags & FUSE_ASYNC_READ) fc->async_read = 1; if (!(arg->flags & FUSE_POSIX_LOCKS)) @@ -901,7 +901,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) if (arg->time_gran && arg->time_gran <= 1000000000) fc->sb->s_time_gran = arg->time_gran; } else { - ra_pages = fc->max_read / PAGE_CACHE_SIZE; + ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; fc->no_flock = 1; } @@ -922,7 +922,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->major = FUSE_KERNEL_VERSION; arg->minor = FUSE_KERNEL_MINOR_VERSION; - arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; + arg->max_readahead = fc->bdi.ra_pages * PAGE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | @@ -955,7 +955,7 @@ static int fuse_bdi_init(struct fuse_conn *fc, struct super_block *sb) int err; fc->bdi.name = "fuse"; - fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; + fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_SIZE; /* fuse does it's own writeback accounting */ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB | BDI_CAP_STRICTLIMIT; @@ -1053,8 +1053,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) goto err; #endif } else { - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; } sb->s_magic = FUSE_SUPER_MAGIC; sb->s_op = &fuse_super_operations; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index aa016e4b8bec..1bbbee945f46 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -101,7 +101,7 @@ static int gfs2_writepage_common(struct page *page, struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = 
GFS2_SB(inode); loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl))) @@ -109,9 +109,9 @@ static int gfs2_writepage_common(struct page *page, if (current->journal_info) goto redirty; /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (page->index > end_index || (page->index == end_index && !offset)) { - page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE); + page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE); goto out; } return 1; @@ -238,7 +238,7 @@ static int gfs2_write_jdata_pagevec(struct address_space *mapping, { struct inode *inode = mapping->host; struct gfs2_sbd *sdp = GFS2_SB(inode); - unsigned nrblocks = nr_pages * (PAGE_CACHE_SIZE/inode->i_sb->s_blocksize); + unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize); int i; int ret; @@ -366,8 +366,8 @@ static int gfs2_write_cache_jdata(struct address_space *mapping, cycled = 0; end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ @@ -458,7 +458,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) * so we need to supply one here. It doesn't happen often. */ if (unlikely(page->index)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); return 0; } @@ -471,7 +471,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page) if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode))) dsize = (dibh->b_size - sizeof(struct gfs2_dinode)); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); - memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); + memset(kaddr + dsize, 0, PAGE_SIZE - dsize); kunmap_atomic(kaddr); flush_dcache_page(page); brelse(dibh); @@ -560,8 +560,8 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, unsigned size) { struct address_space *mapping = ip->i_inode.i_mapping; - unsigned long index = *pos / PAGE_CACHE_SIZE; - unsigned offset = *pos & (PAGE_CACHE_SIZE - 1); + unsigned long index = *pos / PAGE_SIZE; + unsigned offset = *pos & (PAGE_SIZE - 1); unsigned copied = 0; unsigned amt; struct page *page; @@ -569,15 +569,15 @@ int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos, do { amt = size - copied; - if (offset + size > PAGE_CACHE_SIZE) - amt = PAGE_CACHE_SIZE - offset; + if (offset + size > PAGE_SIZE) + amt = PAGE_SIZE - offset; page = read_cache_page(mapping, index, __gfs2_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); p = kmap_atomic(page); memcpy(buf + copied, p + offset, amt); kunmap_atomic(p); - page_cache_release(page); + put_page(page); copied += amt; index++; offset = 0; @@ -651,8 +651,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, unsigned requested = 0; int alloc_required; int error = 0; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + pgoff_t index = pos >> PAGE_SHIFT; + unsigned from = pos & (PAGE_SIZE - 1); struct page *page; gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh); @@ -697,7 +697,7 @@ static int gfs2_write_begin(struct file *file, struct 
address_space *mapping, rblocks += gfs2_rg_blocks(ip, requested); error = gfs2_trans_begin(sdp, rblocks, - PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); + PAGE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; @@ -727,7 +727,7 @@ out: return 0; unlock_page(page); - page_cache_release(page); + put_page(page); gfs2_trans_end(sdp); if (pos + len > ip->i_inode.i_size) @@ -827,7 +827,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh, if (!PageUptodate(page)) SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (copied) { if (inode->i_size < to) @@ -877,7 +877,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); struct buffer_head *dibh; - unsigned int from = pos & (PAGE_CACHE_SIZE - 1); + unsigned int from = pos & (PAGE_SIZE - 1); unsigned int to = from + len; int ret; struct gfs2_trans *tr = current->journal_info; @@ -888,7 +888,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, ret = gfs2_meta_inode_buffer(ip, &dibh); if (unlikely(ret)) { unlock_page(page); - page_cache_release(page); + put_page(page); goto failed; } @@ -992,7 +992,7 @@ static void gfs2_invalidatepage(struct page *page, unsigned int offset, { struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host); unsigned int stop = offset + length; - int partial_page = (offset || length < PAGE_CACHE_SIZE); + int partial_page = (offset || length < PAGE_SIZE); struct buffer_head *bh, *head; unsigned long pos = 0; @@ -1082,7 +1082,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, * the first place, mapping->nr_pages will always be zero. */ if (mapping->nrpages) { - loff_t lstart = offset & ~(PAGE_CACHE_SIZE - 1); + loff_t lstart = offset & ~(PAGE_SIZE - 1); loff_t len = iov_iter_count(iter); loff_t end = PAGE_ALIGN(offset + len) - 1; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 0860f0b5b3f1..24ce1cdd434a 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -75,7 +75,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, dsize = dibh->b_size - sizeof(struct gfs2_dinode); memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize); - memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize); + memset(kaddr + dsize, 0, PAGE_SIZE - dsize); kunmap(page); SetPageUptodate(page); @@ -98,7 +98,7 @@ static int gfs2_unstuffer_page(struct gfs2_inode *ip, struct buffer_head *dibh, if (release) { unlock_page(page); - page_cache_release(page); + put_page(page); } return 0; @@ -932,8 +932,8 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) { struct inode *inode = mapping->host; struct gfs2_inode *ip = GFS2_I(inode); - unsigned long index = from >> PAGE_CACHE_SHIFT; - unsigned offset = from & (PAGE_CACHE_SIZE-1); + unsigned long index = from >> PAGE_SHIFT; + unsigned offset = from & (PAGE_SIZE-1); unsigned blocksize, iblock, length, pos; struct buffer_head *bh; struct page *page; @@ -945,7 +945,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from) blocksize = inode->i_sb->s_blocksize; length = blocksize - (offset & (blocksize - 1)); - iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); + iblock = index << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); @@ -989,7 +989,7 @@ static int gfs2_block_truncate_page(struct address_space 
*mapping, loff_t from) mark_buffer_dirty(bh); unlock: unlock_page(page); - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index c9384f932975..208efc70ad49 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -354,8 +354,8 @@ static int gfs2_allocate_page_backing(struct page *page) { struct inode *inode = page->mapping->host; struct buffer_head bh; - unsigned long size = PAGE_CACHE_SIZE; - u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + unsigned long size = PAGE_SIZE; + u64 lblock = page->index << (PAGE_SHIFT - inode->i_blkbits); do { bh.b_state = 0; @@ -386,7 +386,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) struct gfs2_sbd *sdp = GFS2_SB(inode); struct gfs2_alloc_parms ap = { .aflags = 0, }; unsigned long last_index; - u64 pos = page->index << PAGE_CACHE_SHIFT; + u64 pos = page->index << PAGE_SHIFT; unsigned int data_blocks, ind_blocks, rblocks; struct gfs2_holder gh; loff_t size; @@ -401,7 +401,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto out; - gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); + gfs2_size_hint(vma->vm_file, pos, PAGE_SIZE); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); @@ -411,7 +411,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); set_bit(GIF_SW_PAGED, &ip->i_flags); - if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) { + if (!gfs2_write_alloc_required(ip, pos, PAGE_SIZE)) { lock_page(page); if (!PageUptodate(page) || page->mapping != inode->i_mapping) { ret = -EAGAIN; @@ -424,7 +424,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) goto out_unlock; - gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); + gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks); ap.target = data_blocks + ind_blocks; ret = gfs2_quota_lock_check(ip, &ap); if (ret) @@ -447,7 +447,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) lock_page(page); ret = -EINVAL; size = i_size_read(inode); - last_index = (size - 1) >> PAGE_CACHE_SHIFT; + last_index = (size - 1) >> PAGE_SHIFT; /* Check page index against inode size */ if (size == 0 || (page->index > last_index)) goto out_trans_end; @@ -873,7 +873,7 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t rblocks += data_blocks ? 
data_blocks : 1; error = gfs2_trans_begin(sdp, rblocks, - PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); + PAGE_SIZE/sdp->sd_sb.sb_bsize); if (error) goto out_trans_fail; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index e137d96f1b17..0448524c11bc 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -124,7 +124,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) if (mapping == NULL) mapping = &sdp->sd_aspace; - shift = PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift; + shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; index = blkno >> shift; /* convert block to page */ bufnum = blkno - (index << shift); /* block buf index within page */ @@ -154,7 +154,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create) map_bh(bh, sdp->sd_vfs, blkno); unlock_page(page); - page_cache_release(page); + put_page(page); return bh; } diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index a39891344259..ce7d69a2fdc0 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -701,7 +701,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, unsigned to_write = bytes, pg_off = off; int done = 0; - blk = index << (PAGE_CACHE_SHIFT - sdp->sd_sb.sb_bsize_shift); + blk = index << (PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift); boff = off % bsize; page = find_or_create_page(mapping, index, GFP_NOFS); @@ -753,13 +753,13 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index, flush_dcache_page(page); kunmap_atomic(kaddr); unlock_page(page); - page_cache_release(page); + put_page(page); return 0; unlock_out: unlock_page(page); - page_cache_release(page); + put_page(page); return -EIO; } @@ -773,13 +773,13 @@ static int gfs2_write_disk_quota(struct gfs2_inode *ip, struct gfs2_quota *qp, nbytes = sizeof(struct gfs2_quota); - pg_beg = loc >> PAGE_CACHE_SHIFT; - pg_off = loc % PAGE_CACHE_SIZE; + pg_beg = loc >> PAGE_SHIFT; + pg_off = loc % PAGE_SIZE; /* If the quota straddles a page boundary, split the write in two */ - if ((pg_off + nbytes) > PAGE_CACHE_SIZE) { + if ((pg_off + nbytes) > PAGE_SIZE) { pg_oflow = 1; - overflow = (pg_off + nbytes) - PAGE_CACHE_SIZE; + overflow = (pg_off + nbytes) - PAGE_SIZE; } ptr = qp; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 07c0265aa195..99a0bdac8796 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -918,9 +918,8 @@ static int read_rindex_entry(struct gfs2_inode *ip) goto fail; rgd->rd_gl->gl_object = rgd; - rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_CACHE_MASK; - rgd->rd_gl->gl_vm.end = PAGE_CACHE_ALIGN((rgd->rd_addr + - rgd->rd_length) * bsize) - 1; + rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK; + rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1; rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr; rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED); if (rgd->rd_data > sdp->sd_max_rg_data) diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c index 221719eac5de..d77d844b668b 100644 --- a/fs/hfs/bnode.c +++ b/fs/hfs/bnode.c @@ -278,14 +278,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) mapping = tree->inode->i_mapping; off = (loff_t)cnid * tree->node_size; - block = off >> PAGE_CACHE_SHIFT; - node->page_offset = off & ~PAGE_CACHE_MASK; + block = off >> PAGE_SHIFT; + node->page_offset = off & ~PAGE_MASK; for (i = 0; i < tree->pages_per_bnode; i++) { page = read_mapping_page(mapping, block++, NULL); if (IS_ERR(page)) goto fail; if (PageError(page)) { - 
page_cache_release(page); + put_page(page); goto fail; } node->page[i] = page; @@ -401,7 +401,7 @@ void hfs_bnode_free(struct hfs_bnode *node) for (i = 0; i < node->tree->pages_per_bnode; i++) if (node->page[i]) - page_cache_release(node->page[i]); + put_page(node->page[i]); kfree(node); } @@ -429,11 +429,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) pagep = node->page; memset(kmap(*pagep) + node->page_offset, 0, - min((int)PAGE_CACHE_SIZE, (int)tree->node_size)); + min((int)PAGE_SIZE, (int)tree->node_size)); set_page_dirty(*pagep); kunmap(*pagep); for (i = 1; i < tree->pages_per_bnode; i++) { - memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); + memset(kmap(*++pagep), 0, PAGE_SIZE); set_page_dirty(*pagep); kunmap(*pagep); } diff --git a/fs/hfs/btree.c b/fs/hfs/btree.c index 1ab19e660e69..37cdd955eceb 100644 --- a/fs/hfs/btree.c +++ b/fs/hfs/btree.c @@ -116,14 +116,14 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke } tree->node_size_shift = ffs(size) - 1; - tree->pages_per_bnode = (tree->node_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT; kunmap(page); - page_cache_release(page); + put_page(page); return tree; fail_page: - page_cache_release(page); + put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfs_aops; iput(tree->inode); @@ -257,9 +257,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; idx = 0; for (;;) { @@ -279,7 +279,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) } } } - if (++off >= PAGE_CACHE_SIZE) { + if (++off >= PAGE_SIZE) { kunmap(*pagep); data = kmap(*++pagep); off = 0; @@ -302,9 +302,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) len = hfs_brec_lenoff(node, 0, &off16); off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; } } @@ -348,9 +348,9 @@ void hfs_bmap_free(struct hfs_bnode *node) len = hfs_brec_lenoff(node, 0, &off); } off += node->page_offset + nidx / 8; - page = node->page[off >> PAGE_CACHE_SHIFT]; + page = node->page[off >> PAGE_SHIFT]; data = kmap(page); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 6686bf39a5b5..cb1e5faa2fb7 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -91,8 +91,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask) if (!tree) return 0; - if (tree->node_size >= PAGE_CACHE_SIZE) { - nidx = page->index >> (tree->node_size_shift - PAGE_CACHE_SHIFT); + if (tree->node_size >= PAGE_SIZE) { + nidx = page->index >> (tree->node_size_shift - PAGE_SHIFT); spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, nidx); if (!node) @@ -105,8 +105,8 @@ static int hfs_releasepage(struct page *page, gfp_t mask) } spin_unlock(&tree->hash_lock); } else { - nidx = page->index << (PAGE_CACHE_SHIFT - tree->node_size_shift); - i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); + nidx = page->index << (PAGE_SHIFT - tree->node_size_shift); + i = 1 << (PAGE_SHIFT - tree->node_size_shift); spin_lock(&tree->hash_lock); do { node = hfs_bnode_findhash(tree, nidx++); diff --git a/fs/hfsplus/bitmap.c 
b/fs/hfsplus/bitmap.c index d2954451519e..c0ae274c0a22 100644 --- a/fs/hfsplus/bitmap.c +++ b/fs/hfsplus/bitmap.c @@ -13,7 +13,7 @@ #include "hfsplus_fs.h" #include "hfsplus_raw.h" -#define PAGE_CACHE_BITS (PAGE_CACHE_SIZE * 8) +#define PAGE_CACHE_BITS (PAGE_SIZE * 8) int hfsplus_block_allocate(struct super_block *sb, u32 size, u32 offset, u32 *max) diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c index 63924662aaf3..ce014ceb89ef 100644 --- a/fs/hfsplus/bnode.c +++ b/fs/hfsplus/bnode.c @@ -24,16 +24,16 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len) int l; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); - off &= ~PAGE_CACHE_MASK; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; - l = min_t(int, len, PAGE_CACHE_SIZE - off); + l = min_t(int, len, PAGE_SIZE - off); memcpy(buf, kmap(*pagep) + off, l); kunmap(*pagep); while ((len -= l) != 0) { buf += l; - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memcpy(buf, kmap(*++pagep), l); kunmap(*pagep); } @@ -77,17 +77,17 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len) int l; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); - off &= ~PAGE_CACHE_MASK; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; - l = min_t(int, len, PAGE_CACHE_SIZE - off); + l = min_t(int, len, PAGE_SIZE - off); memcpy(kmap(*pagep) + off, buf, l); set_page_dirty(*pagep); kunmap(*pagep); while ((len -= l) != 0) { buf += l; - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memcpy(kmap(*++pagep), buf, l); set_page_dirty(*pagep); kunmap(*pagep); @@ -107,16 +107,16 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len) int l; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); - off &= ~PAGE_CACHE_MASK; + pagep = node->page + (off >> PAGE_SHIFT); + off &= ~PAGE_MASK; - l = min_t(int, len, PAGE_CACHE_SIZE - off); + l = min_t(int, len, PAGE_SIZE - off); memset(kmap(*pagep) + off, 0, l); set_page_dirty(*pagep); kunmap(*pagep); while ((len -= l) != 0) { - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memset(kmap(*++pagep), 0, l); set_page_dirty(*pagep); kunmap(*pagep); @@ -136,20 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, tree = src_node->tree; src += src_node->page_offset; dst += dst_node->page_offset; - src_page = src_node->page + (src >> PAGE_CACHE_SHIFT); - src &= ~PAGE_CACHE_MASK; - dst_page = dst_node->page + (dst >> PAGE_CACHE_SHIFT); - dst &= ~PAGE_CACHE_MASK; + src_page = src_node->page + (src >> PAGE_SHIFT); + src &= ~PAGE_MASK; + dst_page = dst_node->page + (dst >> PAGE_SHIFT); + dst &= ~PAGE_MASK; if (src == dst) { - l = min_t(int, len, PAGE_CACHE_SIZE - src); + l = min_t(int, len, PAGE_SIZE - src); memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l); kunmap(*src_page); set_page_dirty(*dst_page); kunmap(*dst_page); while ((len -= l) != 0) { - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memcpy(kmap(*++dst_page), kmap(*++src_page), l); kunmap(*src_page); set_page_dirty(*dst_page); @@ -161,12 +161,12 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst, do { src_ptr = kmap(*src_page) + src; dst_ptr = kmap(*dst_page) + dst; - if (PAGE_CACHE_SIZE - src < PAGE_CACHE_SIZE - dst) { - l = PAGE_CACHE_SIZE - src; + if (PAGE_SIZE - src < PAGE_SIZE - dst) { + l = PAGE_SIZE - src; src = 0; dst += l; } else { - l = PAGE_CACHE_SIZE - dst; + l = PAGE_SIZE - dst; src += l; dst = 
0; } @@ -195,11 +195,11 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) dst += node->page_offset; if (dst > src) { src += len - 1; - src_page = node->page + (src >> PAGE_CACHE_SHIFT); - src = (src & ~PAGE_CACHE_MASK) + 1; + src_page = node->page + (src >> PAGE_SHIFT); + src = (src & ~PAGE_MASK) + 1; dst += len - 1; - dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); - dst = (dst & ~PAGE_CACHE_MASK) + 1; + dst_page = node->page + (dst >> PAGE_SHIFT); + dst = (dst & ~PAGE_MASK) + 1; if (src == dst) { while (src < len) { @@ -208,7 +208,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) set_page_dirty(*dst_page); kunmap(*dst_page); len -= src; - src = PAGE_CACHE_SIZE; + src = PAGE_SIZE; src_page--; dst_page--; } @@ -226,32 +226,32 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) dst_ptr = kmap(*dst_page) + dst; if (src < dst) { l = src; - src = PAGE_CACHE_SIZE; + src = PAGE_SIZE; dst -= l; } else { l = dst; src -= l; - dst = PAGE_CACHE_SIZE; + dst = PAGE_SIZE; } l = min(len, l); memmove(dst_ptr - l, src_ptr - l, l); kunmap(*src_page); set_page_dirty(*dst_page); kunmap(*dst_page); - if (dst == PAGE_CACHE_SIZE) + if (dst == PAGE_SIZE) dst_page--; else src_page--; } while ((len -= l)); } } else { - src_page = node->page + (src >> PAGE_CACHE_SHIFT); - src &= ~PAGE_CACHE_MASK; - dst_page = node->page + (dst >> PAGE_CACHE_SHIFT); - dst &= ~PAGE_CACHE_MASK; + src_page = node->page + (src >> PAGE_SHIFT); + src &= ~PAGE_MASK; + dst_page = node->page + (dst >> PAGE_SHIFT); + dst &= ~PAGE_MASK; if (src == dst) { - l = min_t(int, len, PAGE_CACHE_SIZE - src); + l = min_t(int, len, PAGE_SIZE - src); memmove(kmap(*dst_page) + src, kmap(*src_page) + src, l); kunmap(*src_page); @@ -259,7 +259,7 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) kunmap(*dst_page); while ((len -= l) != 0) { - l = min_t(int, len, PAGE_CACHE_SIZE); + l = min_t(int, len, PAGE_SIZE); memmove(kmap(*++dst_page), kmap(*++src_page), l); kunmap(*src_page); @@ -272,13 +272,13 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len) do { src_ptr = kmap(*src_page) + src; dst_ptr = kmap(*dst_page) + dst; - if (PAGE_CACHE_SIZE - src < - PAGE_CACHE_SIZE - dst) { - l = PAGE_CACHE_SIZE - src; + if (PAGE_SIZE - src < + PAGE_SIZE - dst) { + l = PAGE_SIZE - src; src = 0; dst += l; } else { - l = PAGE_CACHE_SIZE - dst; + l = PAGE_SIZE - dst; src += l; dst = 0; } @@ -444,14 +444,14 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) mapping = tree->inode->i_mapping; off = (loff_t)cnid << tree->node_size_shift; - block = off >> PAGE_CACHE_SHIFT; - node->page_offset = off & ~PAGE_CACHE_MASK; + block = off >> PAGE_SHIFT; + node->page_offset = off & ~PAGE_MASK; for (i = 0; i < tree->pages_per_bnode; block++, i++) { page = read_mapping_page(mapping, block, NULL); if (IS_ERR(page)) goto fail; if (PageError(page)) { - page_cache_release(page); + put_page(page); goto fail; } node->page[i] = page; @@ -569,7 +569,7 @@ void hfs_bnode_free(struct hfs_bnode *node) for (i = 0; i < node->tree->pages_per_bnode; i++) if (node->page[i]) - page_cache_release(node->page[i]); + put_page(node->page[i]); kfree(node); } @@ -597,11 +597,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num) pagep = node->page; memset(kmap(*pagep) + node->page_offset, 0, - min_t(int, PAGE_CACHE_SIZE, tree->node_size)); + min_t(int, PAGE_SIZE, tree->node_size)); set_page_dirty(*pagep); kunmap(*pagep); for (i = 1; i < 
tree->pages_per_bnode; i++) { - memset(kmap(*++pagep), 0, PAGE_CACHE_SIZE); + memset(kmap(*++pagep), 0, PAGE_SIZE); set_page_dirty(*pagep); kunmap(*pagep); } diff --git a/fs/hfsplus/btree.c b/fs/hfsplus/btree.c index 3345c7553edc..d9d1a36ba826 100644 --- a/fs/hfsplus/btree.c +++ b/fs/hfsplus/btree.c @@ -236,15 +236,15 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id) tree->node_size_shift = ffs(size) - 1; tree->pages_per_bnode = - (tree->node_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + (tree->node_size + PAGE_SIZE - 1) >> + PAGE_SHIFT; kunmap(page); - page_cache_release(page); + put_page(page); return tree; fail_page: - page_cache_release(page); + put_page(page); free_inode: tree->inode->i_mapping->a_ops = &hfsplus_aops; iput(tree->inode); @@ -380,9 +380,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; idx = 0; for (;;) { @@ -403,7 +403,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) } } } - if (++off >= PAGE_CACHE_SIZE) { + if (++off >= PAGE_SIZE) { kunmap(*pagep); data = kmap(*++pagep); off = 0; @@ -426,9 +426,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree) len = hfs_brec_lenoff(node, 0, &off16); off = off16; off += node->page_offset; - pagep = node->page + (off >> PAGE_CACHE_SHIFT); + pagep = node->page + (off >> PAGE_SHIFT); data = kmap(*pagep); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; } } @@ -475,9 +475,9 @@ void hfs_bmap_free(struct hfs_bnode *node) len = hfs_brec_lenoff(node, 0, &off); } off += node->page_offset + nidx / 8; - page = node->page[off >> PAGE_CACHE_SHIFT]; + page = node->page[off >> PAGE_SHIFT]; data = kmap(page); - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; m = 1 << (~nidx & 7); byte = data[off]; if (!(byte & m)) { diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 1a6394cdb54e..b28f39865c3a 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -87,9 +87,9 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) } if (!tree) return 0; - if (tree->node_size >= PAGE_CACHE_SIZE) { + if (tree->node_size >= PAGE_SIZE) { nidx = page->index >> - (tree->node_size_shift - PAGE_CACHE_SHIFT); + (tree->node_size_shift - PAGE_SHIFT); spin_lock(&tree->hash_lock); node = hfs_bnode_findhash(tree, nidx); if (!node) @@ -103,8 +103,8 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) spin_unlock(&tree->hash_lock); } else { nidx = page->index << - (PAGE_CACHE_SHIFT - tree->node_size_shift); - i = 1 << (PAGE_CACHE_SHIFT - tree->node_size_shift); + (PAGE_SHIFT - tree->node_size_shift); + i = 1 << (PAGE_SHIFT - tree->node_size_shift); spin_lock(&tree->hash_lock); do { node = hfs_bnode_findhash(tree, nidx++); diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index 5d54490a136d..c35911362ff9 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -438,7 +438,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent) err = -EFBIG; last_fs_block = sbi->total_blocks - 1; last_fs_page = (last_fs_block << sbi->alloc_blksz_shift) >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if ((last_fs_block > (sector_t)(~0ULL) >> (sbi->alloc_blksz_shift - 9)) || (last_fs_page > (pgoff_t)(~0ULL))) { diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index ab01530b4930..70e445ff0cff 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -220,7 +220,7 @@ 
check_attr_tree_state_again: index = 0; written = 0; - for (; written < node_size; index++, written += PAGE_CACHE_SIZE) { + for (; written < node_size; index++, written += PAGE_SIZE) { void *kaddr; page = read_mapping_page(mapping, index, NULL); @@ -231,11 +231,11 @@ check_attr_tree_state_again: kaddr = kmap_atomic(page); memcpy(kaddr, buf + written, - min_t(size_t, PAGE_CACHE_SIZE, node_size - written)); + min_t(size_t, PAGE_SIZE, node_size - written)); kunmap_atomic(kaddr); set_page_dirty(page); - page_cache_release(page); + put_page(page); } hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY); diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index d1abbee281d1..7016653f3e41 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -410,12 +410,12 @@ static int hostfs_writepage(struct page *page, struct writeback_control *wbc) struct inode *inode = mapping->host; char *buffer; loff_t base = page_offset(page); - int count = PAGE_CACHE_SIZE; - int end_index = inode->i_size >> PAGE_CACHE_SHIFT; + int count = PAGE_SIZE; + int end_index = inode->i_size >> PAGE_SHIFT; int err; if (page->index >= end_index) - count = inode->i_size & (PAGE_CACHE_SIZE-1); + count = inode->i_size & (PAGE_SIZE-1); buffer = kmap(page); @@ -447,7 +447,7 @@ static int hostfs_readpage(struct file *file, struct page *page) buffer = kmap(page); bytes_read = read_file(FILE_HOSTFS_I(file)->fd, &start, buffer, - PAGE_CACHE_SIZE); + PAGE_SIZE); if (bytes_read < 0) { ClearPageUptodate(page); SetPageError(page); @@ -455,7 +455,7 @@ static int hostfs_readpage(struct file *file, struct page *page) goto out; } - memset(buffer + bytes_read, 0, PAGE_CACHE_SIZE - bytes_read); + memset(buffer + bytes_read, 0, PAGE_SIZE - bytes_read); ClearPageError(page); SetPageUptodate(page); @@ -471,7 +471,7 @@ static int hostfs_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; *pagep = grab_cache_page_write_begin(mapping, index, flags); if (!*pagep) @@ -485,14 +485,14 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping, { struct inode *inode = mapping->host; void *buffer; - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); int err; buffer = kmap(page); err = write_file(FILE_HOSTFS_I(file)->fd, &pos, buffer + from, copied); kunmap(page); - if (!PageUptodate(page) && err == PAGE_CACHE_SIZE) + if (!PageUptodate(page) && err == PAGE_SIZE) SetPageUptodate(page); /* @@ -502,7 +502,7 @@ static int hostfs_write_end(struct file *file, struct address_space *mapping, if (err > 0 && (pos > inode->i_size)) inode->i_size = pos; unlock_page(page); - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index e1f465a389d5..4ea71eba40a5 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -213,12 +213,12 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset, int i, chunksize; /* Find which 4k chunk and offset with in that chunk */ - i = offset >> PAGE_CACHE_SHIFT; - offset = offset & ~PAGE_CACHE_MASK; + i = offset >> PAGE_SHIFT; + offset = offset & ~PAGE_MASK; while (size) { size_t n; - chunksize = PAGE_CACHE_SIZE; + chunksize = PAGE_SIZE; if (offset) chunksize -= offset; if (chunksize > size) @@ -237,7 +237,7 @@ hugetlbfs_read_actor(struct page *page, unsigned long offset, /* * Support for read() - Find the page 
attached to f_mapping and copy out the * data. Its *very* similar to do_generic_mapping_read(), we can't use that - * since it has PAGE_CACHE_SIZE assumptions. + * since it has PAGE_SIZE assumptions. */ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { @@ -285,7 +285,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to) * We have the page, copy it to user space buffer. */ copied = hugetlbfs_read_actor(page, offset, to, nr); - page_cache_release(page); + put_page(page); } offset += copied; retval += copied; diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c index f311bf084015..2e4e834d1a98 100644 --- a/fs/isofs/compress.c +++ b/fs/isofs/compress.c @@ -26,7 +26,7 @@ #include "zisofs.h" /* This should probably be global. */ -static char zisofs_sink_page[PAGE_CACHE_SIZE]; +static char zisofs_sink_page[PAGE_SIZE]; /* * This contains the zlib memory allocation and the mutex for the @@ -70,11 +70,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, for ( i = 0 ; i < pcount ; i++ ) { if (!pages[i]) continue; - memset(page_address(pages[i]), 0, PAGE_CACHE_SIZE); + memset(page_address(pages[i]), 0, PAGE_SIZE); flush_dcache_page(pages[i]); SetPageUptodate(pages[i]); } - return ((loff_t)pcount) << PAGE_CACHE_SHIFT; + return ((loff_t)pcount) << PAGE_SHIFT; } /* Because zlib is not thread-safe, do all the I/O at the top. */ @@ -121,11 +121,11 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start, if (pages[curpage]) { stream.next_out = page_address(pages[curpage]) + poffset; - stream.avail_out = PAGE_CACHE_SIZE - poffset; + stream.avail_out = PAGE_SIZE - poffset; poffset = 0; } else { stream.next_out = (void *)&zisofs_sink_page; - stream.avail_out = PAGE_CACHE_SIZE; + stream.avail_out = PAGE_SIZE; } } if (!stream.avail_in) { @@ -220,14 +220,14 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, * pages with the data we have anyway... 
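hugetlbfs_read_actor() above walks a huge page in base-page chunks: the first chunk may start mid-page and the last may be short, everything in between is full. The same loop shape in plain C, with the 4 KiB chunk size and the request values being assumptions:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumption: 4 KiB base pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* print the (chunk, offset, length) pieces a copy would be split into */
static void split_copy(unsigned long offset, unsigned long size)
{
	unsigned long i = offset >> PAGE_SHIFT;   /* first chunk index   */
	offset &= PAGE_SIZE - 1;                  /* offset within chunk */

	while (size) {
		unsigned long chunksize = PAGE_SIZE - offset;

		if (chunksize > size)
			chunksize = size;
		printf("chunk %lu: copy %lu bytes at offset %lu\n",
		       i, chunksize, offset);
		size -= chunksize;
		offset = 0;                       /* later chunks start at 0 */
		i++;
	}
}

int main(void)
{
	split_copy(5000, 10000);                  /* hypothetical request */
	return 0;
}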
*/ start_off = page_offset(pages[full_page]); - end_off = min_t(loff_t, start_off + PAGE_CACHE_SIZE, inode->i_size); + end_off = min_t(loff_t, start_off + PAGE_SIZE, inode->i_size); cstart_block = start_off >> zisofs_block_shift; cend_block = (end_off + (1 << zisofs_block_shift) - 1) >> zisofs_block_shift; - WARN_ON(start_off - (full_page << PAGE_CACHE_SHIFT) != - ((cstart_block << zisofs_block_shift) & PAGE_CACHE_MASK)); + WARN_ON(start_off - (full_page << PAGE_SHIFT) != + ((cstart_block << zisofs_block_shift) & PAGE_MASK)); /* Find the pointer to this specific chunk */ /* Note: we're not using isonum_731() here because the data is known aligned */ @@ -260,10 +260,10 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, ret = zisofs_uncompress_block(inode, block_start, block_end, pcount, pages, poffset, &err); poffset += ret; - pages += poffset >> PAGE_CACHE_SHIFT; - pcount -= poffset >> PAGE_CACHE_SHIFT; - full_page -= poffset >> PAGE_CACHE_SHIFT; - poffset &= ~PAGE_CACHE_MASK; + pages += poffset >> PAGE_SHIFT; + pcount -= poffset >> PAGE_SHIFT; + full_page -= poffset >> PAGE_SHIFT; + poffset &= ~PAGE_MASK; if (err) { brelse(bh); @@ -282,7 +282,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount, if (poffset && *pages) { memset(page_address(*pages) + poffset, 0, - PAGE_CACHE_SIZE - poffset); + PAGE_SIZE - poffset); flush_dcache_page(*pages); SetPageUptodate(*pages); } @@ -302,12 +302,12 @@ static int zisofs_readpage(struct file *file, struct page *page) int i, pcount, full_page; unsigned int zisofs_block_shift = ISOFS_I(inode)->i_format_parm[1]; unsigned int zisofs_pages_per_cblock = - PAGE_CACHE_SHIFT <= zisofs_block_shift ? - (1 << (zisofs_block_shift - PAGE_CACHE_SHIFT)) : 0; + PAGE_SHIFT <= zisofs_block_shift ? + (1 << (zisofs_block_shift - PAGE_SHIFT)) : 0; struct page *pages[max_t(unsigned, zisofs_pages_per_cblock, 1)]; pgoff_t index = page->index, end_index; - end_index = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + end_index = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; /* * If this page is wholly outside i_size we just return zero; * do_generic_file_read() will handle this for us @@ -318,7 +318,7 @@ static int zisofs_readpage(struct file *file, struct page *page) return 0; } - if (PAGE_CACHE_SHIFT <= zisofs_block_shift) { + if (PAGE_SHIFT <= zisofs_block_shift) { /* We have already been given one page, this is the one we must do. */ full_page = index & (zisofs_pages_per_cblock - 1); @@ -351,7 +351,7 @@ static int zisofs_readpage(struct file *file, struct page *page) kunmap(pages[i]); unlock_page(pages[i]); if (i != full_page) - page_cache_release(pages[i]); + put_page(pages[i]); } } diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index bcd2d41b318a..131dedc920d8 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -1021,7 +1021,7 @@ int isofs_get_blocks(struct inode *inode, sector_t iblock, * the page with useless information without generating any * I/O errors. 
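zisofs_fill_pages() above converts a byte range into a range of compressed blocks: round the start down and the end up by the per-file block shift. A sketch; the 32 KiB block size (shift of 15) is a typical value and an assumption here, as is the byte range:

#include <stdio.h>

int main(void)
{
	unsigned int  bshift = 15;          /* assumed 32 KiB zisofs blocks */
	unsigned long start  = 40960;       /* hypothetical range [start, end) */
	unsigned long end    = 90000;

	unsigned long cstart = start >> bshift;                        /* down */
	unsigned long cend   = (end + (1UL << bshift) - 1) >> bshift;  /* up   */

	printf("blocks %lu..%lu cover bytes %lu..%lu\n",
	       cstart, cend - 1, start, end - 1);
	return 0;
}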
*/ - if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { + if (b_off > ((inode->i_size + PAGE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) { printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n", __func__, b_off, (unsigned long long)inode->i_size); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 517f2de784cf..2ad98d6e19f4 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -81,11 +81,11 @@ static void release_buffer_page(struct buffer_head *bh) if (!trylock_page(page)) goto nope; - page_cache_get(page); + get_page(page); __brelse(bh); try_to_free_buffers(page); unlock_page(page); - page_cache_release(page); + put_page(page); return; nope: diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index de73a9516a54..435f0b26ac20 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -2221,7 +2221,7 @@ void jbd2_journal_ack_err(journal_t *journal) int jbd2_journal_blocks_per_page(struct inode *inode) { - return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); + return 1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits); } /* diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 01e4652d88f6..67c103867bf8 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2263,7 +2263,7 @@ int jbd2_journal_invalidatepage(journal_t *journal, struct buffer_head *head, *bh, *next; unsigned int stop = offset + length; unsigned int curr_off = 0; - int partial_page = (offset || length < PAGE_CACHE_SIZE); + int partial_page = (offset || length < PAGE_SIZE); int may_free = 1; int ret = 0; @@ -2272,7 +2272,7 @@ int jbd2_journal_invalidatepage(journal_t *journal, if (!page_has_buffers(page)) return 0; - BUG_ON(stop > PAGE_CACHE_SIZE || stop < length); + BUG_ON(stop > PAGE_SIZE || stop < length); /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be diff --git a/fs/jffs2/debug.c b/fs/jffs2/debug.c index 1090eb64b90d..9d26b1b9fc01 100644 --- a/fs/jffs2/debug.c +++ b/fs/jffs2/debug.c @@ -95,15 +95,15 @@ __jffs2_dbg_fragtree_paranoia_check_nolock(struct jffs2_inode_info *f) rather than mucking around with actually reading the node and checking the compression type, which is the real way to tell a hole node. */ - if (frag->ofs & (PAGE_CACHE_SIZE-1) && frag_prev(frag) - && frag_prev(frag)->size < PAGE_CACHE_SIZE && frag_prev(frag)->node) { + if (frag->ofs & (PAGE_SIZE-1) && frag_prev(frag) + && frag_prev(frag)->size < PAGE_SIZE && frag_prev(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x had a previous non-hole frag in the same page. Tell dwmw2.\n", ref_offset(fn->raw)); bitched = 1; } - if ((frag->ofs+frag->size) & (PAGE_CACHE_SIZE-1) && frag_next(frag) - && frag_next(frag)->size < PAGE_CACHE_SIZE && frag_next(frag)->node) { + if ((frag->ofs+frag->size) & (PAGE_SIZE-1) && frag_next(frag) + && frag_next(frag)->size < PAGE_SIZE && frag_next(frag)->node) { JFFS2_ERROR("REF_PRISTINE node at 0x%08x (%08x-%08x) had a following non-hole frag in the same page. 
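The jbd2 hunks above carry two more shift idioms: blocks-per-page is 1 << (PAGE_SHIFT - blkbits), and an invalidation is "partial" exactly when it does not span offset 0 through PAGE_SIZE. Sketch, assuming 4 KiB pages and 1 KiB filesystem blocks:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1U << PAGE_SHIFT)

int main(void)
{
	unsigned int blkbits = 10;        /* assumed 1 KiB blocks */
	unsigned int blocks_per_page = 1U << (PAGE_SHIFT - blkbits);

	unsigned int offset = 0, length = 2048;  /* hypothetical invalidation */
	int partial_page = (offset || length < PAGE_SIZE);

	printf("%u blocks/page; invalidation is %s\n",
	       blocks_per_page, partial_page ? "partial" : "whole-page");
	return 0;
}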
Tell dwmw2.\n", ref_offset(fn->raw), frag->ofs, frag->ofs+frag->size); bitched = 1; diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index cad86bac3453..0e62dec3effc 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -87,14 +87,15 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) int ret; jffs2_dbg(2, "%s(): ino #%lu, page at offset 0x%lx\n", - __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT); + __func__, inode->i_ino, pg->index << PAGE_SHIFT); BUG_ON(!PageLocked(pg)); pg_buf = kmap(pg); /* FIXME: Can kmap fail? */ - ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE); + ret = jffs2_read_inode_range(c, f, pg_buf, pg->index << PAGE_SHIFT, + PAGE_SIZE); if (ret) { ClearPageUptodate(pg); @@ -137,8 +138,8 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, struct page *pg; struct inode *inode = mapping->host; struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); - pgoff_t index = pos >> PAGE_CACHE_SHIFT; - uint32_t pageofs = index << PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; + uint32_t pageofs = index << PAGE_SHIFT; int ret = 0; pg = grab_cache_page_write_begin(mapping, index, flags); @@ -230,7 +231,7 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, out_page: unlock_page(pg); - page_cache_release(pg); + put_page(pg); return ret; } @@ -245,14 +246,14 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); struct jffs2_raw_inode *ri; - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + copied; unsigned aligned_start = start & ~3; int ret = 0; uint32_t writtenlen = 0; jffs2_dbg(1, "%s(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n", - __func__, inode->i_ino, pg->index << PAGE_CACHE_SHIFT, + __func__, inode->i_ino, pg->index << PAGE_SHIFT, start, end, pg->flags); /* We need to avoid deadlock with page_cache_read() in @@ -261,7 +262,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, to re-lock it. */ BUG_ON(!PageUptodate(pg)); - if (end == PAGE_CACHE_SIZE) { + if (end == PAGE_SIZE) { /* When writing out the end of a page, write out the _whole_ page. This helps to reduce the number of nodes in files which have many short writes, like @@ -275,7 +276,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, jffs2_dbg(1, "%s(): Allocation of raw inode failed\n", __func__); unlock_page(pg); - page_cache_release(pg); + put_page(pg); return -ENOMEM; } @@ -292,7 +293,7 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, kmap(pg); ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + aligned_start, - (pg->index << PAGE_CACHE_SHIFT) + aligned_start, + (pg->index << PAGE_SHIFT) + aligned_start, end - aligned_start, &writtenlen); kunmap(pg); @@ -329,6 +330,6 @@ static int jffs2_write_end(struct file *filp, struct address_space *mapping, jffs2_dbg(1, "%s() returning %d\n", __func__, writtenlen > 0 ? writtenlen : ret); unlock_page(pg); - page_cache_release(pg); + put_page(pg); return writtenlen > 0 ? 
writtenlen : ret; } diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index bead25ae8fe4..ae2ebb26b446 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -586,8 +586,8 @@ int jffs2_do_fill_super(struct super_block *sb, void *data, int silent) goto out_root; sb->s_maxbytes = 0xFFFFFFFF; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = JFFS2_SUPER_MAGIC; if (!(sb->s_flags & MS_RDONLY)) jffs2_start_garbage_collect_thread(c); @@ -685,7 +685,7 @@ unsigned char *jffs2_gc_fetch_page(struct jffs2_sb_info *c, struct inode *inode = OFNI_EDONI_2SFFJ(f); struct page *pg; - pg = read_cache_page(inode->i_mapping, offset >> PAGE_CACHE_SHIFT, + pg = read_cache_page(inode->i_mapping, offset >> PAGE_SHIFT, (void *)jffs2_do_readpage_unlock, inode); if (IS_ERR(pg)) return (void *)pg; @@ -701,7 +701,7 @@ void jffs2_gc_release_page(struct jffs2_sb_info *c, struct page *pg = (void *)*priv; kunmap(pg); - page_cache_release(pg); + put_page(pg); } static int jffs2_flash_setup(struct jffs2_sb_info *c) { diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c index 7e553f286775..9ed0f26cf023 100644 --- a/fs/jffs2/gc.c +++ b/fs/jffs2/gc.c @@ -552,7 +552,7 @@ static int jffs2_garbage_collect_live(struct jffs2_sb_info *c, struct jffs2_era goto upnout; } /* We found a datanode. Do the GC */ - if((start >> PAGE_CACHE_SHIFT) < ((end-1) >> PAGE_CACHE_SHIFT)) { + if((start >> PAGE_SHIFT) < ((end-1) >> PAGE_SHIFT)) { /* It crosses a page boundary. Therefore, it must be a hole. */ ret = jffs2_garbage_collect_hole(c, jeb, f, fn, start, end); } else { @@ -1192,8 +1192,8 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era struct jffs2_node_frag *frag; uint32_t min, max; - min = start & ~(PAGE_CACHE_SIZE-1); - max = min + PAGE_CACHE_SIZE; + min = start & ~(PAGE_SIZE-1); + max = min + PAGE_SIZE; frag = jffs2_lookup_node_frag(&f->fragtree, start); @@ -1351,7 +1351,7 @@ static int jffs2_garbage_collect_dnode(struct jffs2_sb_info *c, struct jffs2_era cdatalen = min_t(uint32_t, alloclen - sizeof(ri), end - offset); datalen = end - offset; - writebuf = pg_ptr + (offset & (PAGE_CACHE_SIZE -1)); + writebuf = pg_ptr + (offset & (PAGE_SIZE -1)); comprtype = jffs2_compress(c, f, writebuf, &comprbuf, &datalen, &cdatalen); diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index 9a5449bc3afb..b86c78d178c6 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c @@ -90,7 +90,7 @@ uint32_t jffs2_truncate_fragtree(struct jffs2_sb_info *c, struct rb_root *list, /* If the last fragment starts at the RAM page boundary, it is * REF_PRISTINE irrespective of its size. */ - if (frag->node && (frag->ofs & (PAGE_CACHE_SIZE - 1)) == 0) { + if (frag->node && (frag->ofs & (PAGE_SIZE - 1)) == 0) { dbg_fragtree2("marking the last fragment 0x%08x-0x%08x REF_PRISTINE.\n", frag->ofs, frag->ofs + frag->size); frag->node->raw->flash_offset = ref_offset(frag->node->raw) | REF_PRISTINE; @@ -237,7 +237,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r If so, both 'this' and the new node get marked REF_NORMAL so the GC can take a look. 
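jffs2_garbage_collect_live() above infers a hole because a jffs2 data node never crosses a page boundary: compare the page index of the first byte with that of the last byte (end - 1, not end, so an exactly page-aligned end does not count as crossing). Sketch of that predicate:

#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

/* does the byte range [start, end) span more than one page? */
static int crosses_page(unsigned int start, unsigned int end)
{
	return (start >> PAGE_SHIFT) < ((end - 1) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d %d %d\n",
	       crosses_page(0, 4096),      /* 0: ends exactly on a boundary */
	       crosses_page(4000, 4200),   /* 1: straddles pages 0 and 1    */
	       crosses_page(4096, 8192));  /* 0: fully inside page 1        */
	return 0;
}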
*/ - if (lastend && (lastend-1) >> PAGE_CACHE_SHIFT == newfrag->ofs >> PAGE_CACHE_SHIFT) { + if (lastend && (lastend-1) >> PAGE_SHIFT == newfrag->ofs >> PAGE_SHIFT) { if (this->node) mark_ref_normal(this->node->raw); mark_ref_normal(newfrag->node->raw); @@ -382,7 +382,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in /* If we now share a page with other nodes, mark either previous or next node REF_NORMAL, as appropriate. */ - if (newfrag->ofs & (PAGE_CACHE_SIZE-1)) { + if (newfrag->ofs & (PAGE_SIZE-1)) { struct jffs2_node_frag *prev = frag_prev(newfrag); mark_ref_normal(fn->raw); @@ -391,7 +391,7 @@ int jffs2_add_full_dnode_to_inode(struct jffs2_sb_info *c, struct jffs2_inode_in mark_ref_normal(prev->node->raw); } - if ((newfrag->ofs+newfrag->size) & (PAGE_CACHE_SIZE-1)) { + if ((newfrag->ofs+newfrag->size) & (PAGE_SIZE-1)) { struct jffs2_node_frag *next = frag_next(newfrag); if (next) { diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c index b634de4c8101..7fb187ab2682 100644 --- a/fs/jffs2/write.c +++ b/fs/jffs2/write.c @@ -172,8 +172,8 @@ struct jffs2_full_dnode *jffs2_write_dnode(struct jffs2_sb_info *c, struct jffs2 beginning of a page and runs to the end of the file, or if it's a hole node, mark it REF_PRISTINE, else REF_NORMAL. */ - if ((je32_to_cpu(ri->dsize) >= PAGE_CACHE_SIZE) || - ( ((je32_to_cpu(ri->offset)&(PAGE_CACHE_SIZE-1))==0) && + if ((je32_to_cpu(ri->dsize) >= PAGE_SIZE) || + ( ((je32_to_cpu(ri->offset)&(PAGE_SIZE-1))==0) && (je32_to_cpu(ri->dsize)+je32_to_cpu(ri->offset) == je32_to_cpu(ri->isize)))) { flash_ofs |= REF_PRISTINE; } else { @@ -366,7 +366,8 @@ int jffs2_write_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f, break; } mutex_lock(&f->sem); - datalen = min_t(uint32_t, writelen, PAGE_CACHE_SIZE - (offset & (PAGE_CACHE_SIZE-1))); + datalen = min_t(uint32_t, writelen, + PAGE_SIZE - (offset & (PAGE_SIZE-1))); cdatalen = min_t(uint32_t, alloclen - sizeof(*ri), datalen); comprtype = jffs2_compress(c, f, buf, &comprbuf, &datalen, &cdatalen); diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index a3eb316b1ac3..b60e015cc757 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c @@ -80,7 +80,7 @@ static inline void lock_metapage(struct metapage *mp) static struct kmem_cache *metapage_cache; static mempool_t *metapage_mempool; -#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE) +#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE) #if MPS_PER_PAGE > 1 @@ -316,7 +316,7 @@ static void last_write_complete(struct page *page) struct metapage *mp; unsigned int offset; - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (mp && test_bit(META_io, &mp->flag)) { if (mp->lsn) @@ -366,12 +366,12 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) int bad_blocks = 0; page_start = (sector_t)page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); BUG_ON(!PageLocked(page)); BUG_ON(PageWriteback(page)); set_page_writeback(page); - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (!mp || !test_bit(META_dirty, &mp->flag)) @@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc) bio = NULL; } else inc_io(page); - xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits; + xlen = (PAGE_SIZE - offset) >> inode->i_blkbits; pblock 
= metapage_get_blocks(inode, lblock, &xlen); if (!pblock) { printk(KERN_ERR "JFS: metapage_get_blocks failed\n"); @@ -485,7 +485,7 @@ static int metapage_readpage(struct file *fp, struct page *page) struct inode *inode = page->mapping->host; struct bio *bio = NULL; int block_offset; - int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits; + int blocks_per_page = PAGE_SIZE >> inode->i_blkbits; sector_t page_start; /* address of page in fs blocks */ sector_t pblock; int xlen; @@ -494,7 +494,7 @@ static int metapage_readpage(struct file *fp, struct page *page) BUG_ON(!PageLocked(page)); page_start = (sector_t)page->index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); block_offset = 0; while (block_offset < blocks_per_page) { @@ -542,7 +542,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) int ret = 1; int offset; - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (!mp) @@ -568,7 +568,7 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) static void metapage_invalidatepage(struct page *page, unsigned int offset, unsigned int length) { - BUG_ON(offset || length < PAGE_CACHE_SIZE); + BUG_ON(offset || length < PAGE_SIZE); BUG_ON(PageWriteback(page)); @@ -599,10 +599,10 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock, inode->i_ino, lblock, absolute); l2bsize = inode->i_blkbits; - l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize; + l2BlocksPerPage = PAGE_SHIFT - l2bsize; page_index = lblock >> l2BlocksPerPage; page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize; - if ((page_offset + size) > PAGE_CACHE_SIZE) { + if ((page_offset + size) > PAGE_SIZE) { jfs_err("MetaData crosses page boundary!!"); jfs_err("lblock = %lx, size = %d", lblock, size); dump_stack(); @@ -621,7 +621,7 @@ struct metapage *__get_metapage(struct inode *inode, unsigned long lblock, mapping = inode->i_mapping; } - if (new && (PSIZE == PAGE_CACHE_SIZE)) { + if (new && (PSIZE == PAGE_SIZE)) { page = grab_cache_page(mapping, page_index); if (!page) { jfs_err("grab_cache_page failed!"); @@ -693,7 +693,7 @@ unlock: void grab_metapage(struct metapage * mp) { jfs_info("grab_metapage: mp = 0x%p", mp); - page_cache_get(mp->page); + get_page(mp->page); lock_page(mp->page); mp->count++; lock_metapage(mp); @@ -706,12 +706,12 @@ void force_metapage(struct metapage *mp) jfs_info("force_metapage: mp = 0x%p", mp); set_bit(META_forcewrite, &mp->flag); clear_bit(META_sync, &mp->flag); - page_cache_get(page); + get_page(page); lock_page(page); set_page_dirty(page); write_one_page(page, 1); clear_bit(META_forcewrite, &mp->flag); - page_cache_release(page); + put_page(page); } void hold_metapage(struct metapage *mp) @@ -726,7 +726,7 @@ void put_metapage(struct metapage *mp) unlock_page(mp->page); return; } - page_cache_get(mp->page); + get_page(mp->page); mp->count++; lock_metapage(mp); unlock_page(mp->page); @@ -746,7 +746,7 @@ void release_metapage(struct metapage * mp) assert(mp->count); if (--mp->count || mp->nohomeok) { unlock_page(page); - page_cache_release(page); + put_page(page); return; } @@ -764,13 +764,13 @@ void release_metapage(struct metapage * mp) drop_metapage(page, mp); unlock_page(page); - page_cache_release(page); + put_page(page); } void __invalidate_metapages(struct inode *ip, s64 addr, int len) { sector_t lblock; - int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits; + int l2BlocksPerPage = PAGE_SHIFT - 
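jfs above goes the other way: its metapages are a fixed 4 KiB (PSIZE), which can be smaller than the page, so a page holds MPS_PER_PAGE of them and every per-page walk steps the offset by PSIZE. A sketch of that stride; the 16 KiB PAGE_SIZE is an assumption chosen so MPS_PER_PAGE is greater than one:

#include <stdio.h>

#define L2PSIZE    12U
#define PSIZE      (1U << L2PSIZE)    /* jfs metapage size: 4 KiB */
#define PAGE_SIZE  16384U             /* assumed large-page configuration */
#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

int main(void)
{
	unsigned int offset, n = 0;

	/* visit every metapage slot in a page, as last_write_complete() does */
	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE)
		n++;

	printf("%u slots walked, MPS_PER_PAGE = %u\n", n, MPS_PER_PAGE);
	return 0;
}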
ip->i_blkbits; int BlocksPerPage = 1 << l2BlocksPerPage; /* All callers are interested in block device's mapping */ struct address_space *mapping = @@ -788,7 +788,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len) page = find_lock_page(mapping, lblock >> l2BlocksPerPage); if (!page) continue; - for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) { + for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) { mp = page_to_mp(page, offset); if (!mp) continue; @@ -803,7 +803,7 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len) remove_from_logsync(mp); } unlock_page(page); - page_cache_release(page); + put_page(page); } } diff --git a/fs/jfs/jfs_metapage.h b/fs/jfs/jfs_metapage.h index 337e9e51ac06..a869fb4a20d6 100644 --- a/fs/jfs/jfs_metapage.h +++ b/fs/jfs/jfs_metapage.h @@ -106,7 +106,7 @@ static inline void metapage_nohomeok(struct metapage *mp) lock_page(page); if (!mp->nohomeok++) { mark_metapage_dirty(mp); - page_cache_get(page); + get_page(page); wait_on_page_writeback(page); } unlock_page(page); @@ -128,7 +128,7 @@ static inline void metapage_wait_for_io(struct metapage *mp) static inline void _metapage_homeok(struct metapage *mp) { if (!--mp->nohomeok) - page_cache_release(mp->page); + put_page(mp->page); } static inline void metapage_homeok(struct metapage *mp) diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 4f5d85ba8e23..78d599198bf5 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -596,7 +596,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) * Page cache is indexed by long. * I would use MAX_LFS_FILESIZE, but it's only half as big */ - sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, + sb->s_maxbytes = min(((u64) PAGE_SIZE << 32) - 1, (u64)sb->s_maxbytes); #endif sb->s_time_gran = 1; diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index b67dbccdaf88..f73541fbe7af 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -138,8 +138,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic) struct dentry *root; info->sb = sb; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = magic; sb->s_op = &kernfs_sops; sb->s_time_gran = 1; diff --git a/fs/libfs.c b/fs/libfs.c index 0ca80b2af420..f3fa82ce9b70 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -25,7 +25,7 @@ int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, { struct inode *inode = d_inode(dentry); generic_fillattr(inode, stat); - stat->blocks = inode->i_mapping->nrpages << (PAGE_CACHE_SHIFT - 9); + stat->blocks = inode->i_mapping->nrpages << (PAGE_SHIFT - 9); return 0; } EXPORT_SYMBOL(simple_getattr); @@ -33,7 +33,7 @@ EXPORT_SYMBOL(simple_getattr); int simple_statfs(struct dentry *dentry, struct kstatfs *buf) { buf->f_type = dentry->d_sb->s_magic; - buf->f_bsize = PAGE_CACHE_SIZE; + buf->f_bsize = PAGE_SIZE; buf->f_namelen = NAME_MAX; return 0; } @@ -395,7 +395,7 @@ int simple_write_begin(struct file *file, struct address_space *mapping, struct page *page; pgoff_t index; - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -403,10 +403,10 @@ int simple_write_begin(struct file *file, struct address_space *mapping, *pagep = page; - if (!PageUptodate(page) && (len != PAGE_CACHE_SIZE)) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + if (!PageUptodate(page) && (len != PAGE_SIZE)) { + unsigned from = pos & (PAGE_SIZE - 1); - 
zero_user_segments(page, 0, from, from + len, PAGE_CACHE_SIZE); + zero_user_segments(page, 0, from, from + len, PAGE_SIZE); } return 0; } @@ -442,7 +442,7 @@ int simple_write_end(struct file *file, struct address_space *mapping, /* zero the stale part of the page if we did a short copy */ if (copied < len) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + unsigned from = pos & (PAGE_SIZE - 1); zero_user(page, from + copied, len - copied); } @@ -458,7 +458,7 @@ int simple_write_end(struct file *file, struct address_space *mapping, set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -477,8 +477,8 @@ int simple_fill_super(struct super_block *s, unsigned long magic, struct dentry *dentry; int i; - s->s_blocksize = PAGE_CACHE_SIZE; - s->s_blocksize_bits = PAGE_CACHE_SHIFT; + s->s_blocksize = PAGE_SIZE; + s->s_blocksize_bits = PAGE_SHIFT; s->s_magic = magic; s->s_op = &simple_super_operations; s->s_time_gran = 1; @@ -994,12 +994,12 @@ int generic_check_addressable(unsigned blocksize_bits, u64 num_blocks) { u64 last_fs_block = num_blocks - 1; u64 last_fs_page = - last_fs_block >> (PAGE_CACHE_SHIFT - blocksize_bits); + last_fs_block >> (PAGE_SHIFT - blocksize_bits); if (unlikely(num_blocks == 0)) return 0; - if ((blocksize_bits < 9) || (blocksize_bits > PAGE_CACHE_SHIFT)) + if ((blocksize_bits < 9) || (blocksize_bits > PAGE_SHIFT)) return -EINVAL; if ((last_fs_block > (sector_t)(~0ULL) >> (blocksize_bits - 9)) || diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c index a709d80c8ebc..cc26f8f215f5 100644 --- a/fs/logfs/dev_bdev.c +++ b/fs/logfs/dev_bdev.c @@ -64,7 +64,7 @@ static void writeseg_end_io(struct bio *bio) bio_for_each_segment_all(bvec, bio, i) { end_page_writeback(bvec->bv_page); - page_cache_release(bvec->bv_page); + put_page(bvec->bv_page); } bio_put(bio); if (atomic_dec_and_test(&super->s_pending_writes)) diff --git a/fs/logfs/dev_mtd.c b/fs/logfs/dev_mtd.c index 9c501449450d..b76a62b1978f 100644 --- a/fs/logfs/dev_mtd.c +++ b/fs/logfs/dev_mtd.c @@ -46,9 +46,9 @@ static int loffs_mtd_write(struct super_block *sb, loff_t ofs, size_t len, BUG_ON((ofs >= mtd->size) || (len > mtd->size - ofs)); BUG_ON(ofs != (ofs >> super->s_writeshift) << super->s_writeshift); - BUG_ON(len > PAGE_CACHE_SIZE); - page_start = ofs & PAGE_CACHE_MASK; - page_end = PAGE_CACHE_ALIGN(ofs + len) - 1; + BUG_ON(len > PAGE_SIZE); + page_start = ofs & PAGE_MASK; + page_end = PAGE_ALIGN(ofs + len) - 1; ret = mtd_write(mtd, ofs, len, &retlen, buf); if (ret || (retlen != len)) return -EIO; @@ -82,7 +82,7 @@ static int logfs_mtd_erase_mapping(struct super_block *sb, loff_t ofs, if (!page) continue; memset(page_address(page), 0xFF, PAGE_SIZE); - page_cache_release(page); + put_page(page); } return 0; } @@ -195,7 +195,7 @@ static int __logfs_mtd_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, err = loffs_mtd_write(sb, page->index << PAGE_SHIFT, PAGE_SIZE, page_address(page)); unlock_page(page); - page_cache_release(page); + put_page(page); if (err) return err; } diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 542468e9bfb4..ddbed2be5366 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -183,7 +183,7 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry) if (name->len != be16_to_cpu(dd->namelen) || memcmp(name->name, dd->name, name->len)) { kunmap_atomic(dd); - page_cache_release(page); + put_page(page); continue; } @@ -238,7 +238,7 @@ static int logfs_unlink(struct inode *dir, struct dentry *dentry) return PTR_ERR(page); } 
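Most of the churn above and below is the mechanical rename page_cache_get/page_cache_release -> get_page/put_page; the discipline is unchanged: every reference taken must be dropped on every exit path (see release_metapage() above and logfs_put_read_page() below). A toy userspace model of that pairing, emphatically not the kernel API:

#include <assert.h>
#include <stdio.h>

struct page { int refcount; };        /* toy stand-in, not struct page */

static void get_page(struct page *p) { p->refcount++; }
static void put_page(struct page *p) { assert(p->refcount > 0); p->refcount--; }

static void use_page(struct page *p, int fail)
{
	get_page(p);                  /* take a reference before using it */
	if (fail) {
		put_page(p);          /* the error path drops it too */
		return;
	}
	/* ... work on the page ... */
	put_page(p);                  /* normal path: balanced drop */
}

int main(void)
{
	struct page pg = { 1 };       /* one reference held by the "cache" */

	use_page(&pg, 0);
	use_page(&pg, 1);
	printf("refcount back to %d\n", pg.refcount);
	return 0;
}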
index = page->index; - page_cache_release(page); + put_page(page); mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(dir, ta); @@ -316,7 +316,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx) be16_to_cpu(dd->namelen), be64_to_cpu(dd->ino), dd->type); kunmap(page); - page_cache_release(page); + put_page(page); if (full) break; } @@ -349,7 +349,7 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, dd = kmap_atomic(page); ino = be64_to_cpu(dd->ino); kunmap_atomic(dd); - page_cache_release(page); + put_page(page); inode = logfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) @@ -392,7 +392,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry, err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); - page_cache_release(page); + put_page(page); if (!err) grow_dir(dir, index); return err; @@ -561,7 +561,7 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry, map = kmap_atomic(page); memcpy(dd, map, sizeof(*dd)); kunmap_atomic(map); - page_cache_release(page); + put_page(page); return 0; } diff --git a/fs/logfs/file.c b/fs/logfs/file.c index 61eaeb1b6cac..f01ddfb1a03b 100644 --- a/fs/logfs/file.c +++ b/fs/logfs/file.c @@ -15,21 +15,21 @@ static int logfs_write_begin(struct file *file, struct address_space *mapping, { struct inode *inode = mapping->host; struct page *page; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; *pagep = page; - if ((len == PAGE_CACHE_SIZE) || PageUptodate(page)) + if ((len == PAGE_SIZE) || PageUptodate(page)) return 0; - if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + if ((pos & PAGE_MASK) >= i_size_read(inode)) { + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + len; /* Reading beyond i_size is simple: memset to zero */ - zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); + zero_user_segments(page, 0, start, end, PAGE_SIZE); return 0; } return logfs_readpage_nolock(page); @@ -41,11 +41,11 @@ static int logfs_write_end(struct file *file, struct address_space *mapping, { struct inode *inode = mapping->host; pgoff_t index = page->index; - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned start = pos & (PAGE_SIZE - 1); unsigned end = start + copied; int ret = 0; - BUG_ON(PAGE_CACHE_SIZE != inode->i_sb->s_blocksize); + BUG_ON(PAGE_SIZE != inode->i_sb->s_blocksize); BUG_ON(page->index > I3_BLOCKS); if (copied < len) { @@ -61,8 +61,8 @@ static int logfs_write_end(struct file *file, struct address_space *mapping, if (copied == 0) goto out; /* FIXME: do we need to update inode? */ - if (i_size_read(inode) < (index << PAGE_CACHE_SHIFT) + end) { - i_size_write(inode, (index << PAGE_CACHE_SHIFT) + end); + if (i_size_read(inode) < (index << PAGE_SHIFT) + end) { + i_size_write(inode, (index << PAGE_SHIFT) + end); mark_inode_dirty_sync(inode); } @@ -75,7 +75,7 @@ static int logfs_write_end(struct file *file, struct address_space *mapping, } out: unlock_page(page); - page_cache_release(page); + put_page(page); return ret ? 
ret : copied; } @@ -118,7 +118,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; loff_t i_size = i_size_read(inode); - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; + pgoff_t end_index = i_size >> PAGE_SHIFT; unsigned offset; u64 bix; level_t level; @@ -142,7 +142,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) return __logfs_writepage(page); /* Is the page fully outside i_size? (truncate in progress) */ - offset = i_size & (PAGE_CACHE_SIZE-1); + offset = i_size & (PAGE_SIZE-1); if (bix > end_index || offset == 0) { unlock_page(page); return 0; /* don't care */ @@ -155,7 +155,7 @@ static int logfs_writepage(struct page *page, struct writeback_control *wbc) * the page size, the remaining memory is zeroed when mapped, and * writes to that region are not written out to the file." */ - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); return __logfs_writepage(page); } diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c index 20973c9e52f8..3fb8c6d67303 100644 --- a/fs/logfs/readwrite.c +++ b/fs/logfs/readwrite.c @@ -281,7 +281,7 @@ static struct page *logfs_get_read_page(struct inode *inode, u64 bix, static void logfs_put_read_page(struct page *page) { unlock_page(page); - page_cache_release(page); + put_page(page); } static void logfs_lock_write_page(struct page *page) @@ -323,7 +323,7 @@ repeat: return NULL; err = add_to_page_cache_lru(page, mapping, index, GFP_NOFS); if (unlikely(err)) { - page_cache_release(page); + put_page(page); if (err == -EEXIST) goto repeat; return NULL; @@ -342,7 +342,7 @@ static void logfs_unlock_write_page(struct page *page) static void logfs_put_write_page(struct page *page) { logfs_unlock_write_page(page); - page_cache_release(page); + put_page(page); } static struct page *logfs_get_page(struct inode *inode, u64 bix, level_t level, @@ -562,7 +562,7 @@ static void indirect_free_block(struct super_block *sb, if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); set_page_private(page, 0); } __free_block(sb, block); @@ -655,7 +655,7 @@ static void alloc_data_block(struct inode *inode, struct page *page) block->page = page; SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long) block); block->ops = &indirect_block_ops; @@ -709,7 +709,7 @@ static u64 block_get_pointer(struct page *page, int index) static int logfs_read_empty(struct page *page) { - zero_user_segment(page, 0, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_SIZE); return 0; } @@ -1660,7 +1660,7 @@ static int truncate_data_block(struct inode *inode, struct page *page, if (err) return err; - zero_user_segment(page, size - pageofs, PAGE_CACHE_SIZE); + zero_user_segment(page, size - pageofs, PAGE_SIZE); return logfs_segment_write(inode, page, shadow); } @@ -1919,7 +1919,7 @@ static void move_page_to_inode(struct inode *inode, struct page *page) block->page = NULL; if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); set_page_private(page, 0); } } @@ -1940,7 +1940,7 @@ static void move_inode_to_page(struct page *page, struct inode *inode) if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long) block); } @@ -1971,7 +1971,7 @@ int logfs_read_inode(struct inode *inode) logfs_disk_to_inode(di, inode); kunmap_atomic(di); move_page_to_inode(inode, 
page); - page_cache_release(page); + put_page(page); return 0; } diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c index d270e4b2ab6b..1efd6055f4b0 100644 --- a/fs/logfs/segment.c +++ b/fs/logfs/segment.c @@ -90,9 +90,9 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len, if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); } - page_cache_release(page); + put_page(page); buf += copylen; len -= copylen; @@ -117,9 +117,9 @@ static void pad_partial_page(struct logfs_area *area) memset(page_address(page) + offset, 0xff, len); if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); } - page_cache_release(page); + put_page(page); } } @@ -129,20 +129,20 @@ static void pad_full_pages(struct logfs_area *area) struct logfs_super *super = logfs_super(sb); u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes); u32 len = super->s_segsize - area->a_used_bytes; - pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT; - pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT; + pgoff_t index = PAGE_ALIGN(ofs) >> PAGE_SHIFT; + pgoff_t no_indizes = len >> PAGE_SHIFT; struct page *page; while (no_indizes) { page = get_mapping_page(sb, index, 0); BUG_ON(!page); /* FIXME: reserve a pool */ SetPageUptodate(page); - memset(page_address(page), 0xff, PAGE_CACHE_SIZE); + memset(page_address(page), 0xff, PAGE_SIZE); if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); } - page_cache_release(page); + put_page(page); index++; no_indizes--; } @@ -411,7 +411,7 @@ int wbuf_read(struct super_block *sb, u64 ofs, size_t len, void *buf) if (IS_ERR(page)) return PTR_ERR(page); memcpy(buf, page_address(page) + offset, copylen); - page_cache_release(page); + put_page(page); buf += copylen; len -= copylen; @@ -499,7 +499,7 @@ static void move_btree_to_page(struct inode *inode, struct page *page, if (!PagePrivate(page)) { SetPagePrivate(page); - page_cache_get(page); + get_page(page); set_page_private(page, (unsigned long) block); } block->ops = &indirect_block_ops; @@ -554,7 +554,7 @@ void move_page_to_btree(struct page *page) if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); set_page_private(page, 0); } block->ops = &btree_block_ops; @@ -723,9 +723,9 @@ void freeseg(struct super_block *sb, u32 segno) continue; if (PagePrivate(page)) { ClearPagePrivate(page); - page_cache_release(page); + put_page(page); } - page_cache_release(page); + put_page(page); } } diff --git a/fs/logfs/super.c b/fs/logfs/super.c index 54360293bcb5..5751082dba52 100644 --- a/fs/logfs/super.c +++ b/fs/logfs/super.c @@ -48,7 +48,7 @@ void emergency_read_end(struct page *page) if (page == emergency_page) mutex_unlock(&emergency_mutex); else - page_cache_release(page); + put_page(page); } static void dump_segfile(struct super_block *sb) @@ -206,7 +206,7 @@ static int write_one_sb(struct super_block *sb, logfs_set_segment_erased(sb, segno, ec, 0); logfs_write_ds(sb, ds, segno, ec); err = super->s_devops->write_sb(sb, page); - page_cache_release(page); + put_page(page); return err; } @@ -366,24 +366,24 @@ static struct page *find_super_block(struct super_block *sb) return NULL; last = super->s_devops->find_last_sb(sb, &super->s_sb_ofs[1]); if (!last || IS_ERR(last)) { - page_cache_release(first); + put_page(first); return NULL; } if (!logfs_check_ds(page_address(first))) { - page_cache_release(last); + put_page(last); return first; } /* First one didn't work, try the second 
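pad_full_pages() above uses a third alignment idiom: PAGE_ALIGN(ofs) rounds a byte offset up to the next page boundary, and a byte count shifted right by PAGE_SHIFT is a whole-page count (in the logfs caller the length is already page-aligned; any shorter tail is padded separately by pad_partial_page()). Sketch with hypothetical values:

#include <stdio.h>

#define PAGE_SHIFT 12                         /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ofs = 10000;                /* hypothetical segment cursor */
	unsigned long len = 20000;                /* bytes left in the segment   */

	unsigned long first_index = PAGE_ALIGN(ofs) >> PAGE_SHIFT;
	unsigned long whole_pages = len >> PAGE_SHIFT;

	printf("pad %lu whole pages starting at page index %lu\n",
	       whole_pages, first_index);
	return 0;
}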
superblock */ if (!logfs_check_ds(page_address(last))) { - page_cache_release(first); + put_page(first); return last; } /* Neither worked, sorry folks */ - page_cache_release(first); - page_cache_release(last); + put_page(first); + put_page(last); return NULL; } @@ -425,7 +425,7 @@ static int __logfs_read_sb(struct super_block *sb) super->s_data_levels = ds->ds_data_levels; super->s_total_levels = super->s_ifile_levels + super->s_iblock_levels + super->s_data_levels; - page_cache_release(page); + put_page(page); return 0; } diff --git a/fs/minix/dir.c b/fs/minix/dir.c index d19ac258105a..33957c07cd11 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -28,7 +28,7 @@ const struct file_operations minix_dir_operations = { static inline void dir_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /* @@ -38,10 +38,10 @@ static inline void dir_put_page(struct page *page) static unsigned minix_last_byte(struct inode *inode, unsigned long page_nr) { - unsigned last_byte = PAGE_CACHE_SIZE; + unsigned last_byte = PAGE_SIZE; - if (page_nr == (inode->i_size >> PAGE_CACHE_SHIFT)) - last_byte = inode->i_size & (PAGE_CACHE_SIZE - 1); + if (page_nr == (inode->i_size >> PAGE_SHIFT)) + last_byte = inode->i_size & (PAGE_SIZE - 1); return last_byte; } @@ -92,8 +92,8 @@ static int minix_readdir(struct file *file, struct dir_context *ctx) if (pos >= inode->i_size) return 0; - offset = pos & ~PAGE_CACHE_MASK; - n = pos >> PAGE_CACHE_SHIFT; + offset = pos & ~PAGE_MASK; + n = pos >> PAGE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *p, *kaddr, *limit; @@ -229,7 +229,7 @@ int minix_add_link(struct dentry *dentry, struct inode *inode) lock_page(page); kaddr = (char*)page_address(page); dir_end = kaddr + minix_last_byte(dir, n); - limit = kaddr + PAGE_CACHE_SIZE - sbi->s_dirsize; + limit = kaddr + PAGE_SIZE - sbi->s_dirsize; for (p = kaddr; p <= limit; p = minix_next_entry(p, sbi)) { de = (minix_dirent *)p; de3 = (minix3_dirent *)p; @@ -327,7 +327,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir) } kaddr = kmap_atomic(page); - memset(kaddr, 0, PAGE_CACHE_SIZE); + memset(kaddr, 0, PAGE_SIZE); if (sbi->s_version == MINIX_V3) { minix3_dirent *de3 = (minix3_dirent *)kaddr; @@ -350,7 +350,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir) err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/minix/namei.c b/fs/minix/namei.c index a795a11e50c7..2887d1d95ce2 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c @@ -243,11 +243,11 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/mpage.c b/fs/mpage.c index 6bd9fd90964e..eedc644b78d7 100644 --- a/fs/mpage.c +++ b/fs/mpage.c @@ -107,7 +107,7 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) * don't make any buffers if there is only one buffer on * the page and the page just needs to be set up to date */ - if (inode->i_blkbits == PAGE_CACHE_SHIFT && + if (inode->i_blkbits == PAGE_SHIFT && buffer_uptodate(bh)) { SetPageUptodate(page); return; @@ -145,7 +145,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, { struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; - const unsigned blocks_per_page = 
PAGE_CACHE_SIZE >> blkbits; + const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; sector_t block_in_file; sector_t last_block; @@ -162,7 +162,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; if (last_block > last_block_in_file) @@ -249,7 +249,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, } if (first_hole != blocks_per_page) { - zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE); + zero_user_segment(page, first_hole << blkbits, PAGE_SIZE); if (first_hole == 0) { SetPageUptodate(page); unlock_page(page); @@ -331,7 +331,7 @@ confused: * * then this code just gives up and calls the buffer_head-based read function. * It does handle a page which has holes at the end - that is a common case: - * the end-of-file on blocksize < PAGE_CACHE_SIZE setups. + * the end-of-file on blocksize < PAGE_SIZE setups. * * BH_Boundary explanation: * @@ -380,7 +380,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, &first_logical_block, get_block, gfp); } - page_cache_release(page); + put_page(page); } BUG_ON(!list_empty(pages)); if (bio) @@ -472,7 +472,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, struct inode *inode = page->mapping->host; const unsigned blkbits = inode->i_blkbits; unsigned long end_index; - const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits; + const unsigned blocks_per_page = PAGE_SIZE >> blkbits; sector_t last_block; sector_t block_in_file; sector_t blocks[MAX_BUF_PER_PAGE]; @@ -542,7 +542,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, * The page has no buffers: map it to disk */ BUG_ON(!PageUptodate(page)); - block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); + block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = (i_size - 1) >> blkbits; map_bh.b_page = page; for (page_block = 0; page_block < blocks_per_page; ) { @@ -574,7 +574,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc, first_unmapped = page_block; page_is_mapped: - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; if (page->index >= end_index) { /* * The page straddles i_size. It must be zeroed out on each @@ -584,11 +584,11 @@ page_is_mapped: * is zeroed when mapped, and writes to that region are not * written out to the file." 
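__mpage_writepage() below combines both directions of the conversion: a page index shifted left by (PAGE_SHIFT - blkbits) gives the first block of that page in the file, and when the page straddles i_size the tail from (i_size & (PAGE_SIZE - 1)) onward is zeroed before writeout. Sketch, with 512-byte blocks and the file size as assumptions:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned int  blkbits = 9;        /* assumed 512-byte blocks      */
	unsigned long index   = 2;        /* hypothetical page index      */
	unsigned long i_size  = 10000;    /* file size straddling page 2  */

	unsigned long block_in_file = index << (PAGE_SHIFT - blkbits);
	unsigned long offset        = i_size & (PAGE_SIZE - 1);

	printf("page %lu starts at block %lu; zero bytes %lu..%lu\n",
	       index, block_in_file, offset, PAGE_SIZE - 1);
	return 0;
}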
*/ - unsigned offset = i_size & (PAGE_CACHE_SIZE - 1); + unsigned offset = i_size & (PAGE_SIZE - 1); if (page->index > end_index || !offset) goto confused; - zero_user_segment(page, offset, PAGE_CACHE_SIZE); + zero_user_segment(page, offset, PAGE_SIZE); } /* diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index b7f8eaeea5d8..bfdad003ee56 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c @@ -510,7 +510,7 @@ static int ncp_readdir(struct file *file, struct dir_context *ctx) kunmap(ctl.page); SetPageUptodate(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); ctl.page = NULL; } ctl.idx = 0; @@ -520,7 +520,7 @@ invalid_cache: if (ctl.page) { kunmap(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); ctl.page = NULL; } ctl.cache = cache; @@ -554,14 +554,14 @@ finished: kunmap(ctl.page); SetPageUptodate(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); } if (page) { cache->head = ctl.head; kunmap(page); SetPageUptodate(page); unlock_page(page); - page_cache_release(page); + put_page(page); } out: return result; @@ -649,7 +649,7 @@ ncp_fill_cache(struct file *file, struct dir_context *ctx, kunmap(ctl.page); SetPageUptodate(ctl.page); unlock_page(ctl.page); - page_cache_release(ctl.page); + put_page(ctl.page); } ctl.cache = NULL; ctl.idx -= NCP_DIRCACHE_SIZE; diff --git a/fs/ncpfs/ncplib_kernel.h b/fs/ncpfs/ncplib_kernel.h index 5233fbc1747a..17cfb743b5bf 100644 --- a/fs/ncpfs/ncplib_kernel.h +++ b/fs/ncpfs/ncplib_kernel.h @@ -191,7 +191,7 @@ struct ncp_cache_head { int eof; }; -#define NCP_DIRCACHE_SIZE ((int)(PAGE_CACHE_SIZE/sizeof(struct dentry *))) +#define NCP_DIRCACHE_SIZE ((int)(PAGE_SIZE/sizeof(struct dentry *))) union ncp_dir_cache { struct ncp_cache_head head; struct dentry *dentry[NCP_DIRCACHE_SIZE]; diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 02e4d87d2ed3..17a42e4eb872 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -231,7 +231,7 @@ bl_read_pagelist(struct nfs_pgio_header *header) size_t bytes_left = header->args.count; unsigned int pg_offset = header->args.pgbase, pg_len; struct page **pages = header->args.pages; - int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; + int pg_index = header->args.pgbase >> PAGE_SHIFT; const bool is_dio = (header->dreq != NULL); struct blk_plug plug; int i; @@ -263,13 +263,13 @@ bl_read_pagelist(struct nfs_pgio_header *header) } if (is_dio) { - if (pg_offset + bytes_left > PAGE_CACHE_SIZE) - pg_len = PAGE_CACHE_SIZE - pg_offset; + if (pg_offset + bytes_left > PAGE_SIZE) + pg_len = PAGE_SIZE - pg_offset; else pg_len = bytes_left; } else { BUG_ON(pg_offset != 0); - pg_len = PAGE_CACHE_SIZE; + pg_len = PAGE_SIZE; } if (is_hole(&be)) { @@ -339,9 +339,9 @@ static void bl_write_cleanup(struct work_struct *work) if (likely(!hdr->pnfs_error)) { struct pnfs_block_layout *bl = BLK_LSEG2EXT(hdr->lseg); - u64 start = hdr->args.offset & (loff_t)PAGE_CACHE_MASK; + u64 start = hdr->args.offset & (loff_t)PAGE_MASK; u64 end = (hdr->args.offset + hdr->args.count + - PAGE_CACHE_SIZE - 1) & (loff_t)PAGE_CACHE_MASK; + PAGE_SIZE - 1) & (loff_t)PAGE_MASK; ext_tree_mark_written(bl, start >> SECTOR_SHIFT, (end - start) >> SECTOR_SHIFT); @@ -373,7 +373,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) loff_t offset = header->args.offset; size_t count = header->args.count; struct page **pages = header->args.pages; - int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; + 
int pg_index = header->args.pgbase >> PAGE_SHIFT; unsigned int pg_len; struct blk_plug plug; int i; @@ -392,7 +392,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) blk_start_plug(&plug); /* we always write out the whole page */ - offset = offset & (loff_t)PAGE_CACHE_MASK; + offset = offset & (loff_t)PAGE_MASK; isect = offset >> SECTOR_SHIFT; for (i = pg_index; i < header->page_array.npages; i++) { @@ -408,7 +408,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync) extent_length = be.be_length - (isect - be.be_f_offset); } - pg_len = PAGE_CACHE_SIZE; + pg_len = PAGE_SIZE; bio = do_add_page_to_bio(bio, header->page_array.npages - i, WRITE, isect, pages[i], &map, &be, bl_end_io_write, par, @@ -820,7 +820,7 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) pgoff_t end; /* Optimize common case that writes from 0 to end of file */ - end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE); + end = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); if (end != inode->i_mapping->nrpages) { rcu_read_lock(); end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX); @@ -828,9 +828,9 @@ static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx) } if (!end) - return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT); + return i_size_read(inode) - (idx << PAGE_SHIFT); else - return (end - idx) << PAGE_CACHE_SHIFT; + return (end - idx) << PAGE_SHIFT; } static void diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h index bc21205309e0..18e6fd0b9506 100644 --- a/fs/nfs/blocklayout/blocklayout.h +++ b/fs/nfs/blocklayout/blocklayout.h @@ -40,8 +40,8 @@ #include "../pnfs.h" #include "../netns.h" -#define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT) -#define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT) +#define PAGE_CACHE_SECTORS (PAGE_SIZE >> SECTOR_SHIFT) +#define PAGE_CACHE_SECTOR_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) #define SECTOR_SIZE (1 << SECTOR_SHIFT) struct pnfs_block_dev; diff --git a/fs/nfs/client.c b/fs/nfs/client.c index d6d5d2a48e83..0c96528db94a 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -736,7 +736,7 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->rsize = max_rpc_payload; if (server->rsize > NFS_MAX_FILE_IO_SIZE) server->rsize = NFS_MAX_FILE_IO_SIZE; - server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + server->rpages = (server->rsize + PAGE_SIZE - 1) >> PAGE_SHIFT; server->backing_dev_info.name = "nfs"; server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD; @@ -745,13 +745,13 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->wsize = max_rpc_payload; if (server->wsize > NFS_MAX_FILE_IO_SIZE) server->wsize = NFS_MAX_FILE_IO_SIZE; - server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + server->wpages = (server->wsize + PAGE_SIZE - 1) >> PAGE_SHIFT; server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL); server->dtsize = nfs_block_size(fsinfo->dtpref, NULL); - if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES) - server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES; + if (server->dtsize > PAGE_SIZE * NFS_MAX_READDIR_PAGES) + server->dtsize = PAGE_SIZE * NFS_MAX_READDIR_PAGES; if (server->dtsize > server->rsize) server->dtsize = server->rsize; diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4bfa7d8bcade..33eb81738d03 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -377,7 +377,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc, again: timestamp = 
jiffies; gencount = nfs_inc_attr_generation_counter(); - error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, entry->cookie, pages, + error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages, NFS_SERVER(inode)->dtsize, desc->plus); if (error < 0) { /* We requested READDIRPLUS, but the server doesn't grok it */ @@ -560,7 +560,7 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en count++; if (desc->plus != 0) - nfs_prime_dcache(desc->file->f_path.dentry, entry); + nfs_prime_dcache(file_dentry(desc->file), entry); status = nfs_readdir_add_to_array(entry, page); if (status != 0) @@ -707,7 +707,7 @@ void cache_page_release(nfs_readdir_descriptor_t *desc) { if (!desc->page->mapping) nfs_readdir_clear_array(desc->page); - page_cache_release(desc->page); + put_page(desc->page); desc->page = NULL; } @@ -864,7 +864,7 @@ static bool nfs_dir_mapping_need_revalidate(struct inode *dir) */ static int nfs_readdir(struct file *file, struct dir_context *ctx) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); nfs_readdir_descriptor_t my_desc, *desc = &my_desc; @@ -1923,7 +1923,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) * add_to_page_cache_lru() grabs an extra page refcount. * Drop it here to avoid leaking this page later. */ - page_cache_release(page); + put_page(page); } else __free_page(page); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 7a0cfd3266e5..c93826e4a8c6 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -269,7 +269,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages) { unsigned int i; for (i = 0; i < npages; i++) - page_cache_release(pages[i]); + put_page(pages[i]); } void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, @@ -1003,7 +1003,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) iov_iter_count(iter)); pos = iocb->ki_pos; - end = (pos + iov_iter_count(iter) - 1) >> PAGE_CACHE_SHIFT; + end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT; inode_lock(inode); @@ -1013,7 +1013,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) if (mapping->nrpages) { result = invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, end); + pos >> PAGE_SHIFT, end); if (result) goto out_unlock; } @@ -1042,7 +1042,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, end); + pos >> PAGE_SHIFT, end); } inode_unlock(inode); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 89bf093d342a..be01095b97ae 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -320,7 +320,7 @@ static int nfs_want_read_modify_write(struct file *file, struct page *page, loff_t pos, unsigned len) { unsigned int pglen = nfs_page_length(page); - unsigned int offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned int offset = pos & (PAGE_SIZE - 1); unsigned int end = offset + len; if (pnfs_ld_read_whole_page(file->f_mapping->host)) { @@ -351,7 +351,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { int ret; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int once_thru = 0; @@ -380,12 +380,12 @@ start: ret = nfs_flush_incompatible(file, page); if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); } else if 
(!once_thru && nfs_want_read_modify_write(file, page, pos, len)) { once_thru = 1; ret = nfs_readpage(file, page); - page_cache_release(page); + put_page(page); if (!ret) goto start; } @@ -396,7 +396,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *fsdata) { - unsigned offset = pos & (PAGE_CACHE_SIZE - 1); + unsigned offset = pos & (PAGE_SIZE - 1); struct nfs_open_context *ctx = nfs_file_open_context(file); int status; @@ -413,20 +413,20 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, if (pglen == 0) { zero_user_segments(page, 0, offset, - end, PAGE_CACHE_SIZE); + end, PAGE_SIZE); SetPageUptodate(page); } else if (end >= pglen) { - zero_user_segment(page, end, PAGE_CACHE_SIZE); + zero_user_segment(page, end, PAGE_SIZE); if (offset == 0) SetPageUptodate(page); } else - zero_user_segment(page, pglen, PAGE_CACHE_SIZE); + zero_user_segment(page, pglen, PAGE_SIZE); } status = nfs_updatepage(file, page, offset, copied); unlock_page(page); - page_cache_release(page); + put_page(page); if (status < 0) return status; @@ -454,7 +454,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset, dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %u, %u)\n", page, offset, length); - if (offset != 0 || length < PAGE_CACHE_SIZE) + if (offset != 0 || length < PAGE_SIZE) return; /* Cancel any unstarted writes on this page */ nfs_wb_page_cancel(page_file_mapping(page)->host, page); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 33d18c411905..738c84a42eb0 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -940,7 +940,7 @@ int nfs_open(struct inode *inode, struct file *filp) { struct nfs_open_context *ctx; - ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); + ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode); if (IS_ERR(ctx)) return PTR_ERR(ctx); nfs_file_set_open_context(filp, ctx); diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 565f8135ae1f..f1d1d2c472e9 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -638,11 +638,11 @@ unsigned int nfs_page_length(struct page *page) if (i_size > 0) { pgoff_t page_index = page_file_index(page); - pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + pgoff_t end_index = (i_size - 1) >> PAGE_SHIFT; if (page_index < end_index) - return PAGE_CACHE_SIZE; + return PAGE_SIZE; if (page_index == end_index) - return ((i_size - 1) & ~PAGE_CACHE_MASK) + 1; + return ((i_size - 1) & ~PAGE_MASK) + 1; } return 0; } diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 22c35abbee9d..d0390516467c 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -26,7 +26,7 @@ static int nfs4_file_open(struct inode *inode, struct file *filp) { struct nfs_open_context *ctx; - struct dentry *dentry = filp->f_path.dentry; + struct dentry *dentry = file_dentry(filp); struct dentry *parent = NULL; struct inode *dir; unsigned openflags = filp->f_flags; @@ -57,7 +57,7 @@ nfs4_file_open(struct inode *inode, struct file *filp) parent = dget_parent(dentry); dir = d_inode(parent); - ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); + ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode); err = PTR_ERR(ctx); if (IS_ERR(ctx)) goto out; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4e4441216804..88474a4fc669 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -5001,7 +5001,7 @@ static int decode_space_limit(struct xdr_stream *xdr, blocksize = be32_to_cpup(p); maxsize = (uint64_t)nblocks 
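nfs_page_length() above is the cleanest statement of the last-page rule in the series: pages before the one holding the last byte are full, the page holding it has ((i_size - 1) & ~PAGE_MASK) + 1 valid bytes, and anything past it has none. A sketch mirroring that helper, 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12                 /* assumption: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* valid bytes on page 'index' of a file of size 'i_size' */
static unsigned long page_length(unsigned long index, unsigned long i_size)
{
	if (i_size > 0) {
		unsigned long end_index = (i_size - 1) >> PAGE_SHIFT;

		if (index < end_index)
			return PAGE_SIZE;
		if (index == end_index)
			return ((i_size - 1) & ~PAGE_MASK) + 1;
	}
	return 0;
}

int main(void)
{
	/* an exactly two-page file: 4096, 4096, then nothing */
	printf("%lu %lu %lu\n", page_length(0, 8192),
	       page_length(1, 8192), page_length(2, 8192));
	return 0;
}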
* (uint64_t)blocksize; } - maxsize >>= PAGE_CACHE_SHIFT; + maxsize >>= PAGE_SHIFT; *pagemod_limit = min_t(u64, maxsize, ULONG_MAX); return 0; out_overflow: diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c index 9aebffb40505..049c1b1f2932 100644 --- a/fs/nfs/objlayout/objio_osd.c +++ b/fs/nfs/objlayout/objio_osd.c @@ -486,7 +486,7 @@ static void __r4w_put_page(void *priv, struct page *page) dprintk("%s: index=0x%lx\n", __func__, (page == ZERO_PAGE(0)) ? -1UL : page->index); if (ZERO_PAGE(0) != page) - page_cache_release(page); + put_page(page); return; } diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 8ce4f61cbaa5..1f6db4231057 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -342,7 +342,7 @@ nfs_create_request(struct nfs_open_context *ctx, struct page *page, * update_nfs_request below if the region is not locked. */ req->wb_page = page; req->wb_index = page_file_index(page); - page_cache_get(page); + get_page(page); req->wb_offset = offset; req->wb_pgbase = offset; req->wb_bytes = count; @@ -392,7 +392,7 @@ static void nfs_clear_request(struct nfs_page *req) struct nfs_lock_context *l_ctx = req->wb_lock_context; if (page != NULL) { - page_cache_release(page); + put_page(page); req->wb_page = NULL; } if (l_ctx != NULL) { @@ -904,7 +904,7 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev, return false; } else { if (req->wb_pgbase != 0 || - prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) + prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE) return false; } } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 2fa483e6dbe2..89a5ef4df08a 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -841,7 +841,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, i_size = i_size_read(ino); - lgp->args.minlength = PAGE_CACHE_SIZE; + lgp->args.minlength = PAGE_SIZE; if (lgp->args.minlength > range->length) lgp->args.minlength = range->length; if (range->iomode == IOMODE_READ) { @@ -1618,13 +1618,13 @@ lookup_again: spin_unlock(&clp->cl_lock); } - pg_offset = arg.offset & ~PAGE_CACHE_MASK; + pg_offset = arg.offset & ~PAGE_MASK; if (pg_offset) { arg.offset -= pg_offset; arg.length += pg_offset; } if (arg.length != NFS4_MAX_UINT64) - arg.length = PAGE_CACHE_ALIGN(arg.length); + arg.length = PAGE_ALIGN(arg.length); lseg = send_layoutget(lo, ctx, &arg, gfp_flags); atomic_dec(&lo->plh_outstanding); diff --git a/fs/nfs/read.c b/fs/nfs/read.c index eb31e23e7def..6776d7a7839e 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -46,7 +46,7 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr) static int nfs_return_empty_page(struct page *page) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); SetPageUptodate(page); unlock_page(page); return 0; @@ -118,8 +118,8 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, unlock_page(page); return PTR_ERR(new); } - if (len < PAGE_CACHE_SIZE) - zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); nfs_pageio_init_read(&pgio, inode, false, &nfs_async_read_completion_ops); @@ -295,7 +295,7 @@ int nfs_readpage(struct file *file, struct page *page) int error; dprintk("NFS: nfs_readpage (%p %ld@%lu)\n", - page, PAGE_CACHE_SIZE, page_file_index(page)); + page, PAGE_SIZE, page_file_index(page)); nfs_inc_stats(inode, NFSIOS_VFSREADPAGE); nfs_add_stats(inode, NFSIOS_READPAGES, 1); @@ -361,8 +361,8 @@ readpage_async_filler(void *data, struct page *page) if (IS_ERR(new)) goto out_error; - if (len < PAGE_CACHE_SIZE) - 
zero_user_segment(page, len, PAGE_CACHE_SIZE); + if (len < PAGE_SIZE) + zero_user_segment(page, len, PAGE_SIZE); if (!nfs_pageio_add_request(desc->pgio, new)) { nfs_list_remove_request(new); nfs_readpage_release(new); @@ -424,8 +424,8 @@ int nfs_readpages(struct file *filp, struct address_space *mapping, pgm = &pgio.pg_mirrors[0]; NFS_I(inode)->read_io += pgm->pg_bytes_written; - npages = (pgm->pg_bytes_written + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> + PAGE_SHIFT; nfs_add_stats(inode, NFSIOS_READPAGES, npages); read_complete: put_nfs_open_context(desc.ctx); diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5754835a2886..5f4fd53e5764 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -150,7 +150,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c spin_lock(&inode->i_lock); i_size = i_size_read(inode); - end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size - 1) >> PAGE_SHIFT; if (i_size > 0 && page_file_index(page) < end_index) goto out; end = page_file_offset(page) + ((loff_t)offset+count); @@ -1942,7 +1942,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page) int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder) { loff_t range_start = page_file_offset(page); - loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1); + loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1); struct writeback_control wbc = { .sync_mode = WB_SYNC_ALL, .nr_to_write = 0, diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index 27f75bcbeb30..a9fb3636c142 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -458,7 +458,7 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap, struct buffer_head *pbh; __u64 key; - key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT - + key = page_index(bh->b_page) << (PAGE_SHIFT - bmap->b_inode->i_blkbits); for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page) key++; diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index a35ae35e6932..e0c9daf9aa22 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr) set_buffer_uptodate(bh); unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); return bh; } @@ -128,7 +128,7 @@ found: out_locked: unlock_page(page); - page_cache_release(page); + put_page(page); return err; } @@ -146,7 +146,7 @@ void nilfs_btnode_delete(struct buffer_head *bh) pgoff_t index = page_index(page); int still_dirty; - page_cache_get(page); + get_page(page); lock_page(page); wait_on_page_writeback(page); @@ -154,7 +154,7 @@ void nilfs_btnode_delete(struct buffer_head *bh) still_dirty = PageDirty(page); mapping = page->mapping; unlock_page(page); - page_cache_release(page); + put_page(page); if (!still_dirty && mapping) invalidate_inode_pages2_range(mapping, index, index); @@ -181,7 +181,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, obh = ctxt->bh; ctxt->newbh = NULL; - if (inode->i_blkbits == PAGE_CACHE_SHIFT) { + if (inode->i_blkbits == PAGE_SHIFT) { lock_page(obh->b_page); /* * We cannot call radix_tree_preload for the kernels older diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 6b8b92b19cec..e08f064e4bd7 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -58,7 +58,7 @@ static inline unsigned nilfs_chunk_size(struct inode *inode) static inline void nilfs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); 
} /* @@ -69,9 +69,9 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -109,12 +109,12 @@ static void nilfs_check_page(struct page *page) unsigned chunk_size = nilfs_chunk_size(dir); char *kaddr = page_address(page); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; struct nilfs_dir_entry *p; char *error; - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & (chunk_size - 1)) goto Ebadsize; if (!limit) @@ -161,7 +161,7 @@ Espan: bad_entry: nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - " "offset=%lu, inode=%lu, rec_len=%d, name_len=%d", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le64_to_cpu(p->inode), rec_len, p->name_len); goto fail; @@ -170,7 +170,7 @@ Eend: nilfs_error(sb, "nilfs_check_page", "entry in directory #%lu spans the page boundary" "offset=%lu, inode=%lu", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le64_to_cpu(p->inode)); fail: SetPageChecked(page); @@ -256,8 +256,8 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) loff_t pos = ctx->pos; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); /* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */ @@ -272,7 +272,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { nilfs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return -EIO; } kaddr = page_address(page); @@ -361,7 +361,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, if (++n >= npages) n = 0; /* next page is past the blocks we've got */ - if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) { + if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) { nilfs_error(dir->i_sb, __func__, "dir %lu size %lld exceeds block count %llu", dir->i_ino, dir->i_size, @@ -401,7 +401,7 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) if (de) { res = le64_to_cpu(de->inode); kunmap(page); - page_cache_release(page); + put_page(page); } return res; } @@ -460,7 +460,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + nilfs_last_byte(dir, n); de = (struct nilfs_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ @@ -603,7 +603,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) kunmap_atomic(kaddr); nilfs_commit_chunk(page, mapping, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 748ca238915a..0224b7826ace 100644 --- a/fs/nilfs2/gcinode.c +++ 
b/fs/nilfs2/gcinode.c @@ -115,7 +115,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, failed: unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); return err; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 21a1e2e0d92f..534631358b13 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -249,7 +249,7 @@ static int nilfs_set_page_dirty(struct page *page) if (nr_dirty) nilfs_set_file_dirty(inode, nr_dirty); } else if (ret) { - unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); + unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); nilfs_set_file_dirty(inode, nr_dirty); } @@ -291,7 +291,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = mapping->host; - unsigned start = pos & (PAGE_CACHE_SIZE - 1); + unsigned start = pos & (PAGE_SIZE - 1); unsigned nr_dirty; int err; diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 1125f40233ff..f6982b9153d5 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -110,7 +110,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block, failed_bh: unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); brelse(bh); failed_unlock: @@ -170,7 +170,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff, failed_bh: unlock_page(bh->b_page); - page_cache_release(bh->b_page); + put_page(bh->b_page); brelse(bh); failed: return ret; @@ -363,7 +363,7 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) { pgoff_t index = (pgoff_t)block >> - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); struct page *page; unsigned long first_block; int ret = 0; @@ -376,7 +376,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) wait_on_page_writeback(page); first_block = (unsigned long)index << - (PAGE_CACHE_SHIFT - inode->i_blkbits); + (PAGE_SHIFT - inode->i_blkbits); if (page_has_buffers(page)) { struct buffer_head *bh; @@ -385,7 +385,7 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) } still_dirty = PageDirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); if (still_dirty || invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0) @@ -578,7 +578,7 @@ int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh) } unlock_page(page); - page_cache_release(page); + put_page(page); return 0; } @@ -597,7 +597,7 @@ nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh) bh_frozen = nilfs_page_get_nth_block(page, n); } unlock_page(page); - page_cache_release(page); + put_page(page); } return bh_frozen; } diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 7ccdb961eea9..151bc19d47c0 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -431,11 +431,11 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: nilfs_transaction_abort(old_dir->i_sb); return err; diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index c20df77eff99..489391561cda 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -50,7 +50,7 @@ __nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index, if (!page_has_buffers(page)) 
create_empty_buffers(page, 1 << blkbits, b_state); - first_block = (unsigned long)index << (PAGE_CACHE_SHIFT - blkbits); + first_block = (unsigned long)index << (PAGE_SHIFT - blkbits); bh = nilfs_page_get_nth_block(page, block - first_block); touch_buffer(bh); @@ -64,7 +64,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, unsigned long b_state) { int blkbits = inode->i_blkbits; - pgoff_t index = blkoff >> (PAGE_CACHE_SHIFT - blkbits); + pgoff_t index = blkoff >> (PAGE_SHIFT - blkbits); struct page *page; struct buffer_head *bh; @@ -75,7 +75,7 @@ struct buffer_head *nilfs_grab_buffer(struct inode *inode, bh = __nilfs_get_page_block(page, blkoff, index, blkbits, b_state); if (unlikely(!bh)) { unlock_page(page); - page_cache_release(page); + put_page(page); return NULL; } return bh; @@ -288,7 +288,7 @@ repeat: __set_page_dirty_nobuffers(dpage); unlock_page(dpage); - page_cache_release(dpage); + put_page(dpage); unlock_page(page); } pagevec_release(&pvec); @@ -333,7 +333,7 @@ repeat: WARN_ON(PageDirty(dpage)); nilfs_copy_page(dpage, page, 0); unlock_page(dpage); - page_cache_release(dpage); + put_page(dpage); } else { struct page *page2; @@ -350,7 +350,7 @@ repeat: if (unlikely(err < 0)) { WARN_ON(err == -EEXIST); page->mapping = NULL; - page_cache_release(page); /* for cache */ + put_page(page); /* for cache */ } else { page->mapping = dmap; dmap->nrpages++; @@ -523,8 +523,8 @@ unsigned long nilfs_find_uncommitted_extent(struct inode *inode, if (inode->i_mapping->nrpages == 0) return 0; - index = start_blk >> (PAGE_CACHE_SHIFT - inode->i_blkbits); - nblocks_in_page = 1U << (PAGE_CACHE_SHIFT - inode->i_blkbits); + index = start_blk >> (PAGE_SHIFT - inode->i_blkbits); + nblocks_in_page = 1U << (PAGE_SHIFT - inode->i_blkbits); pagevec_init(&pvec, 0); @@ -537,7 +537,7 @@ repeat: if (length > 0 && pvec.pages[0]->index > index) goto out; - b = pvec.pages[0]->index << (PAGE_CACHE_SHIFT - inode->i_blkbits); + b = pvec.pages[0]->index << (PAGE_SHIFT - inode->i_blkbits); i = 0; do { page = pvec.pages[i]; diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 9b4f205d1173..5afa77fadc11 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -544,14 +544,14 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, blocksize, page, NULL); unlock_page(page); - page_cache_release(page); + put_page(page); (*nr_salvaged_blocks)++; goto next; failed_page: unlock_page(page); - page_cache_release(page); + put_page(page); failed_inode: printk(KERN_WARNING diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 3b65adaae7e4..4317f72568e6 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2070,7 +2070,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode) goto failed_to_write; if (nilfs_sc_cstage_get(sci) == NILFS_ST_DONE || - nilfs->ns_blocksize_bits != PAGE_CACHE_SHIFT) { + nilfs->ns_blocksize_bits != PAGE_SHIFT) { /* * At this point, we avoid double buffering * for blocksize < pagesize because page dirty diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index 7521e11db728..97768a1379f2 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -74,7 +74,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) set_buffer_uptodate(bh); - file_ofs = ((s64)page->index << PAGE_CACHE_SHIFT) + + file_ofs = ((s64)page->index << PAGE_SHIFT) + bh_offset(bh); read_lock_irqsave(&ni->size_lock, flags); init_size = ni->initialized_size; @@ -142,7 +142,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) 
u32 rec_size; rec_size = ni->itype.index.block_size; - recs = PAGE_CACHE_SIZE / rec_size; + recs = PAGE_SIZE / rec_size; /* Should have been verified before we got here... */ BUG_ON(!recs); local_irq_save(flags); @@ -229,7 +229,7 @@ static int ntfs_read_block(struct page *page) * fully truncated, truncate will throw it away as soon as we unlock * it so no need to worry what we do with it. */ - iblock = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + iblock = (s64)page->index << (PAGE_SHIFT - blocksize_bits); read_lock_irqsave(&ni->size_lock, flags); lblock = (ni->allocated_size + blocksize - 1) >> blocksize_bits; init_size = ni->initialized_size; @@ -412,9 +412,9 @@ retry_readpage: vi = page->mapping->host; i_size = i_size_read(vi); /* Is the page fully outside i_size? (truncate in progress) */ - if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> + PAGE_SHIFT)) { + zero_user(page, 0, PAGE_SIZE); ntfs_debug("Read outside i_size - truncated?"); goto done; } @@ -463,7 +463,7 @@ retry_readpage: * ok to ignore the compressed flag here. */ if (unlikely(page->index > 0)) { - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); goto done; } if (!NInoAttr(ni)) @@ -509,7 +509,7 @@ retry_readpage: le16_to_cpu(ctx->attr->data.resident.value_offset), attr_len); /* Zero the remainder of the page. */ - memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); + memset(addr + attr_len, 0, PAGE_SIZE - attr_len); flush_dcache_page(page); kunmap_atomic(addr); put_unm_err_out: @@ -599,7 +599,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) /* NOTE: Different naming scheme to ntfs_read_block()! */ /* The first block in the page. */ - block = (s64)page->index << (PAGE_CACHE_SHIFT - blocksize_bits); + block = (s64)page->index << (PAGE_SHIFT - blocksize_bits); read_lock_irqsave(&ni->size_lock, flags); i_size = i_size_read(vi); @@ -674,7 +674,7 @@ static int ntfs_write_block(struct page *page, struct writeback_control *wbc) // in the inode. // Again, for each page do: // __set_page_dirty_buffers(); - // page_cache_release() + // put_page() // We don't need to wait on the writes. // Update iblock. } @@ -925,7 +925,7 @@ static int ntfs_write_mst_block(struct page *page, ntfs_volume *vol = ni->vol; u8 *kaddr; unsigned int rec_size = ni->itype.index.block_size; - ntfs_inode *locked_nis[PAGE_CACHE_SIZE / rec_size]; + ntfs_inode *locked_nis[PAGE_SIZE / rec_size]; struct buffer_head *bh, *head, *tbh, *rec_start_bh; struct buffer_head *bhs[MAX_BUF_PER_PAGE]; runlist_element *rl; @@ -949,7 +949,7 @@ static int ntfs_write_mst_block(struct page *page, (NInoAttr(ni) && ni->type == AT_INDEX_ALLOCATION))); bh_size = vol->sb->s_blocksize; bh_size_bits = vol->sb->s_blocksize_bits; - max_bhs = PAGE_CACHE_SIZE / bh_size; + max_bhs = PAGE_SIZE / bh_size; BUG_ON(!max_bhs); BUG_ON(max_bhs > MAX_BUF_PER_PAGE); @@ -961,13 +961,13 @@ static int ntfs_write_mst_block(struct page *page, BUG_ON(!bh); rec_size_bits = ni->itype.index.block_size_bits; - BUG_ON(!(PAGE_CACHE_SIZE >> rec_size_bits)); + BUG_ON(!(PAGE_SIZE >> rec_size_bits)); bhs_per_rec = rec_size >> bh_size_bits; BUG_ON(!bhs_per_rec); /* The first block in the page. */ rec_block = block = (sector_t)page->index << - (PAGE_CACHE_SHIFT - bh_size_bits); + (PAGE_SHIFT - bh_size_bits); /* The first out of bounds block for the data size. 
*/ dblock = (i_size_read(vi) + bh_size - 1) >> bh_size_bits; @@ -1133,7 +1133,7 @@ lock_retry_remap: unsigned long mft_no; /* Get the mft record number. */ - mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) + mft_no = (((s64)page->index << PAGE_SHIFT) + ofs) >> rec_size_bits; /* Check whether to write this mft record. */ tni = NULL; @@ -1249,7 +1249,7 @@ do_mirror: continue; ofs = bh_offset(tbh); /* Get the mft record number. */ - mft_no = (((s64)page->index << PAGE_CACHE_SHIFT) + ofs) + mft_no = (((s64)page->index << PAGE_SHIFT) + ofs) >> rec_size_bits; if (mft_no < vol->mftmirr_size) ntfs_sync_mft_mirror(vol, mft_no, @@ -1300,7 +1300,7 @@ done: * Set page error if there is only one ntfs record in the page. * Otherwise we would lose per-record granularity. */ - if (ni->itype.index.block_size == PAGE_CACHE_SIZE) + if (ni->itype.index.block_size == PAGE_SIZE) SetPageError(page); NVolSetErrors(vol); } @@ -1308,7 +1308,7 @@ done: ntfs_debug("Page still contains one or more dirty ntfs " "records. Redirtying the page starting at " "record 0x%lx.", page->index << - (PAGE_CACHE_SHIFT - rec_size_bits)); + (PAGE_SHIFT - rec_size_bits)); redirty_page_for_writepage(wbc, page); unlock_page(page); } else { @@ -1365,13 +1365,13 @@ retry_writepage: BUG_ON(!PageLocked(page)); i_size = i_size_read(vi); /* Is the page fully outside i_size? (truncate in progress) */ - if (unlikely(page->index >= (i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) { + if (unlikely(page->index >= (i_size + PAGE_SIZE - 1) >> + PAGE_SHIFT)) { /* * The page may have dirty, unmapped buffers. Make them * freeable here, so the page does not leak. */ - block_invalidatepage(page, 0, PAGE_CACHE_SIZE); + block_invalidatepage(page, 0, PAGE_SIZE); unlock_page(page); ntfs_debug("Write outside i_size - truncated?"); return 0; @@ -1414,10 +1414,10 @@ retry_writepage: /* NInoNonResident() == NInoIndexAllocPresent() */ if (NInoNonResident(ni)) { /* We have to zero every time due to mmap-at-end-of-file. */ - if (page->index >= (i_size >> PAGE_CACHE_SHIFT)) { + if (page->index >= (i_size >> PAGE_SHIFT)) { /* The page straddles i_size. */ - unsigned int ofs = i_size & ~PAGE_CACHE_MASK; - zero_user_segment(page, ofs, PAGE_CACHE_SIZE); + unsigned int ofs = i_size & ~PAGE_MASK; + zero_user_segment(page, ofs, PAGE_SIZE); } /* Handle mst protected attributes. */ if (NInoMstProtected(ni)) @@ -1500,7 +1500,7 @@ retry_writepage: le16_to_cpu(ctx->attr->data.resident.value_offset), addr, attr_len); /* Zero out of bounds area in the page cache page. */ - memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); + memset(addr + attr_len, 0, PAGE_SIZE - attr_len); kunmap_atomic(addr); flush_dcache_page(page); flush_dcache_mft_record_page(ctx->ntfs_ino); diff --git a/fs/ntfs/aops.h b/fs/ntfs/aops.h index caecc58f529c..820d6eabf60f 100644 --- a/fs/ntfs/aops.h +++ b/fs/ntfs/aops.h @@ -40,7 +40,7 @@ static inline void ntfs_unmap_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } /** @@ -49,7 +49,7 @@ static inline void ntfs_unmap_page(struct page *page) * @index: index into the page cache for @mapping of the page to map * * Read a page from the page cache of the address space @mapping at position - * @index, where @index is in units of PAGE_CACHE_SIZE, and not in bytes. + * @index, where @index is in units of PAGE_SIZE, and not in bytes. 
* * If the page is not in memory it is loaded from disk first using the readpage * method defined in the address space operations of @mapping and the page is diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c index 250ed5b20c8f..44a39a099b54 100644 --- a/fs/ntfs/attrib.c +++ b/fs/ntfs/attrib.c @@ -152,7 +152,7 @@ int ntfs_map_runlist_nolock(ntfs_inode *ni, VCN vcn, ntfs_attr_search_ctx *ctx) if (old_ctx.base_ntfs_ino && old_ctx.ntfs_ino != old_ctx.base_ntfs_ino) { put_this_page = old_ctx.ntfs_ino->page; - page_cache_get(put_this_page); + get_page(put_this_page); } /* * Reinitialize the search context so we can lookup the @@ -275,7 +275,7 @@ retry_map: * the pieces anyway. */ if (put_this_page) - page_cache_release(put_this_page); + put_page(put_this_page); } return err; } @@ -1660,7 +1660,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) memcpy(kaddr, (u8*)a + le16_to_cpu(a->data.resident.value_offset), attr_size); - memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); + memset(kaddr + attr_size, 0, PAGE_SIZE - attr_size); kunmap_atomic(kaddr); flush_dcache_page(page); SetPageUptodate(page); @@ -1748,7 +1748,7 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size) if (page) { set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); } ntfs_debug("Done."); return 0; @@ -1835,7 +1835,7 @@ rl_err_out: ntfs_free(rl); page_err_out: unlock_page(page); - page_cache_release(page); + put_page(page); } if (err == -EINVAL) err = -EIO; @@ -2513,17 +2513,17 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) BUG_ON(NInoEncrypted(ni)); mapping = VFS_I(ni)->i_mapping; /* Work out the starting index and page offset. */ - idx = ofs >> PAGE_CACHE_SHIFT; - start_ofs = ofs & ~PAGE_CACHE_MASK; + idx = ofs >> PAGE_SHIFT; + start_ofs = ofs & ~PAGE_MASK; /* Work out the ending index and page offset. */ end = ofs + cnt; - end_ofs = end & ~PAGE_CACHE_MASK; + end_ofs = end & ~PAGE_MASK; /* If the end is outside the inode size return -ESPIPE. */ if (unlikely(end > i_size_read(VFS_I(ni)))) { ntfs_error(vol->sb, "Request exceeds end of attribute."); return -ESPIPE; } - end >>= PAGE_CACHE_SHIFT; + end >>= PAGE_SHIFT; /* If there is a first partial page, need to do it the slow way. */ if (start_ofs) { page = read_mapping_page(mapping, idx, NULL); @@ -2536,7 +2536,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) * If the last page is the same as the first page, need to * limit the write to the end offset. */ - size = PAGE_CACHE_SIZE; + size = PAGE_SIZE; if (idx == end) size = end_ofs; kaddr = kmap_atomic(page); @@ -2544,7 +2544,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) flush_dcache_page(page); kunmap_atomic(kaddr); set_page_dirty(page); - page_cache_release(page); + put_page(page); balance_dirty_pages_ratelimited(mapping); cond_resched(); if (idx == end) @@ -2561,7 +2561,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) return -ENOMEM; } kaddr = kmap_atomic(page); - memset(kaddr, val, PAGE_CACHE_SIZE); + memset(kaddr, val, PAGE_SIZE); flush_dcache_page(page); kunmap_atomic(kaddr); /* @@ -2585,7 +2585,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) set_page_dirty(page); /* Finally unlock and release the page. 
*/ unlock_page(page); - page_cache_release(page); + put_page(page); balance_dirty_pages_ratelimited(mapping); cond_resched(); } @@ -2602,7 +2602,7 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val) flush_dcache_page(page); kunmap_atomic(kaddr); set_page_dirty(page); - page_cache_release(page); + put_page(page); balance_dirty_pages_ratelimited(mapping); cond_resched(); } diff --git a/fs/ntfs/bitmap.c b/fs/ntfs/bitmap.c index 0809cf876098..ec130c588d2b 100644 --- a/fs/ntfs/bitmap.c +++ b/fs/ntfs/bitmap.c @@ -67,8 +67,8 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * Calculate the indices for the pages containing the first and last * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively. */ - index = start_bit >> (3 + PAGE_CACHE_SHIFT); - end_index = (start_bit + cnt - 1) >> (3 + PAGE_CACHE_SHIFT); + index = start_bit >> (3 + PAGE_SHIFT); + end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT); /* Get the page containing the first bit (@start_bit). */ mapping = vi->i_mapping; @@ -82,7 +82,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, kaddr = page_address(page); /* Set @pos to the position of the byte containing @start_bit. */ - pos = (start_bit >> 3) & ~PAGE_CACHE_MASK; + pos = (start_bit >> 3) & ~PAGE_MASK; /* Calculate the position of @start_bit in the first byte. */ bit = start_bit & 7; @@ -108,7 +108,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * Depending on @value, modify all remaining whole bytes in the page up * to @cnt. */ - len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE - pos); + len = min_t(s64, cnt >> 3, PAGE_SIZE - pos); memset(kaddr + pos, value ? 0xff : 0, len); cnt -= len << 3; @@ -132,7 +132,7 @@ int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit, * Depending on @value, modify all remaining whole bytes in the * page up to @cnt. */ - len = min_t(s64, cnt >> 3, PAGE_CACHE_SIZE); + len = min_t(s64, cnt >> 3, PAGE_SIZE); memset(kaddr, value ? 0xff : 0, len); cnt -= len << 3; } diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c index f82498c35e78..f2b5e746f49b 100644 --- a/fs/ntfs/compress.c +++ b/fs/ntfs/compress.c @@ -104,16 +104,12 @@ static void zero_partial_compressed_page(struct page *page, unsigned int kp_ofs; ntfs_debug("Zeroing page region outside initialized size."); - if (((s64)page->index << PAGE_CACHE_SHIFT) >= initialized_size) { - /* - * FIXME: Using clear_page() will become wrong when we get - * PAGE_CACHE_SIZE != PAGE_SIZE but for now there is no problem. - */ + if (((s64)page->index << PAGE_SHIFT) >= initialized_size) { clear_page(kp); return; } - kp_ofs = initialized_size & ~PAGE_CACHE_MASK; - memset(kp + kp_ofs, 0, PAGE_CACHE_SIZE - kp_ofs); + kp_ofs = initialized_size & ~PAGE_MASK; + memset(kp + kp_ofs, 0, PAGE_SIZE - kp_ofs); return; } @@ -123,7 +119,7 @@ static void zero_partial_compressed_page(struct page *page, static inline void handle_bounds_compressed_page(struct page *page, const loff_t i_size, const s64 initialized_size) { - if ((page->index >= (initialized_size >> PAGE_CACHE_SHIFT)) && + if ((page->index >= (initialized_size >> PAGE_SHIFT)) && (initialized_size < i_size)) zero_partial_compressed_page(page, initialized_size); return; @@ -160,7 +156,7 @@ static inline void handle_bounds_compressed_page(struct page *page, * @xpage_done indicates whether the target page (@dest_pages[@xpage]) was * completed during the decompression of the compression block (@cb_start). 
* - * Warning: This function *REQUIRES* PAGE_CACHE_SIZE >= 4096 or it will blow up + * Warning: This function *REQUIRES* PAGE_SIZE >= 4096 or it will blow up * unpredictably! You have been warned! * * Note to hackers: This function may not sleep until it has finished accessing @@ -241,7 +237,7 @@ return_error: if (di == xpage) *xpage_done = 1; else - page_cache_release(dp); + put_page(dp); dest_pages[di] = NULL; } } @@ -274,7 +270,7 @@ return_error: cb = cb_sb_end; /* Advance destination position to next sub-block. */ - *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_CACHE_MASK; + *dest_ofs = (*dest_ofs + NTFS_SB_SIZE) & ~PAGE_MASK; if (!*dest_ofs && (++*dest_index > dest_max_index)) goto return_overflow; goto do_next_sb; @@ -301,7 +297,7 @@ return_error: /* Advance destination position to next sub-block. */ *dest_ofs += NTFS_SB_SIZE; - if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) { + if (!(*dest_ofs &= ~PAGE_MASK)) { finalize_page: /* * First stage: add current page index to array of @@ -335,7 +331,7 @@ do_next_tag: *dest_ofs += nr_bytes; } /* We have finished the current sub-block. */ - if (!(*dest_ofs &= ~PAGE_CACHE_MASK)) + if (!(*dest_ofs &= ~PAGE_MASK)) goto finalize_page; goto do_next_sb; } @@ -462,7 +458,7 @@ return_overflow: * have been written to so that we would lose data if we were to just overwrite * them with the out-of-date uncompressed data. * - * FIXME: For PAGE_CACHE_SIZE > cb_size we are not doing the Right Thing(TM) at + * FIXME: For PAGE_SIZE > cb_size we are not doing the Right Thing(TM) at * the end of the file I think. We need to detect this case and zero the out * of bounds remainder of the page in question and mark it as handled. At the * moment we would just return -EIO on such a page. This bug will only become * apparent if pages are above 8kiB and the NTFS volume only uses 512 byte * clusters so is probably not going to be seen by anyone. Still this should * be fixed. (AIA) * - * FIXME: Again for PAGE_CACHE_SIZE > cb_size we are screwing up both in + * FIXME: Again for PAGE_SIZE > cb_size we are screwing up both in * handling sparse and compressed cbs. (AIA) * * FIXME: At the moment we don't do any zeroing out in the case that @@ -497,14 +493,14 @@ int ntfs_read_compressed_block(struct page *page) u64 cb_size_mask = cb_size - 1UL; VCN vcn; LCN lcn; - /* The first wanted vcn (minimum alignment is PAGE_CACHE_SIZE). */ - VCN start_vcn = (((s64)index << PAGE_CACHE_SHIFT) & ~cb_size_mask) >> + /* The first wanted vcn (minimum alignment is PAGE_SIZE). */ + VCN start_vcn = (((s64)index << PAGE_SHIFT) & ~cb_size_mask) >> vol->cluster_size_bits; /* * The first vcn after the last wanted vcn (minimum alignment is again - * PAGE_CACHE_SIZE. + * PAGE_SIZE. */ - VCN end_vcn = ((((s64)(index + 1UL) << PAGE_CACHE_SHIFT) + cb_size - 1) + VCN end_vcn = ((((s64)(index + 1UL) << PAGE_SHIFT) + cb_size - 1) & ~cb_size_mask) >> vol->cluster_size_bits; /* Number of compression blocks (cbs) in the wanted vcn range. */ unsigned int nr_cbs = (end_vcn - start_vcn) << vol->cluster_size_bits @@ -515,7 +511,7 @@ int ntfs_read_compressed_block(struct page *page) * guarantees of start_vcn and end_vcn, no need to round up here. 
*/ unsigned int nr_pages = (end_vcn - start_vcn) << - vol->cluster_size_bits >> PAGE_CACHE_SHIFT; + vol->cluster_size_bits >> PAGE_SHIFT; unsigned int xpage, max_page, cur_page, cur_ofs, i; unsigned int cb_clusters, cb_max_ofs; int block, max_block, cb_max_page, bhs_size, nr_bhs, err = 0; @@ -549,7 +545,7 @@ int ntfs_read_compressed_block(struct page *page) * We have already been given one page, this is the one we must do. * Once again, the alignment guarantees keep it simple. */ - offset = start_vcn << vol->cluster_size_bits >> PAGE_CACHE_SHIFT; + offset = start_vcn << vol->cluster_size_bits >> PAGE_SHIFT; xpage = index - offset; pages[xpage] = page; /* @@ -560,13 +556,13 @@ int ntfs_read_compressed_block(struct page *page) i_size = i_size_read(VFS_I(ni)); initialized_size = ni->initialized_size; read_unlock_irqrestore(&ni->size_lock, flags); - max_page = ((i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) - + max_page = ((i_size + PAGE_SIZE - 1) >> PAGE_SHIFT) - offset; /* Is the page fully outside i_size? (truncate in progress) */ if (xpage >= max_page) { kfree(bhs); kfree(pages); - zero_user(page, 0, PAGE_CACHE_SIZE); + zero_user(page, 0, PAGE_SIZE); ntfs_debug("Compressed read outside i_size - truncated?"); SetPageUptodate(page); unlock_page(page); @@ -591,7 +587,7 @@ int ntfs_read_compressed_block(struct page *page) continue; } unlock_page(page); - page_cache_release(page); + put_page(page); pages[i] = NULL; } } @@ -735,9 +731,9 @@ lock_retry_remap: ntfs_debug("Successfully read the compression block."); /* The last page and maximum offset within it for the current cb. */ - cb_max_page = (cur_page << PAGE_CACHE_SHIFT) + cur_ofs + cb_size; - cb_max_ofs = cb_max_page & ~PAGE_CACHE_MASK; - cb_max_page >>= PAGE_CACHE_SHIFT; + cb_max_page = (cur_page << PAGE_SHIFT) + cur_ofs + cb_size; + cb_max_ofs = cb_max_page & ~PAGE_MASK; + cb_max_page >>= PAGE_SHIFT; /* Catch end of file inside a compression block. */ if (cb_max_page > max_page) @@ -753,16 +749,11 @@ lock_retry_remap: for (; cur_page < cb_max_page; cur_page++) { page = pages[cur_page]; if (page) { - /* - * FIXME: Using clear_page() will become wrong - * when we get PAGE_CACHE_SIZE != PAGE_SIZE but - * for now there is no problem. - */ if (likely(!cur_ofs)) clear_page(page_address(page)); else memset(page_address(page) + cur_ofs, 0, - PAGE_CACHE_SIZE - + PAGE_SIZE - cur_ofs); flush_dcache_page(page); kunmap(page); @@ -771,10 +762,10 @@ lock_retry_remap: if (cur_page == xpage) xpage_done = 1; else - page_cache_release(page); + put_page(page); pages[cur_page] = NULL; } - cb_pos += PAGE_CACHE_SIZE - cur_ofs; + cb_pos += PAGE_SIZE - cur_ofs; cur_ofs = 0; if (cb_pos >= cb_end) break; @@ -807,7 +798,7 @@ lock_retry_remap: * synchronous io for the majority of pages. * Or if we choose not to do the read-ahead/-behind stuff, we * could just return block_read_full_page(pages[xpage]) as long - * as PAGE_CACHE_SIZE <= cb_size. + * as PAGE_SIZE <= cb_size. 
*/ if (cb_max_ofs) cb_max_page--; @@ -816,8 +807,8 @@ lock_retry_remap: page = pages[cur_page]; if (page) memcpy(page_address(page) + cur_ofs, cb_pos, - PAGE_CACHE_SIZE - cur_ofs); - cb_pos += PAGE_CACHE_SIZE - cur_ofs; + PAGE_SIZE - cur_ofs); + cb_pos += PAGE_SIZE - cur_ofs; cur_ofs = 0; if (cb_pos >= cb_end) break; @@ -850,10 +841,10 @@ lock_retry_remap: if (cur2_page == xpage) xpage_done = 1; else - page_cache_release(page); + put_page(page); pages[cur2_page] = NULL; } - cb_pos2 += PAGE_CACHE_SIZE - cur_ofs2; + cb_pos2 += PAGE_SIZE - cur_ofs2; cur_ofs2 = 0; if (cb_pos2 >= cb_end) break; @@ -884,7 +875,7 @@ lock_retry_remap: kunmap(page); unlock_page(page); if (prev_cur_page != xpage) - page_cache_release(page); + put_page(page); pages[prev_cur_page] = NULL; } } @@ -914,7 +905,7 @@ lock_retry_remap: kunmap(page); unlock_page(page); if (cur_page != xpage) - page_cache_release(page); + put_page(page); pages[cur_page] = NULL; } } @@ -961,7 +952,7 @@ err_out: kunmap(page); unlock_page(page); if (i != xpage) - page_cache_release(page); + put_page(page); } } kfree(pages); diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index b2eff5816adc..a18613579001 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -315,11 +315,11 @@ found_it: descend_into_child_node: /* * Convert vcn to index into the index allocation attribute in units - * of PAGE_CACHE_SIZE and map the page cache page, reading it from + * of PAGE_SIZE and map the page cache page, reading it from * disk if necessary. */ page = ntfs_map_page(ia_mapping, vcn << - dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); + dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(sb, "Failed to map directory index page, error %ld.", -PTR_ERR(page)); @@ -331,9 +331,9 @@ descend_into_child_node: fast_descend_into_child_node: /* Get to the index allocation block. */ ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << - dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); + dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); /* Bounds checks. */ - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", dir_ni->mft_no); goto unm_err_out; @@ -366,7 +366,7 @@ fast_descend_into_child_node: goto unm_err_out; } index_end = (u8*)ia + dir_ni->itype.index.block_size; - if (index_end > kaddr + PAGE_CACHE_SIZE) { + if (index_end > kaddr + PAGE_SIZE) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " @@ -559,9 +559,9 @@ found_it2: /* If vcn is in the same page cache page as old_vcn we * recycle the mapped page. */ if (old_vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT == vcn << + PAGE_SHIFT == vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT) + PAGE_SHIFT) goto fast_descend_into_child_node; unlock_page(page); ntfs_unmap_page(page); @@ -793,11 +793,11 @@ found_it: descend_into_child_node: /* * Convert vcn to index into the index allocation attribute in units - * of PAGE_CACHE_SIZE and map the page cache page, reading it from + * of PAGE_SIZE and map the page cache page, reading it from * disk if necessary. 
*/ page = ntfs_map_page(ia_mapping, vcn << - dir_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); + dir_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(sb, "Failed to map directory index page, error %ld.", -PTR_ERR(page)); @@ -809,9 +809,9 @@ descend_into_child_node: fast_descend_into_child_node: /* Get to the index allocation block. */ ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << - dir_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); + dir_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); /* Bounds checks. */ - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", dir_ni->mft_no); goto unm_err_out; @@ -844,7 +844,7 @@ fast_descend_into_child_node: goto unm_err_out; } index_end = (u8*)ia + dir_ni->itype.index.block_size; - if (index_end > kaddr + PAGE_CACHE_SIZE) { + if (index_end > kaddr + PAGE_SIZE) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " @@ -968,9 +968,9 @@ found_it2: /* If vcn is in the same page cache page as old_vcn we * recycle the mapped page. */ if (old_vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT == vcn << + PAGE_SHIFT == vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT) + PAGE_SHIFT) goto fast_descend_into_child_node; unlock_page(page); ntfs_unmap_page(page); @@ -1246,15 +1246,15 @@ skip_index_root: goto iput_err_out; } /* Get the starting bit position in the current bitmap page. */ - cur_bmp_pos = bmp_pos & ((PAGE_CACHE_SIZE * 8) - 1); - bmp_pos &= ~(u64)((PAGE_CACHE_SIZE * 8) - 1); + cur_bmp_pos = bmp_pos & ((PAGE_SIZE * 8) - 1); + bmp_pos &= ~(u64)((PAGE_SIZE * 8) - 1); get_next_bmp_page: ntfs_debug("Reading bitmap with page index 0x%llx, bit ofs 0x%llx", - (unsigned long long)bmp_pos >> (3 + PAGE_CACHE_SHIFT), + (unsigned long long)bmp_pos >> (3 + PAGE_SHIFT), (unsigned long long)bmp_pos & - (unsigned long long)((PAGE_CACHE_SIZE * 8) - 1)); + (unsigned long long)((PAGE_SIZE * 8) - 1)); bmp_page = ntfs_map_page(bmp_mapping, - bmp_pos >> (3 + PAGE_CACHE_SHIFT)); + bmp_pos >> (3 + PAGE_SHIFT)); if (IS_ERR(bmp_page)) { ntfs_error(sb, "Reading index bitmap failed."); err = PTR_ERR(bmp_page); @@ -1270,9 +1270,9 @@ find_next_index_buffer: * If we have reached the end of the bitmap page, get the next * page, and put away the old one. */ - if (unlikely((cur_bmp_pos >> 3) >= PAGE_CACHE_SIZE)) { + if (unlikely((cur_bmp_pos >> 3) >= PAGE_SIZE)) { ntfs_unmap_page(bmp_page); - bmp_pos += PAGE_CACHE_SIZE * 8; + bmp_pos += PAGE_SIZE * 8; cur_bmp_pos = 0; goto get_next_bmp_page; } @@ -1285,8 +1285,8 @@ find_next_index_buffer: ntfs_debug("Handling index buffer 0x%llx.", (unsigned long long)bmp_pos + cur_bmp_pos); /* If the current index buffer is in the same page we reuse the page. */ - if ((prev_ia_pos & (s64)PAGE_CACHE_MASK) != - (ia_pos & (s64)PAGE_CACHE_MASK)) { + if ((prev_ia_pos & (s64)PAGE_MASK) != + (ia_pos & (s64)PAGE_MASK)) { prev_ia_pos = ia_pos; if (likely(ia_page != NULL)) { unlock_page(ia_page); @@ -1296,7 +1296,7 @@ find_next_index_buffer: * Map the page cache page containing the current ia_pos, * reading it from disk if necessary. 
*/ - ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_CACHE_SHIFT); + ia_page = ntfs_map_page(ia_mapping, ia_pos >> PAGE_SHIFT); if (IS_ERR(ia_page)) { ntfs_error(sb, "Reading index allocation data failed."); err = PTR_ERR(ia_page); @@ -1307,10 +1307,10 @@ find_next_index_buffer: kaddr = (u8*)page_address(ia_page); } /* Get the current index buffer. */ - ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK & - ~(s64)(ndir->itype.index.block_size - 1))); + ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_MASK & + ~(s64)(ndir->itype.index.block_size - 1))); /* Bounds checks. */ - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) { + if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE)) { ntfs_error(sb, "Out of bounds check failed. Corrupt directory " "inode 0x%lx or driver bug.", vdir->i_ino); goto err_out; @@ -1348,7 +1348,7 @@ find_next_index_buffer: goto err_out; } index_end = (u8*)ia + ndir->itype.index.block_size; - if (unlikely(index_end > kaddr + PAGE_CACHE_SIZE)) { + if (unlikely(index_end > kaddr + PAGE_SIZE)) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of directory inode " "0x%lx crosses page boundary. Impossible! " "Cannot access! This is probably a bug in the " diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index bed4d427dfae..91117ada8528 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -220,8 +220,8 @@ do_non_resident_extend: m = NULL; } mapping = vi->i_mapping; - index = old_init_size >> PAGE_CACHE_SHIFT; - end_index = (new_init_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + index = old_init_size >> PAGE_SHIFT; + end_index = (new_init_size + PAGE_SIZE - 1) >> PAGE_SHIFT; do { /* * Read the page. If the page is not present, this will zero @@ -233,7 +233,7 @@ do_non_resident_extend: goto init_err_out; } if (unlikely(PageError(page))) { - page_cache_release(page); + put_page(page); err = -EIO; goto init_err_out; } @@ -242,13 +242,13 @@ do_non_resident_extend: * enough to make ntfs_writepage() work. */ write_lock_irqsave(&ni->size_lock, flags); - ni->initialized_size = (s64)(index + 1) << PAGE_CACHE_SHIFT; + ni->initialized_size = (s64)(index + 1) << PAGE_SHIFT; if (ni->initialized_size > new_init_size) ni->initialized_size = new_init_size; write_unlock_irqrestore(&ni->size_lock, flags); /* Set the page dirty so it gets written out. */ set_page_dirty(page); - page_cache_release(page); + put_page(page); /* * Play nice with the vm and the rest of the system. This is * very much needed as we can potentially be modifying the @@ -543,7 +543,7 @@ out: err_out: while (nr > 0) { unlock_page(pages[--nr]); - page_cache_release(pages[nr]); + put_page(pages[nr]); } goto out; } @@ -573,7 +573,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh) * only partially being written to. * * If @nr_pages is greater than one, we are guaranteed that the cluster size is - * greater than PAGE_CACHE_SIZE, that all pages in @pages are entirely inside + * greater than PAGE_SIZE, that all pages in @pages are entirely inside * the same cluster and that they are the entirety of that cluster, and that * the cluster is sparse, i.e. we need to allocate a cluster to fill the hole. 
* @@ -653,7 +653,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages, u = 0; do_next_page: page = pages[u]; - bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; + bh_pos = (s64)page->index << PAGE_SHIFT; bh = head = page_buffers(page); do { VCN cdelta; @@ -810,11 +810,11 @@ map_buffer_cached: kaddr = kmap_atomic(page); if (bh_pos < pos) { - pofs = bh_pos & ~PAGE_CACHE_MASK; + pofs = bh_pos & ~PAGE_MASK; memset(kaddr + pofs, 0, pos - bh_pos); } if (bh_end > end) { - pofs = end & ~PAGE_CACHE_MASK; + pofs = end & ~PAGE_MASK; memset(kaddr + pofs, 0, bh_end - end); } kunmap_atomic(kaddr); @@ -942,7 +942,7 @@ rl_not_mapped_enoent: * unmapped. This can only happen when the cluster size is * less than the page cache size. */ - if (unlikely(vol->cluster_size < PAGE_CACHE_SIZE)) { + if (unlikely(vol->cluster_size < PAGE_SIZE)) { bh_cend = (bh_end + vol->cluster_size - 1) >> vol->cluster_size_bits; if ((bh_cend <= cpos || bh_cpos >= cend)) { @@ -1208,7 +1208,7 @@ rl_not_mapped_enoent: wait_on_buffer(bh); if (likely(buffer_uptodate(bh))) { page = bh->b_page; - bh_pos = ((s64)page->index << PAGE_CACHE_SHIFT) + + bh_pos = ((s64)page->index << PAGE_SHIFT) + bh_offset(bh); /* * If the buffer overflows the initialized size, need @@ -1350,7 +1350,7 @@ rl_not_mapped_enoent: bh = head = page_buffers(page); do { if (u == nr_pages && - ((s64)page->index << PAGE_CACHE_SHIFT) + + ((s64)page->index << PAGE_SHIFT) + bh_offset(bh) >= end) break; if (!buffer_new(bh)) @@ -1422,7 +1422,7 @@ static inline int ntfs_commit_pages_after_non_resident_write( bool partial; page = pages[u]; - bh_pos = (s64)page->index << PAGE_CACHE_SHIFT; + bh_pos = (s64)page->index << PAGE_SHIFT; bh = head = page_buffers(page); partial = false; do { @@ -1639,7 +1639,7 @@ static int ntfs_commit_pages_after_write(struct page **pages, if (end < attr_len) memcpy(kaddr + end, kattr + end, attr_len - end); /* Zero the region outside the end of the attribute value. */ - memset(kaddr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); + memset(kaddr + attr_len, 0, PAGE_SIZE - attr_len); flush_dcache_page(page); SetPageUptodate(page); } @@ -1706,7 +1706,7 @@ static size_t ntfs_copy_from_user_iter(struct page **pages, unsigned nr_pages, unsigned len, copied; do { - len = PAGE_CACHE_SIZE - ofs; + len = PAGE_SIZE - ofs; if (len > bytes) len = bytes; copied = iov_iter_copy_from_user_atomic(*pages, &data, ofs, @@ -1724,14 +1724,14 @@ out: return total; err: /* Zero the rest of the target like __copy_from_user(). */ - len = PAGE_CACHE_SIZE - copied; + len = PAGE_SIZE - copied; do { if (len > bytes) len = bytes; zero_user(*pages, copied, len); bytes -= len; copied = 0; - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; } while (++pages < last_page); goto out; } @@ -1787,8 +1787,8 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, * attributes. 
*/ nr_pages = 1; - if (vol->cluster_size > PAGE_CACHE_SIZE && NInoNonResident(ni)) - nr_pages = vol->cluster_size >> PAGE_CACHE_SHIFT; + if (vol->cluster_size > PAGE_SIZE && NInoNonResident(ni)) + nr_pages = vol->cluster_size >> PAGE_SHIFT; last_vcn = -1; do { VCN vcn; @@ -1796,9 +1796,9 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, unsigned ofs, do_pages, u; size_t copied; - start_idx = idx = pos >> PAGE_CACHE_SHIFT; - ofs = pos & ~PAGE_CACHE_MASK; - bytes = PAGE_CACHE_SIZE - ofs; + start_idx = idx = pos >> PAGE_SHIFT; + ofs = pos & ~PAGE_MASK; + bytes = PAGE_SIZE - ofs; do_pages = 1; if (nr_pages > 1) { vcn = pos >> vol->cluster_size_bits; @@ -1832,7 +1832,7 @@ static ssize_t ntfs_perform_write(struct file *file, struct iov_iter *i, if (lcn == LCN_HOLE) { start_idx = (pos & ~(s64) vol->cluster_size_mask) - >> PAGE_CACHE_SHIFT; + >> PAGE_SHIFT; bytes = vol->cluster_size - (pos & vol->cluster_size_mask); do_pages = nr_pages; @@ -1871,12 +1871,12 @@ again: if (unlikely(status)) { do { unlock_page(pages[--do_pages]); - page_cache_release(pages[do_pages]); + put_page(pages[do_pages]); } while (do_pages); break; } } - u = (pos >> PAGE_CACHE_SHIFT) - pages[0]->index; + u = (pos >> PAGE_SHIFT) - pages[0]->index; copied = ntfs_copy_from_user_iter(pages + u, do_pages - u, ofs, i, bytes); ntfs_flush_dcache_pages(pages + u, do_pages - u); @@ -1889,7 +1889,7 @@ again: } do { unlock_page(pages[--do_pages]); - page_cache_release(pages[do_pages]); + put_page(pages[do_pages]); } while (do_pages); if (unlikely(status < 0)) break; @@ -1921,7 +1921,7 @@ again: } } while (iov_iter_count(i)); if (cached_page) - page_cache_release(cached_page); + put_page(cached_page); ntfs_debug("Done. Returning %s (written 0x%lx, status %li).", written ? "written" : "status", (unsigned long)written, (long)status); diff --git a/fs/ntfs/index.c b/fs/ntfs/index.c index 096c135691ae..0d645f357930 100644 --- a/fs/ntfs/index.c +++ b/fs/ntfs/index.c @@ -272,11 +272,11 @@ done: descend_into_child_node: /* * Convert vcn to index into the index allocation attribute in units - * of PAGE_CACHE_SIZE and map the page cache page, reading it from + * of PAGE_SIZE and map the page cache page, reading it from * disk if necessary. */ page = ntfs_map_page(ia_mapping, vcn << - idx_ni->itype.index.vcn_size_bits >> PAGE_CACHE_SHIFT); + idx_ni->itype.index.vcn_size_bits >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(sb, "Failed to map index page, error %ld.", -PTR_ERR(page)); @@ -288,9 +288,9 @@ descend_into_child_node: fast_descend_into_child_node: /* Get to the index allocation block. */ ia = (INDEX_ALLOCATION*)(kaddr + ((vcn << - idx_ni->itype.index.vcn_size_bits) & ~PAGE_CACHE_MASK)); + idx_ni->itype.index.vcn_size_bits) & ~PAGE_MASK)); /* Bounds checks. */ - if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE) { + if ((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_SIZE) { ntfs_error(sb, "Out of bounds check failed. Corrupt inode " "0x%lx or driver bug.", idx_ni->mft_no); goto unm_err_out; @@ -323,7 +323,7 @@ fast_descend_into_child_node: goto unm_err_out; } index_end = (u8*)ia + idx_ni->itype.index.block_size; - if (index_end > kaddr + PAGE_CACHE_SIZE) { + if (index_end > kaddr + PAGE_SIZE) { ntfs_error(sb, "Index buffer (VCN 0x%llx) of inode 0x%lx " "crosses page boundary. Impossible! Cannot " "access! This is probably a bug in the " @@ -427,9 +427,9 @@ ia_done: * the mapped page. 
*/ if (old_vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT == vcn << + PAGE_SHIFT == vcn << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT) + PAGE_SHIFT) goto fast_descend_into_child_node; unlock_page(page); ntfs_unmap_page(page); diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index d284f07eda77..f40972d6df90 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -868,12 +868,12 @@ skip_attr_list_load: ni->itype.index.block_size); goto unm_err_out; } - if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { + if (ni->itype.index.block_size > PAGE_SIZE) { ntfs_error(vi->i_sb, "Index block size (%u) > " - "PAGE_CACHE_SIZE (%ld) is not " + "PAGE_SIZE (%ld) is not " "supported. Sorry.", ni->itype.index.block_size, - PAGE_CACHE_SIZE); + PAGE_SIZE); err = -EOPNOTSUPP; goto unm_err_out; } @@ -1585,10 +1585,10 @@ static int ntfs_read_locked_index_inode(struct inode *base_vi, struct inode *vi) "two.", ni->itype.index.block_size); goto unm_err_out; } - if (ni->itype.index.block_size > PAGE_CACHE_SIZE) { - ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_CACHE_SIZE " + if (ni->itype.index.block_size > PAGE_SIZE) { + ntfs_error(vi->i_sb, "Index block size (%u) > PAGE_SIZE " "(%ld) is not supported. Sorry.", - ni->itype.index.block_size, PAGE_CACHE_SIZE); + ni->itype.index.block_size, PAGE_SIZE); err = -EOPNOTSUPP; goto unm_err_out; } diff --git a/fs/ntfs/lcnalloc.c b/fs/ntfs/lcnalloc.c index 1711b710b641..27a24a42f712 100644 --- a/fs/ntfs/lcnalloc.c +++ b/fs/ntfs/lcnalloc.c @@ -283,15 +283,15 @@ runlist_element *ntfs_cluster_alloc(ntfs_volume *vol, const VCN start_vcn, ntfs_unmap_page(page); } page = ntfs_map_page(mapping, last_read_pos >> - PAGE_CACHE_SHIFT); + PAGE_SHIFT); if (IS_ERR(page)) { err = PTR_ERR(page); ntfs_error(vol->sb, "Failed to map page."); goto out; } - buf_size = last_read_pos & ~PAGE_CACHE_MASK; + buf_size = last_read_pos & ~PAGE_MASK; buf = page_address(page) + buf_size; - buf_size = PAGE_CACHE_SIZE - buf_size; + buf_size = PAGE_SIZE - buf_size; if (unlikely(last_read_pos + buf_size > i_size)) buf_size = i_size - last_read_pos; buf_size <<= 3; diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index c71de292c5ad..9d71213ca81e 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c @@ -381,7 +381,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, * completely inside @rp, just copy it from there. Otherwise map all * the required pages and copy the data from them. */ - size = PAGE_CACHE_SIZE - (pos & ~PAGE_CACHE_MASK); + size = PAGE_SIZE - (pos & ~PAGE_MASK); if (size >= le32_to_cpu(rp->system_page_size)) { memcpy(trp, rp, le32_to_cpu(rp->system_page_size)); } else { @@ -394,8 +394,8 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, /* Copy the remaining data one page at a time. */ have_read = size; to_read = le32_to_cpu(rp->system_page_size) - size; - idx = (pos + size) >> PAGE_CACHE_SHIFT; - BUG_ON((pos + size) & ~PAGE_CACHE_MASK); + idx = (pos + size) >> PAGE_SHIFT; + BUG_ON((pos + size) & ~PAGE_MASK); do { page = ntfs_map_page(vi->i_mapping, idx); if (IS_ERR(page)) { @@ -406,7 +406,7 @@ static int ntfs_check_and_load_restart_page(struct inode *vi, err = -EIO; goto err_out; } - size = min_t(int, to_read, PAGE_CACHE_SIZE); + size = min_t(int, to_read, PAGE_SIZE); memcpy((u8*)trp + have_read, page_address(page), size); ntfs_unmap_page(page); have_read += size; @@ -509,11 +509,11 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) * log page size if the page cache size is between the default log page * size and twice that. 
*/ - if (PAGE_CACHE_SIZE >= DefaultLogPageSize && PAGE_CACHE_SIZE <= + if (PAGE_SIZE >= DefaultLogPageSize && PAGE_SIZE <= DefaultLogPageSize * 2) log_page_size = DefaultLogPageSize; else - log_page_size = PAGE_CACHE_SIZE; + log_page_size = PAGE_SIZE; log_page_mask = log_page_size - 1; /* * Use ntfs_ffs() instead of ffs() to enable the compiler to @@ -539,7 +539,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) * to be empty. */ for (pos = 0; pos < size; pos <<= 1) { - pgoff_t idx = pos >> PAGE_CACHE_SHIFT; + pgoff_t idx = pos >> PAGE_SHIFT; if (!page || page->index != idx) { if (page) ntfs_unmap_page(page); @@ -550,7 +550,7 @@ bool ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) goto err_out; } } - kaddr = (u8*)page_address(page) + (pos & ~PAGE_CACHE_MASK); + kaddr = (u8*)page_address(page) + (pos & ~PAGE_MASK); /* * A non-empty block means the logfile is not empty while an * empty block after a non-empty block has been encountered diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 3014a36a255b..37b2501caaa4 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -61,16 +61,16 @@ static inline MFT_RECORD *map_mft_record_page(ntfs_inode *ni) * here if the volume was that big... */ index = (u64)ni->mft_no << vol->mft_record_size_bits >> - PAGE_CACHE_SHIFT; - ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + PAGE_SHIFT; + ofs = (ni->mft_no << vol->mft_record_size_bits) & ~PAGE_MASK; i_size = i_size_read(mft_vi); /* The maximum valid index into the page cache for $MFT's data. */ - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; /* If the wanted index is out of bounds the mft record doesn't exist. */ if (unlikely(index >= end_index)) { - if (index > end_index || (i_size & ~PAGE_CACHE_MASK) < ofs + + if (index > end_index || (i_size & ~PAGE_MASK) < ofs + vol->mft_record_size) { page = ERR_PTR(-ENOENT); ntfs_error(vol->sb, "Attempt to read mft record 0x%lx, " @@ -487,7 +487,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, } /* Get the page containing the mirror copy of the mft record @m. */ page = ntfs_map_page(vol->mftmirr_ino->i_mapping, mft_no >> - (PAGE_CACHE_SHIFT - vol->mft_record_size_bits)); + (PAGE_SHIFT - vol->mft_record_size_bits)); if (IS_ERR(page)) { ntfs_error(vol->sb, "Failed to map mft mirror page."); err = PTR_ERR(page); @@ -497,7 +497,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no, BUG_ON(!PageUptodate(page)); ClearPageUptodate(page); /* Offset of the mft mirror record inside the page. */ - page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + page_ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK; /* The address in the page of the mirror copy of the mft record @m. */ kmirr = page_address(page) + page_ofs; /* Copy the mst protected mft record to the mirror. */ @@ -1178,8 +1178,8 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol, for (; pass <= 2;) { /* Cap size to pass_end. 
*/ ofs = data_pos >> 3; - page_ofs = ofs & ~PAGE_CACHE_MASK; - size = PAGE_CACHE_SIZE - page_ofs; + page_ofs = ofs & ~PAGE_MASK; + size = PAGE_SIZE - page_ofs; ll = ((pass_end + 7) >> 3) - ofs; if (size > ll) size = ll; @@ -1190,7 +1190,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol, */ if (size) { page = ntfs_map_page(mftbmp_mapping, - ofs >> PAGE_CACHE_SHIFT); + ofs >> PAGE_SHIFT); if (IS_ERR(page)) { ntfs_error(vol->sb, "Failed to read mft " "bitmap, aborting."); @@ -1328,13 +1328,13 @@ static int ntfs_mft_bitmap_extend_allocation_nolock(ntfs_volume *vol) */ ll = lcn >> 3; page = ntfs_map_page(vol->lcnbmp_ino->i_mapping, - ll >> PAGE_CACHE_SHIFT); + ll >> PAGE_SHIFT); if (IS_ERR(page)) { up_write(&mftbmp_ni->runlist.lock); ntfs_error(vol->sb, "Failed to read from lcn bitmap."); return PTR_ERR(page); } - b = (u8*)page_address(page) + (ll & ~PAGE_CACHE_MASK); + b = (u8*)page_address(page) + (ll & ~PAGE_MASK); tb = 1 << (lcn & 7ull); down_write(&vol->lcnbmp_lock); if (*b != 0xff && !(*b & tb)) { @@ -2103,14 +2103,14 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no) * The index into the page cache and the offset within the page cache * page of the wanted mft record. */ - index = mft_no << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; - ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + index = mft_no << vol->mft_record_size_bits >> PAGE_SHIFT; + ofs = (mft_no << vol->mft_record_size_bits) & ~PAGE_MASK; /* The maximum valid index into the page cache for $MFT's data. */ i_size = i_size_read(mft_vi); - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; if (unlikely(index >= end_index)) { if (unlikely(index > end_index || ofs + vol->mft_record_size >= - (i_size & ~PAGE_CACHE_MASK))) { + (i_size & ~PAGE_MASK))) { ntfs_error(vol->sb, "Tried to format non-existing mft " "record 0x%llx.", (long long)mft_no); return -ENOENT; @@ -2515,8 +2515,8 @@ mft_rec_already_initialized: * We now have allocated and initialized the mft record. Calculate the * index of and the offset within the page cache page the record is in. */ - index = bit << vol->mft_record_size_bits >> PAGE_CACHE_SHIFT; - ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; + index = bit << vol->mft_record_size_bits >> PAGE_SHIFT; + ofs = (bit << vol->mft_record_size_bits) & ~PAGE_MASK; /* Read, map, and pin the page containing the mft record. */ page = ntfs_map_page(vol->mft_ino->i_mapping, index); if (IS_ERR(page)) { diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h index c581e26a350d..12de47b96ca9 100644 --- a/fs/ntfs/ntfs.h +++ b/fs/ntfs/ntfs.h @@ -43,7 +43,7 @@ typedef enum { NTFS_MAX_NAME_LEN = 255, NTFS_MAX_ATTR_NAME_LEN = 255, NTFS_MAX_CLUSTER_SIZE = 64 * 1024, /* 64kiB */ - NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_CACHE_SIZE, + NTFS_MAX_PAGES_PER_CLUSTER = NTFS_MAX_CLUSTER_SIZE / PAGE_SIZE, } NTFS_CONSTANTS; /* Global variables. */ diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c index 1b38abdaa3ed..ecb49870a680 100644 --- a/fs/ntfs/super.c +++ b/fs/ntfs/super.c @@ -823,14 +823,14 @@ static bool parse_ntfs_boot_sector(ntfs_volume *vol, const NTFS_BOOT_SECTOR *b) ntfs_debug("vol->mft_record_size_bits = %i (0x%x)", vol->mft_record_size_bits, vol->mft_record_size_bits); /* - * We cannot support mft record sizes above the PAGE_CACHE_SIZE since + * We cannot support mft record sizes above the PAGE_SIZE since * we store $MFT/$DATA, the table of mft records in the page cache. 
*/ - if (vol->mft_record_size > PAGE_CACHE_SIZE) { + if (vol->mft_record_size > PAGE_SIZE) { ntfs_error(vol->sb, "Mft record size (%i) exceeds the " - "PAGE_CACHE_SIZE on your system (%lu). " + "PAGE_SIZE on your system (%lu). " "This is not supported. Sorry.", - vol->mft_record_size, PAGE_CACHE_SIZE); + vol->mft_record_size, PAGE_SIZE); return false; } /* We cannot support mft record sizes below the sector size. */ @@ -1096,7 +1096,7 @@ static bool check_mft_mirror(ntfs_volume *vol) ntfs_debug("Entering."); /* Compare contents of $MFT and $MFTMirr. */ - mrecs_per_page = PAGE_CACHE_SIZE / vol->mft_record_size; + mrecs_per_page = PAGE_SIZE / vol->mft_record_size; BUG_ON(!mrecs_per_page); BUG_ON(!vol->mftmirr_size); mft_page = mirr_page = NULL; @@ -1615,20 +1615,20 @@ static bool load_and_init_attrdef(ntfs_volume *vol) if (!vol->attrdef) goto iput_failed; index = 0; - max_index = i_size >> PAGE_CACHE_SHIFT; - size = PAGE_CACHE_SIZE; + max_index = i_size >> PAGE_SHIFT; + size = PAGE_SIZE; while (index < max_index) { /* Read the attrdef table and copy it into the linear buffer. */ read_partial_attrdef_page: page = ntfs_map_page(ino->i_mapping, index); if (IS_ERR(page)) goto free_iput_failed; - memcpy((u8*)vol->attrdef + (index++ << PAGE_CACHE_SHIFT), + memcpy((u8*)vol->attrdef + (index++ << PAGE_SHIFT), page_address(page), size); ntfs_unmap_page(page); }; - if (size == PAGE_CACHE_SIZE) { - size = i_size & ~PAGE_CACHE_MASK; + if (size == PAGE_SIZE) { + size = i_size & ~PAGE_MASK; if (size) goto read_partial_attrdef_page; } @@ -1684,20 +1684,20 @@ static bool load_and_init_upcase(ntfs_volume *vol) if (!vol->upcase) goto iput_upcase_failed; index = 0; - max_index = i_size >> PAGE_CACHE_SHIFT; - size = PAGE_CACHE_SIZE; + max_index = i_size >> PAGE_SHIFT; + size = PAGE_SIZE; while (index < max_index) { /* Read the upcase table and copy it into the linear buffer. */ read_partial_upcase_page: page = ntfs_map_page(ino->i_mapping, index); if (IS_ERR(page)) goto iput_upcase_failed; - memcpy((char*)vol->upcase + (index++ << PAGE_CACHE_SHIFT), + memcpy((char*)vol->upcase + (index++ << PAGE_SHIFT), page_address(page), size); ntfs_unmap_page(page); }; - if (size == PAGE_CACHE_SIZE) { - size = i_size & ~PAGE_CACHE_MASK; + if (size == PAGE_SIZE) { + size = i_size & ~PAGE_MASK; if (size) goto read_partial_upcase_page; } @@ -2471,14 +2471,14 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) down_read(&vol->lcnbmp_lock); /* * Convert the number of bits into bytes rounded up, then convert into - * multiples of PAGE_CACHE_SIZE, rounding up so that if we have one + * multiples of PAGE_SIZE, rounding up so that if we have one * full and one partial page max_index = 2. */ - max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; - /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ + max_index = (((vol->nr_clusters + 7) >> 3) + PAGE_SIZE - 1) >> + PAGE_SHIFT; + /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */ ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.", - max_index, PAGE_CACHE_SIZE / 4); + max_index, PAGE_SIZE / 4); for (index = 0; index < max_index; index++) { unsigned long *kaddr; @@ -2491,7 +2491,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) if (IS_ERR(page)) { ntfs_debug("read_mapping_page() error. 
Skipping " "page (index 0x%lx).", index); - nr_free -= PAGE_CACHE_SIZE * 8; + nr_free -= PAGE_SIZE * 8; continue; } kaddr = kmap_atomic(page); @@ -2503,9 +2503,9 @@ static s64 get_nr_free_clusters(ntfs_volume *vol) * ntfs_readpage(). */ nr_free -= bitmap_weight(kaddr, - PAGE_CACHE_SIZE * BITS_PER_BYTE); + PAGE_SIZE * BITS_PER_BYTE); kunmap_atomic(kaddr); - page_cache_release(page); + put_page(page); } ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1); /* @@ -2547,9 +2547,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, pgoff_t index; ntfs_debug("Entering."); - /* Use multiples of 4 bytes, thus max_size is PAGE_CACHE_SIZE / 4. */ + /* Use multiples of 4 bytes, thus max_size is PAGE_SIZE / 4. */ ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = " - "0x%lx.", max_index, PAGE_CACHE_SIZE / 4); + "0x%lx.", max_index, PAGE_SIZE / 4); for (index = 0; index < max_index; index++) { unsigned long *kaddr; @@ -2562,7 +2562,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, if (IS_ERR(page)) { ntfs_debug("read_mapping_page() error. Skipping " "page (index 0x%lx).", index); - nr_free -= PAGE_CACHE_SIZE * 8; + nr_free -= PAGE_SIZE * 8; continue; } kaddr = kmap_atomic(page); @@ -2574,9 +2574,9 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol, * ntfs_readpage(). */ nr_free -= bitmap_weight(kaddr, - PAGE_CACHE_SIZE * BITS_PER_BYTE); + PAGE_SIZE * BITS_PER_BYTE); kunmap_atomic(kaddr); - page_cache_release(page); + put_page(page); } ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.", index - 1); @@ -2618,17 +2618,17 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs) /* Type of filesystem. */ sfs->f_type = NTFS_SB_MAGIC; /* Optimal transfer block size. */ - sfs->f_bsize = PAGE_CACHE_SIZE; + sfs->f_bsize = PAGE_SIZE; /* * Total data blocks in filesystem in units of f_bsize and since * inodes are also stored in data blocs ($MFT is a file) this is just * the total clusters. */ sfs->f_blocks = vol->nr_clusters << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; /* Free data blocks in filesystem in units of f_bsize. */ size = get_nr_free_clusters(vol) << vol->cluster_size_bits >> - PAGE_CACHE_SHIFT; + PAGE_SHIFT; if (size < 0LL) size = 0LL; /* Free blocks avail to non-superuser, same as above on NTFS. */ @@ -2639,11 +2639,11 @@ static int ntfs_statfs(struct dentry *dentry, struct kstatfs *sfs) size = i_size_read(vol->mft_ino) >> vol->mft_record_size_bits; /* * Convert the maximum number of set bits into bytes rounded up, then - * convert into multiples of PAGE_CACHE_SIZE, rounding up so that if we + * convert into multiples of PAGE_SIZE, rounding up so that if we * have one full and one partial page max_index = 2. */ max_index = ((((mft_ni->initialized_size >> vol->mft_record_size_bits) - + 7) >> 3) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + + 7) >> 3) + PAGE_SIZE - 1) >> PAGE_SHIFT; read_unlock_irqrestore(&mft_ni->size_lock, flags); /* Number of inodes in filesystem (at this point in time). */ sfs->f_files = size; @@ -2765,15 +2765,15 @@ static int ntfs_fill_super(struct super_block *sb, void *opt, const int silent) if (!parse_options(vol, (char*)opt)) goto err_out_now; - /* We support sector sizes up to the PAGE_CACHE_SIZE. */ - if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { + /* We support sector sizes up to the PAGE_SIZE. */ + if (bdev_logical_block_size(sb->s_bdev) > PAGE_SIZE) { if (!silent) ntfs_error(sb, "Device has unsupported sector size " "(%i). 
The maximum supported sector " "size on this architecture is %lu " "bytes.", bdev_logical_block_size(sb->s_bdev), - PAGE_CACHE_SIZE); + PAGE_SIZE); goto err_out_now; } /* diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 70907d638b60..e361d1a0ca09 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6671,7 +6671,7 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, { int i; struct page *page; - unsigned int from, to = PAGE_CACHE_SIZE; + unsigned int from, to = PAGE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); @@ -6679,21 +6679,21 @@ static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, if (numpages == 0) goto out; - to = PAGE_CACHE_SIZE; + to = PAGE_SIZE; for(i = 0; i < numpages; i++) { page = pages[i]; - from = start & (PAGE_CACHE_SIZE - 1); - if ((end >> PAGE_CACHE_SHIFT) == page->index) - to = end & (PAGE_CACHE_SIZE - 1); + from = start & (PAGE_SIZE - 1); + if ((end >> PAGE_SHIFT) == page->index) + to = end & (PAGE_SIZE - 1); - BUG_ON(from > PAGE_CACHE_SIZE); - BUG_ON(to > PAGE_CACHE_SIZE); + BUG_ON(from > PAGE_SIZE); + BUG_ON(to > PAGE_SIZE); ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, &phys); - start = (page->index + 1) << PAGE_CACHE_SHIFT; + start = (page->index + 1) << PAGE_SHIFT; } out: if (pages) @@ -6712,7 +6712,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, numpages = 0; last_page_bytes = PAGE_ALIGN(end); - index = start >> PAGE_CACHE_SHIFT; + index = start >> PAGE_SHIFT; do { pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); if (!pages[numpages]) { @@ -6723,7 +6723,7 @@ int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, numpages++; index++; - } while (index < (last_page_bytes >> PAGE_CACHE_SHIFT)); + } while (index < (last_page_bytes >> PAGE_SHIFT)); out: if (ret != 0) { @@ -6950,8 +6950,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * to do that now. 
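ocfs2_zero_cluster_pages() above walks a byte range page by page, trimming the [from, to) window only on the first and last pages. The same walk as a plain C sketch, with 4 KiB pages assumed and an arbitrary range:

	#include <stdio.h>

	#define PAGE_SHIFT 12
	#define PAGE_SIZE (1UL << PAGE_SHIFT)

	int main(void)
	{
		unsigned long long start = 5000, end = 13000;
		unsigned long long pos = start;

		while (pos < end) {
			unsigned long long index = pos >> PAGE_SHIFT;
			unsigned from = pos & (PAGE_SIZE - 1);
			unsigned to = PAGE_SIZE;

			if ((end >> PAGE_SHIFT) == index)    /* last page? */
				to = end & (PAGE_SIZE - 1);

			printf("page %llu: zero bytes [%u, %u)\n", index, from, to);
			pos = (index + 1) << PAGE_SHIFT;     /* next page start */
		}
		return 0;
	}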
*/ if (!ocfs2_sparse_alloc(osb) && - PAGE_CACHE_SIZE < osb->s_clustersize) - end = PAGE_CACHE_SIZE; + PAGE_SIZE < osb->s_clustersize) + end = PAGE_SIZE; ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages); if (ret) { @@ -6971,8 +6971,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, goto out_unlock; } - page_end = PAGE_CACHE_SIZE; - if (PAGE_CACHE_SIZE > osb->s_clustersize) + page_end = PAGE_SIZE; + if (PAGE_SIZE > osb->s_clustersize) page_end = osb->s_clustersize; for (i = 0; i < num_pages; i++) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1581240a7ca0..ad1577348a92 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -234,7 +234,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, size = i_size_read(inode); - if (size > PAGE_CACHE_SIZE || + if (size > PAGE_SIZE || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", @@ -247,7 +247,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, if (size) memcpy(kaddr, di->id2.i_data.id_data, size); /* Clear the remaining part of the page */ - memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); + memset(kaddr + size, 0, PAGE_SIZE - size); flush_dcache_page(page); kunmap_atomic(kaddr); @@ -282,7 +282,7 @@ static int ocfs2_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct ocfs2_inode_info *oi = OCFS2_I(inode); - loff_t start = (loff_t)page->index << PAGE_CACHE_SHIFT; + loff_t start = (loff_t)page->index << PAGE_SHIFT; int ret, unlock = 1; trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, @@ -385,7 +385,7 @@ static int ocfs2_readpages(struct file *filp, struct address_space *mapping, * drop out in that case as it's not worth handling here. 
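ocfs2_read_inline_data() above fills a page from the inline area of the dinode and then zeroes the remainder so no stale page contents leak to userspace. A hedged standalone model of that fill (fill_page() is an invented helper; a kernel page is modelled by a static buffer):

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096UL

	static void fill_page(char *kaddr, const char *inline_data, size_t size)
	{
		if (size)
			memcpy(kaddr, inline_data, size);
		memset(kaddr + size, 0, PAGE_SIZE - size);  /* clear the rest */
	}

	int main(void)
	{
		static char page[PAGE_SIZE];
		const char data[] = "small file body";

		fill_page(page, data, sizeof(data) - 1);
		printf("page starts with: %.15s, byte %zu = %d\n",
		       page, sizeof(data), page[sizeof(data)]);
		return 0;
	}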
*/ last = list_entry(pages->prev, struct page, lru); - start = (loff_t)last->index << PAGE_CACHE_SHIFT; + start = (loff_t)last->index << PAGE_SHIFT; if (start >= i_size_read(inode)) goto out_unlock; @@ -511,12 +511,12 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, unsigned int *start, unsigned int *end) { - unsigned int cluster_start = 0, cluster_end = PAGE_CACHE_SIZE; + unsigned int cluster_start = 0, cluster_end = PAGE_SIZE; - if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) { + if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) { unsigned int cpp; - cpp = 1 << (PAGE_CACHE_SHIFT - osb->s_clustersize_bits); + cpp = 1 << (PAGE_SHIFT - osb->s_clustersize_bits); cluster_start = cpos % cpp; cluster_start = cluster_start << osb->s_clustersize_bits; @@ -684,13 +684,13 @@ next_bh: return ret; } -#if (PAGE_CACHE_SIZE >= OCFS2_MAX_CLUSTERSIZE) +#if (PAGE_SIZE >= OCFS2_MAX_CLUSTERSIZE) #define OCFS2_MAX_CTXT_PAGES 1 #else -#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_CACHE_SIZE) +#define OCFS2_MAX_CTXT_PAGES (OCFS2_MAX_CLUSTERSIZE / PAGE_SIZE) #endif -#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_CACHE_SIZE / OCFS2_MIN_CLUSTERSIZE) +#define OCFS2_MAX_CLUSTERS_PER_PAGE (PAGE_SIZE / OCFS2_MIN_CLUSTERSIZE) struct ocfs2_unwritten_extent { struct list_head ue_node; @@ -785,7 +785,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) if (pages[i]) { unlock_page(pages[i]); mark_page_accessed(pages[i]); - page_cache_release(pages[i]); + put_page(pages[i]); } } } @@ -808,7 +808,7 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) } } mark_page_accessed(wc->w_target_page); - page_cache_release(wc->w_target_page); + put_page(wc->w_target_page); } ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); } @@ -857,7 +857,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, wc->w_di_bh = di_bh; wc->w_type = type; - if (unlikely(PAGE_CACHE_SHIFT > osb->s_clustersize_bits)) + if (unlikely(PAGE_SHIFT > osb->s_clustersize_bits)) wc->w_large_pages = 1; else wc->w_large_pages = 0; @@ -920,7 +920,7 @@ static void ocfs2_write_failure(struct inode *inode, loff_t user_pos, unsigned user_len) { int i; - unsigned from = user_pos & (PAGE_CACHE_SIZE - 1), + unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; struct page *tmppage; @@ -960,7 +960,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, (page_offset(page) <= user_pos)); if (page == wc->w_target_page) { - map_from = user_pos & (PAGE_CACHE_SIZE - 1); + map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) @@ -1034,7 +1034,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, struct inode *inode = mapping->host; loff_t last_byte; - target_index = user_pos >> PAGE_CACHE_SHIFT; + target_index = user_pos >> PAGE_SHIFT; /* * Figure out how many pages we'll be manipulating here. 
For @@ -1053,14 +1053,14 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, */ last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); - end_index = ((last_byte - 1) >> PAGE_CACHE_SHIFT) + 1; + end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; if ((start + wc->w_num_pages) > end_index) wc->w_num_pages = end_index - start; } else { wc->w_num_pages = 1; start = target_index; } - end_index = (user_pos + user_len - 1) >> PAGE_CACHE_SHIFT; + end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; for(i = 0; i < wc->w_num_pages; i++) { index = start + i; @@ -1082,7 +1082,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, goto out; } - page_cache_get(mmap_page); + get_page(mmap_page); wc->w_pages[i] = mmap_page; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && @@ -1272,7 +1272,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, { struct ocfs2_write_cluster_desc *desc; - wc->w_target_from = pos & (PAGE_CACHE_SIZE - 1); + wc->w_target_from = pos & (PAGE_SIZE - 1); wc->w_target_to = wc->w_target_from + len; if (alloc == 0) @@ -1309,7 +1309,7 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb, &wc->w_target_to); } else { wc->w_target_from = 0; - wc->w_target_to = PAGE_CACHE_SIZE; + wc->w_target_to = PAGE_SIZE; } } @@ -1981,7 +1981,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, struct page *page, void *fsdata) { int i, ret; - unsigned from, to, start = pos & (PAGE_CACHE_SIZE - 1); + unsigned from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; @@ -2027,8 +2027,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping, from = wc->w_target_from; to = wc->w_target_to; - BUG_ON(from > PAGE_CACHE_SIZE || - to > PAGE_CACHE_SIZE || + BUG_ON(from > PAGE_SIZE || + to > PAGE_SIZE || to < from); } else { /* @@ -2037,7 +2037,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, * to flush their entire range. 
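The end_index computation in ocfs2_grab_pages_for_write() above converts "one past the last byte" into "one past the last page index". A tiny sketch of the boundary cases (end_index() is a local helper, not a kernel function):

	#include <stdio.h>

	#define PAGE_SHIFT 12

	static unsigned long end_index(unsigned long long last_byte)
	{
		/* last_byte is one past the final byte written; must be >= 1 */
		return ((last_byte - 1) >> PAGE_SHIFT) + 1;
	}

	int main(void)
	{
		printf("%lu\n", end_index(1));      /* 1: byte 0 lives in page 0 */
		printf("%lu\n", end_index(4096));   /* 1: exactly one full page  */
		printf("%lu\n", end_index(4097));   /* 2: spills into page 1     */
		return 0;
	}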
*/ from = 0; - to = PAGE_CACHE_SIZE; + to = PAGE_SIZE; } if (page_has_buffers(tmppage)) { diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index bd15929b5f92..1934abb6b680 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -417,13 +417,13 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, bio->bi_private = wc; bio->bi_end_io = o2hb_bio_end_io; - vec_start = (cs << bits) % PAGE_CACHE_SIZE; + vec_start = (cs << bits) % PAGE_SIZE; while(cs < max_slots) { current_page = cs / spp; page = reg->hr_slot_data[current_page]; - vec_len = min(PAGE_CACHE_SIZE - vec_start, - (max_slots-cs) * (PAGE_CACHE_SIZE/spp) ); + vec_len = min(PAGE_SIZE - vec_start, + (max_slots-cs) * (PAGE_SIZE/spp) ); mlog(ML_HB_BIO, "page %d, vec_len = %u, vec_start = %u\n", current_page, vec_len, vec_start); @@ -431,7 +431,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg, len = bio_add_page(bio, page, vec_len, vec_start); if (len != vec_len) break; - cs += vec_len / (PAGE_CACHE_SIZE/spp); + cs += vec_len / (PAGE_SIZE/spp); vec_start = 0; } @@ -1576,7 +1576,7 @@ static ssize_t o2hb_region_dev_show(struct config_item *item, char *page) static void o2hb_init_region_params(struct o2hb_region *reg) { - reg->hr_slots_per_page = PAGE_CACHE_SIZE >> reg->hr_block_bits; + reg->hr_slots_per_page = PAGE_SIZE >> reg->hr_block_bits; reg->hr_timeout_ms = O2HB_REGION_TIMEOUT_MS; mlog(ML_HEARTBEAT, "hr_start_block = %llu, hr_blocks = %u\n", diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 03768bb3aab1..47b3b2d4e775 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -571,8 +571,8 @@ static int dlmfs_fill_super(struct super_block * sb, int silent) { sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = DLMFS_MAGIC; sb->s_op = &dlmfs_ops; sb->s_root = d_make_root(dlmfs_get_root_inode(sb)); diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index c18ab45f8d21..5308841756be 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -770,14 +770,14 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, { struct address_space *mapping = inode->i_mapping; struct page *page; - unsigned long index = abs_from >> PAGE_CACHE_SHIFT; + unsigned long index = abs_from >> PAGE_SHIFT; handle_t *handle; int ret = 0; unsigned zero_from, zero_to, block_start, block_end; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; BUG_ON(abs_from >= abs_to); - BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT)); + BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT)); BUG_ON(abs_from & (inode->i_blkbits - 1)); handle = ocfs2_zero_start_ordered_transaction(inode, di_bh); @@ -794,10 +794,10 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, } /* Get the offsets within the page that we want to zero */ - zero_from = abs_from & (PAGE_CACHE_SIZE - 1); - zero_to = abs_to & (PAGE_CACHE_SIZE - 1); + zero_from = abs_from & (PAGE_SIZE - 1); + zero_to = abs_to & (PAGE_SIZE - 1); if (!zero_to) - zero_to = PAGE_CACHE_SIZE; + zero_to = PAGE_SIZE; trace_ocfs2_write_zero_page( (unsigned long long)OCFS2_I(inode)->ip_blkno, @@ -851,7 +851,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, out_unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out_commit_trans: if (handle) ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle); @@ -959,7 +959,7 @@ static int 
ocfs2_zero_extend_range(struct inode *inode, u64 range_start, BUG_ON(range_start >= range_end); while (zero_pos < range_end) { - next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE; + next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE; if (next_pos > range_end) next_pos = range_end; rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh); diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 9ea081f4e6e4..71545ad4628c 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -65,13 +65,13 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; loff_t pos = page_offset(page); - unsigned int len = PAGE_CACHE_SIZE; + unsigned int len = PAGE_SIZE; pgoff_t last_index; struct page *locked_page = NULL; void *fsdata; loff_t size = i_size_read(inode); - last_index = (size - 1) >> PAGE_CACHE_SHIFT; + last_index = (size - 1) >> PAGE_SHIFT; /* * There are cases that lead to the page no longer bebongs to the @@ -102,7 +102,7 @@ static int __ocfs2_page_mkwrite(struct file *file, struct buffer_head *di_bh, * because the "write" would invalidate their data. */ if (page->index == last_index) - len = ((size - 1) & ~PAGE_CACHE_MASK) + 1; + len = ((size - 1) & ~PAGE_MASK) + 1; ret = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, &locked_page, &fsdata, di_bh, page); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 6cf6538a0651..e63af7ddfe68 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -822,10 +822,10 @@ static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb, u32 clusters = pg_index; unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; - if (unlikely(PAGE_CACHE_SHIFT > cbits)) - clusters = pg_index << (PAGE_CACHE_SHIFT - cbits); - else if (PAGE_CACHE_SHIFT < cbits) - clusters = pg_index >> (cbits - PAGE_CACHE_SHIFT); + if (unlikely(PAGE_SHIFT > cbits)) + clusters = pg_index << (PAGE_SHIFT - cbits); + else if (PAGE_SHIFT < cbits) + clusters = pg_index >> (cbits - PAGE_SHIFT); return clusters; } @@ -839,10 +839,10 @@ static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb, unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; pgoff_t index = clusters; - if (PAGE_CACHE_SHIFT > cbits) { - index = (pgoff_t)clusters >> (PAGE_CACHE_SHIFT - cbits); - } else if (PAGE_CACHE_SHIFT < cbits) { - index = (pgoff_t)clusters << (cbits - PAGE_CACHE_SHIFT); + if (PAGE_SHIFT > cbits) { + index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits); + } else if (PAGE_SHIFT < cbits) { + index = (pgoff_t)clusters << (cbits - PAGE_SHIFT); } return index; @@ -853,8 +853,8 @@ static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb) unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits; unsigned int pages_per_cluster = 1; - if (PAGE_CACHE_SHIFT < cbits) - pages_per_cluster = 1 << (cbits - PAGE_CACHE_SHIFT); + if (PAGE_SHIFT < cbits) + pages_per_cluster = 1 << (cbits - PAGE_SHIFT); return pages_per_cluster; } diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 3892f3c079ca..ab6a6cdcf91c 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -867,6 +867,10 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid) int status = 0; trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type); + if (!sb_has_quota_loaded(sb, type)) { + status = -ESRCH; + goto out; + } status = ocfs2_lock_global_qf(info, 0); if (status < 0) goto out; @@ -878,8 +882,11 @@ static int ocfs2_get_next_id(struct 
super_block *sb, struct kqid *qid) out_global: ocfs2_unlock_global_qf(info, 0); out: - /* Avoid logging ENOENT since it just means there isn't next ID */ - if (status && status != -ENOENT) + /* + * Avoid logging ENOENT since it just means there isn't next ID and + * ESRCH which means quota isn't enabled for the filesystem. + */ + if (status && status != -ENOENT && status != -ESRCH) mlog_errno(status); return status; } diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 3eff031aaf26..744d5d90c363 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2937,16 +2937,16 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, end = i_size_read(inode); while (offset < end) { - page_index = offset >> PAGE_CACHE_SHIFT; - map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; + page_index = offset >> PAGE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) map_end = end; /* from, to is the offset within the page. */ - from = offset & (PAGE_CACHE_SIZE - 1); - to = PAGE_CACHE_SIZE; - if (map_end & (PAGE_CACHE_SIZE - 1)) - to = map_end & (PAGE_CACHE_SIZE - 1); + from = offset & (PAGE_SIZE - 1); + to = PAGE_SIZE; + if (map_end & (PAGE_SIZE - 1)) + to = map_end & (PAGE_SIZE - 1); page = find_or_create_page(mapping, page_index, GFP_NOFS); if (!page) { @@ -2956,10 +2956,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, } /* - * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page + * In case PAGE_SIZE <= CLUSTER_SIZE, This page * can't be dirtied before we CoW it out. */ - if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) + if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) BUG_ON(PageDirty(page)); if (!PageUptodate(page)) { @@ -2987,7 +2987,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, mark_page_accessed(page); unlock: unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; offset = map_end; if (ret) @@ -3165,8 +3165,8 @@ int ocfs2_cow_sync_writeback(struct super_block *sb, } while (offset < end) { - page_index = offset >> PAGE_CACHE_SHIFT; - map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT; + page_index = offset >> PAGE_SHIFT; + map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) map_end = end; @@ -3182,7 +3182,7 @@ int ocfs2_cow_sync_writeback(struct super_block *sb, mark_page_accessed(page); unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; offset = map_end; if (ret) diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 7db631e1c8b0..d7cae3327de5 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -605,8 +605,8 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits, /* * We might be limited by page cache size. */ - if (bytes > PAGE_CACHE_SIZE) { - bytes = PAGE_CACHE_SIZE; + if (bytes > PAGE_SIZE) { + bytes = PAGE_SIZE; trim = 1; /* * Shift by 31 here so that we don't get larger than diff --git a/fs/orangefs/dir.c b/fs/orangefs/dir.c index ba7dec40771e..324f0af40d7b 100644 --- a/fs/orangefs/dir.c +++ b/fs/orangefs/dir.c @@ -153,7 +153,6 @@ static int orangefs_readdir(struct file *file, struct dir_context *ctx) struct dentry *dentry = file->f_path.dentry; struct orangefs_kernel_op_s *new_op = NULL; struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(dentry->d_inode); - int buffer_full = 0; struct orangefs_readdir_response_s readdir_response; void *dents_buf; int i = 0; @@ -350,8 +349,7 @@ get_new_buffer_index: /* * Did we hit the end of the directory? 
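The quota_global.c change above teaches ocfs2_get_next_id() to return -ESRCH when quota is not loaded, and to keep both -ENOENT and -ESRCH out of the logs, since neither indicates a failure. A userspace illustration of that filtering (log_if_real_error() is invented for the example):

	#include <errno.h>
	#include <stdio.h>

	static void log_if_real_error(int status)
	{
		if (status && status != -ENOENT && status != -ESRCH)
			fprintf(stderr, "ocfs2: get_next_id failed: %d\n", status);
	}

	int main(void)
	{
		log_if_real_error(0);        /* success: silent      */
		log_if_real_error(-ENOENT);  /* no next ID: silent   */
		log_if_real_error(-ESRCH);   /* quota off: silent    */
		log_if_real_error(-EIO);     /* real failure: logged */
		return 0;
	}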
*/ - if (readdir_response.token == ORANGEFS_READDIR_END && - !buffer_full) { + if (readdir_response.token == ORANGEFS_READDIR_END) { gossip_debug(GOSSIP_DIR_DEBUG, "End of dir detected; setting ctx->pos to ORANGEFS_READDIR_END.\n"); ctx->pos = ORANGEFS_READDIR_END; diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c index 2382e267b49e..85640e955cde 100644 --- a/fs/orangefs/inode.c +++ b/fs/orangefs/inode.c @@ -18,8 +18,8 @@ static int read_one_page(struct page *page) int max_block; ssize_t bytes_read = 0; struct inode *inode = page->mapping->host; - const __u32 blocksize = PAGE_CACHE_SIZE; /* inode->i_blksize */ - const __u32 blockbits = PAGE_CACHE_SHIFT; /* inode->i_blkbits */ + const __u32 blocksize = PAGE_SIZE; /* inode->i_blksize */ + const __u32 blockbits = PAGE_SHIFT; /* inode->i_blkbits */ struct iov_iter to; struct bio_vec bv = {.bv_page = page, .bv_len = PAGE_SIZE}; @@ -86,7 +86,7 @@ static int orangefs_readpages(struct file *file, "failure adding page to cache, read_one_page returned: %d\n", ret); } else { - page_cache_release(page); + put_page(page); } } BUG_ON(!list_empty(pages)); @@ -204,22 +204,8 @@ static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr) if (ret != 0) return ret; - /* - * Only change the c/mtime if we are changing the size or we are - * explicitly asked to change it. This handles the semantic difference - * between truncate() and ftruncate() as implemented in the VFS. - * - * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a - * special case where we need to update the times despite not having - * these flags set. For all other operations the VFS set these flags - * explicitly if it wants a timestamp update. - */ - if (orig_size != i_size_read(inode) && - !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) { - iattr->ia_ctime = iattr->ia_mtime = - current_fs_time(inode->i_sb); + if (orig_size != i_size_read(inode)) iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME; - } return ret; } @@ -328,7 +314,7 @@ static int orangefs_init_iops(struct inode *inode) case S_IFREG: inode->i_op = &orangefs_file_inode_operations; inode->i_fop = &orangefs_file_operations; - inode->i_blkbits = PAGE_CACHE_SHIFT; + inode->i_blkbits = PAGE_SHIFT; break; case S_IFLNK: inode->i_op = &orangefs_symlink_inode_operations; @@ -456,7 +442,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir, inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - inode->i_size = PAGE_CACHE_SIZE; + inode->i_size = PAGE_SIZE; inode->i_rdev = dev; error = insert_inode_locked4(inode, hash, orangefs_test_inode, ref); diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c index 1f8acc9f9a88..75375e90a63f 100644 --- a/fs/orangefs/orangefs-bufmap.c +++ b/fs/orangefs/orangefs-bufmap.c @@ -170,7 +170,7 @@ orangefs_bufmap_unmap(struct orangefs_bufmap *bufmap) int i; for (i = 0; i < bufmap->page_count; i++) - page_cache_release(bufmap->page_array[i]); + put_page(bufmap->page_array[i]); } static void @@ -299,7 +299,7 @@ orangefs_bufmap_map(struct orangefs_bufmap *bufmap, for (i = 0; i < ret; i++) { SetPageError(bufmap->page_array[i]); - page_cache_release(bufmap->page_array[i]); + put_page(bufmap->page_array[i]); } return -ENOMEM; } diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index 19670b8b4053..1714a737d556 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c @@ -126,8 +126,7 @@ out: void 
orangefs_debugfs_cleanup(void) { - if (debug_dir) - debugfs_remove_recursive(debug_dir); + debugfs_remove_recursive(debug_dir); } /* open ORANGEFS_KMOD_DEBUG_HELP_FILE */ diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c index 40f5163b56aa..2d129b5886ee 100644 --- a/fs/orangefs/orangefs-utils.c +++ b/fs/orangefs/orangefs-utils.c @@ -303,7 +303,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size) } break; case S_IFDIR: - inode->i_size = PAGE_CACHE_SIZE; + inode->i_size = PAGE_SIZE; orangefs_inode->blksize = (1 << inode->i_blkbits); spin_lock(&inode->i_lock); inode_set_bytes(inode, inode->i_size); @@ -315,9 +315,13 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size) inode->i_size = (loff_t)strlen(new_op-> downcall.resp.getattr.link_target); orangefs_inode->blksize = (1 << inode->i_blkbits); - strlcpy(orangefs_inode->link_target, + ret = strscpy(orangefs_inode->link_target, new_op->downcall.resp.getattr.link_target, ORANGEFS_NAME_MAX); + if (ret == -E2BIG) { + ret = -EIO; + goto out; + } inode->i_link = orangefs_inode->link_target; } break; diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h index 50578a28bd9e..1efc6f8a5224 100644 --- a/fs/orangefs/protocol.h +++ b/fs/orangefs/protocol.h @@ -1,3 +1,4 @@ +#include <linux/kernel.h> #include <linux/types.h> #include <linux/spinlock_types.h> #include <linux/slab.h> @@ -74,8 +75,8 @@ static inline void ORANGEFS_khandle_to(const struct orangefs_khandle *kh, void *p, int size) { - memset(p, 0, size); memcpy(p, kh->u, 16); + memset(p + 16, 0, size - 16); } @@ -427,26 +428,28 @@ struct ORANGEFS_dev_map_desc { /* gossip.h *****************************************************************/ #ifdef GOSSIP_DISABLE_DEBUG -#define gossip_debug(mask, format, f...) do {} while (0) +#define gossip_debug(mask, fmt, ...) \ +do { \ + if (0) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ +} while (0) #else extern __u64 gossip_debug_mask; extern struct client_debug_mask client_debug_mask; /* try to avoid function call overhead by checking masks in macro */ -#define gossip_debug(mask, format, f...) \ -do { \ - if (gossip_debug_mask & mask) \ - printk(format, ##f); \ +#define gossip_debug(mask, fmt, ...) \ +do { \ + if (gossip_debug_mask & (mask)) \ + printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ } while (0) #endif /* GOSSIP_DISABLE_DEBUG */ /* do file and line number printouts w/ the GNU preprocessor */ -#define gossip_ldebug(mask, format, f...) \ - gossip_debug(mask, "%s: " format, __func__, ##f) - -#define gossip_err printk -#define gossip_lerr(format, f...) \ - gossip_err("%s line %d: " format, \ - __FILE__, \ - __LINE__, \ - ##f) +#define gossip_ldebug(mask, fmt, ...) \ + gossip_debug(mask, "%s: " fmt, __func__, ##__VA_ARGS__) + +#define gossip_err pr_err +#define gossip_lerr(fmt, ...) 
\ + gossip_err("%s line %d: " fmt, \ + __FILE__, __LINE__, ##__VA_ARGS__) diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c index ef5da7538cd5..63a6280d8c3a 100644 --- a/fs/orangefs/xattr.c +++ b/fs/orangefs/xattr.c @@ -73,10 +73,6 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix, "%s: prefix %s name %s, buffer_size %zd\n", __func__, prefix, name, size); - if (name == NULL || (size > 0 && buffer == NULL)) { - gossip_err("orangefs_inode_getxattr: bogus NULL pointers\n"); - return -EINVAL; - } if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) { gossip_err("Invalid key length (%d)\n", (int)(strlen(name) + strlen(prefix))); @@ -146,8 +142,8 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix, goto out_release_op; } - memset(buffer, 0, size); memcpy(buffer, new_op->downcall.resp.getxattr.val, length); + memset(buffer + length, 0, size - length); gossip_debug(GOSSIP_XATTR_DEBUG, "orangefs_inode_getxattr: inode %pU " "key %s key_sz %d, val_len %d\n", @@ -239,8 +235,7 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix, "%s: prefix %s, name %s, buffer_size %zd\n", __func__, prefix, name, size); - if (size < 0 || - size >= ORANGEFS_MAX_XATTR_VALUELEN || + if (size >= ORANGEFS_MAX_XATTR_VALUELEN || flags < 0) { gossip_err("orangefs_inode_setxattr: bogus values of size(%d), flags(%d)\n", (int)size, @@ -248,12 +243,6 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix, return -EINVAL; } - if (name == NULL || - (size > 0 && value == NULL)) { - gossip_err("orangefs_inode_setxattr: bogus NULL pointers!\n"); - return -EINVAL; - } - internal_flag = convert_to_internal_xattr_flags(flags); if (prefix) { @@ -353,10 +342,6 @@ ssize_t orangefs_listxattr(struct dentry *dentry, char *buffer, size_t size) gossip_err("%s: bogus NULL pointers\n", __func__); return -EINVAL; } - if (size < 0) { - gossip_err("Invalid size (%d)\n", (int)size); - return -EINVAL; - } down_read(&orangefs_inode->xattr_sem); new_op = op_alloc(ORANGEFS_VFS_OP_LISTXATTR); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index ef64984c9bbc..5d972e6cd3fe 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -295,6 +295,37 @@ static void ovl_dentry_release(struct dentry *dentry) } } +static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode) +{ + struct dentry *real; + + if (d_is_dir(dentry)) { + if (!inode || inode == d_inode(dentry)) + return dentry; + goto bug; + } + + real = ovl_dentry_upper(dentry); + if (real && (!inode || inode == d_inode(real))) + return real; + + real = ovl_dentry_lower(dentry); + if (!real) + goto bug; + + if (!inode || inode == d_inode(real)) + return real; + + /* Handle recursion */ + if (real->d_flags & DCACHE_OP_REAL) + return real->d_op->d_real(real, inode); + +bug: + WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry, + inode ? inode->i_sb->s_id : "NULL", inode ? 
inode->i_ino : 0); + return dentry; +} + static int ovl_dentry_revalidate(struct dentry *dentry, unsigned int flags) { struct ovl_entry *oe = dentry->d_fsdata; @@ -339,11 +370,13 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags) static const struct dentry_operations ovl_dentry_operations = { .d_release = ovl_dentry_release, .d_select_inode = ovl_d_select_inode, + .d_real = ovl_d_real, }; static const struct dentry_operations ovl_reval_dentry_operations = { .d_release = ovl_dentry_release, .d_select_inode = ovl_d_select_inode, + .d_real = ovl_d_real, .d_revalidate = ovl_dentry_revalidate, .d_weak_revalidate = ovl_dentry_weak_revalidate, }; diff --git a/fs/pipe.c b/fs/pipe.c index ab8dad3ccb6a..0d3f5165cb0b 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -134,7 +134,7 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe, if (page_count(page) == 1 && !pipe->tmp_page) pipe->tmp_page = page; else - page_cache_release(page); + put_page(page); } /** @@ -180,7 +180,7 @@ EXPORT_SYMBOL(generic_pipe_buf_steal); */ void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - page_cache_get(buf->page); + get_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_get); @@ -211,7 +211,7 @@ EXPORT_SYMBOL(generic_pipe_buf_confirm); void generic_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - page_cache_release(buf->page); + put_page(buf->page); } EXPORT_SYMBOL(generic_pipe_buf_release); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 9df431642042..229cb546bee0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -553,7 +553,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, if (radix_tree_exceptional_entry(page)) mss->swap += PAGE_SIZE; else - page_cache_release(page); + put_page(page); return; } diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 55bb57e6a30d..8afe10cf7df8 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -279,12 +279,12 @@ static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf) if (!page) return VM_FAULT_OOM; if (!PageUptodate(page)) { - offset = (loff_t) index << PAGE_CACHE_SHIFT; + offset = (loff_t) index << PAGE_SHIFT; buf = __va((page_to_pfn(page) << PAGE_SHIFT)); rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0); if (rc < 0) { unlock_page(page); - page_cache_release(page); + put_page(page); return (rc == -ENOMEM) ? 
VM_FAULT_OOM : VM_FAULT_SIGBUS; } SetPageUptodate(page); diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c index dc645b66cd79..45d6110744cb 100644 --- a/fs/pstore/inode.c +++ b/fs/pstore/inode.c @@ -420,8 +420,8 @@ static int pstore_fill_super(struct super_block *sb, void *data, int silent) pstore_sb = sb; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = PSTOREFS_MAGIC; sb->s_op = &pstore_ops; sb->s_time_gran = 1; diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c index e1f37278cf97..144ceda4948e 100644 --- a/fs/qnx6/dir.c +++ b/fs/qnx6/dir.c @@ -35,9 +35,9 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n) static unsigned last_entry(struct inode *inode, unsigned long page_nr) { unsigned long last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte / QNX6_DIR_ENTRY_SIZE; } @@ -47,9 +47,9 @@ static struct qnx6_long_filename *qnx6_longname(struct super_block *sb, { struct qnx6_sb_info *sbi = QNX6_SB(sb); u32 s = fs32_to_cpu(sbi, de->de_long_inode); /* in block units */ - u32 n = s >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits); /* in pages */ + u32 n = s >> (PAGE_SHIFT - sb->s_blocksize_bits); /* in pages */ /* within page */ - u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_CACHE_MASK; + u32 offs = (s << sb->s_blocksize_bits) & ~PAGE_MASK; struct address_space *mapping = sbi->longfile->i_mapping; struct page *page = read_mapping_page(mapping, n, NULL); if (IS_ERR(page)) @@ -115,8 +115,8 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) struct qnx6_sb_info *sbi = QNX6_SB(s); loff_t pos = ctx->pos & ~(QNX6_DIR_ENTRY_SIZE - 1); unsigned long npages = dir_pages(inode); - unsigned long n = pos >> PAGE_CACHE_SHIFT; - unsigned start = (pos & ~PAGE_CACHE_MASK) / QNX6_DIR_ENTRY_SIZE; + unsigned long n = pos >> PAGE_SHIFT; + unsigned start = (pos & ~PAGE_MASK) / QNX6_DIR_ENTRY_SIZE; bool done = false; ctx->pos = pos; @@ -131,7 +131,7 @@ static int qnx6_readdir(struct file *file, struct dir_context *ctx) if (IS_ERR(page)) { pr_err("%s(): read failed\n", __func__); - ctx->pos = (n + 1) << PAGE_CACHE_SHIFT; + ctx->pos = (n + 1) << PAGE_SHIFT; return PTR_ERR(page); } de = ((struct qnx6_dir_entry *)page_address(page)) + start; diff --git a/fs/qnx6/inode.c b/fs/qnx6/inode.c index 47bb1de07155..1192422a1c56 100644 --- a/fs/qnx6/inode.c +++ b/fs/qnx6/inode.c @@ -542,8 +542,8 @@ struct inode *qnx6_iget(struct super_block *sb, unsigned ino) iget_failed(inode); return ERR_PTR(-EIO); } - n = (ino - 1) >> (PAGE_CACHE_SHIFT - QNX6_INODE_SIZE_BITS); - offs = (ino - 1) & (~PAGE_CACHE_MASK >> QNX6_INODE_SIZE_BITS); + n = (ino - 1) >> (PAGE_SHIFT - QNX6_INODE_SIZE_BITS); + offs = (ino - 1) & (~PAGE_MASK >> QNX6_INODE_SIZE_BITS); mapping = sbi->inodes->i_mapping; page = read_mapping_page(mapping, n, NULL); if (IS_ERR(page)) { diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h index d3fb2b698800..f23b5c4a66ad 100644 --- a/fs/qnx6/qnx6.h +++ b/fs/qnx6/qnx6.h @@ -128,7 +128,7 @@ extern struct qnx6_super_block *qnx6_mmi_fill_super(struct super_block *s, static inline void qnx6_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } extern unsigned qnx6_find_entry(int len, struct inode *dir, const char *name, diff --git 
a/fs/quota/dquot.c b/fs/quota/dquot.c index ba827daea5a0..ff21980d0119 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -2047,11 +2047,20 @@ int dquot_get_next_id(struct super_block *sb, struct kqid *qid) struct quota_info *dqopt = sb_dqopt(sb); int err; - if (!dqopt->ops[qid->type]->get_next_id) - return -ENOSYS; + mutex_lock(&dqopt->dqonoff_mutex); + if (!sb_has_quota_active(sb, qid->type)) { + err = -ESRCH; + goto out; + } + if (!dqopt->ops[qid->type]->get_next_id) { + err = -ENOSYS; + goto out; + } mutex_lock(&dqopt->dqio_mutex); err = dqopt->ops[qid->type]->get_next_id(sb, qid); mutex_unlock(&dqopt->dqio_mutex); +out: + mutex_unlock(&dqopt->dqonoff_mutex); return err; } diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 38981b037524..1ab6e6c2e60e 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -223,8 +223,8 @@ int ramfs_fill_super(struct super_block *sb, void *data, int silent) return err; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = RAMFS_MAGIC; sb->s_op = &ramfs_ops; sb->s_time_gran = 1; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 9424a4ba93a9..389773711de4 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -180,11 +180,11 @@ int reiserfs_commit_page(struct inode *inode, struct page *page, int partial = 0; unsigned blocksize; struct buffer_head *bh, *head; - unsigned long i_size_index = inode->i_size >> PAGE_CACHE_SHIFT; + unsigned long i_size_index = inode->i_size >> PAGE_SHIFT; int new; int logit = reiserfs_file_data_log(inode); struct super_block *s = inode->i_sb; - int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; + int bh_per_page = PAGE_SIZE / s->s_blocksize; struct reiserfs_transaction_handle th; int ret = 0; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index ae9e5b308cf9..d5c2e9c865de 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -386,7 +386,7 @@ static int _get_block_create_0(struct inode *inode, sector_t block, goto finished; } /* read file tail into part of page */ - offset = (cpu_key_k_offset(&key) - 1) & (PAGE_CACHE_SIZE - 1); + offset = (cpu_key_k_offset(&key) - 1) & (PAGE_SIZE - 1); copy_item_head(&tmp_ih, ih); /* @@ -587,10 +587,10 @@ static int convert_tail_for_hole(struct inode *inode, return -EIO; /* always try to read until the end of the block */ - tail_start = tail_offset & (PAGE_CACHE_SIZE - 1); + tail_start = tail_offset & (PAGE_SIZE - 1); tail_end = (tail_start | (bh_result->b_size - 1)) + 1; - index = tail_offset >> PAGE_CACHE_SHIFT; + index = tail_offset >> PAGE_SHIFT; /* * hole_page can be zero in case of direct_io, we are sure * that we cannot get here if we write with O_DIRECT into tail page @@ -629,7 +629,7 @@ static int convert_tail_for_hole(struct inode *inode, unlock: if (tail_page != hole_page) { unlock_page(tail_page); - page_cache_release(tail_page); + put_page(tail_page); } out: return retval; @@ -2189,11 +2189,11 @@ static int grab_tail_page(struct inode *inode, * we want the page with the last byte in the file, * not the page that will hold the next byte for appending */ - unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; + unsigned long index = (inode->i_size - 1) >> PAGE_SHIFT; unsigned long pos = 0; unsigned long start = 0; unsigned long blocksize = inode->i_sb->s_blocksize; - unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1); + unsigned long offset = (inode->i_size) & (PAGE_SIZE - 1); struct 
buffer_head *bh; struct buffer_head *head; struct page *page; @@ -2251,7 +2251,7 @@ out: unlock: unlock_page(page); - page_cache_release(page); + put_page(page); return error; } @@ -2265,7 +2265,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps) { struct reiserfs_transaction_handle th; /* we want the offset for the first byte after the end of the file */ - unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + unsigned long offset = inode->i_size & (PAGE_SIZE - 1); unsigned blocksize = inode->i_sb->s_blocksize; unsigned length; struct page *page = NULL; @@ -2345,7 +2345,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps) } } unlock_page(page); - page_cache_release(page); + put_page(page); } reiserfs_write_unlock(inode->i_sb); @@ -2354,7 +2354,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps) out: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } reiserfs_write_unlock(inode->i_sb); @@ -2426,7 +2426,7 @@ research: } else if (is_direct_le_ih(ih)) { char *p; p = page_address(bh_result->b_page); - p += (byte_offset - 1) & (PAGE_CACHE_SIZE - 1); + p += (byte_offset - 1) & (PAGE_SIZE - 1); copy_size = ih_item_len(ih) - pos_in_item; fs_gen = get_generation(inode->i_sb); @@ -2525,7 +2525,7 @@ static int reiserfs_write_full_page(struct page *page, struct writeback_control *wbc) { struct inode *inode = page->mapping->host; - unsigned long end_index = inode->i_size >> PAGE_CACHE_SHIFT; + unsigned long end_index = inode->i_size >> PAGE_SHIFT; int error = 0; unsigned long block; sector_t last_block; @@ -2535,7 +2535,7 @@ static int reiserfs_write_full_page(struct page *page, int checked = PageChecked(page); struct reiserfs_transaction_handle th; struct super_block *s = inode->i_sb; - int bh_per_page = PAGE_CACHE_SIZE / s->s_blocksize; + int bh_per_page = PAGE_SIZE / s->s_blocksize; th.t_trans_id = 0; /* no logging allowed when nonblocking or from PF_MEMALLOC */ @@ -2564,16 +2564,16 @@ static int reiserfs_write_full_page(struct page *page, if (page->index >= end_index) { unsigned last_offset; - last_offset = inode->i_size & (PAGE_CACHE_SIZE - 1); + last_offset = inode->i_size & (PAGE_SIZE - 1); /* no file contents in this page */ if (page->index >= end_index + 1 || !last_offset) { unlock_page(page); return 0; } - zero_user_segment(page, last_offset, PAGE_CACHE_SIZE); + zero_user_segment(page, last_offset, PAGE_SIZE); } bh = head; - block = page->index << (PAGE_CACHE_SHIFT - s->s_blocksize_bits); + block = page->index << (PAGE_SHIFT - s->s_blocksize_bits); last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; /* first map all the buffers, logging any direct items we find */ do { @@ -2774,7 +2774,7 @@ static int reiserfs_write_begin(struct file *file, *fsdata = (void *)(unsigned long)flags; } - index = pos >> PAGE_CACHE_SHIFT; + index = pos >> PAGE_SHIFT; page = grab_cache_page_write_begin(mapping, index, flags); if (!page) return -ENOMEM; @@ -2822,7 +2822,7 @@ static int reiserfs_write_begin(struct file *file, } if (ret) { unlock_page(page); - page_cache_release(page); + put_page(page); /* Truncate allocated blocks */ reiserfs_truncate_failed_write(inode); } @@ -2909,7 +2909,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping, else th = NULL; - start = pos & (PAGE_CACHE_SIZE - 1); + start = pos & (PAGE_SIZE - 1); if (unlikely(copied < len)) { if (!PageUptodate(page)) copied = 0; @@ -2974,7 +2974,7 @@ out: if (locked) reiserfs_write_unlock(inode->i_sb); 
unlock_page(page); - page_cache_release(page); + put_page(page); if (pos + len > inode->i_size) reiserfs_truncate_failed_write(inode); @@ -2996,7 +2996,7 @@ int reiserfs_commit_write(struct file *f, struct page *page, unsigned from, unsigned to) { struct inode *inode = page->mapping->host; - loff_t pos = ((loff_t) page->index << PAGE_CACHE_SHIFT) + to; + loff_t pos = ((loff_t) page->index << PAGE_SHIFT) + to; int ret = 0; int update_sd = 0; struct reiserfs_transaction_handle *th = NULL; @@ -3181,7 +3181,7 @@ static void reiserfs_invalidatepage(struct page *page, unsigned int offset, struct inode *inode = page->mapping->host; unsigned int curr_off = 0; unsigned int stop = offset + length; - int partial_page = (offset || length < PAGE_CACHE_SIZE); + int partial_page = (offset || length < PAGE_SIZE); int ret = 1; BUG_ON(!PageLocked(page)); diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 036a1fc0a8c3..57045f423893 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -203,7 +203,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) * __reiserfs_write_begin on that page. This will force a * reiserfs_get_block to unpack the tail for us. */ - index = inode->i_size >> PAGE_CACHE_SHIFT; + index = inode->i_size >> PAGE_SHIFT; mapping = inode->i_mapping; page = grab_cache_page(mapping, index); retval = -ENOMEM; @@ -221,7 +221,7 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) out_unlock: unlock_page(page); - page_cache_release(page); + put_page(page); out: inode_unlock(inode); diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 44c2bdced1c8..2ace90e981f0 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -599,18 +599,18 @@ static int journal_list_still_alive(struct super_block *s, * This does a check to see if the buffer belongs to one of these * lost pages before doing the final put_bh. If page->mapping was * null, it tries to free buffers on the page, which should make the - * final page_cache_release drop the page from the lru. + * final put_page drop the page from the lru. 
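The journal.c comment above, and release_buffer_page() just below it, depend on the usual page refcounting discipline; this series only renames page_cache_get()/page_cache_release() to get_page()/put_page(). A mock of the pairing with a plain integer refcount (struct page here is a stand-in, not the kernel's):

	#include <stdio.h>

	struct page { int refcount; };

	static void get_page(struct page *p) { p->refcount++; }
	static void put_page(struct page *p)
	{
		if (--p->refcount == 0)
			printf("page freed\n");
	}

	int main(void)
	{
		struct page pg = { .refcount = 1 };  /* page cache's reference */

		get_page(&pg);   /* pin while we inspect/free buffers */
		/* ... try_to_free_buffers(page) would run here ... */
		put_page(&pg);   /* drop our pin */
		put_page(&pg);   /* page cache drops the last reference */
		return 0;
	}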
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; if (!page->mapping && trylock_page(page)) { - page_cache_get(page); + get_page(page); put_bh(bh); if (!page->mapping) try_to_free_buffers(page); unlock_page(page); - page_cache_release(page); + put_page(page); } else { put_bh(bh); } diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 24cbe013240f..5feacd689241 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -1342,7 +1342,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, */ data = kmap_atomic(un_bh->b_page); - off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); + off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_SIZE - 1)); memcpy(data + off, ih_item_body(PATH_PLAST_BUFFER(path), &s_ih), ret_value); @@ -1511,7 +1511,7 @@ static void unmap_buffers(struct page *page, loff_t pos) if (page) { if (page_has_buffers(page)) { - tail_index = pos & (PAGE_CACHE_SIZE - 1); + tail_index = pos & (PAGE_SIZE - 1); cur_index = 0; head = page_buffers(page); bh = head; diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c index f41e19b4bb42..2d5489b0a269 100644 --- a/fs/reiserfs/tail_conversion.c +++ b/fs/reiserfs/tail_conversion.c @@ -151,7 +151,7 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode, */ if (up_to_date_bh) { unsigned pgoff = - (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); + (tail_offset + total_tail - 1) & (PAGE_SIZE - 1); char *kaddr = kmap_atomic(up_to_date_bh->b_page); memset(kaddr + pgoff, 0, blk_size - total_tail); kunmap_atomic(kaddr); @@ -271,7 +271,7 @@ int indirect2direct(struct reiserfs_transaction_handle *th, * the page was locked and this part of the page was up to date when * indirect2direct was called, so we know the bytes are still valid */ - tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); + tail = tail + (pos & (PAGE_SIZE - 1)); PATH_LAST_POSITION(path)++; diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 57e0b2310532..28f5f8b11370 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -415,7 +415,7 @@ out: static inline void reiserfs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } static struct page *reiserfs_get_page(struct inode *dir, size_t n) @@ -427,7 +427,7 @@ static struct page *reiserfs_get_page(struct inode *dir, size_t n) * and an unlink/rmdir has just occurred - GFP_NOFS avoids this */ mapping_set_gfp_mask(mapping, GFP_NOFS); - page = read_mapping_page(mapping, n >> PAGE_CACHE_SHIFT, NULL); + page = read_mapping_page(mapping, n >> PAGE_SHIFT, NULL); if (!IS_ERR(page)) { kmap(page); if (PageError(page)) @@ -526,10 +526,10 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, while (buffer_pos < buffer_size || buffer_pos == 0) { size_t chunk; size_t skip = 0; - size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1)); + size_t page_offset = (file_pos & (PAGE_SIZE - 1)); - if (buffer_size - buffer_pos > PAGE_CACHE_SIZE) - chunk = PAGE_CACHE_SIZE; + if (buffer_size - buffer_pos > PAGE_SIZE) + chunk = PAGE_SIZE; else chunk = buffer_size - buffer_pos; @@ -546,8 +546,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th, struct reiserfs_xattr_header *rxh; skip = file_pos = sizeof(struct reiserfs_xattr_header); - if (chunk + skip > PAGE_CACHE_SIZE) - chunk = PAGE_CACHE_SIZE - skip; + if (chunk + skip > PAGE_SIZE) + chunk = PAGE_SIZE - skip; rxh = (struct reiserfs_xattr_header *)data; rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC); 
rxh->h_hash = cpu_to_le32(xahash); @@ -675,8 +675,8 @@ reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer, char *data; size_t skip = 0; - if (isize - file_pos > PAGE_CACHE_SIZE) - chunk = PAGE_CACHE_SIZE; + if (isize - file_pos > PAGE_SIZE) + chunk = PAGE_SIZE; else chunk = isize - file_pos; diff --git a/fs/splice.c b/fs/splice.c index 9947b5c69664..b018eb485019 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -88,7 +88,7 @@ out_unlock: static void page_cache_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { - page_cache_release(buf->page); + put_page(buf->page); buf->flags &= ~PIPE_BUF_FLAG_LRU; } @@ -268,7 +268,7 @@ EXPORT_SYMBOL_GPL(splice_to_pipe); void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) { - page_cache_release(spd->pages[i]); + put_page(spd->pages[i]); } /* @@ -328,9 +328,9 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, if (splice_grow_spd(pipe, &spd)) return -ENOMEM; - index = *ppos >> PAGE_CACHE_SHIFT; - loff = *ppos & ~PAGE_CACHE_MASK; - req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + index = *ppos >> PAGE_SHIFT; + loff = *ppos & ~PAGE_MASK; + req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT; nr_pages = min(req_pages, spd.nr_pages_max); /* @@ -365,7 +365,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, error = add_to_page_cache_lru(page, mapping, index, mapping_gfp_constraint(mapping, GFP_KERNEL)); if (unlikely(error)) { - page_cache_release(page); + put_page(page); if (error == -EEXIST) continue; break; @@ -385,7 +385,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, * Now loop over the map and see if we need to start IO on any * pages, fill in the partial map, etc. */ - index = *ppos >> PAGE_CACHE_SHIFT; + index = *ppos >> PAGE_SHIFT; nr_pages = spd.nr_pages; spd.nr_pages = 0; for (page_nr = 0; page_nr < nr_pages; page_nr++) { @@ -397,7 +397,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, /* * this_len is the max we'll use from this page */ - this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); + this_len = min_t(unsigned long, len, PAGE_SIZE - loff); page = spd.pages[page_nr]; if (PageReadahead(page)) @@ -426,7 +426,7 @@ retry_lookup: error = -ENOMEM; break; } - page_cache_release(spd.pages[page_nr]); + put_page(spd.pages[page_nr]); spd.pages[page_nr] = page; } /* @@ -456,7 +456,7 @@ fill_it: * i_size must be checked after PageUptodate. */ isize = i_size_read(mapping->host); - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + end_index = (isize - 1) >> PAGE_SHIFT; if (unlikely(!isize || index > end_index)) break; @@ -470,7 +470,7 @@ fill_it: /* * max good bytes in this page */ - plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + plen = ((isize - 1) & ~PAGE_MASK) + 1; if (plen <= loff) break; @@ -494,8 +494,8 @@ fill_it: * we got, 'nr_pages' is how many pages are in the map. 
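 *
 * Worked example of the partial-page math above (illustrative numbers,
 * assuming 4 KiB pages and isize = 10000 bytes):
 *
 *   end_index = (10000 - 1) >> PAGE_SHIFT       = 2
 *   plen      = ((10000 - 1) & ~PAGE_MASK) + 1  = 1808
 *
 * so page 2 is the last page with data and only its first 1808 bytes
 * are valid; everything past plen must not be spliced out.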
*/ while (page_nr < nr_pages) - page_cache_release(spd.pages[page_nr++]); - in->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; + put_page(spd.pages[page_nr++]); + in->f_ra.prev_pos = (loff_t)index << PAGE_SHIFT; if (spd.nr_pages) error = splice_to_pipe(pipe, &spd); @@ -636,8 +636,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, goto shrink_ret; } - offset = *ppos & ~PAGE_CACHE_MASK; - nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + offset = *ppos & ~PAGE_MASK; + nr_pages = (len + offset + PAGE_SIZE - 1) >> PAGE_SHIFT; for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { struct page *page; @@ -647,7 +647,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, if (!page) goto err; - this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); + this_len = min_t(size_t, len, PAGE_SIZE - offset); vec[i].iov_base = (void __user *) page_address(page); vec[i].iov_len = this_len; spd.pages[i] = page; diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 0cea9b9236d0..2c2618410d51 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -181,11 +181,11 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length, in = min(bytes, msblk->devblksize - offset); bytes -= in; while (in) { - if (pg_offset == PAGE_CACHE_SIZE) { + if (pg_offset == PAGE_SIZE) { data = squashfs_next_page(output); pg_offset = 0; } - avail = min_t(int, in, PAGE_CACHE_SIZE - + avail = min_t(int, in, PAGE_SIZE - pg_offset); memcpy(data + pg_offset, bh[k]->b_data + offset, avail); diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 1cb70a0b2168..23813c078cc9 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -30,7 +30,7 @@ * access the metadata and fragment caches. * * To avoid out of memory and fragmentation issues with vmalloc the cache - * uses sequences of kmalloced PAGE_CACHE_SIZE buffers. + * uses sequences of kmalloced PAGE_SIZE buffers. * * It should be noted that the cache is not used for file datablocks, these * are decompressed and cached in the page-cache in the normal way. The @@ -231,7 +231,7 @@ void squashfs_cache_delete(struct squashfs_cache *cache) /* * Initialise cache allocating the specified number of entries, each of * size block_size. To avoid vmalloc fragmentation issues each entry - * is allocated as a sequence of kmalloced PAGE_CACHE_SIZE buffers. + * is allocated as a sequence of kmalloced PAGE_SIZE buffers. */ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) @@ -255,7 +255,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, cache->unused = entries; cache->entries = entries; cache->block_size = block_size; - cache->pages = block_size >> PAGE_CACHE_SHIFT; + cache->pages = block_size >> PAGE_SHIFT; cache->pages = cache->pages ? 
cache->pages : 1; cache->name = name; cache->num_waiters = 0; @@ -275,7 +275,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, } for (j = 0; j < cache->pages; j++) { - entry->data[j] = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + entry->data[j] = kmalloc(PAGE_SIZE, GFP_KERNEL); if (entry->data[j] == NULL) { ERROR("Failed to allocate %s buffer\n", name); goto cleanup; @@ -314,10 +314,10 @@ int squashfs_copy_data(void *buffer, struct squashfs_cache_entry *entry, return min(length, entry->length - offset); while (offset < entry->length) { - void *buff = entry->data[offset / PAGE_CACHE_SIZE] - + (offset % PAGE_CACHE_SIZE); + void *buff = entry->data[offset / PAGE_SIZE] + + (offset % PAGE_SIZE); int bytes = min_t(int, entry->length - offset, - PAGE_CACHE_SIZE - (offset % PAGE_CACHE_SIZE)); + PAGE_SIZE - (offset % PAGE_SIZE)); if (bytes >= remaining) { memcpy(buffer, buff, remaining); @@ -415,7 +415,7 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, */ void *squashfs_read_table(struct super_block *sb, u64 block, int length) { - int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + int pages = (length + PAGE_SIZE - 1) >> PAGE_SHIFT; int i, res; void *table, *buffer, **data; struct squashfs_page_actor *actor; @@ -436,7 +436,7 @@ void *squashfs_read_table(struct super_block *sb, u64 block, int length) goto failed2; } - for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) + for (i = 0; i < pages; i++, buffer += PAGE_SIZE) data[i] = buffer; res = squashfs_read_data(sb, block, length | diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index e9034bf6e5ae..d2bc13636f79 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c @@ -102,7 +102,7 @@ static void *get_comp_opts(struct super_block *sb, unsigned short flags) * Read decompressor specific options from file system if present */ if (SQUASHFS_COMP_OPTS(flags)) { - buffer = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); if (buffer == NULL) { comp_opts = ERR_PTR(-ENOMEM); goto out; diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index e5c9689062ba..13d80947bf9e 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -175,7 +175,7 @@ static long long read_indexes(struct super_block *sb, int n, { int err, i; long long block = 0; - __le32 *blist = kmalloc(PAGE_CACHE_SIZE, GFP_KERNEL); + __le32 *blist = kmalloc(PAGE_SIZE, GFP_KERNEL); if (blist == NULL) { ERROR("read_indexes: Failed to allocate block_list\n"); @@ -183,7 +183,7 @@ static long long read_indexes(struct super_block *sb, int n, } while (n) { - int blocks = min_t(int, n, PAGE_CACHE_SIZE >> 2); + int blocks = min_t(int, n, PAGE_SIZE >> 2); err = squashfs_read_metadata(sb, blist, start_block, offset, blocks << 2); @@ -377,19 +377,19 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; void *pageaddr; - int i, mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; + int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int start_index = page->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers - * many PAGE_CACHE_SIZE pages (default block size is 128 KiB) explicitly + * many PAGE_SIZE pages (default block size is 128 KiB) explicitly * grab the pages from the page cache, except for the page that we've * been called to fill. 
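 *
 * With the default 128 KiB block and 4 KiB pages that works out to
 * block_log - PAGE_SHIFT = 17 - 12 = 5, so mask = 31 and one decompressed
 * datablock spans 32 page-cache pages; e.g. page->index 37 gives
 * start_index 32 and end_index 63 (the aligned 32-page group it sits in).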
*/ for (i = start_index; i <= end_index && bytes > 0; i++, - bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { + bytes -= PAGE_SIZE, offset += PAGE_SIZE) { struct page *push_page; - int avail = buffer ? min_t(int, bytes, PAGE_CACHE_SIZE) : 0; + int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0; TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); @@ -404,14 +404,14 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, pageaddr = kmap_atomic(push_page); squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); + memset(pageaddr + avail, 0, PAGE_SIZE - avail); kunmap_atomic(pageaddr); flush_dcache_page(push_page); SetPageUptodate(push_page); skip_page: unlock_page(push_page); if (i != page->index) - page_cache_release(push_page); + put_page(push_page); } } @@ -454,7 +454,7 @@ static int squashfs_readpage(struct file *file, struct page *page) { struct inode *inode = page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int index = page->index >> (msblk->block_log - PAGE_CACHE_SHIFT); + int index = page->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; int res; void *pageaddr; @@ -462,8 +462,8 @@ static int squashfs_readpage(struct file *file, struct page *page) TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", page->index, squashfs_i(inode)->start); - if (page->index >= ((i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT)) + if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> + PAGE_SHIFT)) goto out; if (index < file_end || squashfs_i(inode)->fragment_block == @@ -487,7 +487,7 @@ error_out: SetPageError(page); out: pageaddr = kmap_atomic(page); - memset(pageaddr, 0, PAGE_CACHE_SIZE); + memset(pageaddr, 0, PAGE_SIZE); kunmap_atomic(pageaddr); flush_dcache_page(page); if (!PageError(page)) diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index 43e7a7eddac0..cb485d8e0e91 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -30,8 +30,8 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) struct inode *inode = target_page->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int file_end = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; - int mask = (1 << (msblk->block_log - PAGE_CACHE_SHIFT)) - 1; + int file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; + int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; int start_index = target_page->index & ~mask; int end_index = start_index | mask; int i, n, pages, missing_pages, bytes, res = -ENOMEM; @@ -68,7 +68,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) if (PageUptodate(page[i])) { unlock_page(page[i]); - page_cache_release(page[i]); + put_page(page[i]); page[i] = NULL; missing_pages++; } @@ -96,10 +96,10 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) goto mark_errored; /* Last page may have trailing bytes not filled */ - bytes = res % PAGE_CACHE_SIZE; + bytes = res % PAGE_SIZE; if (bytes) { pageaddr = kmap_atomic(page[pages - 1]); - memset(pageaddr + bytes, 0, PAGE_CACHE_SIZE - bytes); + memset(pageaddr + bytes, 0, PAGE_SIZE - bytes); kunmap_atomic(pageaddr); } @@ -109,7 +109,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize) SetPageUptodate(page[i]); unlock_page(page[i]); if (page[i] != target_page) - page_cache_release(page[i]); + put_page(page[i]); } 
kfree(actor); @@ -127,7 +127,7 @@ mark_errored: flush_dcache_page(page[i]); SetPageError(page[i]); unlock_page(page[i]); - page_cache_release(page[i]); + put_page(page[i]); } out: @@ -153,21 +153,21 @@ static int squashfs_read_cache(struct page *target_page, u64 block, int bsize, } for (n = 0; n < pages && bytes > 0; n++, - bytes -= PAGE_CACHE_SIZE, offset += PAGE_CACHE_SIZE) { - int avail = min_t(int, bytes, PAGE_CACHE_SIZE); + bytes -= PAGE_SIZE, offset += PAGE_SIZE) { + int avail = min_t(int, bytes, PAGE_SIZE); if (page[n] == NULL) continue; pageaddr = kmap_atomic(page[n]); squashfs_copy_data(pageaddr, buffer, offset, avail); - memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); + memset(pageaddr + avail, 0, PAGE_SIZE - avail); kunmap_atomic(pageaddr); flush_dcache_page(page[n]); SetPageUptodate(page[n]); unlock_page(page[n]); if (page[n] != target_page) - page_cache_release(page[n]); + put_page(page[n]); } out: diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c index c31e2bc9c081..ff4468bd18b0 100644 --- a/fs/squashfs/lz4_wrapper.c +++ b/fs/squashfs/lz4_wrapper.c @@ -117,13 +117,13 @@ static int lz4_uncompress(struct squashfs_sb_info *msblk, void *strm, data = squashfs_first_page(output); buff = stream->output; while (data) { - if (bytes <= PAGE_CACHE_SIZE) { + if (bytes <= PAGE_SIZE) { memcpy(data, buff, bytes); break; } - memcpy(data, buff, PAGE_CACHE_SIZE); - buff += PAGE_CACHE_SIZE; - bytes -= PAGE_CACHE_SIZE; + memcpy(data, buff, PAGE_SIZE); + buff += PAGE_SIZE; + bytes -= PAGE_SIZE; data = squashfs_next_page(output); } squashfs_finish_page(output); diff --git a/fs/squashfs/lzo_wrapper.c b/fs/squashfs/lzo_wrapper.c index 244b9fbfff7b..934c17e96590 100644 --- a/fs/squashfs/lzo_wrapper.c +++ b/fs/squashfs/lzo_wrapper.c @@ -102,13 +102,13 @@ static int lzo_uncompress(struct squashfs_sb_info *msblk, void *strm, data = squashfs_first_page(output); buff = stream->output; while (data) { - if (bytes <= PAGE_CACHE_SIZE) { + if (bytes <= PAGE_SIZE) { memcpy(data, buff, bytes); break; } else { - memcpy(data, buff, PAGE_CACHE_SIZE); - buff += PAGE_CACHE_SIZE; - bytes -= PAGE_CACHE_SIZE; + memcpy(data, buff, PAGE_SIZE); + buff += PAGE_SIZE; + bytes -= PAGE_SIZE; data = squashfs_next_page(output); } } diff --git a/fs/squashfs/page_actor.c b/fs/squashfs/page_actor.c index 5a1c11f56441..9b7b1b6a7892 100644 --- a/fs/squashfs/page_actor.c +++ b/fs/squashfs/page_actor.c @@ -48,7 +48,7 @@ struct squashfs_page_actor *squashfs_page_actor_init(void **buffer, if (actor == NULL) return NULL; - actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->length = length ? : pages * PAGE_SIZE; actor->buffer = buffer; actor->pages = pages; actor->next_page = 0; @@ -88,7 +88,7 @@ struct squashfs_page_actor *squashfs_page_actor_init_special(struct page **page, if (actor == NULL) return NULL; - actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->length = length ? : pages * PAGE_SIZE; actor->page = page; actor->pages = pages; actor->next_page = 0; diff --git a/fs/squashfs/page_actor.h b/fs/squashfs/page_actor.h index 26dd82008b82..98537eab27e2 100644 --- a/fs/squashfs/page_actor.h +++ b/fs/squashfs/page_actor.h @@ -24,7 +24,7 @@ static inline struct squashfs_page_actor *squashfs_page_actor_init(void **page, if (actor == NULL) return NULL; - actor->length = length ? : pages * PAGE_CACHE_SIZE; + actor->length = length ? 
: pages * PAGE_SIZE; actor->page = page; actor->pages = pages; actor->next_page = 0; diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 5e79bfa4f260..cf01e15a7b16 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -152,7 +152,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) * Check the system page size is not larger than the filesystem * block size (by default 128K). This is currently not supported. */ - if (PAGE_CACHE_SIZE > msblk->block_size) { + if (PAGE_SIZE > msblk->block_size) { ERROR("Page size > filesystem block size (%d). This is " "currently not supported!\n", msblk->block_size); goto failed_mount; diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c index dbcc2f54bad4..d688ef42a6a1 100644 --- a/fs/squashfs/symlink.c +++ b/fs/squashfs/symlink.c @@ -48,10 +48,10 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page) struct inode *inode = page->mapping->host; struct super_block *sb = inode->i_sb; struct squashfs_sb_info *msblk = sb->s_fs_info; - int index = page->index << PAGE_CACHE_SHIFT; + int index = page->index << PAGE_SHIFT; u64 block = squashfs_i(inode)->start; int offset = squashfs_i(inode)->offset; - int length = min_t(int, i_size_read(inode) - index, PAGE_CACHE_SIZE); + int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE); int bytes, copied; void *pageaddr; struct squashfs_cache_entry *entry; @@ -94,7 +94,7 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page) copied = squashfs_copy_data(pageaddr + bytes, entry, offset, length - bytes); if (copied == length - bytes) - memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); + memset(pageaddr + length, 0, PAGE_SIZE - length); else block = entry->next_index; kunmap_atomic(pageaddr); diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c index c609624e4b8a..6bfaef73d065 100644 --- a/fs/squashfs/xz_wrapper.c +++ b/fs/squashfs/xz_wrapper.c @@ -141,7 +141,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, stream->buf.in_pos = 0; stream->buf.in_size = 0; stream->buf.out_pos = 0; - stream->buf.out_size = PAGE_CACHE_SIZE; + stream->buf.out_size = PAGE_SIZE; stream->buf.out = squashfs_first_page(output); do { @@ -158,7 +158,7 @@ static int squashfs_xz_uncompress(struct squashfs_sb_info *msblk, void *strm, stream->buf.out = squashfs_next_page(output); if (stream->buf.out != NULL) { stream->buf.out_pos = 0; - total += PAGE_CACHE_SIZE; + total += PAGE_SIZE; } } diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 8727caba6882..2ec24d128bce 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c @@ -69,7 +69,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, int zlib_err, zlib_init = 0, k = 0; z_stream *stream = strm; - stream->avail_out = PAGE_CACHE_SIZE; + stream->avail_out = PAGE_SIZE; stream->next_out = squashfs_first_page(output); stream->avail_in = 0; @@ -85,7 +85,7 @@ static int zlib_uncompress(struct squashfs_sb_info *msblk, void *strm, if (stream->avail_out == 0) { stream->next_out = squashfs_next_page(output); if (stream->next_out != NULL) - stream->avail_out = PAGE_CACHE_SIZE; + stream->avail_out = PAGE_SIZE; } if (!zlib_init) { diff --git a/fs/sync.c b/fs/sync.c index dd5d1711c7ac..2a54c1f22035 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -302,7 +302,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, goto out; if (sizeof(pgoff_t) == 4) { - if (offset >= (0x100000000ULL << 
PAGE_CACHE_SHIFT)) { + if (offset >= (0x100000000ULL << PAGE_SHIFT)) { /* * The range starts outside a 32 bit machine's * pagecache addressing capabilities. Let it "succeed" @@ -310,7 +310,7 @@ SYSCALL_DEFINE4(sync_file_range, int, fd, loff_t, offset, loff_t, nbytes, ret = 0; goto out; } - if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) { + if (endbyte >= (0x100000000ULL << PAGE_SHIFT)) { /* * Out to EOF */ diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index 63c1bcb224ee..c0f0a3e643eb 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -30,7 +30,7 @@ const struct file_operations sysv_dir_operations = { static inline void dir_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) @@ -73,8 +73,8 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx) if (pos >= inode->i_size) return 0; - offset = pos & ~PAGE_CACHE_MASK; - n = pos >> PAGE_CACHE_SHIFT; + offset = pos & ~PAGE_MASK; + n = pos >> PAGE_SHIFT; for ( ; n < npages; n++, offset = 0) { char *kaddr, *limit; @@ -85,7 +85,7 @@ static int sysv_readdir(struct file *file, struct dir_context *ctx) continue; kaddr = (char *)page_address(page); de = (struct sysv_dir_entry *)(kaddr+offset); - limit = kaddr + PAGE_CACHE_SIZE - SYSV_DIRSIZE; + limit = kaddr + PAGE_SIZE - SYSV_DIRSIZE; for ( ;(char*)de <= limit; de++, ctx->pos += sizeof(*de)) { char *name = de->name; @@ -146,7 +146,7 @@ struct sysv_dir_entry *sysv_find_entry(struct dentry *dentry, struct page **res_ if (!IS_ERR(page)) { kaddr = (char*)page_address(page); de = (struct sysv_dir_entry *) kaddr; - kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; + kaddr += PAGE_SIZE - SYSV_DIRSIZE; for ( ; (char *) de <= kaddr ; de++) { if (!de->inode) continue; @@ -190,7 +190,7 @@ int sysv_add_link(struct dentry *dentry, struct inode *inode) goto out; kaddr = (char*)page_address(page); de = (struct sysv_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - SYSV_DIRSIZE; + kaddr += PAGE_SIZE - SYSV_DIRSIZE; while ((char *)de <= kaddr) { if (!de->inode) goto got_it; @@ -261,7 +261,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir) kmap(page); base = (char*)page_address(page); - memset(base, 0, PAGE_CACHE_SIZE); + memset(base, 0, PAGE_SIZE); de = (struct sysv_dir_entry *) base; de->inode = cpu_to_fs16(SYSV_SB(inode->i_sb), inode->i_ino); @@ -273,7 +273,7 @@ int sysv_make_empty(struct inode *inode, struct inode *dir) kunmap(page); err = dir_commit_chunk(page, 0, 2 * SYSV_DIRSIZE); fail: - page_cache_release(page); + put_page(page); return err; } @@ -296,7 +296,7 @@ int sysv_empty_dir(struct inode * inode) kaddr = (char *)page_address(page); de = (struct sysv_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE-SYSV_DIRSIZE; + kaddr += PAGE_SIZE-SYSV_DIRSIZE; for ( ;(char *)de <= kaddr; de++) { if (!de->inode) diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index 11e83ed0b4bf..90b60c03b588 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c @@ -264,11 +264,11 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 065c88f8e4b8..446753d8ac34 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -121,7 +121,7 @@ static int do_readpage(struct page *page) if (block >= beyond) { /* Reading beyond inode */ SetPageChecked(page); - 
memset(addr, 0, PAGE_CACHE_SIZE); + memset(addr, 0, PAGE_SIZE); goto out; } @@ -223,7 +223,7 @@ static int write_begin_slow(struct address_space *mapping, { struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct ubifs_budget_req req = { .new_page = 1 }; int uninitialized_var(err), appending = !!(pos + len > inode->i_size); struct page *page; @@ -254,13 +254,13 @@ static int write_begin_slow(struct address_space *mapping, } if (!PageUptodate(page)) { - if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) + if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) SetPageChecked(page); else { err = do_readpage(page); if (err) { unlock_page(page); - page_cache_release(page); + put_page(page); ubifs_release_budget(c, &req); return err; } @@ -428,7 +428,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, struct inode *inode = mapping->host; struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; int uninitialized_var(err), appending = !!(pos + len > inode->i_size); int skipped_read = 0; struct page *page; @@ -446,7 +446,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, if (!PageUptodate(page)) { /* The page is not loaded from the flash */ - if (!(pos & ~PAGE_CACHE_MASK) && len == PAGE_CACHE_SIZE) { + if (!(pos & ~PAGE_MASK) && len == PAGE_SIZE) { /* * We change whole page so no need to load it. But we * do not know whether this page exists on the media or @@ -462,7 +462,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, err = do_readpage(page); if (err) { unlock_page(page); - page_cache_release(page); + put_page(page); return err; } } @@ -494,7 +494,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, mutex_unlock(&ui->ui_mutex); } unlock_page(page); - page_cache_release(page); + put_page(page); return write_begin_slow(mapping, pos, len, pagep, flags); } @@ -549,12 +549,12 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, dbg_gen("ino %lu, pos %llu, pg %lu, len %u, copied %d, i_size %lld", inode->i_ino, pos, page->index, len, copied, inode->i_size); - if (unlikely(copied < len && len == PAGE_CACHE_SIZE)) { + if (unlikely(copied < len && len == PAGE_SIZE)) { /* * VFS copied less data to the page that it intended and * declared in its '->write_begin()' call via the @len * argument. If the page was not up-to-date, and @len was - * @PAGE_CACHE_SIZE, the 'ubifs_write_begin()' function did + * @PAGE_SIZE, the 'ubifs_write_begin()' function did * not load it from the media (for optimization reasons). This * means that part of the page contains garbage. So read the * page now. 
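A condensed sketch of the fallback that this comment describes (simplified from ubifs_write_end(); the real code also logs the event and handles the budget):

	if (unlikely(copied < len && len == PAGE_SIZE)) {
		/*
		 * write_begin skipped do_readpage() expecting a full-page
		 * overwrite; repair the page from the media and report a
		 * short copy so the VFS repeats the write.
		 */
		copied = do_readpage(page);
		goto out;
	}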
@@ -593,7 +593,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping, out: unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -621,10 +621,10 @@ static int populate_page(struct ubifs_info *c, struct page *page, addr = zaddr = kmap(page); - end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; + end_index = (i_size - 1) >> PAGE_SHIFT; if (!i_size || page->index > end_index) { hole = 1; - memset(addr, 0, PAGE_CACHE_SIZE); + memset(addr, 0, PAGE_SIZE); goto out_hole; } @@ -673,7 +673,7 @@ static int populate_page(struct ubifs_info *c, struct page *page, } if (end_index == page->index) { - int len = i_size & (PAGE_CACHE_SIZE - 1); + int len = i_size & (PAGE_SIZE - 1); if (len && len < read) memset(zaddr + len, 0, read - len); @@ -773,7 +773,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, isize = i_size_read(inode); if (isize == 0) goto out_free; - end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); + end_index = ((isize - 1) >> PAGE_SHIFT); for (page_idx = 1; page_idx < page_cnt; page_idx++) { pgoff_t page_offset = offset + page_idx; @@ -788,7 +788,7 @@ static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, if (!PageUptodate(page)) err = populate_page(c, page, bu, &n); unlock_page(page); - page_cache_release(page); + put_page(page); if (err) break; } @@ -905,7 +905,7 @@ static int do_writepage(struct page *page, int len) #ifdef UBIFS_DEBUG struct ubifs_inode *ui = ubifs_inode(inode); spin_lock(&ui->ui_lock); - ubifs_assert(page->index <= ui->synced_i_size >> PAGE_CACHE_SHIFT); + ubifs_assert(page->index <= ui->synced_i_size >> PAGE_SHIFT); spin_unlock(&ui->ui_lock); #endif @@ -1001,8 +1001,8 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) struct inode *inode = page->mapping->host; struct ubifs_inode *ui = ubifs_inode(inode); loff_t i_size = i_size_read(inode), synced_i_size; - pgoff_t end_index = i_size >> PAGE_CACHE_SHIFT; - int err, len = i_size & (PAGE_CACHE_SIZE - 1); + pgoff_t end_index = i_size >> PAGE_SHIFT; + int err, len = i_size & (PAGE_SIZE - 1); void *kaddr; dbg_gen("ino %lu, pg %lu, pg flags %#lx", @@ -1021,7 +1021,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) /* Is the page fully inside @i_size? */ if (page->index < end_index) { - if (page->index >= synced_i_size >> PAGE_CACHE_SHIFT) { + if (page->index >= synced_i_size >> PAGE_SHIFT) { err = inode->i_sb->s_op->write_inode(inode, NULL); if (err) goto out_unlock; @@ -1034,7 +1034,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) * with this. */ } - return do_writepage(page, PAGE_CACHE_SIZE); + return do_writepage(page, PAGE_SIZE); } /* @@ -1045,7 +1045,7 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc) * writes to that region are not written out to the file." 
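 *
 * For instance, with 4 KiB pages and i_size = 10240, the straddling page
 * is index 2 and len = 10240 & (PAGE_SIZE - 1) = 2048, so the memset
 * below clears bytes 2048..4095 of the page before it is written out.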
*/ kaddr = kmap_atomic(page); - memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); + memset(kaddr + len, 0, PAGE_SIZE - len); flush_dcache_page(page); kunmap_atomic(kaddr); @@ -1138,7 +1138,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, truncate_setsize(inode, new_size); if (offset) { - pgoff_t index = new_size >> PAGE_CACHE_SHIFT; + pgoff_t index = new_size >> PAGE_SHIFT; struct page *page; page = find_lock_page(inode->i_mapping, index); @@ -1157,9 +1157,9 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, clear_page_dirty_for_io(page); if (UBIFS_BLOCKS_PER_PAGE_SHIFT) offset = new_size & - (PAGE_CACHE_SIZE - 1); + (PAGE_SIZE - 1); err = do_writepage(page, offset); - page_cache_release(page); + put_page(page); if (err) goto out_budg; /* @@ -1173,7 +1173,7 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode, * having to read it. */ unlock_page(page); - page_cache_release(page); + put_page(page); } } } @@ -1285,7 +1285,7 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset, struct ubifs_info *c = inode->i_sb->s_fs_info; ubifs_assert(PagePrivate(page)); - if (offset || length < PAGE_CACHE_SIZE) + if (offset || length < PAGE_SIZE) /* Partial page remains dirty */ return; diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index a233ba913be4..e98c24ee25a1 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2237,12 +2237,12 @@ static int __init ubifs_init(void) BUILD_BUG_ON(UBIFS_COMPR_TYPES_CNT > 4); /* - * We require that PAGE_CACHE_SIZE is greater-than-or-equal-to + * We require that PAGE_SIZE is greater-than-or-equal-to * UBIFS_BLOCK_SIZE. It is assumed that both are powers of 2. */ - if (PAGE_CACHE_SIZE < UBIFS_BLOCK_SIZE) { + if (PAGE_SIZE < UBIFS_BLOCK_SIZE) { pr_err("UBIFS error (pid %d): VFS page cache size is %u bytes, but UBIFS requires at least 4096 bytes", - current->pid, (unsigned int)PAGE_CACHE_SIZE); + current->pid, (unsigned int)PAGE_SIZE); return -EINVAL; } diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index c2a57e193a81..4cd7e569cd00 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -46,8 +46,8 @@ #define UBIFS_SUPER_MAGIC 0x24051905 /* Number of UBIFS blocks per VFS page */ -#define UBIFS_BLOCKS_PER_PAGE (PAGE_CACHE_SIZE / UBIFS_BLOCK_SIZE) -#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_CACHE_SHIFT - UBIFS_BLOCK_SHIFT) +#define UBIFS_BLOCKS_PER_PAGE (PAGE_SIZE / UBIFS_BLOCK_SIZE) +#define UBIFS_BLOCKS_PER_PAGE_SHIFT (PAGE_SHIFT - UBIFS_BLOCK_SHIFT) /* "File system end of life" sequence number watermark */ #define SQNUM_WARN_WATERMARK 0xFFFFFFFF00000000ULL diff --git a/fs/udf/file.c b/fs/udf/file.c index 1af98963d860..877ba1c9b461 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -46,7 +46,7 @@ static void __udf_adinicb_readpage(struct page *page) kaddr = kmap(page); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); - memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); + memset(kaddr + inode->i_size, 0, PAGE_SIZE - inode->i_size); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); @@ -87,14 +87,14 @@ static int udf_adinicb_write_begin(struct file *file, { struct page *page; - if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) + if (WARN_ON_ONCE(pos >= PAGE_SIZE)) return -EIO; page = grab_cache_page_write_begin(mapping, 0, flags); if (!page) return -ENOMEM; *pagep = page; - if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) + if (!PageUptodate(page) && len != PAGE_SIZE) __udf_adinicb_readpage(page); return 0; } diff --git a/fs/udf/inode.c 
b/fs/udf/inode.c index 166d3ed32c39..2dc461eeb415 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -287,7 +287,7 @@ int udf_expand_file_adinicb(struct inode *inode) if (!PageUptodate(page)) { kaddr = kmap(page); memset(kaddr + iinfo->i_lenAlloc, 0x00, - PAGE_CACHE_SIZE - iinfo->i_lenAlloc); + PAGE_SIZE - iinfo->i_lenAlloc); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, iinfo->i_lenAlloc); flush_dcache_page(page); @@ -319,7 +319,7 @@ int udf_expand_file_adinicb(struct inode *inode) inode->i_data.a_ops = &udf_adinicb_aops; up_write(&iinfo->i_data_sem); } - page_cache_release(page); + put_page(page); mark_inode_dirty(inode); return err; diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index dc5fae601c24..0447b949c7f5 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c @@ -237,7 +237,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg, sector_t newb, struct page *locked_page) { const unsigned blks_per_page = - 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits); + 1 << (PAGE_SHIFT - inode->i_blkbits); const unsigned mask = blks_per_page - 1; struct address_space * const mapping = inode->i_mapping; pgoff_t index, cur_index, last_index; @@ -255,9 +255,9 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg, cur_index = locked_page->index; end = count + beg; - last_index = end >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + last_index = end >> (PAGE_SHIFT - inode->i_blkbits); for (i = beg; i < end; i = (i | mask) + 1) { - index = i >> (PAGE_CACHE_SHIFT - inode->i_blkbits); + index = i >> (PAGE_SHIFT - inode->i_blkbits); if (likely(cur_index != index)) { page = ufs_get_locked_page(mapping, index); diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 74f2e80288bf..0b1457292734 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -62,7 +62,7 @@ static int ufs_commit_chunk(struct page *page, loff_t pos, unsigned len) static inline void ufs_put_page(struct page *page) { kunmap(page); - page_cache_release(page); + put_page(page); } ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) @@ -111,13 +111,13 @@ static void ufs_check_page(struct page *page) struct super_block *sb = dir->i_sb; char *kaddr = page_address(page); unsigned offs, rec_len; - unsigned limit = PAGE_CACHE_SIZE; + unsigned limit = PAGE_SIZE; const unsigned chunk_mask = UFS_SB(sb)->s_uspi->s_dirblksize - 1; struct ufs_dir_entry *p; char *error; - if ((dir->i_size >> PAGE_CACHE_SHIFT) == page->index) { - limit = dir->i_size & ~PAGE_CACHE_MASK; + if ((dir->i_size >> PAGE_SHIFT) == page->index) { + limit = dir->i_size & ~PAGE_MASK; if (limit & chunk_mask) goto Ebadsize; if (!limit) @@ -170,7 +170,7 @@ Einumber: bad_entry: ufs_error (sb, "ufs_check_page", "bad entry in directory #%lu: %s - " "offset=%lu, rec_len=%d, name_len=%d", - dir->i_ino, error, (page->index<<PAGE_CACHE_SHIFT)+offs, + dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs, rec_len, ufs_get_de_namlen(sb, p)); goto fail; Eend: @@ -178,7 +178,7 @@ Eend: ufs_error(sb, __func__, "entry in directory #%lu spans the page boundary" "offset=%lu", - dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs); + dir->i_ino, (page->index<<PAGE_SHIFT)+offs); fail: SetPageChecked(page); SetPageError(page); @@ -211,9 +211,9 @@ ufs_last_byte(struct inode *inode, unsigned long page_nr) { unsigned last_byte = inode->i_size; - last_byte -= page_nr << PAGE_CACHE_SHIFT; - if (last_byte > PAGE_CACHE_SIZE) - last_byte = PAGE_CACHE_SIZE; + last_byte -= page_nr << PAGE_SHIFT; + if (last_byte > PAGE_SIZE) + last_byte = PAGE_SIZE; return last_byte; } @@ -341,7 +341,7 @@ int 
ufs_add_link(struct dentry *dentry, struct inode *inode) kaddr = page_address(page); dir_end = kaddr + ufs_last_byte(dir, n); de = (struct ufs_dir_entry *)kaddr; - kaddr += PAGE_CACHE_SIZE - reclen; + kaddr += PAGE_SIZE - reclen; while ((char *)de <= kaddr) { if ((char *)de == dir_end) { /* We hit i_size */ @@ -432,8 +432,8 @@ ufs_readdir(struct file *file, struct dir_context *ctx) loff_t pos = ctx->pos; struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - unsigned int offset = pos & ~PAGE_CACHE_MASK; - unsigned long n = pos >> PAGE_CACHE_SHIFT; + unsigned int offset = pos & ~PAGE_MASK; + unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); int need_revalidate = file->f_version != inode->i_version; @@ -454,14 +454,14 @@ ufs_readdir(struct file *file, struct dir_context *ctx) ufs_error(sb, __func__, "bad page in #%lu", inode->i_ino); - ctx->pos += PAGE_CACHE_SIZE - offset; + ctx->pos += PAGE_SIZE - offset; return -EIO; } kaddr = page_address(page); if (unlikely(need_revalidate)) { if (offset) { offset = ufs_validate_entry(sb, kaddr, offset, chunk_mask); - ctx->pos = (n<<PAGE_CACHE_SHIFT) + offset; + ctx->pos = (n<<PAGE_SHIFT) + offset; } file->f_version = inode->i_version; need_revalidate = 0; @@ -574,7 +574,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir) kmap(page); base = (char*)page_address(page); - memset(base, 0, PAGE_CACHE_SIZE); + memset(base, 0, PAGE_SIZE); de = (struct ufs_dir_entry *) base; @@ -594,7 +594,7 @@ int ufs_make_empty(struct inode * inode, struct inode *dir) err = ufs_commit_chunk(page, 0, chunk_size); fail: - page_cache_release(page); + put_page(page); return err; } diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index d897e169ab9c..9f49431e798d 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c @@ -1051,13 +1051,13 @@ static int ufs_alloc_lastblock(struct inode *inode, loff_t size) lastfrag--; lastpage = ufs_get_locked_page(mapping, lastfrag >> - (PAGE_CACHE_SHIFT - inode->i_blkbits)); + (PAGE_SHIFT - inode->i_blkbits)); if (IS_ERR(lastpage)) { err = -EIO; goto out; } - end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); + end = lastfrag & ((1 << (PAGE_SHIFT - inode->i_blkbits)) - 1); bh = page_buffers(lastpage); for (i = 0; i < end; ++i) bh = bh->b_this_page; diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index acf4a3b61b81..a1559f762805 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c @@ -305,7 +305,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0); else { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } inode_dec_link_count(old_dir); } @@ -315,11 +315,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, out_dir: if (dir_de) { kunmap(dir_page); - page_cache_release(dir_page); + put_page(dir_page); } out_old: kunmap(old_page); - page_cache_release(old_page); + put_page(old_page); out: return err; } diff --git a/fs/ufs/util.c b/fs/ufs/util.c index b6c2f94e041e..a409e3e7827a 100644 --- a/fs/ufs/util.c +++ b/fs/ufs/util.c @@ -261,14 +261,14 @@ struct page *ufs_get_locked_page(struct address_space *mapping, if (unlikely(page->mapping == NULL)) { /* Truncate got there first */ unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; goto out; } if (!PageUptodate(page) || PageError(page)) { unlock_page(page); - page_cache_release(page); + put_page(page); printk(KERN_ERR 
"ufs_change_blocknr: " "can not read page: ino %lu, index: %lu\n", diff --git a/fs/ufs/util.h b/fs/ufs/util.h index 954175928240..b7fbf53dbc81 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -283,7 +283,7 @@ extern struct page *ufs_get_locked_page(struct address_space *mapping, static inline void ufs_put_locked_page(struct page *page) { unlock_page(page); - page_cache_release(page); + put_page(page); } diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 041b6948aecc..ce41d7fe753c 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3742,11 +3742,11 @@ xfs_bmap_btalloc( args.prod = align; if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod))) args.mod = (xfs_extlen_t)(args.prod - args.mod); - } else if (mp->m_sb.sb_blocksize >= PAGE_CACHE_SIZE) { + } else if (mp->m_sb.sb_blocksize >= PAGE_SIZE) { args.prod = 1; args.mod = 0; } else { - args.prod = PAGE_CACHE_SIZE >> mp->m_sb.sb_blocklog; + args.prod = PAGE_SIZE >> mp->m_sb.sb_blocklog; if ((args.mod = (xfs_extlen_t)(do_mod(ap->offset, args.prod)))) args.mod = (xfs_extlen_t)(args.prod - args.mod); } diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index d445a64b979e..e49b2406d15d 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -704,7 +704,7 @@ next_buffer: xfs_iunlock(ip, XFS_ILOCK_EXCL); out_invalidate: - xfs_vm_invalidatepage(page, 0, PAGE_CACHE_SIZE); + xfs_vm_invalidatepage(page, 0, PAGE_SIZE); return; } @@ -925,9 +925,9 @@ xfs_do_writepage( * ---------------------------------^------------------| */ offset = i_size_read(inode); - end_index = offset >> PAGE_CACHE_SHIFT; + end_index = offset >> PAGE_SHIFT; if (page->index < end_index) - end_offset = (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT; + end_offset = (xfs_off_t)(page->index + 1) << PAGE_SHIFT; else { /* * Check whether the page to write out is beyond or straddles @@ -940,7 +940,7 @@ xfs_do_writepage( * | | Straddles | * ---------------------------------^-----------|--------| */ - unsigned offset_into_page = offset & (PAGE_CACHE_SIZE - 1); + unsigned offset_into_page = offset & (PAGE_SIZE - 1); /* * Skip the page if it is fully outside i_size, e.g. due to a @@ -971,7 +971,7 @@ xfs_do_writepage( * memory is zeroed when mapped, and writes to that region are * not written out to the file." */ - zero_user_segment(page, offset_into_page, PAGE_CACHE_SIZE); + zero_user_segment(page, offset_into_page, PAGE_SIZE); /* Adjust the end_offset to the end of file */ end_offset = offset; @@ -1475,7 +1475,7 @@ xfs_vm_write_failed( loff_t block_offset; loff_t block_start; loff_t block_end; - loff_t from = pos & (PAGE_CACHE_SIZE - 1); + loff_t from = pos & (PAGE_SIZE - 1); loff_t to = from + len; struct buffer_head *bh, *head; struct xfs_mount *mp = XFS_I(inode)->i_mount; @@ -1491,7 +1491,7 @@ xfs_vm_write_failed( * start of the page by using shifts rather than masks the mismatch * problem. 
*/ - block_offset = (pos >> PAGE_CACHE_SHIFT) << PAGE_CACHE_SHIFT; + block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT; ASSERT(block_offset + from == pos); @@ -1558,12 +1558,12 @@ xfs_vm_write_begin( struct page **pagep, void **fsdata) { - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; struct page *page; int status; struct xfs_mount *mp = XFS_I(mapping->host)->i_mount; - ASSERT(len <= PAGE_CACHE_SIZE); + ASSERT(len <= PAGE_SIZE); page = grab_cache_page_write_begin(mapping, index, flags); if (!page) @@ -1592,7 +1592,7 @@ xfs_vm_write_begin( truncate_pagecache_range(inode, start, pos + len); } - page_cache_release(page); + put_page(page); page = NULL; } @@ -1620,7 +1620,7 @@ xfs_vm_write_end( { int ret; - ASSERT(len <= PAGE_CACHE_SIZE); + ASSERT(len <= PAGE_SIZE); ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); if (unlikely(ret < len)) { diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index a32c1dcae2ff..3b6309865c65 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -1237,7 +1237,7 @@ xfs_free_file_space( /* wait for the completion of any pending DIOs */ inode_dio_wait(VFS_I(ip)); - rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); + rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE); ioffset = round_down(offset, rounding); iendoffset = round_up(offset + len, rounding) - 1; error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset, @@ -1466,7 +1466,7 @@ xfs_shift_file_space( if (error) return error; error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, - offset >> PAGE_CACHE_SHIFT, -1); + offset >> PAGE_SHIFT, -1); if (error) return error; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ac0fd32de31e..569938a4a357 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -106,8 +106,8 @@ xfs_iozero( unsigned offset, bytes; void *fsdata; - offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ - bytes = PAGE_CACHE_SIZE - offset; + offset = (pos & (PAGE_SIZE -1)); /* Within page */ + bytes = PAGE_SIZE - offset; if (bytes > count) bytes = count; @@ -799,8 +799,8 @@ xfs_file_dio_aio_write( /* see generic_file_direct_write() for why this is necessary */ if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, - end >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + end >> PAGE_SHIFT); } if (ret > 0) { @@ -1207,9 +1207,9 @@ xfs_find_get_desired_pgoff( pagevec_init(&pvec, 0); - index = startoff >> PAGE_CACHE_SHIFT; + index = startoff >> PAGE_SHIFT; endoff = XFS_FSB_TO_B(mp, map->br_startoff + map->br_blockcount); - end = endoff >> PAGE_CACHE_SHIFT; + end = endoff >> PAGE_SHIFT; do { int want; unsigned nr_pages; diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index ec0e239a0fa9..a8192dc797dc 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -135,7 +135,7 @@ typedef __u32 xfs_nlink_t; * Size of block device i/o is parameterized here. * Currently the system supports page-sized i/o. 
*/ -#define BLKDEV_IOSHIFT PAGE_CACHE_SHIFT +#define BLKDEV_IOSHIFT PAGE_SHIFT #define BLKDEV_IOSIZE (1<<BLKDEV_IOSHIFT) /* number of BB's per block device block */ #define BLKDEV_BB BTOBB(BLKDEV_IOSIZE) diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 536a0ee9cd5a..cfd4210dd015 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -171,7 +171,7 @@ xfs_sb_validate_fsb_count( ASSERT(sbp->sb_blocklog >= BBSHIFT); /* Limited by ULONG_MAX of page cache index */ - if (nblocks >> (PAGE_CACHE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) + if (nblocks >> (PAGE_SHIFT - sbp->sb_blocklog) > ULONG_MAX) return -EFBIG; return 0; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index bac6b3435591..eafe257b357a 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -231,12 +231,12 @@ static inline unsigned long xfs_preferred_iosize(xfs_mount_t *mp) { if (mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE) - return PAGE_CACHE_SIZE; + return PAGE_SIZE; return (mp->m_swidth ? (mp->m_swidth << mp->m_sb.sb_blocklog) : ((mp->m_flags & XFS_MOUNT_DFLT_IOSIZE) ? (1 << (int)MAX(mp->m_readio_log, mp->m_writeio_log)) : - PAGE_CACHE_SIZE)); + PAGE_SIZE)); } #define XFS_LAST_UNMOUNT_WAS_CLEAN(mp) \ diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index ade236e90bb3..51ddaf2c2b8c 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -293,8 +293,8 @@ xfs_fs_commit_blocks( * Make sure reads through the pagecache see the new data. */ error = invalidate_inode_pages2_range(inode->i_mapping, - start >> PAGE_CACHE_SHIFT, - (end - 1) >> PAGE_CACHE_SHIFT); + start >> PAGE_SHIFT, + (end - 1) >> PAGE_SHIFT); WARN_ON_ONCE(error); error = xfs_iomap_write_unwritten(ip, start, length); diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index d760934109b5..187e14b696c2 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -556,10 +556,10 @@ xfs_max_file_offset( /* Figure out maximum filesize, on Linux this can depend on * the filesystem blocksize (on 32 bit platforms). * __block_write_begin does this in an [unsigned] long... - * page->index << (PAGE_CACHE_SHIFT - bbits) + * page->index << (PAGE_SHIFT - bbits) * So, for page sized blocks (4K on 32 bit platforms), * this wraps at around 8Tb (hence MAX_LFS_FILESIZE which is - * (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) + * (((u64)PAGE_SIZE << (BITS_PER_LONG-1))-1) * but for smaller blocksizes it is less (bbits = log2 bsize). * Note1: get_block_t takes a long (implicit cast from above) * Note2: The Large Block Device (LBD and HAVE_SECTOR_T) patch @@ -570,10 +570,10 @@ xfs_max_file_offset( #if BITS_PER_LONG == 32 # if defined(CONFIG_LBDAF) ASSERT(sizeof(sector_t) == 8); - pagefactor = PAGE_CACHE_SIZE; + pagefactor = PAGE_SIZE; bitshift = BITS_PER_LONG; # else - pagefactor = PAGE_CACHE_SIZE >> (PAGE_CACHE_SHIFT - blockshift); + pagefactor = PAGE_SIZE >> (PAGE_SHIFT - blockshift); # endif #endif diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h new file mode 100644 index 000000000000..25afb31f0389 --- /dev/null +++ b/include/drm/bridge/analogix_dp.h @@ -0,0 +1,41 @@ +/* + * Analogix DP (Display Port) Core interface driver. + * + * Copyright (C) 2015 Rockchip Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ +#ifndef _ANALOGIX_DP_H_ +#define _ANALOGIX_DP_H_ + +#include <drm/drm_crtc.h> + +enum analogix_dp_devtype { + EXYNOS_DP, + RK3288_DP, +}; + +struct analogix_dp_plat_data { + enum analogix_dp_devtype dev_type; + struct drm_panel *panel; + struct drm_encoder *encoder; + struct drm_connector *connector; + + int (*power_on)(struct analogix_dp_plat_data *); + int (*power_off)(struct analogix_dp_plat_data *); + int (*attach)(struct analogix_dp_plat_data *, struct drm_bridge *, + struct drm_connector *); + int (*get_modes)(struct analogix_dp_plat_data *); +}; + +int analogix_dp_resume(struct device *dev); +int analogix_dp_suspend(struct device *dev); + +int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev, + struct analogix_dp_plat_data *plat_data); +void analogix_dp_unbind(struct device *dev, struct device *master, void *data); + +#endif /* _ANALOGIX_DP_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 9054598c9a7a..fe9d89c7d1ed 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -42,6 +42,8 @@ int drm_atomic_helper_commit(struct drm_device *dev, struct drm_atomic_state *state, bool async); +void drm_atomic_helper_wait_for_fences(struct drm_device *dev, + struct drm_atomic_state *state); bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, struct drm_atomic_state *old_state, struct drm_crtc *crtc); diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index e0170bf80bb0..8cb377c5eb93 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -45,16 +45,6 @@ struct drm_clip_rect; struct device_node; struct fence; -#define DRM_MODE_OBJECT_CRTC 0xcccccccc -#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 -#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 -#define DRM_MODE_OBJECT_MODE 0xdededede -#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 -#define DRM_MODE_OBJECT_FB 0xfbfbfbfb -#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb -#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee -#define DRM_MODE_OBJECT_ANY 0 - struct drm_mode_object { uint32_t id; uint32_t type; @@ -2259,8 +2249,8 @@ static inline unsigned drm_connector_index(struct drm_connector *connector) return connector->connector_id; } -/* helper to unplug all connectors from sysfs for device */ -extern void drm_connector_unplug_all(struct drm_device *dev); +/* helper to unregister all connectors from sysfs for device */ +extern void drm_connector_unregister_all(struct drm_device *dev); extern int drm_bridge_add(struct drm_bridge *bridge); extern void drm_bridge_remove(struct drm_bridge *bridge); diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h index e42495ad8136..70d4e221a3ad 100644 --- a/include/drm/drm_mem_util.h +++ b/include/drm/drm_mem_util.h @@ -54,6 +54,25 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL); } +static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp) +{ + if (size != 0 && nmemb > SIZE_MAX / size) + return NULL; + + if (size * nmemb <= PAGE_SIZE) + return kmalloc(nmemb * size, gfp); + + if (gfp & __GFP_RECLAIMABLE) { + void *ptr = kmalloc(nmemb * size, + gfp | __GFP_NOWARN | __GFP_NORETRY); + if (ptr) + return ptr; + } + + return __vmalloc(size * nmemb, + gfp | __GFP_HIGHMEM, PAGE_KERNEL); +} + static __inline void drm_free_large(void *ptr) { kvfree(ptr); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index afae2316bd43..055a08ddac02 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ 
b/include/drm/ttm/ttm_bo_api.h @@ -92,7 +92,7 @@ struct ttm_placement { */ struct ttm_bus_placement { void *addr; - unsigned long base; + phys_addr_t base; unsigned long size; unsigned long offset; bool is_iomem; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 3d4bf08aa21f..cb91f80c15b3 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -1030,8 +1030,7 @@ extern pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp); extern const struct ttm_mem_type_manager_func ttm_bo_manager_func; -#if (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) -#define TTM_HAS_AGP +#if IS_ENABLED(CONFIG_AGP) #include <linux/agp_backend.h> /** diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 1b4d69f68c33..3f103076d0bf 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -135,7 +135,7 @@ struct bdi_writeback { struct backing_dev_info { struct list_head bdi_list; - unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ + unsigned long ra_pages; /* max readahead in PAGE_SIZE units */ unsigned int capabilities; /* Device capabilities */ congested_fn *congested_fn; /* Function pointer if device is md/dm */ void *congested_data; /* Pointer to aux data for congested func */ diff --git a/include/linux/bio.h b/include/linux/bio.h index 88bc64f00bb5..6b7481f62218 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -41,7 +41,7 @@ #endif #define BIO_MAX_PAGES 256 -#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) +#define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_SHIFT) #define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) /* diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7e5d7e018bea..669e419d6234 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1372,7 +1372,7 @@ unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); static inline void put_dev_sector(Sector p) { - page_cache_release(p.v); + put_page(p.v); } static inline bool __bvec_gap_to_prev(struct request_queue *q, diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index c67f052cc5e5..d48daa3f6f20 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -43,7 +43,7 @@ enum bh_state_bits { */ }; -#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512) +#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512) struct page; struct buffer_head; @@ -263,7 +263,7 @@ void buffer_init(void); static inline void attach_page_buffers(struct page *page, struct buffer_head *head) { - page_cache_get(page); + get_page(page); SetPagePrivate(page); set_page_private(page, (unsigned long)head); } diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index e7975e4681e1..db92a8d4926e 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -176,8 +176,8 @@ extern void ceph_put_snap_context(struct ceph_snap_context *sc); */ static inline int calc_pages_for(u64 off, u64 len) { - return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - - (off >> PAGE_CACHE_SHIFT); + return ((off+len+PAGE_SIZE-1) >> PAGE_SHIFT) - + (off >> PAGE_SHIFT); } extern struct kmem_cache *ceph_inode_cachep; diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 22ab246feed3..eeae401a2412 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -199,7 +199,7 @@ #define unreachable() __builtin_unreachable() /* Mark a function definition as prohibited from being cloned. 
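 *
 * The hunk below additionally attaches __optimize__("no-tracer"); as
 * described in the accompanying change, gcc's tracer pass can duplicate
 * blocks (including asm with fixed register operands) inside functions
 * that rely on having exactly one copy, so it is disabled per-function
 * alongside the no-clone attribute.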
*/ -#define __noclone __attribute__((__noclone__)) +#define __noclone __attribute__((__noclone__, __optimize__("no-tracer"))) #endif /* GCC_VERSION >= 40500 */ diff --git a/include/linux/console.h b/include/linux/console.h index ea731af2451e..e49cc1ef19be 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -191,6 +191,8 @@ void vcs_remove_sysfs(int index); #ifdef CONFIG_VGA_CONSOLE extern bool vgacon_text_force(void); +#else +static inline bool vgacon_text_force(void) { return false; } #endif #endif /* _LINUX_CONSOLE_H */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 7cb043d8f4e8..4bb4de8d95ea 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -161,6 +161,7 @@ struct dentry_operations { struct vfsmount *(*d_automount)(struct path *); int (*d_manage)(struct dentry *, bool); struct inode *(*d_select_inode)(struct dentry *, unsigned); + struct dentry *(*d_real)(struct dentry *, struct inode *); } ____cacheline_aligned; /* @@ -229,6 +230,7 @@ struct dentry_operations { #define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ #define DCACHE_ENCRYPTED_WITH_KEY 0x04000000 /* dir is encrypted with a valid key */ +#define DCACHE_OP_REAL 0x08000000 extern seqlock_t rename_lock; @@ -555,4 +557,12 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper) return upper; } +static inline struct dentry *d_real(struct dentry *dentry) +{ + if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) + return dentry->d_op->d_real(dentry, NULL); + else + return dentry; +} + #endif /* __LINUX_DCACHE_H */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 9eb215a155e0..b90e9bdbd1dd 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -262,7 +262,7 @@ struct f2fs_node { /* * For NAT entries */ -#define NAT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_nat_entry)) +#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry)) struct f2fs_nat_entry { __u8 version; /* latest version of cached nat entry */ @@ -282,7 +282,7 @@ struct f2fs_nat_block { * Not allow to change this. */ #define SIT_VBLOCK_MAP_SIZE 64 -#define SIT_ENTRY_PER_BLOCK (PAGE_CACHE_SIZE / sizeof(struct f2fs_sit_entry)) +#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry)) /* * Note that f2fs_sit_entry->vblocks has the following bit-field information. diff --git a/include/linux/fs.h b/include/linux/fs.h index 14a97194b34b..70e61b58baaf 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -929,7 +929,7 @@ static inline struct file *get_file(struct file *f) /* Page cache limit. The filesystems should put that into their s_maxbytes limits, otherwise bad things can happen in VM. 
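A sketch of how a stacking filesystem might implement the new ->d_real() operation paired with DCACHE_OP_REAL; the examplefs_* helpers are hypothetical, only the operation signature is taken from the dcache.h hunk above.

        static struct dentry *examplefs_d_real(struct dentry *dentry,
                                               struct inode *inode)
        {
                /* Hypothetical: pick the underlying dentry, preferring
                 * the layer whose inode matches the one asked for. */
                struct dentry *real = examplefs_upper_dentry(dentry);

                if (!real || (inode && d_inode(real) != inode))
                        real = examplefs_lower_dentry(dentry);
                return real;
        }

        static const struct dentry_operations examplefs_dentry_operations = {
                .d_real = examplefs_d_real,
        };

Callers then go through the d_real() inline added above, which falls back to the dentry itself whenever DCACHE_OP_REAL is not set.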
*/ #if BITS_PER_LONG==32 -#define MAX_LFS_FILESIZE (((loff_t)PAGE_CACHE_SIZE << (BITS_PER_LONG-1))-1) +#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1) #elif BITS_PER_LONG==64 #define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL) #endif @@ -1241,6 +1241,16 @@ static inline struct inode *file_inode(const struct file *f) return f->f_inode; } +static inline struct dentry *file_dentry(const struct file *file) +{ + struct dentry *dentry = file->f_path.dentry; + + if (unlikely(dentry->d_flags & DCACHE_OP_REAL)) + return dentry->d_op->d_real(dentry, file_inode(file)); + else + return dentry; +} + static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) { return locks_lock_inode_wait(file_inode(filp), fl); @@ -2067,7 +2077,7 @@ extern int generic_update_time(struct inode *, struct timespec *, int); /* /sys/fs */ extern struct kobject *fs_kobj; -#define MAX_RW_COUNT (INT_MAX & PAGE_CACHE_MASK) +#define MAX_RW_COUNT (INT_MAX & PAGE_MASK) #ifdef CONFIG_MANDATORY_FILE_LOCKING extern int locks_mandatory_locked(struct file *); diff --git a/include/linux/iommu.h b/include/linux/iommu.h index a5c539fa5d2b..ef7a6ecd8584 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -195,9 +195,7 @@ struct iommu_ops { /* Get the number of windows per domain */ u32 (*domain_get_windows)(struct iommu_domain *domain); -#ifdef CONFIG_OF_IOMMU int (*of_xlate)(struct device *dev, struct of_phandle_args *args); -#endif unsigned long pgsize_bitmap; void *priv; diff --git a/include/linux/mm.h b/include/linux/mm.h index ed6407d1b7b5..ffcff53e3b2b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -623,7 +623,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address, * * A page may belong to an inode's memory mapping. In this case, page->mapping * is the pointer to the inode, and page->index is the file offset of the page, - * in units of PAGE_CACHE_SIZE. + * in units of PAGE_SIZE. * * If pagecache pages are not associated with an inode, they are said to be * anonymous pages. These may become associated with the swapcache, and in that diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 944b2b37313b..c2d75b4fa86c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -341,7 +341,7 @@ struct vm_area_struct { /* Information about our backing store: */ unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE - units, *not* PAGE_CACHE_SIZE */ + units */ struct file * vm_file; /* File we map to (can be NULL). 
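The file_dentry() helper above matters for code that can be handed files opened on a union/overlay mount; a minimal sketch of the intended call pattern (examplefs_* is an invented name):

        static struct inode *examplefs_backing_inode(const struct file *file)
        {
                /* Use file_dentry() rather than file->f_path.dentry so a
                 * DCACHE_OP_REAL filesystem can substitute the dentry
                 * that actually matches file_inode(file). */
                return d_inode(file_dentry(file));
        }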
*/ void * vm_private_data; /* was vm_pte (shared mem) */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index cb0d5d09c2e4..8395308a2445 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2120,7 +2120,10 @@ struct napi_gro_cb { /* Used in foo-over-udp, set in udp[46]_gro_receive */ u8 is_ipv6:1; - /* 7 bit hole */ + /* Used in GRE, set in fou/gue_gro_receive */ + u8 is_fou:1; + + /* 6 bit hole */ /* used to support CHECKSUM_COMPLETE for tunneling protocols */ __wsum csum; diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index f2f650f136ee..957049f72290 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h @@ -41,8 +41,8 @@ struct nfs_page { struct page *wb_page; /* page to read in/write out */ struct nfs_open_context *wb_context; /* File state context info */ struct nfs_lock_context *wb_lock_context; /* lock context info */ - pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ - unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ + pgoff_t wb_index; /* Offset >> PAGE_SHIFT */ + unsigned int wb_offset, /* Offset & ~PAGE_MASK */ wb_pgbase, /* Start of page data */ wb_bytes; /* Length of request */ struct kref wb_kref; /* reference count */ @@ -184,7 +184,7 @@ nfs_list_entry(struct list_head *head) static inline loff_t req_offset(struct nfs_page *req) { - return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset; + return (((loff_t)req->wb_index) << PAGE_SHIFT) + req->wb_offset; } #endif /* _LINUX_NFS_PAGE_H */ diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index 9abb763e4b86..e9fcf90b270d 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h @@ -331,7 +331,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) { unsigned len = le16_to_cpu(dlen); -#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) +#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) if (len == NILFS_MAX_REC_LEN) return 1 << 16; #endif @@ -340,7 +340,7 @@ static inline unsigned nilfs_rec_len_from_disk(__le16 dlen) static inline __le16 nilfs_rec_len_to_disk(unsigned len) { -#if !defined(__KERNEL__) || (PAGE_CACHE_SIZE >= 65536) +#if !defined(__KERNEL__) || (PAGE_SIZE >= 65536) if (len == (1 << 16)) return cpu_to_le16(NILFS_MAX_REC_LEN); else if (len > (1 << 16)) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 1ebd65c91422..7e1ab155c67c 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -86,21 +86,6 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) (__force unsigned long)mask; } -/* - * The page cache can be done in larger chunks than - * one page, because it allows for more efficient - * throughput (it can then be mapped into user - * space in smaller chunks for same flexibility). - * - * Or rather, it _will_ be done in larger chunks. 
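For the napi_gro_cb hunk above, a hedged sketch of how the new is_fou bit is meant to be used: the FOU/GUE receive path marks the skb so the GRE GRO handler can avoid treating an inner GRE header as an outer tunnel. The exact call sites are assumptions here.

        /* in the fou/gue ->gro_receive handler: */
        NAPI_GRO_CB(skb)->is_fou = 1;

        /* in gre_gro_receive, before any outer-GRE processing: */
        if (NAPI_GRO_CB(skb)->is_fou)
                goto out;       /* already inside a UDP tunnel */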
- */ -#define PAGE_CACHE_SHIFT PAGE_SHIFT -#define PAGE_CACHE_SIZE PAGE_SIZE -#define PAGE_CACHE_MASK PAGE_MASK -#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK) - -#define page_cache_get(page) get_page(page) -#define page_cache_release(page) put_page(page) void release_pages(struct page **pages, int nr, bool cold); /* @@ -390,13 +375,13 @@ static inline pgoff_t page_to_pgoff(struct page *page) return page->index << compound_order(page); if (likely(!PageTransTail(page))) - return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + return page->index; /* * We don't initialize ->index for tail pages: calculate based on * head page */ - pgoff = compound_head(page)->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); + pgoff = compound_head(page)->index; pgoff += page - compound_head(page); return pgoff; } @@ -406,12 +391,12 @@ static inline pgoff_t page_to_pgoff(struct page *page) */ static inline loff_t page_offset(struct page *page) { - return ((loff_t)page->index) << PAGE_CACHE_SHIFT; + return ((loff_t)page->index) << PAGE_SHIFT; } static inline loff_t page_file_offset(struct page *page) { - return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT; + return ((loff_t)page_file_index(page)) << PAGE_SHIFT; } extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, @@ -425,7 +410,7 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, return linear_hugepage_index(vma, address); pgoff = (address - vma->vm_start) >> PAGE_SHIFT; pgoff += vma->vm_pgoff; - return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT); + return pgoff; } extern void __lock_page(struct page *page); @@ -535,8 +520,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter); /* * Fault a userspace page into pagetables. Return non-zero on a fault. * - * This assumes that two userspace pages are always sufficient. That's - * not true if PAGE_CACHE_SIZE > PAGE_SIZE. + * This assumes that two userspace pages are always sufficient. */ static inline int fault_in_pages_writeable(char __user *uaddr, int size) { @@ -671,8 +655,8 @@ static inline int add_to_page_cache(struct page *page, static inline unsigned long dir_pages(struct inode *inode) { - return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> - PAGE_CACHE_SHIFT; + return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> + PAGE_SHIFT; } #endif /* _LINUX_PAGEMAP_H */ diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index cc0fc712bb82..7ca44fb5b675 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -129,7 +129,7 @@ static inline void svc_get(struct svc_serv *serv) * * These happen to all be powers of 2, which is not strictly * necessary but helps enforce the real limitation, which is - * that they should be multiples of PAGE_CACHE_SIZE. + * that they should be multiples of PAGE_SIZE. * * For UDP transports, a block plus NFS,RPC, and UDP headers * has to fit into the IP datagram limit of 64K. The largest diff --git a/include/linux/swap.h b/include/linux/swap.h index d18b65c53dbb..2b83359c19ca 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -433,9 +433,9 @@ struct backing_dev_info; #define si_swapinfo(val) \ do { (val)->freeswap = (val)->totalswap = 0; } while (0) /* only sparc can not include linux/pagemap.h in this file - * so leave page_cache_release and release_pages undeclared... */ + * so leave put_page and release_pages undeclared... 
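Since PAGE_CACHE_SHIFT/SIZE/MASK were defined above as plain aliases of PAGE_SHIFT/SIZE/MASK, every replacement in this patch is a 1:1 rename. The usual split of a file position, with example numbers assuming 4K pages:

        static void example_split_pos(loff_t pos, pgoff_t *index,
                                      unsigned int *off)
        {
                *index = pos >> PAGE_SHIFT;     /* 0x3245 -> index 3   */
                *off   = pos & ~PAGE_MASK;      /* 0x3245 -> off 0x245 */
                /* ((loff_t)*index << PAGE_SHIFT) + *off == pos again  */
        }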
*/ #define free_page_and_swap_cache(page) \ - page_cache_release(page) + put_page(page) #define free_pages_and_swap_cache(pages, nr) \ release_pages((pages), (nr), false); diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index d1f1d338af20..8b51df3ab334 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -8,6 +8,7 @@ #include <linux/rbtree.h> struct vm_area_struct; /* vma defining user mapping in mm_types.h */ +struct notifier_block; /* in notifier.h */ /* bits in flags of vmalloc's vm_struct below */ #define VM_IOREMAP 0x00000001 /* ioremap() and friends */ @@ -187,4 +188,7 @@ pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) #define VMALLOC_TOTAL 0UL #endif +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); + #endif /* _LINUX_VMALLOC_H */ diff --git a/include/net/act_api.h b/include/net/act_api.h index 2a19fe111c78..03e322b30218 100644 --- a/include/net/act_api.h +++ b/include/net/act_api.h @@ -135,6 +135,7 @@ void tcf_hashinfo_destroy(const struct tc_action_ops *ops, static inline void tc_action_net_exit(struct tc_action_net *tn) { tcf_hashinfo_destroy(tn->ops, tn->hinfo); + kfree(tn->hinfo); } int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb, diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 0c09da34b67a..e385eb3076a1 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1001,6 +1001,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * flag indicates that the PN was verified for replay protection. * Note that this flag is also currently only supported when a frame * is also decrypted (ie. @RX_FLAG_DECRYPTED must be set) + * @RX_FLAG_DUP_VALIDATED: The driver should set this flag if it did + * de-duplication by itself. * @RX_FLAG_FAILED_FCS_CRC: Set this flag if the FCS check failed on * the frame. * @RX_FLAG_FAILED_PLCP_CRC: Set this flag if the PCLP check failed on diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 65521cfdcade..03fb33efcae2 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -386,11 +386,9 @@ static inline struct list_head *sctp_list_dequeue(struct list_head *list) { struct list_head *result = NULL; - if (list->next != list) { + if (!list_empty(list)) { result = list->next; - list->next = result->next; - list->next->prev = list; - INIT_LIST_HEAD(result); + list_del_init(result); } return result; } diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index c067019ed12a..74d79bde7075 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -516,6 +516,31 @@ static inline int scsi_device_tpgs(struct scsi_device *sdev) return sdev->inquiry ? (sdev->inquiry[5] >> 4) & 0x3 : 0; } +/** + * scsi_device_supports_vpd - test if a device supports VPD pages + * @sdev: the &struct scsi_device to test + * + * If the 'try_vpd_pages' flag is set it takes precedence. + * Otherwise we will assume VPD pages are supported if the + * SCSI level is at least SPC-3 and 'skip_vpd_pages' is not set. + */ +static inline int scsi_device_supports_vpd(struct scsi_device *sdev) +{ + /* Attempt VPD inquiry if the device blacklist explicitly calls + * for it. 
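A minimal sketch of a client for the register_vmap_purge_notifier()/unregister_vmap_purge_notifier() interface declared above, assuming a driver that caches vmap ranges and must drop them when the core purges lazily-freed areas; the example_* names are invented.

        #include <linux/notifier.h>
        #include <linux/vmalloc.h>

        static int example_vmap_notify(struct notifier_block *nb,
                                       unsigned long event, void *ptr)
        {
                example_drop_vmap_caches();     /* hypothetical helper */
                return NOTIFY_DONE;
        }

        static struct notifier_block example_vmap_nb = {
                .notifier_call = example_vmap_notify,
        };

        /* register_vmap_purge_notifier(&example_vmap_nb) at probe time,
         * unregister_vmap_purge_notifier(&example_vmap_nb) at teardown. */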
+ */ + if (sdev->try_vpd_pages) + return 1; + /* + * Although VPD inquiries can go to SCSI-2 type devices, + * some USB ones crash on receiving them, and the pages + * we currently ask for are for SPC-3 and beyond + */ + if (sdev->scsi_level > SCSI_SPC_2 && !sdev->skip_vpd_pages) + return 1; + return 0; +} + #define MODULE_ALIAS_SCSI_DEVICE(type) \ MODULE_ALIAS("scsi:t-" __stringify(type) "*") #define SCSI_DEVICE_MODALIAS_FMT "scsi:t-0x%02x" diff --git a/include/sound/hda_chmap.h b/include/sound/hda_chmap.h index e20d219a0304..babd445c7505 100644 --- a/include/sound/hda_chmap.h +++ b/include/sound/hda_chmap.h @@ -36,6 +36,8 @@ struct hdac_chmap_ops { int (*chmap_validate)(struct hdac_chmap *hchmap, int ca, int channels, unsigned char *chmap); + int (*get_spk_alloc)(struct hdac_device *hdac, int pcm_idx); + void (*get_chmap)(struct hdac_device *hdac, int pcm_idx, unsigned char *chmap); void (*set_chmap)(struct hdac_device *hdac, int pcm_idx, diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h index fa341fcb5829..eed87a7559b7 100644 --- a/include/sound/hda_i915.h +++ b/include/sound/hda_i915.h @@ -10,8 +10,8 @@ int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable); int snd_hdac_display_power(struct hdac_bus *bus, bool enable); int snd_hdac_get_display_clk(struct hdac_bus *bus); -int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate); -int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, +int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate); +int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, bool *audio_enabled, char *buffer, int max_bytes); int snd_hdac_i915_init(struct hdac_bus *bus); int snd_hdac_i915_exit(struct hdac_bus *bus); @@ -29,12 +29,12 @@ static inline int snd_hdac_get_display_clk(struct hdac_bus *bus) { return 0; } -static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, - int rate) +static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec, + hda_nid_t nid, int rate) { return 0; } -static inline int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, +static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, bool *audio_enabled, char *buffer, int max_bytes) { diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index 677807f29a1c..e90e82ad6875 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -23,7 +23,7 @@ struct map_lookup; struct extent_buffer; struct btrfs_work; struct __btrfs_workqueue; -struct btrfs_qgroup_operation; +struct btrfs_qgroup_extent_record; #define show_ref_type(type) \ __print_symbolic(type, \ @@ -1231,6 +1231,93 @@ DEFINE_EVENT(btrfs__qgroup_delayed_ref, btrfs_qgroup_free_delayed_ref, TP_ARGS(ref_root, reserved) ); + +DECLARE_EVENT_CLASS(btrfs_qgroup_extent, + TP_PROTO(struct btrfs_qgroup_extent_record *rec), + + TP_ARGS(rec), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + ), + + TP_fast_assign( + __entry->bytenr = rec->bytenr, + __entry->num_bytes = rec->num_bytes; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu", + (unsigned long long)__entry->bytenr, + (unsigned long long)__entry->num_bytes) +); + +DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents, + + TP_PROTO(struct btrfs_qgroup_extent_record *rec), + + TP_ARGS(rec) +); + +DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_insert_dirty_extent, + + TP_PROTO(struct btrfs_qgroup_extent_record *rec), + + TP_ARGS(rec) +); + 
+TRACE_EVENT(btrfs_qgroup_account_extent, + + TP_PROTO(u64 bytenr, u64 num_bytes, u64 nr_old_roots, u64 nr_new_roots), + + TP_ARGS(bytenr, num_bytes, nr_old_roots, nr_new_roots), + + TP_STRUCT__entry( + __field( u64, bytenr ) + __field( u64, num_bytes ) + __field( u64, nr_old_roots ) + __field( u64, nr_new_roots ) + ), + + TP_fast_assign( + __entry->bytenr = bytenr; + __entry->num_bytes = num_bytes; + __entry->nr_old_roots = nr_old_roots; + __entry->nr_new_roots = nr_new_roots; + ), + + TP_printk("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, " + "nr_new_roots = %llu", + __entry->bytenr, + __entry->num_bytes, + __entry->nr_old_roots, + __entry->nr_new_roots) +); + +TRACE_EVENT(qgroup_update_counters, + + TP_PROTO(u64 qgid, u64 cur_old_count, u64 cur_new_count), + + TP_ARGS(qgid, cur_old_count, cur_new_count), + + TP_STRUCT__entry( + __field( u64, qgid ) + __field( u64, cur_old_count ) + __field( u64, cur_new_count ) + ), + + TP_fast_assign( + __entry->qgid = qgid; + __entry->cur_old_count = cur_old_count; + __entry->cur_new_count = cur_new_count; + ), + + TP_printk("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu", + __entry->qgid, + __entry->cur_old_count, + __entry->cur_new_count) +); + #endif /* _TRACE_BTRFS_H */ /* This part must be outside protection */ diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index a0ebfe7c9a28..368325061ca7 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -36,7 +36,13 @@ #ifndef _DRM_H_ #define _DRM_H_ -#if defined(__KERNEL__) || defined(__linux__) +#if defined(__KERNEL__) + +#include <linux/types.h> +#include <asm/ioctl.h> +typedef unsigned int drm_handle_t; + +#elif defined(__linux__) #include <linux/types.h> #include <asm/ioctl.h> @@ -181,7 +187,7 @@ enum drm_map_type { _DRM_SHM = 2, /**< shared, cached */ _DRM_AGP = 3, /**< AGP/GART */ _DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */ - _DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */ + _DRM_CONSISTENT = 5 /**< Consistent memory for PCI DMA */ }; /** @@ -373,7 +379,11 @@ struct drm_buf_pub { */ struct drm_buf_map { int count; /**< Length of the buffer list */ +#ifdef __cplusplus + void __user *virt; +#else void __user *virtual; /**< Mmap'd area in user-virtual */ +#endif struct drm_buf_pub __user *list; /**< Buffer information */ }; @@ -431,7 +441,7 @@ struct drm_draw { * DRM_IOCTL_UPDATE_DRAW ioctl argument type. 
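The TRACE_EVENT()/DEFINE_EVENT() definitions above expand into trace_<name>() helpers; the qgroup accounting code would emit them roughly like this (the variable names are illustrative):

        trace_btrfs_qgroup_account_extents(record);     /* per-record walk */
        trace_btrfs_qgroup_account_extent(bytenr, num_bytes,
                                          nr_old_roots, nr_new_roots);
        trace_qgroup_update_counters(qgroupid,
                                     cur_old_count, cur_new_count);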
*/ typedef enum { - DRM_DRAWABLE_CLIPRECTS, + DRM_DRAWABLE_CLIPRECTS } drm_drawable_info_type_t; struct drm_update_draw { @@ -681,7 +691,7 @@ struct drm_prime_handle { __s32 fd; }; -#include <drm/drm_mode.h> +#include "drm_mode.h" #define DRM_IOCTL_BASE 'd' #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h index c0217434d28d..7a7856e02e49 100644 --- a/include/uapi/drm/drm_mode.h +++ b/include/uapi/drm/drm_mode.h @@ -320,6 +320,16 @@ struct drm_mode_connector_set_property { __u32 connector_id; }; +#define DRM_MODE_OBJECT_CRTC 0xcccccccc +#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 +#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 +#define DRM_MODE_OBJECT_MODE 0xdededede +#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 +#define DRM_MODE_OBJECT_FB 0xfbfbfbfb +#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb +#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee +#define DRM_MODE_OBJECT_ANY 0 + struct drm_mode_obj_get_properties { __u64 props_ptr; __u64 prop_values_ptr; diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h index 06d6c6228a7a..d5ce71607972 100644 --- a/include/uapi/linux/usb/ch9.h +++ b/include/uapi/linux/usb/ch9.h @@ -899,7 +899,7 @@ struct usb_ssp_cap_descriptor { __le32 bmAttributes; #define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */ #define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */ - __u16 wFunctionalitySupport; + __le16 wFunctionalitySupport; #define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf) #define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8) #define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12) diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index c18264df9504..4cb65bbfa654 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -40,6 +40,8 @@ #define VIRTIO_CONFIG_S_DRIVER_OK 4 /* Driver has finished configuring features */ #define VIRTIO_CONFIG_S_FEATURES_OK 8 +/* Device entered invalid state, driver must reset it */ +#define VIRTIO_CONFIG_S_NEEDS_RESET 0x40 /* We've given up on this device. 
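The wFunctionalitySupport change above only fixes the declared endianness; consumers are expected to convert on access, for example (helper name is illustrative):

        #include <linux/usb/ch9.h>

        static u16 example_ssp_min_speed_id(
                        const struct usb_ssp_cap_descriptor *ssp)
        {
                /* Descriptor fields are little-endian on the wire; the
                 * __le16 type lets sparse catch a missing conversion. */
                u16 func = le16_to_cpu(ssp->wFunctionalitySupport);

                return func & USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID;
        }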
*/ #define VIRTIO_CONFIG_S_FAILED 0x80 diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index eeba75395f7d..ad66589f2ae6 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -194,8 +194,9 @@ int ipu_cpmem_set_format_rgb(struct ipuv3_channel *ch, int ipu_cpmem_set_format_passthrough(struct ipuv3_channel *ch, int width); void ipu_cpmem_set_yuv_interleaved(struct ipuv3_channel *ch, u32 pixel_format); void ipu_cpmem_set_yuv_planar_full(struct ipuv3_channel *ch, - u32 pixel_format, int stride, - int u_offset, int v_offset); + unsigned int uv_stride, + unsigned int u_offset, + unsigned int v_offset); void ipu_cpmem_set_yuv_planar(struct ipuv3_channel *ch, u32 pixel_format, int stride, int height); int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc); @@ -236,7 +237,7 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc); int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc, unsigned long bandwidth_mbs, int burstsize); void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc); -int ipu_dmfc_init_channel(struct dmfc_channel *dmfc, int width); +void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width); struct dmfc_channel *ipu_dmfc_get(struct ipu_soc *ipu, int ipuv3_channel); void ipu_dmfc_put(struct dmfc_channel *dmfc); diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h index ddcc8ca7316b..19aa65a35546 100644 --- a/include/video/mipi_display.h +++ b/include/video/mipi_display.h @@ -115,6 +115,14 @@ enum { MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E, MIPI_DCS_SET_TEAR_SCANLINE = 0x44, MIPI_DCS_GET_SCANLINE = 0x45, + MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_DISPLAY_BRIGHTNESS = 0x52, /* MIPI DCS 1.3 */ + MIPI_DCS_WRITE_CONTROL_DISPLAY = 0x53, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_CONTROL_DISPLAY = 0x54, /* MIPI DCS 1.3 */ + MIPI_DCS_WRITE_POWER_SAVE = 0x55, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_POWER_SAVE = 0x56, /* MIPI DCS 1.3 */ + MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E, /* MIPI DCS 1.3 */ + MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F, /* MIPI DCS 1.3 */ MIPI_DCS_READ_DDB_START = 0xA1, MIPI_DCS_READ_DDB_CONTINUE = 0xA8, }; diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 781c1399c6a3..ade739f67f1d 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -307,8 +307,8 @@ static int mqueue_fill_super(struct super_block *sb, void *data, int silent) struct inode *inode; struct ipc_namespace *ns = data; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 220fc17b9718..7edc95edfaee 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -321,7 +321,7 @@ retry: copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE); ret = __replace_page(vma, vaddr, old_page, new_page); - page_cache_release(new_page); + put_page(new_page); put_old: put_page(old_page); @@ -539,14 +539,14 @@ static int __copy_insn(struct address_space *mapping, struct file *filp, * see uprobe_register(). 
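One plausible consumer of the new MIPI DCS 1.3 opcodes above is a DSI panel driver programming its backlight; a hedged sketch using the generic mipi_dsi_dcs_write() helper (the panel function itself is invented):

        #include <drm/drm_mipi_dsi.h>
        #include <video/mipi_display.h>

        static int example_panel_set_brightness(struct mipi_dsi_device *dsi,
                                                u8 level)
        {
                ssize_t ret;

                ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
                                         &level, sizeof(level));
                return ret < 0 ? ret : 0;
        }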
*/ if (mapping->a_ops->readpage) - page = read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT, filp); + page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp); else - page = shmem_read_mapping_page(mapping, offset >> PAGE_CACHE_SHIFT); + page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT); if (IS_ERR(page)) return PTR_ERR(page); copy_from_page(page, offset, insn, nbytes); - page_cache_release(page); + put_page(page); return 0; } diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 27a7a26b1ece..8f22fbedc3a6 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -2444,6 +2444,22 @@ static struct bpf_test tests[] = { { { 0, 4294967295U } }, }, { + "ALU_ADD_X: 2 + 4294967294 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_LD_IMM64(R1, 4294967294U), + BPF_ALU32_REG(BPF_ADD, R0, R1), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { "ALU64_ADD_X: 1 + 2 = 3", .u.insns_int = { BPF_LD_IMM64(R0, 1), @@ -2467,6 +2483,23 @@ static struct bpf_test tests[] = { { }, { { 0, 4294967295U } }, }, + { + "ALU64_ADD_X: 2 + 4294967294 = 4294967296", + .u.insns_int = { + BPF_LD_IMM64(R0, 2), + BPF_LD_IMM64(R1, 4294967294U), + BPF_LD_IMM64(R2, 4294967296ULL), + BPF_ALU64_REG(BPF_ADD, R0, R1), + BPF_JMP_REG(BPF_JEQ, R0, R2, 2), + BPF_MOV32_IMM(R0, 0), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_ALU | BPF_ADD | BPF_K */ { "ALU_ADD_K: 1 + 2 = 3", @@ -2502,6 +2535,21 @@ static struct bpf_test tests[] = { { { 0, 4294967295U } }, }, { + "ALU_ADD_K: 4294967294 + 2 = 0", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_ALU32_IMM(BPF_ADD, R0, 2), + BPF_JMP_IMM(BPF_JEQ, R0, 0, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { "ALU_ADD_K: 0 + (-1) = 0x00000000ffffffff", .u.insns_int = { BPF_LD_IMM64(R2, 0x0), @@ -2518,6 +2566,70 @@ static struct bpf_test tests[] = { { { 0, 0x1 } }, }, { + "ALU_ADD_K: 0 + 0xffff = 0xffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0xffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x7fffffff), + BPF_ALU32_IMM(BPF_ADD, R2, 0x7fffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80000000 = 0x80000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x80000000), + BPF_ALU32_IMM(BPF_ADD, R2, 0x80000000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80008000 = 0x80008000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x80008000), + BPF_ALU32_IMM(BPF_ADD, R2, 0x80008000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { "ALU64_ADD_K: 1 + 2 = 3", .u.insns_int = { BPF_LD_IMM64(R0, 1), @@ -2551,6 +2663,22 @@ static struct 
bpf_test tests[] = { { { 0, 2147483647 } }, }, { + "ALU64_ADD_K: 4294967294 + 2 = 4294967296", + .u.insns_int = { + BPF_LD_IMM64(R0, 4294967294U), + BPF_LD_IMM64(R1, 4294967296ULL), + BPF_ALU64_IMM(BPF_ADD, R0, 2), + BPF_JMP_REG(BPF_JEQ, R0, R1, 2), + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, + { "ALU64_ADD_K: 2147483646 + -2147483647 = -1", .u.insns_int = { BPF_LD_IMM64(R0, 2147483646), @@ -2593,6 +2721,70 @@ static struct bpf_test tests[] = { { }, { { 0, 0x1 } }, }, + { + "ALU64_ADD_K: 0 + 0xffff = 0xffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffff), + BPF_ALU64_IMM(BPF_ADD, R2, 0xffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + 0x7fffffff = 0x7fffffff", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0x7fffffff), + BPF_ALU64_IMM(BPF_ADD, R2, 0x7fffffff), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU64_ADD_K: 0 + 0x80000000 = 0xffffffff80000000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffff80000000LL), + BPF_ALU64_IMM(BPF_ADD, R2, 0x80000000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, + { + "ALU_ADD_K: 0 + 0x80008000 = 0xffffffff80008000", + .u.insns_int = { + BPF_LD_IMM64(R2, 0x0), + BPF_LD_IMM64(R3, 0xffffffff80008000LL), + BPF_ALU64_IMM(BPF_ADD, R2, 0x80008000), + BPF_JMP_REG(BPF_JEQ, R2, R3, 2), + BPF_MOV32_IMM(R0, 2), + BPF_EXIT_INSN(), + BPF_MOV32_IMM(R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 0x1 } }, + }, /* BPF_ALU | BPF_SUB | BPF_X */ { "ALU_SUB_X: 3 - 1 = 2", @@ -4222,6 +4414,20 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JGT_K: Unsigned jump: if (-1 > 1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_JMP_IMM(BPF_JGT, R1, 1, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGE | BPF_K */ { "JMP_JGE_K: if (3 >= 2) return 1", @@ -4303,7 +4509,7 @@ static struct bpf_test tests[] = { .u.insns_int = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JNE, R1, 2, 1), + BPF_JMP_IMM(BPF_JSET, R1, 2, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), @@ -4317,7 +4523,7 @@ static struct bpf_test tests[] = { .u.insns_int = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), - BPF_JMP_IMM(BPF_JNE, R1, 0xffffffff, 1), + BPF_JMP_IMM(BPF_JSET, R1, 0xffffffff, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), @@ -4404,6 +4610,21 @@ static struct bpf_test tests[] = { { }, { { 0, 1 } }, }, + { + "JMP_JGT_X: Unsigned jump: if (-1 > 1) return 1", + .u.insns_int = { + BPF_ALU32_IMM(BPF_MOV, R0, 0), + BPF_LD_IMM64(R1, -1), + BPF_LD_IMM64(R2, 1), + BPF_JMP_REG(BPF_JGT, R1, R2, 1), + BPF_EXIT_INSN(), + BPF_ALU32_IMM(BPF_MOV, R0, 1), + BPF_EXIT_INSN(), + }, + INTERNAL, + { }, + { { 0, 1 } }, + }, /* BPF_JMP | BPF_JGE | BPF_X */ { "JMP_JGE_X: if (3 >= 2) return 1", @@ -4474,7 +4695,7 @@ static struct bpf_test tests[] = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), BPF_LD_IMM64(R2, 2), - 
BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_JMP_REG(BPF_JSET, R1, R2, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), @@ -4489,7 +4710,7 @@ static struct bpf_test tests[] = { BPF_ALU32_IMM(BPF_MOV, R0, 0), BPF_LD_IMM64(R1, 3), BPF_LD_IMM64(R2, 0xffffffff), - BPF_JMP_REG(BPF_JNE, R1, R2, 1), + BPF_JMP_REG(BPF_JSET, R1, R2, 1), BPF_EXIT_INSN(), BPF_ALU32_IMM(BPF_MOV, R0, 1), BPF_EXIT_INSN(), diff --git a/mm/fadvise.c b/mm/fadvise.c index b8a5bc66b0c0..b8024fa7101d 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -97,8 +97,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) break; case POSIX_FADV_WILLNEED: /* First and last PARTIAL page! */ - start_index = offset >> PAGE_CACHE_SHIFT; - end_index = endbyte >> PAGE_CACHE_SHIFT; + start_index = offset >> PAGE_SHIFT; + end_index = endbyte >> PAGE_SHIFT; /* Careful about overflow on the "+1" */ nrpages = end_index - start_index + 1; @@ -124,8 +124,8 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) * preserved on the expectation that it is better to preserve * needed memory than to discard unneeded memory. */ - start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT; - end_index = (endbyte >> PAGE_CACHE_SHIFT); + start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT; + end_index = (endbyte >> PAGE_SHIFT); if (end_index >= start_index) { unsigned long count = invalidate_mapping_pages(mapping, diff --git a/mm/filemap.c b/mm/filemap.c index a8c69c8c0a90..f2479af09da9 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -265,7 +265,7 @@ void delete_from_page_cache(struct page *page) if (freepage) freepage(page); - page_cache_release(page); + put_page(page); } EXPORT_SYMBOL(delete_from_page_cache); @@ -352,8 +352,8 @@ EXPORT_SYMBOL(filemap_flush); static int __filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, loff_t end_byte) { - pgoff_t index = start_byte >> PAGE_CACHE_SHIFT; - pgoff_t end = end_byte >> PAGE_CACHE_SHIFT; + pgoff_t index = start_byte >> PAGE_SHIFT; + pgoff_t end = end_byte >> PAGE_SHIFT; struct pagevec pvec; int nr_pages; int ret = 0; @@ -550,7 +550,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) pgoff_t offset = old->index; freepage = mapping->a_ops->freepage; - page_cache_get(new); + get_page(new); new->mapping = mapping; new->index = offset; @@ -572,7 +572,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) radix_tree_preload_end(); if (freepage) freepage(old); - page_cache_release(old); + put_page(old); } return error; @@ -651,7 +651,7 @@ static int __add_to_page_cache_locked(struct page *page, return error; } - page_cache_get(page); + get_page(page); page->mapping = mapping; page->index = offset; @@ -675,7 +675,7 @@ err_insert: spin_unlock_irq(&mapping->tree_lock); if (!huge) mem_cgroup_cancel_charge(page, memcg, false); - page_cache_release(page); + put_page(page); return error; } @@ -1083,7 +1083,7 @@ repeat: * include/linux/pagemap.h for details. */ if (unlikely(page != *pagep)) { - page_cache_release(page); + put_page(page); goto repeat; } } @@ -1121,7 +1121,7 @@ repeat: /* Has the page been truncated? 
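On the BPF_JNE -> BPF_JSET fixes above: JSET branches on a bitwise AND rather than on inequality, and with the operands these tests use, both opcodes happened to take the branch, which is how the mislabeled tests kept passing. In outline:

        /* BPF_JMP_IMM(BPF_JSET, R1, 2, 1): skip 1 insn if (R1 & 2) != 0
         * BPF_JMP_IMM(BPF_JNE,  R1, 2, 1): skip 1 insn if  R1 != 2
         *
         *   R1 = 3: JSET taken (3 & 2 == 2), JNE taken (3 != 2)
         *   R1 = 2: JSET taken (2 & 2 == 2), JNE not taken          */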
*/ if (unlikely(page->mapping != mapping)) { unlock_page(page); - page_cache_release(page); + put_page(page); goto repeat; } VM_BUG_ON_PAGE(page->index != offset, page); @@ -1168,7 +1168,7 @@ repeat: if (fgp_flags & FGP_LOCK) { if (fgp_flags & FGP_NOWAIT) { if (!trylock_page(page)) { - page_cache_release(page); + put_page(page); return NULL; } } else { @@ -1178,7 +1178,7 @@ repeat: /* Has the page been truncated? */ if (unlikely(page->mapping != mapping)) { unlock_page(page); - page_cache_release(page); + put_page(page); goto repeat; } VM_BUG_ON_PAGE(page->index != offset, page); @@ -1209,7 +1209,7 @@ no_page: err = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_RECLAIM_MASK); if (unlikely(err)) { - page_cache_release(page); + put_page(page); page = NULL; if (err == -EEXIST) goto repeat; @@ -1278,7 +1278,7 @@ repeat: /* Has the page moved? */ if (unlikely(page != *slot)) { - page_cache_release(page); + put_page(page); goto repeat; } export: @@ -1343,7 +1343,7 @@ repeat: /* Has the page moved? */ if (unlikely(page != *slot)) { - page_cache_release(page); + put_page(page); goto repeat; } @@ -1405,7 +1405,7 @@ repeat: /* Has the page moved? */ if (unlikely(page != *slot)) { - page_cache_release(page); + put_page(page); goto repeat; } @@ -1415,7 +1415,7 @@ repeat: * negatives, which is just confusing to the caller. */ if (page->mapping == NULL || page->index != iter.index) { - page_cache_release(page); + put_page(page); break; } @@ -1482,7 +1482,7 @@ repeat: /* Has the page moved? */ if (unlikely(page != *slot)) { - page_cache_release(page); + put_page(page); goto repeat; } @@ -1549,7 +1549,7 @@ repeat: /* Has the page moved? */ if (unlikely(page != *slot)) { - page_cache_release(page); + put_page(page); goto repeat; } export: @@ -1610,11 +1610,11 @@ static ssize_t do_generic_file_read(struct file *filp, loff_t *ppos, unsigned int prev_offset; int error = 0; - index = *ppos >> PAGE_CACHE_SHIFT; - prev_index = ra->prev_pos >> PAGE_CACHE_SHIFT; - prev_offset = ra->prev_pos & (PAGE_CACHE_SIZE-1); - last_index = (*ppos + iter->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT; - offset = *ppos & ~PAGE_CACHE_MASK; + index = *ppos >> PAGE_SHIFT; + prev_index = ra->prev_pos >> PAGE_SHIFT; + prev_offset = ra->prev_pos & (PAGE_SIZE-1); + last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; + offset = *ppos & ~PAGE_MASK; for (;;) { struct page *page; @@ -1648,7 +1648,7 @@ find_page: if (PageUptodate(page)) goto page_ok; - if (inode->i_blkbits == PAGE_CACHE_SHIFT || + if (inode->i_blkbits == PAGE_SHIFT || !mapping->a_ops->is_partially_uptodate) goto page_not_up_to_date; if (!trylock_page(page)) @@ -1672,18 +1672,18 @@ page_ok: */ isize = i_size_read(inode); - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + end_index = (isize - 1) >> PAGE_SHIFT; if (unlikely(!isize || index > end_index)) { - page_cache_release(page); + put_page(page); goto out; } /* nr is the maximum number of bytes to copy from this page */ - nr = PAGE_CACHE_SIZE; + nr = PAGE_SIZE; if (index == end_index) { - nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + nr = ((isize - 1) & ~PAGE_MASK) + 1; if (nr <= offset) { - page_cache_release(page); + put_page(page); goto out; } } @@ -1711,11 +1711,11 @@ page_ok: ret = copy_page_to_iter(page, offset, nr, iter); offset += ret; - index += offset >> PAGE_CACHE_SHIFT; - offset &= ~PAGE_CACHE_MASK; + index += offset >> PAGE_SHIFT; + offset &= ~PAGE_MASK; prev_offset = offset; - page_cache_release(page); + put_page(page); written += ret; if (!iov_iter_count(iter)) goto out; @@ -1735,7 
+1735,7 @@ page_not_up_to_date_locked: /* Did it get truncated before we got the lock? */ if (!page->mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); continue; } @@ -1757,7 +1757,7 @@ readpage: if (unlikely(error)) { if (error == AOP_TRUNCATED_PAGE) { - page_cache_release(page); + put_page(page); error = 0; goto find_page; } @@ -1774,7 +1774,7 @@ readpage: * invalidate_mapping_pages got it */ unlock_page(page); - page_cache_release(page); + put_page(page); goto find_page; } unlock_page(page); @@ -1789,7 +1789,7 @@ readpage: readpage_error: /* UHHUH! A synchronous read error occurred. Report it */ - page_cache_release(page); + put_page(page); goto out; no_cached_page: @@ -1805,7 +1805,7 @@ no_cached_page: error = add_to_page_cache_lru(page, mapping, index, mapping_gfp_constraint(mapping, GFP_KERNEL)); if (error) { - page_cache_release(page); + put_page(page); if (error == -EEXIST) { error = 0; goto find_page; @@ -1817,10 +1817,10 @@ no_cached_page: out: ra->prev_pos = prev_index; - ra->prev_pos <<= PAGE_CACHE_SHIFT; + ra->prev_pos <<= PAGE_SHIFT; ra->prev_pos |= prev_offset; - *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset; + *ppos = ((loff_t)index << PAGE_SHIFT) + offset; file_accessed(filp); return written ? written : error; } @@ -1912,7 +1912,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) else if (ret == -EEXIST) ret = 0; /* losing race to add is OK */ - page_cache_release(page); + put_page(page); } while (ret == AOP_TRUNCATED_PAGE); @@ -2022,8 +2022,8 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) loff_t size; int ret = 0; - size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); - if (offset >= size >> PAGE_CACHE_SHIFT) + size = round_up(i_size_read(inode), PAGE_SIZE); + if (offset >= size >> PAGE_SHIFT) return VM_FAULT_SIGBUS; /* @@ -2049,7 +2049,7 @@ retry_find: } if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) { - page_cache_release(page); + put_page(page); return ret | VM_FAULT_RETRY; } @@ -2072,10 +2072,10 @@ retry_find: * Found the page and have a reference on it. * We must recheck i_size under page lock. */ - size = round_up(i_size_read(inode), PAGE_CACHE_SIZE); - if (unlikely(offset >= size >> PAGE_CACHE_SHIFT)) { + size = round_up(i_size_read(inode), PAGE_SIZE); + if (unlikely(offset >= size >> PAGE_SHIFT)) { unlock_page(page); - page_cache_release(page); + put_page(page); return VM_FAULT_SIGBUS; } @@ -2120,7 +2120,7 @@ page_not_uptodate: if (!PageUptodate(page)) error = -EIO; } - page_cache_release(page); + put_page(page); if (!error || error == AOP_TRUNCATED_PAGE) goto retry_find; @@ -2164,7 +2164,7 @@ repeat: /* Has the page moved? 
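The many put_page()-and-retry hunks in mm/filemap.c above all stem from one pattern: a speculative, RCU-protected page-cache lookup must re-validate the slot after taking a reference. A simplified sketch of that pattern (the real code works on the radix-tree slot and also handles exceptional entries):

        static struct page *example_find_get_page(struct address_space *mapping,
                                                  pgoff_t offset)
        {
                struct page *page;

                rcu_read_lock();
        repeat:
                page = radix_tree_lookup(&mapping->page_tree, offset);
                if (page) {
                        if (!page_cache_get_speculative(page))
                                goto repeat;
                        /* The slot may have been reused while the
                         * reference was taken; drop it and retry. */
                        if (unlikely(page != radix_tree_lookup(
                                        &mapping->page_tree, offset))) {
                                put_page(page); /* was page_cache_release */
                                goto repeat;
                        }
                }
                rcu_read_unlock();
                return page;
        }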
*/ if (unlikely(page != *slot)) { - page_cache_release(page); + put_page(page); goto repeat; } @@ -2178,8 +2178,8 @@ repeat: if (page->mapping != mapping || !PageUptodate(page)) goto unlock; - size = round_up(i_size_read(mapping->host), PAGE_CACHE_SIZE); - if (page->index >= size >> PAGE_CACHE_SHIFT) + size = round_up(i_size_read(mapping->host), PAGE_SIZE); + if (page->index >= size >> PAGE_SHIFT) goto unlock; pte = vmf->pte + page->index - vmf->pgoff; @@ -2195,7 +2195,7 @@ repeat: unlock: unlock_page(page); skip: - page_cache_release(page); + put_page(page); next: if (iter.index == vmf->max_pgoff) break; @@ -2278,7 +2278,7 @@ static struct page *wait_on_page_read(struct page *page) if (!IS_ERR(page)) { wait_on_page_locked(page); if (!PageUptodate(page)) { - page_cache_release(page); + put_page(page); page = ERR_PTR(-EIO); } } @@ -2301,7 +2301,7 @@ repeat: return ERR_PTR(-ENOMEM); err = add_to_page_cache_lru(page, mapping, index, gfp); if (unlikely(err)) { - page_cache_release(page); + put_page(page); if (err == -EEXIST) goto repeat; /* Presumably ENOMEM for radix tree node */ @@ -2311,7 +2311,7 @@ repeat: filler: err = filler(data, page); if (err < 0) { - page_cache_release(page); + put_page(page); return ERR_PTR(err); } @@ -2364,7 +2364,7 @@ filler: /* Case c or d, restart the operation */ if (!page->mapping) { unlock_page(page); - page_cache_release(page); + put_page(page); goto repeat; } @@ -2511,7 +2511,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) struct iov_iter data; write_len = iov_iter_count(from); - end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT; + end = (pos + write_len - 1) >> PAGE_SHIFT; written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1); if (written) @@ -2525,7 +2525,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) */ if (mapping->nrpages) { written = invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, end); + pos >> PAGE_SHIFT, end); /* * If a page can not be invalidated, return 0 to fall back * to buffered write. @@ -2550,7 +2550,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos) */ if (mapping->nrpages) { invalidate_inode_pages2_range(mapping, - pos >> PAGE_CACHE_SHIFT, end); + pos >> PAGE_SHIFT, end); } if (written > 0) { @@ -2611,8 +2611,8 @@ ssize_t generic_perform_write(struct file *file, size_t copied; /* Bytes copied from user */ void *fsdata; - offset = (pos & (PAGE_CACHE_SIZE - 1)); - bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, + offset = (pos & (PAGE_SIZE - 1)); + bytes = min_t(unsigned long, PAGE_SIZE - offset, iov_iter_count(i)); again: @@ -2665,7 +2665,7 @@ again: * because not all segments in the iov can be copied at * once without a pagefault. */ - bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, + bytes = min_t(unsigned long, PAGE_SIZE - offset, iov_iter_single_seg_count(i)); goto again; } @@ -2752,8 +2752,8 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) iocb->ki_pos = endbyte + 1; written += status; invalidate_mapping_pages(mapping, - pos >> PAGE_CACHE_SHIFT, - endbyte >> PAGE_CACHE_SHIFT); + pos >> PAGE_SHIFT, + endbyte >> PAGE_SHIFT); } else { /* * We don't know how much we wrote, so just return @@ -1107,7 +1107,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) * @addr: user address * * Returns struct page pointer of user page pinned for dump, - * to be freed afterwards by page_cache_release() or put_page(). 
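The converted chunking arithmetic in generic_perform_write() above splits every write at page boundaries; with 4K pages, a worked example:

        /* pos = 0x1f00:
         *   offset = pos & (PAGE_SIZE - 1)           = 0xf00
         *   bytes  = min(PAGE_SIZE - offset, count)  = min(0x100, count)
         * so the first copy stops exactly at the 0x2000 page boundary
         * and the loop continues with offset 0 on the next page. */
        offset = pos & (PAGE_SIZE - 1);
        bytes = min_t(unsigned long, PAGE_SIZE - offset, iov_iter_count(i));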
+ * to be freed afterwards by put_page(). * * Returns NULL on any kind of failure - a hole must then be inserted into * the corefile, to preserve alignment with its headers; and also returns diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 06058eaa173b..19d0d08b396f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3346,7 +3346,7 @@ retry_avoidcopy: old_page != pagecache_page) outside_reserve = 1; - page_cache_get(old_page); + get_page(old_page); /* * Drop page table lock as buddy allocator may be called. It will @@ -3364,7 +3364,7 @@ retry_avoidcopy: * may get SIGKILLed if it later faults. */ if (outside_reserve) { - page_cache_release(old_page); + put_page(old_page); BUG_ON(huge_pte_none(pte)); unmap_ref_private(mm, vma, old_page, address); BUG_ON(huge_pte_none(pte)); @@ -3425,9 +3425,9 @@ retry_avoidcopy: spin_unlock(ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); out_release_all: - page_cache_release(new_page); + put_page(new_page); out_release_old: - page_cache_release(old_page); + put_page(old_page); spin_lock(ptl); /* Caller expects lock to be held */ return ret; diff --git a/mm/madvise.c b/mm/madvise.c index a01147359f3b..07427d3fcead 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -170,7 +170,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, vma, index); if (page) - page_cache_release(page); + put_page(page); } return 0; @@ -204,14 +204,14 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, page = find_get_entry(mapping, index); if (!radix_tree_exceptional_entry(page)) { if (page) - page_cache_release(page); + put_page(page); continue; } swap = radix_to_swp_entry(page); page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, NULL, 0); if (page) - page_cache_release(page); + put_page(page); } lru_add_drain(); /* Push any new pages onto the LRU now */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 5a544c6c0717..78f5f2641b91 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -538,7 +538,7 @@ static int delete_from_lru_cache(struct page *p) /* * drop the page count elevated by isolate_lru_page() */ - page_cache_release(p); + put_page(p); return 0; } return -EIO; diff --git a/mm/memory.c b/mm/memory.c index 098f00d05461..93897f23cc11 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2054,7 +2054,7 @@ static inline int wp_page_reuse(struct mm_struct *mm, VM_BUG_ON_PAGE(PageAnon(page), page); mapping = page->mapping; unlock_page(page); - page_cache_release(page); + put_page(page); if ((dirtied || page_mkwrite) && mapping) { /* @@ -2188,7 +2188,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, } if (new_page) - page_cache_release(new_page); + put_page(new_page); pte_unmap_unlock(page_table, ptl); mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end); @@ -2203,14 +2203,14 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, munlock_vma_page(old_page); unlock_page(old_page); } - page_cache_release(old_page); + put_page(old_page); } return page_copied ? 
VM_FAULT_WRITE : 0; oom_free_new: - page_cache_release(new_page); + put_page(new_page); oom: if (old_page) - page_cache_release(old_page); + put_page(old_page); return VM_FAULT_OOM; } @@ -2258,7 +2258,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, { int page_mkwrite = 0; - page_cache_get(old_page); + get_page(old_page); if (vma->vm_ops && vma->vm_ops->page_mkwrite) { int tmp; @@ -2267,7 +2267,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, tmp = do_page_mkwrite(vma, old_page, address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { - page_cache_release(old_page); + put_page(old_page); return tmp; } /* @@ -2281,7 +2281,7 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); pte_unmap_unlock(page_table, ptl); - page_cache_release(old_page); + put_page(old_page); return 0; } page_mkwrite = 1; @@ -2341,7 +2341,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, */ if (PageAnon(old_page) && !PageKsm(old_page)) { if (!trylock_page(old_page)) { - page_cache_get(old_page); + get_page(old_page); pte_unmap_unlock(page_table, ptl); lock_page(old_page); page_table = pte_offset_map_lock(mm, pmd, address, @@ -2349,10 +2349,10 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, if (!pte_same(*page_table, orig_pte)) { unlock_page(old_page); pte_unmap_unlock(page_table, ptl); - page_cache_release(old_page); + put_page(old_page); return 0; } - page_cache_release(old_page); + put_page(old_page); } if (reuse_swap_page(old_page)) { /* @@ -2375,7 +2375,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, /* * Ok, we need to copy. Oh, well.. */ - page_cache_get(old_page); + get_page(old_page); pte_unmap_unlock(page_table, ptl); return wp_page_copy(mm, vma, address, page_table, pmd, @@ -2400,7 +2400,6 @@ static inline void unmap_mapping_range_tree(struct rb_root *root, vba = vma->vm_pgoff; vea = vba + vma_pages(vma) - 1; - /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */ zba = details->first_index; if (zba < vba) zba = vba; @@ -2619,7 +2618,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, * parallel locked swapcache. 
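The get_page()/put_page() conversions in do_wp_page() above follow the usual pin-drop-relock pattern: take a page reference before releasing the PTE lock, then re-validate with pte_same() after reacquiring it. In outline:

        get_page(old_page);                     /* was page_cache_get()     */
        pte_unmap_unlock(page_table, ptl);
        lock_page(old_page);
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!pte_same(*page_table, orig_pte)) {
                /* PTE changed while unlocked: back out, fault will retry */
                unlock_page(old_page);
                pte_unmap_unlock(page_table, ptl);
                put_page(old_page);             /* was page_cache_release() */
                return 0;
        }
        put_page(old_page);     /* the lock-only reference is done */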
*/ unlock_page(swapcache); - page_cache_release(swapcache); + put_page(swapcache); } if (flags & FAULT_FLAG_WRITE) { @@ -2641,10 +2640,10 @@ out_nomap: out_page: unlock_page(page); out_release: - page_cache_release(page); + put_page(page); if (page != swapcache) { unlock_page(swapcache); - page_cache_release(swapcache); + put_page(swapcache); } return ret; } @@ -2752,7 +2751,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, if (userfaultfd_missing(vma)) { pte_unmap_unlock(page_table, ptl); mem_cgroup_cancel_charge(page, memcg, false); - page_cache_release(page); + put_page(page); return handle_userfault(vma, address, flags, VM_UFFD_MISSING); } @@ -2771,10 +2770,10 @@ unlock: return 0; release: mem_cgroup_cancel_charge(page, memcg, false); - page_cache_release(page); + put_page(page); goto unlock; oom_free_page: - page_cache_release(page); + put_page(page); oom: return VM_FAULT_OOM; } @@ -2807,7 +2806,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address, if (unlikely(PageHWPoison(vmf.page))) { if (ret & VM_FAULT_LOCKED) unlock_page(vmf.page); - page_cache_release(vmf.page); + put_page(vmf.page); return VM_FAULT_HWPOISON; } @@ -2996,7 +2995,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(!pte_same(*pte, orig_pte))) { pte_unmap_unlock(pte, ptl); unlock_page(fault_page); - page_cache_release(fault_page); + put_page(fault_page); return ret; } do_set_pte(vma, address, fault_page, pte, false, false); @@ -3024,7 +3023,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, return VM_FAULT_OOM; if (mem_cgroup_try_charge(new_page, mm, GFP_KERNEL, &memcg, false)) { - page_cache_release(new_page); + put_page(new_page); return VM_FAULT_OOM; } @@ -3041,7 +3040,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, pte_unmap_unlock(pte, ptl); if (fault_page) { unlock_page(fault_page); - page_cache_release(fault_page); + put_page(fault_page); } else { /* * The fault handler has no page to lock, so it holds @@ -3057,7 +3056,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, pte_unmap_unlock(pte, ptl); if (fault_page) { unlock_page(fault_page); - page_cache_release(fault_page); + put_page(fault_page); } else { /* * The fault handler has no page to lock, so it holds @@ -3068,7 +3067,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma, return ret; uncharge_out: mem_cgroup_cancel_charge(new_page, memcg, false); - page_cache_release(new_page); + put_page(new_page); return ret; } @@ -3096,7 +3095,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, tmp = do_page_mkwrite(vma, fault_page, address); if (unlikely(!tmp || (tmp & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))) { - page_cache_release(fault_page); + put_page(fault_page); return tmp; } } @@ -3105,7 +3104,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (unlikely(!pte_same(*pte, orig_pte))) { pte_unmap_unlock(pte, ptl); unlock_page(fault_page); - page_cache_release(fault_page); + put_page(fault_page); return ret; } do_set_pte(vma, address, fault_page, pte, true, false); @@ -3736,7 +3735,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, buf, maddr + offset, bytes); } kunmap(page); - page_cache_release(page); + put_page(page); } len -= bytes; buf += bytes; diff --git a/mm/mincore.c b/mm/mincore.c index 563f32045490..c0b5ba965200 100644 --- a/mm/mincore.c +++ 
b/mm/mincore.c @@ -75,7 +75,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff) #endif if (page) { present = PageUptodate(page); - page_cache_release(page); + put_page(page); } return present; @@ -211,7 +211,7 @@ static long do_mincore(unsigned long addr, unsigned long pages, unsigned char *v * return values: * zero - success * -EFAULT - vec points to an illegal address - * -EINVAL - addr is not a multiple of PAGE_CACHE_SIZE + * -EINVAL - addr is not a multiple of PAGE_SIZE * -ENOMEM - Addresses in the range [addr, addr + len] are * invalid for the address space of this process, or * specify one or more pages which are not currently @@ -226,14 +226,14 @@ SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len, unsigned char *tmp; /* Check the start address: needs to be page-aligned.. */ - if (start & ~PAGE_CACHE_MASK) + if (start & ~PAGE_MASK) return -EINVAL; /* ..and we need to be passed a valid user-space range */ if (!access_ok(VERIFY_READ, (void __user *) start, len)) return -ENOMEM; - /* This also avoids any overflows on PAGE_CACHE_ALIGN */ + /* This also avoids any overflows on PAGE_ALIGN */ pages = len >> PAGE_SHIFT; pages += (offset_in_page(len)) != 0; diff --git a/mm/nommu.c b/mm/nommu.c index de8b6b6580c1..102e257cc6c3 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -141,7 +141,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (pages) { pages[i] = virt_to_page(start); if (pages[i]) - page_cache_get(pages[i]); + get_page(pages[i]); } if (vmas) vmas[i] = vma; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 11ff8f758631..999792d35ccc 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2176,8 +2176,8 @@ int write_cache_pages(struct address_space *mapping, cycled = 0; end = -1; } else { - index = wbc->range_start >> PAGE_CACHE_SHIFT; - end = wbc->range_end >> PAGE_CACHE_SHIFT; + index = wbc->range_start >> PAGE_SHIFT; + end = wbc->range_end >> PAGE_SHIFT; if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) range_whole = 1; cycled = 1; /* ignore range_cyclic tests */ @@ -2382,14 +2382,14 @@ int write_one_page(struct page *page, int wait) wait_on_page_writeback(page); if (clear_page_dirty_for_io(page)) { - page_cache_get(page); + get_page(page); ret = mapping->a_ops->writepage(page, &wbc); if (ret == 0 && wait) { wait_on_page_writeback(page); if (PageError(page)) ret = -EIO; } - page_cache_release(page); + put_page(page); } else { unlock_page(page); } @@ -2431,7 +2431,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) __inc_zone_page_state(page, NR_DIRTIED); __inc_wb_stat(wb, WB_RECLAIMABLE); __inc_wb_stat(wb, WB_DIRTIED); - task_io_account_write(PAGE_CACHE_SIZE); + task_io_account_write(PAGE_SIZE); current->nr_dirtied++; this_cpu_inc(bdp_ratelimits); } @@ -2450,7 +2450,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping, mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY); dec_zone_page_state(page, NR_FILE_DIRTY); dec_wb_stat(wb, WB_RECLAIMABLE); - task_io_account_cancelled_write(PAGE_CACHE_SIZE); + task_io_account_cancelled_write(PAGE_SIZE); } } diff --git a/mm/page_io.c b/mm/page_io.c index 18aac7819cc9..cd92e3d67a32 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -252,7 +252,7 @@ out: static sector_t swap_page_sector(struct page *page) { - return (sector_t)__page_file_index(page) << (PAGE_CACHE_SHIFT - 9); + return (sector_t)__page_file_index(page) << (PAGE_SHIFT - 9); } int __swap_writepage(struct page *page, struct writeback_control 
*wbc, diff --git a/mm/readahead.c b/mm/readahead.c index 20e58e820e44..40be3ae0afe3 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -47,11 +47,11 @@ static void read_cache_pages_invalidate_page(struct address_space *mapping, if (!trylock_page(page)) BUG(); page->mapping = mapping; - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0, PAGE_SIZE); page->mapping = NULL; unlock_page(page); } - page_cache_release(page); + put_page(page); } /* @@ -93,14 +93,14 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages, read_cache_pages_invalidate_page(mapping, page); continue; } - page_cache_release(page); + put_page(page); ret = filler(data, page); if (unlikely(ret)) { read_cache_pages_invalidate_pages(mapping, pages); break; } - task_io_account_read(PAGE_CACHE_SIZE); + task_io_account_read(PAGE_SIZE); } return ret; } @@ -130,7 +130,7 @@ static int read_pages(struct address_space *mapping, struct file *filp, mapping_gfp_constraint(mapping, GFP_KERNEL))) { mapping->a_ops->readpage(filp, page); } - page_cache_release(page); + put_page(page); } ret = 0; @@ -163,7 +163,7 @@ int __do_page_cache_readahead(struct address_space *mapping, struct file *filp, if (isize == 0) goto out; - end_index = ((isize - 1) >> PAGE_CACHE_SHIFT); + end_index = ((isize - 1) >> PAGE_SHIFT); /* * Preallocate as many pages as we will need. @@ -216,7 +216,7 @@ int force_page_cache_readahead(struct address_space *mapping, struct file *filp, while (nr_to_read) { int err; - unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE; + unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE; if (this_chunk > nr_to_read) this_chunk = nr_to_read; @@ -425,7 +425,7 @@ ondemand_readahead(struct address_space *mapping, * trivial case: (offset - prev_offset) == 1 * unaligned reads: (offset - prev_offset) == 0 */ - prev_offset = (unsigned long long)ra->prev_pos >> PAGE_CACHE_SHIFT; + prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT; if (offset - prev_offset <= 1UL) goto initial_readahead; @@ -558,8 +558,8 @@ SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count) if (f.file) { if (f.file->f_mode & FMODE_READ) { struct address_space *mapping = f.file->f_mapping; - pgoff_t start = offset >> PAGE_CACHE_SHIFT; - pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT; + pgoff_t start = offset >> PAGE_SHIFT; + pgoff_t end = (offset + count - 1) >> PAGE_SHIFT; unsigned long len = end - start + 1; ret = do_readahead(mapping, f.file, start, len); } diff --git a/mm/rmap.c b/mm/rmap.c index 395e314b7996..307b555024ef 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1541,7 +1541,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, discard: page_remove_rmap(page, PageHuge(page)); - page_cache_release(page); + put_page(page); out_unmap: pte_unmap_unlock(pte, ptl); diff --git a/mm/shmem.c b/mm/shmem.c index 9428c51ab2d6..719bd6b88d98 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -75,8 +75,8 @@ static struct vfsmount *shm_mnt; #include "internal.h" -#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512) -#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT) +#define BLOCKS_PER_PAGE (PAGE_SIZE/512) +#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT) /* Pretend that each entry is of this size in directory's i_size */ #define BOGO_DIRENT_SIZE 20 @@ -176,13 +176,13 @@ static inline int shmem_reacct_size(unsigned long flags, static inline int shmem_acct_block(unsigned long flags) { return (flags & VM_NORESERVE) ? 
- security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0; + security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_SIZE)) : 0; } static inline void shmem_unacct_blocks(unsigned long flags, long pages) { if (flags & VM_NORESERVE) - vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE)); + vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE)); } static const struct super_operations shmem_ops; @@ -300,7 +300,7 @@ static int shmem_add_to_page_cache(struct page *page, VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageSwapBacked(page), page); - page_cache_get(page); + get_page(page); page->mapping = mapping; page->index = index; @@ -318,7 +318,7 @@ static int shmem_add_to_page_cache(struct page *page, } else { page->mapping = NULL; spin_unlock_irq(&mapping->tree_lock); - page_cache_release(page); + put_page(page); } return error; } @@ -338,7 +338,7 @@ static void shmem_delete_from_page_cache(struct page *page, void *radswap) __dec_zone_page_state(page, NR_FILE_PAGES); __dec_zone_page_state(page, NR_SHMEM); spin_unlock_irq(&mapping->tree_lock); - page_cache_release(page); + put_page(page); BUG_ON(error); } @@ -474,10 +474,10 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, { struct address_space *mapping = inode->i_mapping; struct shmem_inode_info *info = SHMEM_I(inode); - pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT; - unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1); - unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); + pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; + pgoff_t end = (lend + 1) >> PAGE_SHIFT; + unsigned int partial_start = lstart & (PAGE_SIZE - 1); + unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1); struct pagevec pvec; pgoff_t indices[PAGEVEC_SIZE]; long nr_swaps_freed = 0; @@ -530,7 +530,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, struct page *page = NULL; shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); if (page) { - unsigned int top = PAGE_CACHE_SIZE; + unsigned int top = PAGE_SIZE; if (start > end) { top = partial_end; partial_end = 0; @@ -538,7 +538,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, zero_user_segment(page, partial_start, top); set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); } } if (partial_end) { @@ -548,7 +548,7 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, zero_user_segment(page, 0, partial_end); set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); } } if (start >= end) @@ -833,7 +833,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page) mem_cgroup_commit_charge(page, memcg, true, false); out: unlock_page(page); - page_cache_release(page); + put_page(page); return error; } @@ -1080,7 +1080,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, if (!newpage) return -ENOMEM; - page_cache_get(newpage); + get_page(newpage); copy_highpage(newpage, oldpage); flush_dcache_page(newpage); @@ -1120,8 +1120,8 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp, set_page_private(oldpage, 0); unlock_page(oldpage); - page_cache_release(oldpage); - page_cache_release(oldpage); + put_page(oldpage); + put_page(oldpage); return error; } @@ -1145,7 +1145,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, int once = 0; int alloced = 0; - if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) + 
if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT)) return -EFBIG; repeat: swap.val = 0; @@ -1156,7 +1156,7 @@ repeat: } if (sgp != SGP_WRITE && sgp != SGP_FALLOC && - ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { + ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { error = -EINVAL; goto unlock; } @@ -1169,7 +1169,7 @@ repeat: if (sgp != SGP_READ) goto clear; unlock_page(page); - page_cache_release(page); + put_page(page); page = NULL; } if (page || (sgp == SGP_READ && !swap.val)) { @@ -1327,7 +1327,7 @@ clear: /* Perhaps the file has been truncated since we checked */ if (sgp != SGP_WRITE && sgp != SGP_FALLOC && - ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { + ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) { if (alloced) { ClearPageDirty(page); delete_from_page_cache(page); @@ -1355,7 +1355,7 @@ failed: unlock: if (page) { unlock_page(page); - page_cache_release(page); + put_page(page); } if (error == -ENOSPC && !once++) { info = SHMEM_I(inode); @@ -1577,7 +1577,7 @@ shmem_write_begin(struct file *file, struct address_space *mapping, { struct inode *inode = mapping->host; struct shmem_inode_info *info = SHMEM_I(inode); - pgoff_t index = pos >> PAGE_CACHE_SHIFT; + pgoff_t index = pos >> PAGE_SHIFT; /* i_mutex is held by caller */ if (unlikely(info->seals)) { @@ -1601,16 +1601,16 @@ shmem_write_end(struct file *file, struct address_space *mapping, i_size_write(inode, pos + copied); if (!PageUptodate(page)) { - if (copied < PAGE_CACHE_SIZE) { - unsigned from = pos & (PAGE_CACHE_SIZE - 1); + if (copied < PAGE_SIZE) { + unsigned from = pos & (PAGE_SIZE - 1); zero_user_segments(page, 0, from, - from + copied, PAGE_CACHE_SIZE); + from + copied, PAGE_SIZE); } SetPageUptodate(page); } set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); return copied; } @@ -1635,8 +1635,8 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) if (!iter_is_iovec(to)) sgp = SGP_DIRTY; - index = *ppos >> PAGE_CACHE_SHIFT; - offset = *ppos & ~PAGE_CACHE_MASK; + index = *ppos >> PAGE_SHIFT; + offset = *ppos & ~PAGE_MASK; for (;;) { struct page *page = NULL; @@ -1644,11 +1644,11 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) unsigned long nr, ret; loff_t i_size = i_size_read(inode); - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; if (index > end_index) break; if (index == end_index) { - nr = i_size & ~PAGE_CACHE_MASK; + nr = i_size & ~PAGE_MASK; if (nr <= offset) break; } @@ -1666,14 +1666,14 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) * We must evaluate after, since reads (unlike writes) * are called without i_mutex protection against truncate */ - nr = PAGE_CACHE_SIZE; + nr = PAGE_SIZE; i_size = i_size_read(inode); - end_index = i_size >> PAGE_CACHE_SHIFT; + end_index = i_size >> PAGE_SHIFT; if (index == end_index) { - nr = i_size & ~PAGE_CACHE_MASK; + nr = i_size & ~PAGE_MASK; if (nr <= offset) { if (page) - page_cache_release(page); + put_page(page); break; } } @@ -1694,7 +1694,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) mark_page_accessed(page); } else { page = ZERO_PAGE(0); - page_cache_get(page); + get_page(page); } /* @@ -1704,10 +1704,10 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) ret = copy_page_to_iter(page, offset, nr, to); retval += ret; offset += ret; - index += offset >> PAGE_CACHE_SHIFT; - offset &= ~PAGE_CACHE_MASK; + index += offset >> 
PAGE_SHIFT; + offset &= ~PAGE_MASK; - page_cache_release(page); + put_page(page); if (!iov_iter_count(to)) break; if (ret < nr) { @@ -1717,7 +1717,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to) cond_resched(); } - *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset; + *ppos = ((loff_t) index << PAGE_SHIFT) + offset; file_accessed(file); return retval ? retval : error; } @@ -1755,9 +1755,9 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, if (splice_grow_spd(pipe, &spd)) return -ENOMEM; - index = *ppos >> PAGE_CACHE_SHIFT; - loff = *ppos & ~PAGE_CACHE_MASK; - req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + index = *ppos >> PAGE_SHIFT; + loff = *ppos & ~PAGE_MASK; + req_pages = (len + loff + PAGE_SIZE - 1) >> PAGE_SHIFT; nr_pages = min(req_pages, spd.nr_pages_max); spd.nr_pages = find_get_pages_contig(mapping, index, @@ -1774,7 +1774,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, index++; } - index = *ppos >> PAGE_CACHE_SHIFT; + index = *ppos >> PAGE_SHIFT; nr_pages = spd.nr_pages; spd.nr_pages = 0; @@ -1784,7 +1784,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, if (!len) break; - this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff); + this_len = min_t(unsigned long, len, PAGE_SIZE - loff); page = spd.pages[page_nr]; if (!PageUptodate(page) || page->mapping != mapping) { @@ -1793,19 +1793,19 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, if (error) break; unlock_page(page); - page_cache_release(spd.pages[page_nr]); + put_page(spd.pages[page_nr]); spd.pages[page_nr] = page; } isize = i_size_read(inode); - end_index = (isize - 1) >> PAGE_CACHE_SHIFT; + end_index = (isize - 1) >> PAGE_SHIFT; if (unlikely(!isize || index > end_index)) break; if (end_index == index) { unsigned int plen; - plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1; + plen = ((isize - 1) & ~PAGE_MASK) + 1; if (plen <= loff) break; @@ -1822,7 +1822,7 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos, } while (page_nr < nr_pages) - page_cache_release(spd.pages[page_nr++]); + put_page(spd.pages[page_nr++]); if (spd.nr_pages) error = splice_to_pipe(pipe, &spd); @@ -1904,10 +1904,10 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) else if (offset >= inode->i_size) offset = -ENXIO; else { - start = offset >> PAGE_CACHE_SHIFT; - end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + start = offset >> PAGE_SHIFT; + end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT; new_offset = shmem_seek_hole_data(mapping, start, end, whence); - new_offset <<= PAGE_CACHE_SHIFT; + new_offset <<= PAGE_SHIFT; if (new_offset > offset) { if (new_offset < inode->i_size) offset = new_offset; @@ -2203,8 +2203,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, goto out; } - start = offset >> PAGE_CACHE_SHIFT; - end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + start = offset >> PAGE_SHIFT; + end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT; /* Try to avoid a swapstorm if len is impossible to satisfy */ if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) { error = -ENOSPC; @@ -2237,8 +2237,8 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, if (error) { /* Remove the !PageUptodate pages we added */ shmem_undo_range(inode, - (loff_t)start << PAGE_CACHE_SHIFT, - (loff_t)index << PAGE_CACHE_SHIFT, true); + (loff_t)start << PAGE_SHIFT, + (loff_t)index << 
PAGE_SHIFT, true); goto undone; } @@ -2259,7 +2259,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset, */ set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); cond_resched(); } @@ -2280,7 +2280,7 @@ static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); buf->f_type = TMPFS_MAGIC; - buf->f_bsize = PAGE_CACHE_SIZE; + buf->f_bsize = PAGE_SIZE; buf->f_namelen = NAME_MAX; if (sbinfo->max_blocks) { buf->f_blocks = sbinfo->max_blocks; @@ -2523,7 +2523,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s struct shmem_inode_info *info; len = strlen(symname) + 1; - if (len > PAGE_CACHE_SIZE) + if (len > PAGE_SIZE) return -ENAMETOOLONG; inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); @@ -2562,7 +2562,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s SetPageUptodate(page); set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); } dir->i_size += BOGO_DIRENT_SIZE; dir->i_ctime = dir->i_mtime = CURRENT_TIME; @@ -2835,7 +2835,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo, if (*rest) goto bad_val; sbinfo->max_blocks = - DIV_ROUND_UP(size, PAGE_CACHE_SIZE); + DIV_ROUND_UP(size, PAGE_SIZE); } else if (!strcmp(this_char,"nr_blocks")) { sbinfo->max_blocks = memparse(value, &rest); if (*rest) @@ -2940,7 +2940,7 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (sbinfo->max_blocks != shmem_default_max_blocks()) seq_printf(seq, ",size=%luk", - sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10)); + sbinfo->max_blocks << (PAGE_SHIFT - 10)); if (sbinfo->max_inodes != shmem_default_max_inodes()) seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) @@ -3082,8 +3082,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) sbinfo->free_inodes = sbinfo->max_inodes; sb->s_maxbytes = MAX_LFS_FILESIZE; - sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = TMPFS_MAGIC; sb->s_op = &shmem_ops; sb->s_time_gran = 1; diff --git a/mm/swap.c b/mm/swap.c index 09fe5e97714a..a0bc206b4ac6 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -114,7 +114,7 @@ void put_pages_list(struct list_head *pages) victim = list_entry(pages->prev, struct page, lru); list_del(&victim->lru); - page_cache_release(victim); + put_page(victim); } } EXPORT_SYMBOL(put_pages_list); @@ -142,7 +142,7 @@ int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, return seg; pages[seg] = kmap_to_page(kiov[seg].iov_base); - page_cache_get(pages[seg]); + get_page(pages[seg]); } return seg; @@ -236,7 +236,7 @@ void rotate_reclaimable_page(struct page *page) struct pagevec *pvec; unsigned long flags; - page_cache_get(page); + get_page(page); local_irq_save(flags); pvec = this_cpu_ptr(&lru_rotate_pvecs); if (!pagevec_add(pvec, page)) @@ -294,7 +294,7 @@ void activate_page(struct page *page) if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); - page_cache_get(page); + get_page(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); put_cpu_var(activate_page_pvecs); @@ -389,7 +389,7 @@ static void __lru_cache_add(struct page *page) { struct pagevec *pvec = &get_cpu_var(lru_add_pvec); - 
page_cache_get(page); + get_page(page); if (!pagevec_space(pvec)) __pagevec_lru_add(pvec); pagevec_add(pvec, page); @@ -646,7 +646,7 @@ void deactivate_page(struct page *page) if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); - page_cache_get(page); + get_page(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); put_cpu_var(lru_deactivate_pvecs); @@ -698,7 +698,7 @@ void lru_add_drain_all(void) } /** - * release_pages - batched page_cache_release() + * release_pages - batched put_page() * @pages: array of pages to release * @nr: number of pages * @cold: whether the pages are cache cold diff --git a/mm/swap_state.c b/mm/swap_state.c index 69cb2464e7dc..366ce3518703 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -85,7 +85,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) VM_BUG_ON_PAGE(PageSwapCache(page), page); VM_BUG_ON_PAGE(!PageSwapBacked(page), page); - page_cache_get(page); + get_page(page); SetPageSwapCache(page); set_page_private(page, entry.val); @@ -109,7 +109,7 @@ int __add_to_swap_cache(struct page *page, swp_entry_t entry) VM_BUG_ON(error == -EEXIST); set_page_private(page, 0UL); ClearPageSwapCache(page); - page_cache_release(page); + put_page(page); } return error; @@ -226,7 +226,7 @@ void delete_from_swap_cache(struct page *page) spin_unlock_irq(&address_space->tree_lock); swapcache_free(entry); - page_cache_release(page); + put_page(page); } /* @@ -252,7 +252,7 @@ static inline void free_swap_cache(struct page *page) void free_page_and_swap_cache(struct page *page) { free_swap_cache(page); - page_cache_release(page); + put_page(page); } /* @@ -380,7 +380,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, } while (err != -ENOMEM); if (new_page) - page_cache_release(new_page); + put_page(new_page); return found_page; } @@ -495,7 +495,7 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, continue; if (offset != entry_offset) SetPageReadahead(page); - page_cache_release(page); + put_page(page); } blk_finish_plug(&plug); diff --git a/mm/swapfile.c b/mm/swapfile.c index 560ad380634c..83874eced5bf 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -119,7 +119,7 @@ __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset) ret = try_to_free_swap(page); unlock_page(page); } - page_cache_release(page); + put_page(page); return ret; } @@ -1000,7 +1000,7 @@ int free_swap_and_cache(swp_entry_t entry) page = find_get_page(swap_address_space(entry), entry.val); if (page && !trylock_page(page)) { - page_cache_release(page); + put_page(page); page = NULL; } } @@ -1017,7 +1017,7 @@ int free_swap_and_cache(swp_entry_t entry) SetPageDirty(page); } unlock_page(page); - page_cache_release(page); + put_page(page); } return p != NULL; } @@ -1518,7 +1518,7 @@ int try_to_unuse(unsigned int type, bool frontswap, } if (retval) { unlock_page(page); - page_cache_release(page); + put_page(page); break; } @@ -1570,7 +1570,7 @@ int try_to_unuse(unsigned int type, bool frontswap, */ SetPageDirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); /* * Make sure that we aren't completely killing @@ -2574,7 +2574,7 @@ bad_swap: out: if (page && !IS_ERR(page)) { kunmap(page); - page_cache_release(page); + put_page(page); } if (name) putname(name); diff --git a/mm/truncate.c b/mm/truncate.c index 7598b552ae03..b00272810871 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -118,7 +118,7 @@ 
truncate_complete_page(struct address_space *mapping, struct page *page) return -EIO; if (page_has_private(page)) - do_invalidatepage(page, 0, PAGE_CACHE_SIZE); + do_invalidatepage(page, 0, PAGE_SIZE); /* * Some filesystems seem to re-dirty the page even after @@ -159,8 +159,8 @@ int truncate_inode_page(struct address_space *mapping, struct page *page) { if (page_mapped(page)) { unmap_mapping_range(mapping, - (loff_t)page->index << PAGE_CACHE_SHIFT, - PAGE_CACHE_SIZE, 0); + (loff_t)page->index << PAGE_SHIFT, + PAGE_SIZE, 0); } return truncate_complete_page(mapping, page); } @@ -241,8 +241,8 @@ void truncate_inode_pages_range(struct address_space *mapping, return; /* Offsets within partial pages */ - partial_start = lstart & (PAGE_CACHE_SIZE - 1); - partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1); + partial_start = lstart & (PAGE_SIZE - 1); + partial_end = (lend + 1) & (PAGE_SIZE - 1); /* * 'start' and 'end' always covers the range of pages to be fully @@ -250,7 +250,7 @@ void truncate_inode_pages_range(struct address_space *mapping, * start of the range and 'partial_end' at the end of the range. * Note that 'end' is exclusive while 'lend' is inclusive. */ - start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; + start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT; if (lend == -1) /* * lend == -1 indicates end-of-file so we have to set 'end' @@ -259,7 +259,7 @@ void truncate_inode_pages_range(struct address_space *mapping, */ end = -1; else - end = (lend + 1) >> PAGE_CACHE_SHIFT; + end = (lend + 1) >> PAGE_SHIFT; pagevec_init(&pvec, 0); index = start; @@ -298,7 +298,7 @@ void truncate_inode_pages_range(struct address_space *mapping, if (partial_start) { struct page *page = find_lock_page(mapping, start - 1); if (page) { - unsigned int top = PAGE_CACHE_SIZE; + unsigned int top = PAGE_SIZE; if (start > end) { /* Truncation within a single page */ top = partial_end; @@ -311,7 +311,7 @@ void truncate_inode_pages_range(struct address_space *mapping, do_invalidatepage(page, partial_start, top - partial_start); unlock_page(page); - page_cache_release(page); + put_page(page); } } if (partial_end) { @@ -324,7 +324,7 @@ void truncate_inode_pages_range(struct address_space *mapping, do_invalidatepage(page, 0, partial_end); unlock_page(page); - page_cache_release(page); + put_page(page); } } /* @@ -538,7 +538,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page) if (mapping->a_ops->freepage) mapping->a_ops->freepage(page); - page_cache_release(page); /* pagecache ref */ + put_page(page); /* pagecache ref */ return 1; failed: spin_unlock_irqrestore(&mapping->tree_lock, flags); @@ -608,18 +608,18 @@ int invalidate_inode_pages2_range(struct address_space *mapping, * Zap the rest of the file in one hit. */ unmap_mapping_range(mapping, - (loff_t)index << PAGE_CACHE_SHIFT, + (loff_t)index << PAGE_SHIFT, (loff_t)(1 + end - index) - << PAGE_CACHE_SHIFT, - 0); + << PAGE_SHIFT, + 0); did_range_unmap = 1; } else { /* * Just zap this page */ unmap_mapping_range(mapping, - (loff_t)index << PAGE_CACHE_SHIFT, - PAGE_CACHE_SIZE, 0); + (loff_t)index << PAGE_SHIFT, + PAGE_SIZE, 0); } } BUG_ON(page_mapped(page)); @@ -744,14 +744,14 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) WARN_ON(to > inode->i_size); - if (from >= to || bsize == PAGE_CACHE_SIZE) + if (from >= to || bsize == PAGE_SIZE) return; /* Page straddling @from will not have any hole block created? 
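* (A side note on the mechanical renames in these mm hunks: pagemap.h defined PAGE_CACHE_SIZE, PAGE_CACHE_SHIFT and PAGE_CACHE_MASK as 1:1 aliases of PAGE_SIZE, PAGE_SHIFT and PAGE_MASK, and page_cache_get/page_cache_release as aliases of get_page/put_page, so behaviour is unchanged.) For the check below: if the first block boundary past @from is page aligned, or @to does not reach beyond it, the page straddling @from cannot gain a newly created hole block and nothing needs write-protecting. Worked case, assuming bsize = 1024 and PAGE_SIZE = 4096: from = 4100 rounds up to 5120, which is not page aligned, so the page at index from >> PAGE_SHIFT is looked up below and write-protected via page_mkclean().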
*/ rounded_from = round_up(from, bsize); - if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) + if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1))) return; - index = from >> PAGE_CACHE_SHIFT; + index = from >> PAGE_SHIFT; page = find_lock_page(inode->i_mapping, index); /* Page not cached? Nothing to do */ if (!page) @@ -763,7 +763,7 @@ void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) if (page_mkclean(page)) set_page_dirty(page); unlock_page(page); - page_cache_release(page); + put_page(page); } EXPORT_SYMBOL(pagecache_isize_extended); diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 9f3a0290b273..af817e5060fb 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -93,7 +93,7 @@ out_release_uncharge_unlock: pte_unmap_unlock(dst_pte, ptl); mem_cgroup_cancel_charge(page, memcg, false); out_release: - page_cache_release(page); + put_page(page); goto out; } @@ -287,7 +287,7 @@ out_unlock: up_read(&dst_mm->mmap_sem); out: if (page) - page_cache_release(page); + put_page(page); BUG_ON(copied < 0); BUG_ON(err > 0); BUG_ON(!copied && !err); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ae7d20b447ff..293889d7f482 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -21,6 +21,7 @@ #include <linux/debugobjects.h> #include <linux/kallsyms.h> #include <linux/list.h> +#include <linux/notifier.h> #include <linux/rbtree.h> #include <linux/radix-tree.h> #include <linux/rcupdate.h> @@ -344,6 +345,8 @@ static void __insert_vmap_area(struct vmap_area *va) static void purge_vmap_area_lazy(void); +static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); + /* * Allocate a region of KVA of the specified size and alignment, within the * vstart and vend. @@ -363,6 +366,8 @@ static struct vmap_area *alloc_vmap_area(unsigned long size, BUG_ON(offset_in_page(size)); BUG_ON(!is_power_of_2(align)); + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); + va = kmalloc_node(sizeof(struct vmap_area), gfp_mask & GFP_RECLAIM_MASK, node); if (unlikely(!va)) @@ -468,6 +473,16 @@ overflow: purged = 1; goto retry; } + + if (gfpflags_allow_blocking(gfp_mask)) { + unsigned long freed = 0; + blocking_notifier_call_chain(&vmap_notify_list, 0, &freed); + if (freed > 0) { + purged = 0; + goto retry; + } + } + if (printk_ratelimit()) pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n", size); @@ -475,6 +490,18 @@ overflow: return ERR_PTR(-EBUSY); } +int register_vmap_purge_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&vmap_notify_list, nb); +} +EXPORT_SYMBOL_GPL(register_vmap_purge_notifier); + +int unregister_vmap_purge_notifier(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&vmap_notify_list, nb); +} +EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier); + static void __free_vmap_area(struct vmap_area *va) { BUG_ON(RB_EMPTY_NODE(&va->rb_node)); diff --git a/mm/zswap.c b/mm/zswap.c index bf14508afd64..91dad80d068b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -869,7 +869,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) case ZSWAP_SWAPCACHE_EXIST: /* page is already in the swap cache, ignore for now */ - page_cache_release(page); + put_page(page); ret = -EEXIST; goto fail; @@ -897,7 +897,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) /* start writeback */ __swap_writepage(page, &wbc, end_swap_bio_write); - page_cache_release(page); + put_page(page); zswap_written_back_pages++; spin_lock(&tree->lock); diff --git a/net/ceph/messenger.c 
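/*
 * A minimal usage sketch for the vmap purge notifier added above; the
 * callback and the shrink helper are hypothetical names, not part of
 * this patch. The chain is invoked with a pointer to an unsigned long:
 * a callback adds the amount of KVA it released, and alloc_vmap_area()
 * retries the allocation whenever the reported total is non-zero.
 */
static unsigned long my_shrink_vmap_cache(void);	/* hypothetical helper */

static int my_vmap_purge_cb(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	unsigned long *freed = ptr;

	*freed += my_shrink_vmap_cache();	/* report released KVA */
	return NOTIFY_OK;
}

static struct notifier_block my_vmap_nb = {
	.notifier_call = my_vmap_purge_cb,
};

/* at init time: register_vmap_purge_notifier(&my_vmap_nb); */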
b/net/ceph/messenger.c index 1831f6353622..a5502898ea33 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -269,7 +269,7 @@ static void _ceph_msgr_exit(void) } BUG_ON(zero_page == NULL); - page_cache_release(zero_page); + put_page(zero_page); zero_page = NULL; ceph_msgr_slab_exit(); @@ -282,7 +282,7 @@ int ceph_msgr_init(void) BUG_ON(zero_page != NULL); zero_page = ZERO_PAGE(0); - page_cache_get(zero_page); + get_page(zero_page); /* * The number of active work items is limited by the number of @@ -1602,7 +1602,7 @@ static int write_partial_skip(struct ceph_connection *con) dout("%s %p %d left\n", __func__, con, con->out_skip); while (con->out_skip > 0) { - size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); + size_t size = min(con->out_skip, (int) PAGE_SIZE); ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); if (ret <= 0) diff --git a/net/ceph/pagelist.c b/net/ceph/pagelist.c index c7c220a736e5..6864007e64fc 100644 --- a/net/ceph/pagelist.c +++ b/net/ceph/pagelist.c @@ -56,7 +56,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len) size_t bit = pl->room; int ret; - memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), + memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, bit); pl->length += bit; pl->room -= bit; @@ -67,7 +67,7 @@ int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len) return ret; } - memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len); + memcpy(pl->mapped_tail + (pl->length & ~PAGE_MASK), buf, len); pl->length += len; pl->room -= len; return 0; diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 10297f7a89ba..00d2601407c5 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c @@ -95,19 +95,19 @@ int ceph_copy_user_to_page_vector(struct page **pages, loff_t off, size_t len) { int i = 0; - int po = off & ~PAGE_CACHE_MASK; + int po = off & ~PAGE_MASK; int left = len; int l, bad; while (left > 0) { - l = min_t(int, PAGE_CACHE_SIZE-po, left); + l = min_t(int, PAGE_SIZE-po, left); bad = copy_from_user(page_address(pages[i]) + po, data, l); if (bad == l) return -EFAULT; data += l - bad; left -= l - bad; po += l - bad; - if (po == PAGE_CACHE_SIZE) { + if (po == PAGE_SIZE) { po = 0; i++; } @@ -121,17 +121,17 @@ void ceph_copy_to_page_vector(struct page **pages, loff_t off, size_t len) { int i = 0; - size_t po = off & ~PAGE_CACHE_MASK; + size_t po = off & ~PAGE_MASK; size_t left = len; while (left > 0) { - size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); + size_t l = min_t(size_t, PAGE_SIZE-po, left); memcpy(page_address(pages[i]) + po, data, l); data += l; left -= l; po += l; - if (po == PAGE_CACHE_SIZE) { + if (po == PAGE_SIZE) { po = 0; i++; } @@ -144,17 +144,17 @@ void ceph_copy_from_page_vector(struct page **pages, loff_t off, size_t len) { int i = 0; - size_t po = off & ~PAGE_CACHE_MASK; + size_t po = off & ~PAGE_MASK; size_t left = len; while (left > 0) { - size_t l = min_t(size_t, PAGE_CACHE_SIZE-po, left); + size_t l = min_t(size_t, PAGE_SIZE-po, left); memcpy(data, page_address(pages[i]) + po, l); data += l; left -= l; po += l; - if (po == PAGE_CACHE_SIZE) { + if (po == PAGE_SIZE) { po = 0; i++; } @@ -168,25 +168,25 @@ EXPORT_SYMBOL(ceph_copy_from_page_vector); */ void ceph_zero_page_vector_range(int off, int len, struct page **pages) { - int i = off >> PAGE_CACHE_SHIFT; + int i = off >> PAGE_SHIFT; - off &= ~PAGE_CACHE_MASK; + off &= ~PAGE_MASK; dout("zero_page_vector_page %u~%u\n", off, len); /* leading partial page? 
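* Zeroing proceeds in up to three steps: a partial head inside pages[i], then whole pages, then a partial tail. Worked case with PAGE_SIZE = 4096: off = 100 and len = 9000 clears [100, 4096) of page 0, all of page 1, then [0, 908) of page 2.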
*/ if (off) { - int end = min((int)PAGE_CACHE_SIZE, off + len); + int end = min((int)PAGE_SIZE, off + len); dout("zeroing %d %p head from %d\n", i, pages[i], (int)off); zero_user_segment(pages[i], off, end); len -= (end - off); i++; } - while (len >= PAGE_CACHE_SIZE) { + while (len >= PAGE_SIZE) { dout("zeroing %d %p len=%d\n", i, pages[i], len); - zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE); - len -= PAGE_CACHE_SIZE; + zero_user_segment(pages[i], 0, PAGE_SIZE); + len -= PAGE_SIZE; i++; } /* trailing partial page? */ diff --git a/net/core/dev.c b/net/core/dev.c index b9bcbe77d913..77a71cd68535 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4439,6 +4439,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff NAPI_GRO_CB(skb)->flush = 0; NAPI_GRO_CB(skb)->free = 0; NAPI_GRO_CB(skb)->encap_mark = 0; + NAPI_GRO_CB(skb)->is_fou = 0; NAPI_GRO_CB(skb)->gro_remcsum_start = 0; /* Setup for GRO checksum validation */ diff --git a/net/core/netpoll.c b/net/core/netpoll.c index a57bd17805b4..94acfc89ad97 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -603,6 +603,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) const struct net_device_ops *ops; int err; + np->dev = ndev; strlcpy(np->dev_name, ndev->name, IFNAMSIZ); INIT_WORK(&np->cleanup_work, netpoll_async_cleanup); @@ -669,7 +670,6 @@ int netpoll_setup(struct netpoll *np) goto unlock; } dev_hold(ndev); - np->dev = ndev; if (netdev_master_upper_dev_get(ndev)) { np_err(np, "%s is a slave device, aborting\n", np->dev_name); @@ -770,7 +770,6 @@ int netpoll_setup(struct netpoll *np) return 0; put: - np->dev = NULL; dev_put(ndev); unlock: rtnl_unlock(); diff --git a/net/core/sock.c b/net/core/sock.c index b67b9aedb230..7e73c26b6bb4 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -221,7 +221,8 @@ static const char *const af_family_key_strings[AF_MAX+1] = { "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" , "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" , "sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" , - "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_MAX" + "sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" , + "sk_lock-AF_MAX" }; static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" , @@ -237,7 +238,8 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = { "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" , "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" , "slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" , - "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_MAX" + "slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" , + "slock-AF_MAX" }; static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" , @@ -253,7 +255,8 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = { "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" , "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" , "clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" , - "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_MAX" + "clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" , + "clock-AF_MAX" }; /* diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c index 5a94aea280d3..a39068b4a4d9 100644 --- a/net/ipv4/fou.c +++ b/net/ipv4/fou.c @@ -203,6 +203,9 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head, */ NAPI_GRO_CB(skb)->encap_mark = 0; + /* Flag this frame as already having an outer encap 
header */ + NAPI_GRO_CB(skb)->is_fou = 1; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); @@ -368,6 +371,9 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head, */ NAPI_GRO_CB(skb)->encap_mark = 0; + /* Flag this frame as already having an outer encap header */ + NAPI_GRO_CB(skb)->is_fou = 1; + rcu_read_lock(); offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[guehdr->proto_ctype]); diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index c47539d04b88..6a5bd4317866 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c @@ -150,6 +150,14 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head, if ((greh->flags & ~(GRE_KEY|GRE_CSUM)) != 0) goto out; + /* We can only support GRE_CSUM if we can track the location of + * the GRE header. In the case of FOU/GUE we cannot because the + * outer UDP header displaces the GRE header leaving us in a state + * of limbo. + */ + if ((greh->flags & GRE_CSUM) && NAPI_GRO_CB(skb)->is_fou) + goto out; + type = greh->protocol; rcu_read_lock(); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 31936d387cfd..af5d1f38217f 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -862,9 +862,16 @@ static void __gre_tunnel_init(struct net_device *dev) dev->hw_features |= GRE_FEATURES; if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) { - /* TCP offload with GRE SEQ is not supported. */ - dev->features |= NETIF_F_GSO_SOFTWARE; - dev->hw_features |= NETIF_F_GSO_SOFTWARE; + /* TCP offload with GRE SEQ is not supported, nor + * can we support 2 levels of outer headers requiring + * an update. + */ + if (!(tunnel->parms.o_flags & TUNNEL_CSUM) || + (tunnel->encap.type == TUNNEL_ENCAP_NONE)) { + dev->features |= NETIF_F_GSO_SOFTWARE; + dev->hw_features |= NETIF_F_GSO_SOFTWARE; + } + /* Can use a lockless transmit, unless we generate * output sequences */ diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 9428345d3a07..bc972e7152c7 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1090,8 +1090,8 @@ static inline int ip6_ufo_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int hh_len, int fragheaderlen, - int transhdrlen, int mtu, unsigned int flags, - const struct flowi6 *fl6) + int exthdrlen, int transhdrlen, int mtu, + unsigned int flags, const struct flowi6 *fl6) { struct sk_buff *skb; @@ -1116,7 +1116,7 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb_put(skb, fragheaderlen + transhdrlen); /* initialize network header pointer */ - skb_reset_network_header(skb); + skb_set_network_header(skb, exthdrlen); /* initialize protocol header pointer */ skb->transport_header = skb->network_header + fragheaderlen; @@ -1358,7 +1358,7 @@ emsgsize: (rt->dst.dev->features & NETIF_F_UFO) && (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { err = ip6_ufo_append_data(sk, queue, getfrag, from, length, - hh_len, fragheaderlen, + hh_len, fragheaderlen, exthdrlen, transhdrlen, mtu, flags, fl6); if (err) goto error; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index eb2ac4bb09ce..1f20345cbc97 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -252,12 +252,12 @@ static int ip6_tnl_create2(struct net_device *dev) t = netdev_priv(dev); + dev->rtnl_link_ops = &ip6_link_ops; err = register_netdevice(dev); if (err < 0) goto out; strcpy(t->parms.name, 
dev->name); - dev->rtnl_link_ops = &ip6_link_ops; dev_hold(dev); ip6_tnl_link(ip6n, t); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index ec22078b0914..42de4ccd159f 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -123,12 +123,11 @@ static int l2tp_ip_recv(struct sk_buff *skb) struct l2tp_tunnel *tunnel = NULL; int length; - /* Point to L2TP header */ - optr = ptr = skb->data; - if (!pskb_may_pull(skb, 4)) goto discard; + /* Point to L2TP header */ + optr = ptr = skb->data; session_id = ntohl(*((__be32 *) ptr)); ptr += 4; @@ -156,6 +155,9 @@ static int l2tp_ip_recv(struct sk_buff *skb) if (!pskb_may_pull(skb, length)) goto discard; + /* Point to L2TP header */ + optr = ptr = skb->data; + ptr += 4; pr_debug("%s: ip recv\n", tunnel->name); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 6b54ff3ff4cb..cd479903d943 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -136,12 +136,11 @@ static int l2tp_ip6_recv(struct sk_buff *skb) struct l2tp_tunnel *tunnel = NULL; int length; - /* Point to L2TP header */ - optr = ptr = skb->data; - if (!pskb_may_pull(skb, 4)) goto discard; + /* Point to L2TP header */ + optr = ptr = skb->data; session_id = ntohl(*((__be32 *) ptr)); ptr += 4; @@ -169,6 +168,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb) if (!pskb_may_pull(skb, length)) goto discard; + /* Point to L2TP header */ + optr = ptr = skb->data; + ptr += 4; pr_debug("%s: ip recv\n", tunnel->name); print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); } diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 283981108ca8..74142d07ad31 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -343,8 +343,10 @@ static void ieee80211_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *chandef) { - if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) + if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) { + ieee80211_recalc_chanctx_min_def(local, ctx); return; + } WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 804575ff7af5..422003540169 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1719,6 +1719,10 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta); enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta); void ieee80211_sta_set_rx_nss(struct sta_info *sta); +enum ieee80211_sta_rx_bandwidth +ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width); +enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta); +void ieee80211_sta_set_rx_nss(struct sta_info *sta); void ieee80211_process_mu_groups(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt); u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata, diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c index 5b6aec1a0630..002244bca948 100644 --- a/net/mac80211/mesh_hwmp.c +++ b/net/mac80211/mesh_hwmp.c @@ -530,7 +530,7 @@ static void hwmp_preq_frame_process(struct ieee80211_sub_if_data *sdata, const u8 *target_addr, *orig_addr; const u8 *da; u8 target_flags, ttl, flags; - u32 orig_sn, target_sn, lifetime, target_metric; + u32 orig_sn, target_sn, lifetime, target_metric = 0; bool reply = false; bool forward = true; bool root_is_gate; diff --git a/net/mac80211/sta_info.c 
b/net/mac80211/sta_info.c index d20bab5c146c..861b93ffbe92 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -67,6 +67,7 @@ static const struct rhashtable_params sta_rht_params = { .nelem_hint = 3, /* start small */ + .insecure_elasticity = true, /* Disable chain-length checks. */ .automatic_shrinking = true, .head_offset = offsetof(struct sta_info, hash_node), .key_offset = offsetof(struct sta_info, addr), @@ -258,11 +259,11 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) } /* Caller must hold local->sta_mtx */ -static void sta_info_hash_add(struct ieee80211_local *local, - struct sta_info *sta) +static int sta_info_hash_add(struct ieee80211_local *local, + struct sta_info *sta) { - rhashtable_insert_fast(&local->sta_hash, &sta->hash_node, - sta_rht_params); + return rhashtable_insert_fast(&local->sta_hash, &sta->hash_node, + sta_rht_params); } static void sta_deliver_ps_frames(struct work_struct *wk) @@ -524,7 +525,9 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) set_sta_flag(sta, WLAN_STA_BLOCK_BA); /* make the station visible */ - sta_info_hash_add(local, sta); + err = sta_info_hash_add(local, sta); + if (err) + goto out_drop_sta; list_add_tail_rcu(&sta->list, &local->sta_list); @@ -557,6 +560,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU) out_remove: sta_info_hash_del(local, sta); list_del_rcu(&sta->list); + out_drop_sta: local->num_sta--; synchronize_net(); __cleanup_single_sta(sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 053f5c4fa495..62193f4bc37b 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -377,7 +377,6 @@ DECLARE_EWMA(signal, 1024, 8) * @uploaded: set to true when sta is uploaded to the driver * @sta: station information we share with the driver * @sta_state: duplicates information about station state (for debug) - * @beacon_loss_count: number of times beacon loss has triggered * @rcu_head: RCU head used for freeing this station struct * @cur_max_bandwidth: maximum bandwidth to use for TX to the station, * taken from HT/VHT capabilities or VHT operating mode notification diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index c9eeb3f12808..a29ea813b7d5 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c @@ -4,7 +4,7 @@ * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> * Copyright 2014, Intel Corporation * Copyright 2014 Intel Mobile Communications GmbH - * Copyright 2015 Intel Deutschland GmbH + * Copyright 2015 - 2016 Intel Deutschland GmbH * * This file is GPLv2 as found in COPYING. 
*/ @@ -15,6 +15,7 @@ #include <linux/rtnetlink.h> #include "ieee80211_i.h" #include "driver-ops.h" +#include "rate.h" /* give usermode some time for retries in setting up the TDLS session */ #define TDLS_PEER_SETUP_TIMEOUT (15 * HZ) @@ -302,7 +303,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, /* IEEE802.11ac-2013 Table E-4 */ u16 centers_80mhz[] = { 5210, 5290, 5530, 5610, 5690, 5775 }; struct cfg80211_chan_def uc = sta->tdls_chandef; - enum nl80211_chan_width max_width = ieee80211_get_sta_bw(&sta->sta); + enum nl80211_chan_width max_width = ieee80211_sta_cap_chan_bw(sta); int i; /* only support upgrading non-narrow channels up to 80Mhz */ @@ -313,7 +314,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, if (max_width > NL80211_CHAN_WIDTH_80) max_width = NL80211_CHAN_WIDTH_80; - if (uc.width == max_width) + if (uc.width >= max_width) return; /* * Channel usage constrains in the IEEE802.11ac-2013 specification only @@ -324,6 +325,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, for (i = 0; i < ARRAY_SIZE(centers_80mhz); i++) if (abs(uc.chan->center_freq - centers_80mhz[i]) <= 30) { uc.center_freq1 = centers_80mhz[i]; + uc.center_freq2 = 0; uc.width = NL80211_CHAN_WIDTH_80; break; } @@ -332,7 +334,7 @@ ieee80211_tdls_chandef_vht_upgrade(struct ieee80211_sub_if_data *sdata, return; /* proceed to downgrade the chandef until usable or the same */ - while (uc.width > max_width && + while (uc.width > max_width || !cfg80211_reg_can_beacon_relax(sdata->local->hw.wiphy, &uc, sdata->wdev.iftype)) ieee80211_chandef_downgrade(&uc); @@ -1242,18 +1244,44 @@ int ieee80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev, return ret; } -static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata) +static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata, + struct sta_info *sta) { struct ieee80211_local *local = sdata->local; struct ieee80211_chanctx_conf *conf; struct ieee80211_chanctx *ctx; + enum nl80211_chan_width width; + struct ieee80211_supported_band *sband; mutex_lock(&local->chanctx_mtx); conf = rcu_dereference_protected(sdata->vif.chanctx_conf, lockdep_is_held(&local->chanctx_mtx)); if (conf) { + width = conf->def.width; + sband = local->hw.wiphy->bands[conf->def.chan->band]; ctx = container_of(conf, struct ieee80211_chanctx, conf); ieee80211_recalc_chanctx_chantype(local, ctx); + + /* if width changed and a peer is given, update its BW */ + if (width != conf->def.width && sta && + test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) { + enum ieee80211_sta_rx_bandwidth bw; + + bw = ieee80211_chan_width_to_rx_bw(conf->def.width); + bw = min(bw, ieee80211_sta_cap_rx_bw(sta)); + if (bw != sta->sta.bandwidth) { + sta->sta.bandwidth = bw; + rate_control_rate_update(local, sband, sta, + IEEE80211_RC_BW_CHANGED); + /* + * if a TDLS peer BW was updated, we need to + * recalc the chandef width again, to get the + * correct chanctx min_def + */ + ieee80211_recalc_chanctx_chantype(local, ctx); + } + } + } mutex_unlock(&local->chanctx_mtx); } @@ -1350,8 +1378,6 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; } - iee80211_tdls_recalc_chanctx(sdata); - mutex_lock(&local->sta_mtx); sta = sta_info_get(sdata, peer); if (!sta) { @@ -1360,6 +1386,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, break; } + iee80211_tdls_recalc_chanctx(sdata, sta); iee80211_tdls_recalc_ht_protection(sdata, sta); set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH); @@ 
-1390,7 +1417,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev, iee80211_tdls_recalc_ht_protection(sdata, NULL); mutex_unlock(&local->sta_mtx); - iee80211_tdls_recalc_chanctx(sdata); + iee80211_tdls_recalc_chanctx(sdata, NULL); break; default: ret = -ENOTSUPP; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 62ad5321257d..21f6602395f7 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1116,11 +1116,15 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, reset_agg_timer = true; } else { queued = true; + if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) { + clear_sta_flag(tx->sta, WLAN_STA_SP); + ps_dbg(tx->sta->sdata, + "STA %pM aid %d: SP frame queued, close the SP w/o telling the peer\n", + tx->sta->sta.addr, tx->sta->sta.aid); + } info->control.vif = &tx->sdata->vif; info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; - info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS | - IEEE80211_TX_CTL_NO_PS_BUFFER | - IEEE80211_TX_STATUS_EOSP; + info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; __skb_queue_tail(&tid_tx->pending, skb); if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) purge_skb = __skb_dequeue(&tid_tx->pending); @@ -1247,7 +1251,8 @@ static void ieee80211_drv_tx(struct ieee80211_local *local, struct txq_info *txqi; u8 ac; - if (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE) + if ((info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) || + (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE)) goto tx_normal; if (!ieee80211_is_data(hdr->frame_control)) diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 89e04d55aa18..e590e2ef9eaf 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -319,7 +319,30 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cap_rx_bw(struct sta_info *sta) return IEEE80211_STA_RX_BW_80; } -static enum ieee80211_sta_rx_bandwidth +enum nl80211_chan_width ieee80211_sta_cap_chan_bw(struct sta_info *sta) +{ + struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap; + u32 cap_width; + + if (!vht_cap->vht_supported) { + if (!sta->sta.ht_cap.ht_supported) + return NL80211_CHAN_WIDTH_20_NOHT; + + return sta->sta.ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 ? 
+ NL80211_CHAN_WIDTH_40 : NL80211_CHAN_WIDTH_20; + } + + cap_width = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK; + + if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) + return NL80211_CHAN_WIDTH_160; + else if (cap_width == IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) + return NL80211_CHAN_WIDTH_80P80; + + return NL80211_CHAN_WIDTH_80; +} + +enum ieee80211_sta_rx_bandwidth ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) { switch (width) { @@ -347,10 +370,7 @@ enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta) bw = ieee80211_sta_cap_rx_bw(sta); bw = min(bw, sta->cur_max_bandwidth); - - /* do not cap the BW of TDLS WIDER_BW peers by the bss */ - if (!test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW)) - bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); + bw = min(bw, ieee80211_chan_width_to_rx_bw(bss_width)); return bw; } diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index b18c5ed42d95..0b80a7140cc4 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c @@ -543,6 +543,9 @@ static struct net_device *find_outdev(struct net *net, if (!dev) return ERR_PTR(-ENODEV); + if (IS_ERR(dev)) + return dev; + /* The caller is holding rtnl anyways, so release the dev reference */ dev_put(dev); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1ecfa710ca98..f12c17f355d9 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -4151,7 +4151,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, /* Opening a Tx-ring is NOT supported in TPACKET_V3 */ if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) { - WARN(1, "Tx-ring is not supported.\n"); + net_warn_ratelimited("Tx-ring is not supported.\n"); goto out; } diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c index 977fb86065b7..abc8cc805e8d 100644 --- a/net/rds/ib_recv.c +++ b/net/rds/ib_recv.c @@ -796,7 +796,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn, addr = kmap_atomic(sg_page(&frag->f_sg)); - src = addr + frag_off; + src = addr + frag->f_sg.offset + frag_off; dst = (void *)map->m_page_addrs[map_page] + map_off; for (k = 0; k < to_copy; k += 8) { /* Record ports that became uncongested, ie diff --git a/net/rds/page.c b/net/rds/page.c index 616f21f4e7d7..e2b5a5832d3d 100644 --- a/net/rds/page.c +++ b/net/rds/page.c @@ -135,8 +135,8 @@ int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes, if (rem->r_offset != 0) rds_stats_inc(s_page_remainder_hit); - rem->r_offset += bytes; - if (rem->r_offset == PAGE_SIZE) { + rem->r_offset += ALIGN(bytes, 8); + if (rem->r_offset >= PAGE_SIZE) { __free_page(rem->r_page); rem->r_page = NULL; } diff --git a/net/sctp/output.c b/net/sctp/output.c index 97745351d58c..9844fe573029 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -705,7 +705,8 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet, /* Check whether this chunk and all the rest of pending data will fit * or delay in hopes of bundling a full sized packet. 
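* The new cutoff also reserves room for a further chunk header, sizeof(sctp_data_chunk_t) = 16 bytes, plus up to 4 bytes of padding, so "enough data queued" now means no additional padded DATA chunk could still be bundled. Worked case, assuming plain IPv4 with pathmtu = 1500 and packet overhead = 32: the fill point drops from 1468 queued bytes to anything over 1448.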
*/ - if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead) + if (chunk->skb->len + q->out_qlen > + transport->pathmtu - packet->overhead - sizeof(sctp_data_chunk_t) - 4) /* Enough data queued to fill a packet */ return SCTP_XMIT_OK; diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 8c6bc795f060..15612ffa8d57 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c @@ -1728,8 +1728,8 @@ alloc_enc_pages(struct rpc_rqst *rqstp) return 0; } - first = snd_buf->page_base >> PAGE_CACHE_SHIFT; - last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT; + first = snd_buf->page_base >> PAGE_SHIFT; + last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_SHIFT; rqstp->rq_enc_pages_num = last - first + 1 + 1; rqstp->rq_enc_pages = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *), @@ -1775,10 +1775,10 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, status = alloc_enc_pages(rqstp); if (status) return status; - first = snd_buf->page_base >> PAGE_CACHE_SHIFT; + first = snd_buf->page_base >> PAGE_SHIFT; inpages = snd_buf->pages + first; snd_buf->pages = rqstp->rq_enc_pages; - snd_buf->page_base -= first << PAGE_CACHE_SHIFT; + snd_buf->page_base -= first << PAGE_SHIFT; /* * Give the tail its own page, in case we need extra space in the * head when wrapping: diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c index d94a8e1e9f05..045e11ecd332 100644 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c @@ -465,7 +465,7 @@ encryptor(struct scatterlist *sg, void *data) page_pos = desc->pos - outbuf->head[0].iov_len; if (page_pos >= 0 && page_pos < outbuf->page_len) { /* pages are not in place: */ - int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT; + int i = (page_pos + outbuf->page_base) >> PAGE_SHIFT; in_page = desc->pages[i]; } else { in_page = sg_page(sg); diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 765088e4ad84..a737c2da0837 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -79,9 +79,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) len -= buf->head[0].iov_len; if (len <= buf->page_len) { unsigned int last = (buf->page_base + len - 1) - >>PAGE_CACHE_SHIFT; + >>PAGE_SHIFT; unsigned int offset = (buf->page_base + len - 1) - & (PAGE_CACHE_SIZE - 1); + & (PAGE_SIZE - 1); ptr = kmap_atomic(buf->pages[last]); pad = *(ptr + offset); kunmap_atomic(ptr); diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 008c25d1b9f9..553bf95f7003 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -881,7 +881,7 @@ static ssize_t cache_downcall(struct address_space *mapping, char *kaddr; ssize_t ret = -ENOMEM; - if (count >= PAGE_CACHE_SIZE) + if (count >= PAGE_SIZE) goto out_slow; page = find_or_create_page(mapping, 0, GFP_KERNEL); @@ -892,7 +892,7 @@ static ssize_t cache_downcall(struct address_space *mapping, ret = cache_do_downcall(kaddr, buf, count, cd); kunmap(page); unlock_page(page); - page_cache_release(page); + put_page(page); return ret; out_slow: return cache_slow_downcall(buf, count, cd); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 31789ef3e614..fc48eca21fd2 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1390,8 +1390,8 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); int err; - 
sb->s_blocksize = PAGE_CACHE_SIZE; - sb->s_blocksize_bits = PAGE_CACHE_SHIFT; + sb->s_blocksize = PAGE_SIZE; + sb->s_blocksize_bits = PAGE_SHIFT; sb->s_magic = RPCAUTH_GSSMAGIC; sb->s_op = &s_ops; sb->s_d_op = &simple_dentry_operations; diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 2df87f78e518..de70c78025d7 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -96,8 +96,8 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct if (base || xdr->page_base) { pglen -= base; base += xdr->page_base; - ppage += base >> PAGE_CACHE_SHIFT; - base &= ~PAGE_CACHE_MASK; + ppage += base >> PAGE_SHIFT; + base &= ~PAGE_MASK; } do { char *kaddr; @@ -113,7 +113,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct } } - len = PAGE_CACHE_SIZE; + len = PAGE_SIZE; kaddr = kmap_atomic(*ppage); if (base) { len -= base; diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 4439ac4c1b53..6bdb3865212d 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -164,7 +164,7 @@ EXPORT_SYMBOL_GPL(xdr_inline_pages); * Note: the addresses pgto_base and pgfrom_base are both calculated in * the same way: * if a memory area starts at byte 'base' in page 'pages[i]', - * then its address is given as (i << PAGE_CACHE_SHIFT) + base + * then its address is given as (i << PAGE_SHIFT) + base * Also note: pgfrom_base must be < pgto_base, but the memory areas * they point to may overlap. */ @@ -181,20 +181,20 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base, pgto_base += len; pgfrom_base += len; - pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT); - pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT); + pgto = pages + (pgto_base >> PAGE_SHIFT); + pgfrom = pages + (pgfrom_base >> PAGE_SHIFT); - pgto_base &= ~PAGE_CACHE_MASK; - pgfrom_base &= ~PAGE_CACHE_MASK; + pgto_base &= ~PAGE_MASK; + pgfrom_base &= ~PAGE_MASK; do { /* Are any pointers crossing a page boundary? 
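[Editor's aside] The sunrpc/xdr.c churn above is mechanical — PAGE_CACHE_SIZE/SHIFT/MASK were aliases for the plain page constants — but the idiom it preserves deserves one standalone example: a linear offset into a page array splits into a page index via >> PAGE_SHIFT and an in-page offset via & ~PAGE_MASK, where PAGE_MASK follows the kernel convention ~(PAGE_SIZE - 1). A self-contained sketch with those constants defined locally:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))   /* kernel convention */

    int main(void)
    {
        unsigned long base = 9000;                /* offset into a page array */
        unsigned long idx  = base >> PAGE_SHIFT;  /* which page: 2 */
        unsigned long off  = base & ~PAGE_MASK;   /* offset within it: 808 */

        /* reconstruct the linear offset exactly as the xdr.c comment says:
         * (i << PAGE_SHIFT) + base */
        printf("idx=%lu off=%lu back=%lu\n", idx, off,
               (idx << PAGE_SHIFT) + off);
        return 0;
    }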
*/ if (pgto_base == 0) { - pgto_base = PAGE_CACHE_SIZE; + pgto_base = PAGE_SIZE; pgto--; } if (pgfrom_base == 0) { - pgfrom_base = PAGE_CACHE_SIZE; + pgfrom_base = PAGE_SIZE; pgfrom--; } @@ -236,11 +236,11 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) char *vto; size_t copy; - pgto = pages + (pgbase >> PAGE_CACHE_SHIFT); - pgbase &= ~PAGE_CACHE_MASK; + pgto = pages + (pgbase >> PAGE_SHIFT); + pgbase &= ~PAGE_MASK; for (;;) { - copy = PAGE_CACHE_SIZE - pgbase; + copy = PAGE_SIZE - pgbase; if (copy > len) copy = len; @@ -253,7 +253,7 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len) break; pgbase += copy; - if (pgbase == PAGE_CACHE_SIZE) { + if (pgbase == PAGE_SIZE) { flush_dcache_page(*pgto); pgbase = 0; pgto++; @@ -280,11 +280,11 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) char *vfrom; size_t copy; - pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT); - pgbase &= ~PAGE_CACHE_MASK; + pgfrom = pages + (pgbase >> PAGE_SHIFT); + pgbase &= ~PAGE_MASK; do { - copy = PAGE_CACHE_SIZE - pgbase; + copy = PAGE_SIZE - pgbase; if (copy > len) copy = len; @@ -293,7 +293,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len) kunmap_atomic(vfrom); pgbase += copy; - if (pgbase == PAGE_CACHE_SIZE) { + if (pgbase == PAGE_SIZE) { pgbase = 0; pgfrom++; } @@ -1038,8 +1038,8 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, if (base < buf->page_len) { subbuf->page_len = min(buf->page_len - base, len); base += buf->page_base; - subbuf->page_base = base & ~PAGE_CACHE_MASK; - subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; + subbuf->page_base = base & ~PAGE_MASK; + subbuf->pages = &buf->pages[base >> PAGE_SHIFT]; len -= subbuf->page_len; base = 0; } else { @@ -1297,9 +1297,9 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base, todo -= avail_here; base += buf->page_base; - ppages = buf->pages + (base >> PAGE_CACHE_SHIFT); - base &= ~PAGE_CACHE_MASK; - avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base, + ppages = buf->pages + (base >> PAGE_SHIFT); + base &= ~PAGE_MASK; + avail_page = min_t(unsigned int, PAGE_SIZE - base, avail_here); c = kmap(*ppages) + base; @@ -1383,7 +1383,7 @@ xdr_xcode_array2(struct xdr_buf *buf, unsigned int base, } avail_page = min(avail_here, - (unsigned int) PAGE_CACHE_SIZE); + (unsigned int) PAGE_SIZE); } base = buf->page_len; /* align to start of tail */ } @@ -1479,9 +1479,9 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, if (page_len > len) page_len = len; len -= page_len; - page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); - i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; - thislen = PAGE_CACHE_SIZE - page_offset; + page_offset = (offset + buf->page_base) & (PAGE_SIZE - 1); + i = (offset + buf->page_base) >> PAGE_SHIFT; + thislen = PAGE_SIZE - page_offset; do { if (thislen > page_len) thislen = page_len; @@ -1492,7 +1492,7 @@ xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, page_len -= thislen; i++; page_offset = 0; - thislen = PAGE_CACHE_SIZE; + thislen = PAGE_SIZE; } while (page_len != 0); offset = 0; } diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index 0a369bb440e7..662bdd20a748 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -842,7 +842,7 @@ static void vmci_transport_peer_detach_cb(u32 sub_id, * qp_handle. 
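[Editor's aside] The _copy_to_pages()/_copy_from_pages() loops converted above all share one shape: clamp each copy to the space left in the current page, then step to the next page and reset the in-page offset. A condensed userspace rendering — kmap_atomic() and flush_dcache_page() dropped, pages modeled as a 2-D array, tiny pages so the crossing is visible:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 64UL  /* deliberately tiny */

    /* copy len bytes into an array of pages starting at linear offset pgbase,
     * clamping each memcpy to the space left in the current page */
    static void copy_to_pages(char pages[][PAGE_SIZE], size_t pgbase,
                              const char *p, size_t len)
    {
        size_t pgidx = pgbase / PAGE_SIZE;

        pgbase %= PAGE_SIZE;
        for (;;) {
            size_t copy = PAGE_SIZE - pgbase;

            if (copy > len)
                copy = len;
            memcpy(pages[pgidx] + pgbase, p, copy);
            len -= copy;
            if (len == 0)
                break;
            p += copy;
            pgbase = 0;     /* continue at the start of the next page */
            pgidx++;
        }
    }

    int main(void)
    {
        char pages[2][PAGE_SIZE] = { { 0 } };

        copy_to_pages(pages, 60, "0123456789", 10);
        printf("%.4s|%.6s\n", pages[0] + 60, pages[1]);  /* 0123|456789 */
        return 0;
    }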
*/ if (vmci_handle_is_invalid(e_payload->handle) || - vmci_handle_is_equal(trans->qp_handle, e_payload->handle)) + !vmci_handle_is_equal(trans->qp_handle, e_payload->handle)) return; /* We don't ask for delayed CBs when we subscribe to this event (we @@ -2154,7 +2154,7 @@ module_exit(vmci_transport_exit); MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMCI transport for Virtual Sockets"); -MODULE_VERSION("1.0.2.0-k"); +MODULE_VERSION("1.0.3.0-k"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("vmware_vsock"); MODULE_ALIAS_NETPROTO(PF_VSOCK); diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 502c9fc8db85..b820cc96a3bc 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -76,16 +76,10 @@ HOSTLOADLIBES_offwaketime += -lelf HOSTLOADLIBES_spintest += -lelf HOSTLOADLIBES_map_perf_test += -lelf -lrt -# point this to your LLVM backend with bpf support -LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc - -# asm/sysreg.h inline assmbly used by it is incompatible with llvm. -# But, ehere is not easy way to fix it, so just exclude it since it is +# asm/sysreg.h - inline assembly used by it is incompatible with llvm. +# But, there is no easy way to fix it, so just exclude it since it is # useless for BPF samples. $(obj)/%.o: $(src)/%.c clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ - -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@ - clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \ - -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ - -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s + -O2 -emit-llvm -c $< -o -| llc -march=bpf -filetype=obj -o $@ diff --git a/samples/bpf/bpf_helpers.h b/samples/bpf/bpf_helpers.h index 9363500131a7..7904a2a493de 100644 --- a/samples/bpf/bpf_helpers.h +++ b/samples/bpf/bpf_helpers.h @@ -82,6 +82,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag #define PT_REGS_FP(x) ((x)->bp) #define PT_REGS_RC(x) ((x)->ax) #define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) ((x)->ip) #elif defined(__s390x__) @@ -94,6 +95,7 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag #define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */ #define PT_REGS_RC(x) ((x)->gprs[2]) #define PT_REGS_SP(x) ((x)->gprs[15]) +#define PT_REGS_IP(x) ((x)->ip) #elif defined(__aarch64__) @@ -106,6 +108,30 @@ static int (*bpf_l4_csum_replace)(void *ctx, int off, int from, int to, int flag #define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */ #define PT_REGS_RC(x) ((x)->regs[0]) #define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) ((x)->pc) + +#elif defined(__powerpc__) + +#define PT_REGS_PARM1(x) ((x)->gpr[3]) +#define PT_REGS_PARM2(x) ((x)->gpr[4]) +#define PT_REGS_PARM3(x) ((x)->gpr[5]) +#define PT_REGS_PARM4(x) ((x)->gpr[6]) +#define PT_REGS_PARM5(x) ((x)->gpr[7]) +#define PT_REGS_RC(x) ((x)->gpr[3]) +#define PT_REGS_SP(x) ((x)->sp) +#define PT_REGS_IP(x) ((x)->nip) #endif + +#ifdef __powerpc__ +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; }) +#define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP +#else +#define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ \ + bpf_probe_read(&(ip), sizeof(ip), (void *)PT_REGS_RET(ctx)); }) +#define BPF_KRETPROBE_READ_RET_IP(ip, ctx) ({ \ + bpf_probe_read(&(ip), sizeof(ip), \ + (void *)(PT_REGS_FP(ctx) + sizeof(ip))); }) +#endif + #endif diff --git a/samples/bpf/map_perf_test_user.c 
b/samples/bpf/map_perf_test_user.c index 95af56ec5739..3147377e8fd3 100644 --- a/samples/bpf/map_perf_test_user.c +++ b/samples/bpf/map_perf_test_user.c @@ -17,6 +17,7 @@ #include <linux/bpf.h> #include <string.h> #include <time.h> +#include <sys/resource.h> #include "libbpf.h" #include "bpf_load.h" diff --git a/samples/bpf/spintest_kern.c b/samples/bpf/spintest_kern.c index 4b27619d91a4..ce0167d09cdc 100644 --- a/samples/bpf/spintest_kern.c +++ b/samples/bpf/spintest_kern.c @@ -34,7 +34,7 @@ struct bpf_map_def SEC("maps") stackmap = { #define PROG(foo) \ int foo(struct pt_regs *ctx) \ { \ - long v = ctx->ip, *val; \ + long v = PT_REGS_IP(ctx), *val; \ \ val = bpf_map_lookup_elem(&my_map, &v); \ bpf_map_update_elem(&my_map, &v, &v, BPF_ANY); \ diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c index 09c1adc27d42..6d6eefd0d465 100644 --- a/samples/bpf/tracex2_kern.c +++ b/samples/bpf/tracex2_kern.c @@ -27,10 +27,10 @@ int bpf_prog2(struct pt_regs *ctx) long init_val = 1; long *value; - /* x64/s390x specific: read ip of kfree_skb caller. + /* read ip of kfree_skb caller. * non-portable version of __builtin_return_address(0) */ - bpf_probe_read(&loc, sizeof(loc), (void *)PT_REGS_RET(ctx)); + BPF_KPROBE_READ_RET_IP(loc, ctx); value = bpf_map_lookup_elem(&my_map, &loc); if (value) diff --git a/samples/bpf/tracex4_kern.c b/samples/bpf/tracex4_kern.c index ac4671420cf1..6dd8e384de96 100644 --- a/samples/bpf/tracex4_kern.c +++ b/samples/bpf/tracex4_kern.c @@ -40,7 +40,7 @@ int bpf_prog2(struct pt_regs *ctx) long ip = 0; /* get ip address of kmem_cache_alloc_node() caller */ - bpf_probe_read(&ip, sizeof(ip), (void *)(PT_REGS_FP(ctx) + sizeof(ip))); + BPF_KRETPROBE_READ_RET_IP(ip, ctx); struct pair v = { .val = bpf_ktime_get_ns(), diff --git a/sound/firewire/Kconfig b/sound/firewire/Kconfig index 2a779c2f63ab..ab894ed1ff67 100644 --- a/sound/firewire/Kconfig +++ b/sound/firewire/Kconfig @@ -134,6 +134,7 @@ config SND_FIREWIRE_TASCAM Say Y here to include support for TASCAM. * FW-1884 * FW-1082 + * FW-1804 To compile this driver as a module, choose M here: the module will be called snd-firewire-tascam. diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c index ed2902609a4c..4484242da0e6 100644 --- a/sound/firewire/amdtp-stream.c +++ b/sound/firewire/amdtp-stream.c @@ -102,6 +102,10 @@ EXPORT_SYMBOL(amdtp_stream_init); */ void amdtp_stream_destroy(struct amdtp_stream *s) { + /* Not initialized. */ + if (s->protocol == NULL) + return; + WARN_ON(amdtp_stream_running(s)); kfree(s->protocol); mutex_destroy(&s->mutex); diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index 3e4e0756e3fe..f7e2cbd2a313 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -67,7 +67,7 @@ static DECLARE_BITMAP(devices_used, SNDRV_CARDS); #define MODEL_MAUDIO_PROJECTMIX 0x00010091 static int -name_device(struct snd_bebob *bebob, unsigned int vendor_id) +name_device(struct snd_bebob *bebob) { struct fw_device *fw_dev = fw_parent_device(bebob->unit); char vendor[24] = {0}; @@ -126,6 +126,17 @@ end: return err; } +static void bebob_free(struct snd_bebob *bebob) +{ + snd_bebob_stream_destroy_duplex(bebob); + fw_unit_put(bebob->unit); + + kfree(bebob->maudio_special_quirk); + + mutex_destroy(&bebob->mutex); + kfree(bebob); +} + /* * This module releases the FireWire unit data after all ALSA character devices * are released by applications. 
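[Editor's aside] The samples/bpf changes above (spintest_kern.c, tracex2_kern.c, tracex4_kern.c) stop poking ctx->ip and raw stack slots directly and go through the new PT_REGS_IP()/BPF_KPROBE_READ_RET_IP() macros, so a single sample source now builds for x86, s390x, aarch64 and powerpc. A toy userspace illustration of the selection idea — the register layouts below are stand-ins, not the real asm/ptrace.h definitions:

    #include <stdio.h>

    /* stand-in register files; the real layouts live in asm/ptrace.h */
    #if defined(DEMO_POWERPC)
    struct pt_regs { unsigned long gpr[32], nip, link; };
    #define PT_REGS_IP(x) ((x)->nip)
    #else /* model the x86-64 case */
    struct pt_regs { unsigned long ip, sp, bp, ax; };
    #define PT_REGS_IP(x) ((x)->ip)
    #endif

    int main(void)
    {
        struct pt_regs regs = { 0 };

        PT_REGS_IP(&regs) = 0xdeadbeef;
        /* callers such as spintest_kern.c only ever say PT_REGS_IP(ctx),
         * so the per-arch field name is hidden behind the macro */
        printf("ip=%#lx\n", PT_REGS_IP(&regs));
        return 0;
    }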
This is for releasing stream data or finishing @@ -137,18 +148,11 @@ bebob_card_free(struct snd_card *card) { struct snd_bebob *bebob = card->private_data; - snd_bebob_stream_destroy_duplex(bebob); - fw_unit_put(bebob->unit); - - kfree(bebob->maudio_special_quirk); - - if (bebob->card_index >= 0) { - mutex_lock(&devices_mutex); - clear_bit(bebob->card_index, devices_used); - mutex_unlock(&devices_mutex); - } + mutex_lock(&devices_mutex); + clear_bit(bebob->card_index, devices_used); + mutex_unlock(&devices_mutex); - mutex_destroy(&bebob->mutex); + bebob_free(card->private_data); } static const struct snd_bebob_spec * @@ -176,16 +180,17 @@ check_audiophile_booted(struct fw_unit *unit) return strncmp(name, "FW Audiophile Bootloader", 15) != 0; } -static int -bebob_probe(struct fw_unit *unit, - const struct ieee1394_device_id *entry) +static void +do_registration(struct work_struct *work) { - struct snd_card *card; - struct snd_bebob *bebob; - const struct snd_bebob_spec *spec; + struct snd_bebob *bebob = + container_of(work, struct snd_bebob, dwork.work); unsigned int card_index; int err; + if (bebob->registered) + return; + mutex_lock(&devices_mutex); for (card_index = 0; card_index < SNDRV_CARDS; card_index++) { @@ -193,64 +198,39 @@ bebob_probe(struct fw_unit *unit, break; } if (card_index >= SNDRV_CARDS) { - err = -ENOENT; - goto end; + mutex_unlock(&devices_mutex); + return; } - if ((entry->vendor_id == VEN_FOCUSRITE) && - (entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH)) - spec = get_saffire_spec(unit); - else if ((entry->vendor_id == VEN_MAUDIO1) && - (entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH) && - !check_audiophile_booted(unit)) - spec = NULL; - else - spec = (const struct snd_bebob_spec *)entry->driver_data; - - if (spec == NULL) { - if ((entry->vendor_id == VEN_MAUDIO1) || - (entry->vendor_id == VEN_MAUDIO2)) - err = snd_bebob_maudio_load_firmware(unit); - else - err = -ENOSYS; - goto end; + err = snd_card_new(&bebob->unit->device, index[card_index], + id[card_index], THIS_MODULE, 0, &bebob->card); + if (err < 0) { + mutex_unlock(&devices_mutex); + return; } - err = snd_card_new(&unit->device, index[card_index], id[card_index], - THIS_MODULE, sizeof(struct snd_bebob), &card); + err = name_device(bebob); if (err < 0) - goto end; - bebob = card->private_data; - bebob->card_index = card_index; - set_bit(card_index, devices_used); - card->private_free = bebob_card_free; - - bebob->card = card; - bebob->unit = fw_unit_get(unit); - bebob->spec = spec; - mutex_init(&bebob->mutex); - spin_lock_init(&bebob->lock); - init_waitqueue_head(&bebob->hwdep_wait); + goto error; - err = name_device(bebob, entry->vendor_id); + if (bebob->spec == &maudio_special_spec) { + if (bebob->entry->model_id == MODEL_MAUDIO_FW1814) + err = snd_bebob_maudio_special_discover(bebob, true); + else + err = snd_bebob_maudio_special_discover(bebob, false); + } else { + err = snd_bebob_stream_discover(bebob); + } if (err < 0) goto error; - if ((entry->vendor_id == VEN_MAUDIO1) && - (entry->model_id == MODEL_MAUDIO_FW1814)) - err = snd_bebob_maudio_special_discover(bebob, true); - else if ((entry->vendor_id == VEN_MAUDIO1) && - (entry->model_id == MODEL_MAUDIO_PROJECTMIX)) - err = snd_bebob_maudio_special_discover(bebob, false); - else - err = snd_bebob_stream_discover(bebob); + err = snd_bebob_stream_init_duplex(bebob); if (err < 0) goto error; snd_bebob_proc_init(bebob); - if ((bebob->midi_input_ports > 0) || - (bebob->midi_output_ports > 0)) { + if (bebob->midi_input_ports > 0 || bebob->midi_output_ports > 0) 
{ err = snd_bebob_create_midi_devices(bebob); if (err < 0) goto error; @@ -264,16 +244,75 @@ bebob_probe(struct fw_unit *unit, if (err < 0) goto error; - err = snd_bebob_stream_init_duplex(bebob); + err = snd_card_register(bebob->card); if (err < 0) goto error; - if (!bebob->maudio_special_quirk) { - err = snd_card_register(card); - if (err < 0) { - snd_bebob_stream_destroy_duplex(bebob); - goto error; - } + set_bit(card_index, devices_used); + mutex_unlock(&devices_mutex); + + /* + * After registered, bebob instance can be released corresponding to + * releasing the sound card instance. + */ + bebob->card->private_free = bebob_card_free; + bebob->card->private_data = bebob; + bebob->registered = true; + + return; +error: + mutex_unlock(&devices_mutex); + snd_bebob_stream_destroy_duplex(bebob); + snd_card_free(bebob->card); + dev_info(&bebob->unit->device, + "Sound card registration failed: %d\n", err); +} + +static int +bebob_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry) +{ + struct snd_bebob *bebob; + const struct snd_bebob_spec *spec; + + if (entry->vendor_id == VEN_FOCUSRITE && + entry->model_id == MODEL_FOCUSRITE_SAFFIRE_BOTH) + spec = get_saffire_spec(unit); + else if (entry->vendor_id == VEN_MAUDIO1 && + entry->model_id == MODEL_MAUDIO_AUDIOPHILE_BOTH && + !check_audiophile_booted(unit)) + spec = NULL; + else + spec = (const struct snd_bebob_spec *)entry->driver_data; + + if (spec == NULL) { + if (entry->vendor_id == VEN_MAUDIO1 || + entry->vendor_id == VEN_MAUDIO2) + return snd_bebob_maudio_load_firmware(unit); + else + return -ENODEV; + } + + /* Allocate this independent of sound card instance. */ + bebob = kzalloc(sizeof(struct snd_bebob), GFP_KERNEL); + if (bebob == NULL) + return -ENOMEM; + + bebob->unit = fw_unit_get(unit); + bebob->entry = entry; + bebob->spec = spec; + dev_set_drvdata(&unit->device, bebob); + + mutex_init(&bebob->mutex); + spin_lock_init(&bebob->lock); + init_waitqueue_head(&bebob->hwdep_wait); + + /* Allocate and register this sound card later. */ + INIT_DEFERRABLE_WORK(&bebob->dwork, do_registration); + + if (entry->vendor_id != VEN_MAUDIO1 || + (entry->model_id != MODEL_MAUDIO_FW1814 && + entry->model_id != MODEL_MAUDIO_PROJECTMIX)) { + snd_fw_schedule_registration(unit, &bebob->dwork); } else { /* * This is a workaround. This bus reset seems to have an effect @@ -285,19 +324,11 @@ bebob_probe(struct fw_unit *unit, * signals from dbus and starts I/Os. To avoid I/Os till the * future bus reset, registration is done in next update(). */ - bebob->deferred_registration = true; fw_schedule_bus_reset(fw_parent_device(bebob->unit)->card, false, true); } - dev_set_drvdata(&unit->device, bebob); -end: - mutex_unlock(&devices_mutex); - return err; -error: - mutex_unlock(&devices_mutex); - snd_card_free(card); - return err; + return 0; } /* @@ -324,15 +355,11 @@ bebob_update(struct fw_unit *unit) if (bebob == NULL) return; - fcp_bus_reset(bebob->unit); - - if (bebob->deferred_registration) { - if (snd_card_register(bebob->card) < 0) { - snd_bebob_stream_destroy_duplex(bebob); - snd_card_free(bebob->card); - } - bebob->deferred_registration = false; - } + /* Postpone a workqueue for deferred registration. */ + if (!bebob->registered) + snd_fw_schedule_registration(unit, &bebob->dwork); + else + fcp_bus_reset(bebob->unit); } static void bebob_remove(struct fw_unit *unit) @@ -342,8 +369,20 @@ static void bebob_remove(struct fw_unit *unit) if (bebob == NULL) return; - /* No need to wait for releasing card object in this context. 
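[Editor's aside] The bebob_remove() rewrite in this hunk is the crux of the deferred-registration scheme: cancel the pending registration work first, then pick one of two teardown paths depending on whether the card ever materialized. A toy analogue of that control flow (the kernel calls appear only as comments; this is a schematic, not a driver):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct bebob {
        bool registered;    /* set only after snd_card_register() succeeds */
    };

    static void bebob_remove(struct bebob *b)
    {
        /* stands in for cancel_delayed_work_sync(&bebob->dwork):
         * after this point do_registration() can no longer run */
        printf("cancel pending registration work\n");

        if (b->registered) {
            /* snd_card_free_when_closed(): the card core frees the
             * driver data later, via the private_free callback */
            printf("hand teardown to the sound card core\n");
        } else {
            /* the card never came into existence, so free directly */
            printf("free driver data directly\n");
            free(b);
        }
    }

    int main(void)
    {
        struct bebob *b = calloc(1, sizeof(*b));

        bebob_remove(b);    /* unplugged before the probe delay elapsed */
        return 0;
    }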
*/ - snd_card_free_when_closed(bebob->card); + /* + * Confirm to stop the work for registration before the sound card is + * going to be released. The work is not scheduled again because bus + * reset handler is not called anymore. + */ + cancel_delayed_work_sync(&bebob->dwork); + + if (bebob->registered) { + /* No need to wait for releasing card object in this context. */ + snd_card_free_when_closed(bebob->card); + } else { + /* Don't forget this case. */ + bebob_free(bebob); + } } static const struct snd_bebob_rate_spec normal_rate_spec = { diff --git a/sound/firewire/bebob/bebob.h b/sound/firewire/bebob/bebob.h index b50bb33d9d46..2a442a7a2119 100644 --- a/sound/firewire/bebob/bebob.h +++ b/sound/firewire/bebob/bebob.h @@ -83,6 +83,10 @@ struct snd_bebob { struct mutex mutex; spinlock_t lock; + bool registered; + struct delayed_work dwork; + + const struct ieee1394_device_id *entry; const struct snd_bebob_spec *spec; unsigned int midi_input_ports; @@ -111,7 +115,6 @@ struct snd_bebob { /* for M-Audio special devices */ void *maudio_special_quirk; - bool deferred_registration; /* For BeBoB version quirk. */ unsigned int version; diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 8b64aef31a86..96fe68f42e5d 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c @@ -20,8 +20,6 @@ MODULE_LICENSE("GPL v2"); #define WEISS_CATEGORY_ID 0x00 #define LOUD_CATEGORY_ID 0x10 -#define PROBE_DELAY_MS (2 * MSEC_PER_SEC) - /* * Some models support several isochronous channels, while these streams are not * always available. In this case, add the model name to this list. @@ -201,6 +199,10 @@ static void do_registration(struct work_struct *work) dice_card_strings(dice); + err = snd_dice_stream_init_duplex(dice); + if (err < 0) + goto error; + snd_dice_create_proc(dice); err = snd_dice_create_pcm(dice); @@ -229,28 +231,14 @@ static void do_registration(struct work_struct *work) return; error: + snd_dice_stream_destroy_duplex(dice); snd_dice_transaction_destroy(dice); + snd_dice_stream_destroy_duplex(dice); snd_card_free(dice->card); dev_info(&dice->unit->device, "Sound card registration failed: %d\n", err); } -static void schedule_registration(struct snd_dice *dice) -{ - struct fw_card *fw_card = fw_parent_device(dice->unit)->card; - u64 now, delay; - - now = get_jiffies_64(); - delay = fw_card->reset_jiffies + msecs_to_jiffies(PROBE_DELAY_MS); - - if (time_after64(delay, now)) - delay -= now; - else - delay = 0; - - mod_delayed_work(system_wq, &dice->dwork, delay); -} - static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) { struct snd_dice *dice; @@ -273,15 +261,9 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) init_completion(&dice->clock_accepted); init_waitqueue_head(&dice->hwdep_wait); - err = snd_dice_stream_init_duplex(dice); - if (err < 0) { - dice_free(dice); - return err; - } - /* Allocate and register this sound card later. */ INIT_DEFERRABLE_WORK(&dice->dwork, do_registration); - schedule_registration(dice); + snd_fw_schedule_registration(unit, &dice->dwork); return 0; } @@ -312,7 +294,7 @@ static void dice_bus_reset(struct fw_unit *unit) /* Postpone a workqueue for deferred registration. */ if (!dice->registered) - schedule_registration(dice); + snd_fw_schedule_registration(unit, &dice->dwork); /* The handler address register becomes initialized. 
*/ snd_dice_transaction_reinit(dice); diff --git a/sound/firewire/digi00x/digi00x-transaction.c b/sound/firewire/digi00x/digi00x-transaction.c index 554324d8c602..735d35640807 100644 --- a/sound/firewire/digi00x/digi00x-transaction.c +++ b/sound/firewire/digi00x/digi00x-transaction.c @@ -126,12 +126,17 @@ int snd_dg00x_transaction_register(struct snd_dg00x *dg00x) return err; error: fw_core_remove_address_handler(&dg00x->async_handler); - dg00x->async_handler.address_callback = NULL; + dg00x->async_handler.callback_data = NULL; return err; } void snd_dg00x_transaction_unregister(struct snd_dg00x *dg00x) { + if (dg00x->async_handler.callback_data == NULL) + return; + snd_fw_async_midi_port_destroy(&dg00x->out_control); fw_core_remove_address_handler(&dg00x->async_handler); + + dg00x->async_handler.callback_data = NULL; } diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c index 1f33b7a1fca4..cc4776c6ded3 100644 --- a/sound/firewire/digi00x/digi00x.c +++ b/sound/firewire/digi00x/digi00x.c @@ -40,10 +40,8 @@ static int name_card(struct snd_dg00x *dg00x) return 0; } -static void dg00x_card_free(struct snd_card *card) +static void dg00x_free(struct snd_dg00x *dg00x) { - struct snd_dg00x *dg00x = card->private_data; - snd_dg00x_stream_destroy_duplex(dg00x); snd_dg00x_transaction_unregister(dg00x); @@ -52,28 +50,24 @@ static void dg00x_card_free(struct snd_card *card) mutex_destroy(&dg00x->mutex); } -static int snd_dg00x_probe(struct fw_unit *unit, - const struct ieee1394_device_id *entry) +static void dg00x_card_free(struct snd_card *card) { - struct snd_card *card; - struct snd_dg00x *dg00x; - int err; + dg00x_free(card->private_data); +} - /* create card */ - err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, - sizeof(struct snd_dg00x), &card); - if (err < 0) - return err; - card->private_free = dg00x_card_free; +static void do_registration(struct work_struct *work) +{ + struct snd_dg00x *dg00x = + container_of(work, struct snd_dg00x, dwork.work); + int err; - /* initialize myself */ - dg00x = card->private_data; - dg00x->card = card; - dg00x->unit = fw_unit_get(unit); + if (dg00x->registered) + return; - mutex_init(&dg00x->mutex); - spin_lock_init(&dg00x->lock); - init_waitqueue_head(&dg00x->hwdep_wait); + err = snd_card_new(&dg00x->unit->device, -1, NULL, THIS_MODULE, 0, + &dg00x->card); + if (err < 0) + return; err = name_card(dg00x); if (err < 0) @@ -101,35 +95,86 @@ static int snd_dg00x_probe(struct fw_unit *unit, if (err < 0) goto error; - err = snd_card_register(card); + err = snd_card_register(dg00x->card); if (err < 0) goto error; - dev_set_drvdata(&unit->device, dg00x); + dg00x->card->private_free = dg00x_card_free; + dg00x->card->private_data = dg00x; + dg00x->registered = true; - return err; + return; error: - snd_card_free(card); - return err; + snd_dg00x_transaction_unregister(dg00x); + snd_dg00x_stream_destroy_duplex(dg00x); + snd_card_free(dg00x->card); + dev_info(&dg00x->unit->device, + "Sound card registration failed: %d\n", err); +} + +static int snd_dg00x_probe(struct fw_unit *unit, + const struct ieee1394_device_id *entry) +{ + struct snd_dg00x *dg00x; + + /* Allocate this independent of sound card instance. */ + dg00x = kzalloc(sizeof(struct snd_dg00x), GFP_KERNEL); + if (dg00x == NULL) + return -ENOMEM; + + dg00x->unit = fw_unit_get(unit); + dev_set_drvdata(&unit->device, dg00x); + + mutex_init(&dg00x->mutex); + spin_lock_init(&dg00x->lock); + init_waitqueue_head(&dg00x->hwdep_wait); + + /* Allocate and register this sound card later. 
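[Editor's aside] The digi00x-transaction.c hunk above makes unregistration idempotent by reusing async_handler.callback_data as a "registered" marker: it is cleared on the probe error path and again in unregister, so a second unregister call returns early instead of removing the address handler twice. The same guard in miniature (names illustrative):

    #include <stdio.h>

    struct handler {
        void *callback_data;    /* non-NULL only while registered */
    };

    static void transaction_unregister(struct handler *h)
    {
        if (h->callback_data == NULL)
            return;             /* never registered, or already torn down */

        printf("remove address handler\n");
        h->callback_data = NULL;
    }

    int main(void)
    {
        struct handler h = { .callback_data = &h };

        transaction_unregister(&h);     /* removes the handler */
        transaction_unregister(&h);     /* second call is a no-op */
        return 0;
    }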
*/ + INIT_DEFERRABLE_WORK(&dg00x->dwork, do_registration); + snd_fw_schedule_registration(unit, &dg00x->dwork); + + return 0; } static void snd_dg00x_update(struct fw_unit *unit) { struct snd_dg00x *dg00x = dev_get_drvdata(&unit->device); + /* Postpone a workqueue for deferred registration. */ + if (!dg00x->registered) + snd_fw_schedule_registration(unit, &dg00x->dwork); + snd_dg00x_transaction_reregister(dg00x); - mutex_lock(&dg00x->mutex); - snd_dg00x_stream_update_duplex(dg00x); - mutex_unlock(&dg00x->mutex); + /* + * After registration, userspace can start packet streaming, then this + * code block works fine. + */ + if (dg00x->registered) { + mutex_lock(&dg00x->mutex); + snd_dg00x_stream_update_duplex(dg00x); + mutex_unlock(&dg00x->mutex); + } } static void snd_dg00x_remove(struct fw_unit *unit) { struct snd_dg00x *dg00x = dev_get_drvdata(&unit->device); - /* No need to wait for releasing card object in this context. */ - snd_card_free_when_closed(dg00x->card); + /* + * Confirm to stop the work for registration before the sound card is + * going to be released. The work is not scheduled again because bus + * reset handler is not called anymore. + */ + cancel_delayed_work_sync(&dg00x->dwork); + + if (dg00x->registered) { + /* No need to wait for releasing card object in this context. */ + snd_card_free_when_closed(dg00x->card); + } else { + /* Don't forget this case. */ + dg00x_free(dg00x); + } } static const struct ieee1394_device_id snd_dg00x_id_table[] = { diff --git a/sound/firewire/digi00x/digi00x.h b/sound/firewire/digi00x/digi00x.h index 907e73993677..2cd465c0caae 100644 --- a/sound/firewire/digi00x/digi00x.h +++ b/sound/firewire/digi00x/digi00x.h @@ -37,6 +37,9 @@ struct snd_dg00x { struct mutex mutex; spinlock_t lock; + bool registered; + struct delayed_work dwork; + struct amdtp_stream tx_stream; struct fw_iso_resources tx_resources; diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 8f27b67503c8..71a0613d3da0 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -168,11 +168,34 @@ get_hardware_info(struct snd_efw *efw) sizeof(struct snd_efw_phys_grp) * hwinfo->phys_in_grp_count); memcpy(&efw->phys_out_grps, hwinfo->phys_out_grps, sizeof(struct snd_efw_phys_grp) * hwinfo->phys_out_grp_count); + + /* AudioFire8 (since 2009) and AudioFirePre8 */ + if (hwinfo->type == MODEL_ECHO_AUDIOFIRE_9) + efw->is_af9 = true; + /* These models uses the same firmware. */ + if (hwinfo->type == MODEL_ECHO_AUDIOFIRE_2 || + hwinfo->type == MODEL_ECHO_AUDIOFIRE_4 || + hwinfo->type == MODEL_ECHO_AUDIOFIRE_9 || + hwinfo->type == MODEL_GIBSON_RIP || + hwinfo->type == MODEL_GIBSON_GOLDTOP) + efw->is_fireworks3 = true; end: kfree(hwinfo); return err; } +static void efw_free(struct snd_efw *efw) +{ + snd_efw_stream_destroy_duplex(efw); + snd_efw_transaction_remove_instance(efw); + fw_unit_put(efw->unit); + + kfree(efw->resp_buf); + + mutex_destroy(&efw->mutex); + kfree(efw); +} + /* * This module releases the FireWire unit data after all ALSA character devices * are released by applications. 
This is for releasing stream data or finishing @@ -184,28 +207,24 @@ efw_card_free(struct snd_card *card) { struct snd_efw *efw = card->private_data; - snd_efw_stream_destroy_duplex(efw); - snd_efw_transaction_remove_instance(efw); - fw_unit_put(efw->unit); - - kfree(efw->resp_buf); - if (efw->card_index >= 0) { mutex_lock(&devices_mutex); clear_bit(efw->card_index, devices_used); mutex_unlock(&devices_mutex); } - mutex_destroy(&efw->mutex); + efw_free(card->private_data); } -static int -efw_probe(struct fw_unit *unit, - const struct ieee1394_device_id *entry) +static void +do_registration(struct work_struct *work) { - struct snd_card *card; - struct snd_efw *efw; - int card_index, err; + struct snd_efw *efw = container_of(work, struct snd_efw, dwork.work); + unsigned int card_index; + int err; + + if (efw->registered) + return; mutex_lock(&devices_mutex); @@ -215,24 +234,16 @@ efw_probe(struct fw_unit *unit, break; } if (card_index >= SNDRV_CARDS) { - err = -ENOENT; - goto end; + mutex_unlock(&devices_mutex); + return; } - err = snd_card_new(&unit->device, index[card_index], id[card_index], - THIS_MODULE, sizeof(struct snd_efw), &card); - if (err < 0) - goto end; - efw = card->private_data; - efw->card_index = card_index; - set_bit(card_index, devices_used); - card->private_free = efw_card_free; - - efw->card = card; - efw->unit = fw_unit_get(unit); - mutex_init(&efw->mutex); - spin_lock_init(&efw->lock); - init_waitqueue_head(&efw->hwdep_wait); + err = snd_card_new(&efw->unit->device, index[card_index], + id[card_index], THIS_MODULE, 0, &efw->card); + if (err < 0) { + mutex_unlock(&devices_mutex); + return; + } /* prepare response buffer */ snd_efw_resp_buf_size = clamp(snd_efw_resp_buf_size, @@ -248,16 +259,10 @@ efw_probe(struct fw_unit *unit, err = get_hardware_info(efw); if (err < 0) goto error; - /* AudioFire8 (since 2009) and AudioFirePre8 */ - if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) - efw->is_af9 = true; - /* These models uses the same firmware. */ - if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 || - entry->model_id == MODEL_ECHO_AUDIOFIRE_4 || - entry->model_id == MODEL_ECHO_AUDIOFIRE_9 || - entry->model_id == MODEL_GIBSON_RIP || - entry->model_id == MODEL_GIBSON_GOLDTOP) - efw->is_fireworks3 = true; + + err = snd_efw_stream_init_duplex(efw); + if (err < 0) + goto error; snd_efw_proc_init(efw); @@ -275,44 +280,93 @@ efw_probe(struct fw_unit *unit, if (err < 0) goto error; - err = snd_efw_stream_init_duplex(efw); + err = snd_card_register(efw->card); if (err < 0) goto error; - err = snd_card_register(card); - if (err < 0) { - snd_efw_stream_destroy_duplex(efw); - goto error; - } - - dev_set_drvdata(&unit->device, efw); -end: + set_bit(card_index, devices_used); mutex_unlock(&devices_mutex); - return err; + + /* + * After registered, efw instance can be released corresponding to + * releasing the sound card instance. 
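[Editor's aside] fireworks (like bebob) keeps its devices_used bitmap but now claims the card index only after registration fully succeeds, which is why set_bit() moved below snd_card_register() above. The allocation scheme itself is simple first-fit under a lock; a single-threaded sketch (the drivers hold devices_mutex around these same steps):

    #include <stdio.h>

    #define SNDRV_CARDS 8
    static unsigned char devices_used[SNDRV_CARDS];

    /* returns a free card index, or -1 when all slots are taken */
    static int claim_card_index(void)
    {
        int i;

        for (i = 0; i < SNDRV_CARDS; i++) {
            if (!devices_used[i]) {
                devices_used[i] = 1;    /* set_bit() */
                return i;
            }
        }
        return -1;      /* the drivers abort registration here */
    }

    int main(void)
    {
        printf("first=%d second=%d\n", claim_card_index(), claim_card_index());
        devices_used[0] = 0;            /* clear_bit() in *_card_free() */
        printf("reused=%d\n", claim_card_index());
        return 0;
    }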
+ */ + efw->card->private_free = efw_card_free; + efw->card->private_data = efw; + efw->registered = true; + + return; error: - snd_efw_transaction_remove_instance(efw); mutex_unlock(&devices_mutex); - snd_card_free(card); - return err; + snd_efw_transaction_remove_instance(efw); + snd_efw_stream_destroy_duplex(efw); + snd_card_free(efw->card); + dev_info(&efw->unit->device, + "Sound card registration failed: %d\n", err); +} + +static int +efw_probe(struct fw_unit *unit, const struct ieee1394_device_id *entry) +{ + struct snd_efw *efw; + + efw = kzalloc(sizeof(struct snd_efw), GFP_KERNEL); + if (efw == NULL) + return -ENOMEM; + + efw->unit = fw_unit_get(unit); + dev_set_drvdata(&unit->device, efw); + + mutex_init(&efw->mutex); + spin_lock_init(&efw->lock); + init_waitqueue_head(&efw->hwdep_wait); + + /* Allocate and register this sound card later. */ + INIT_DEFERRABLE_WORK(&efw->dwork, do_registration); + snd_fw_schedule_registration(unit, &efw->dwork); + + return 0; } static void efw_update(struct fw_unit *unit) { struct snd_efw *efw = dev_get_drvdata(&unit->device); + /* Postpone a workqueue for deferred registration. */ + if (!efw->registered) + snd_fw_schedule_registration(unit, &efw->dwork); + snd_efw_transaction_bus_reset(efw->unit); - mutex_lock(&efw->mutex); - snd_efw_stream_update_duplex(efw); - mutex_unlock(&efw->mutex); + /* + * After registration, userspace can start packet streaming, then this + * code block works fine. + */ + if (efw->registered) { + mutex_lock(&efw->mutex); + snd_efw_stream_update_duplex(efw); + mutex_unlock(&efw->mutex); + } } static void efw_remove(struct fw_unit *unit) { struct snd_efw *efw = dev_get_drvdata(&unit->device); - /* No need to wait for releasing card object in this context. */ - snd_card_free_when_closed(efw->card); + /* + * Confirm to stop the work for registration before the sound card is + * going to be released. The work is not scheduled again because bus + * reset handler is not called anymore. + */ + cancel_delayed_work_sync(&efw->dwork); + + if (efw->registered) { + /* No need to wait for releasing card object in this context. */ + snd_card_free_when_closed(efw->card); + } else { + /* Don't forget this case. */ + efw_free(efw); + } } static const struct ieee1394_device_id efw_id_table[] = { diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h index 96c4e0c6a9bd..471c77254dfd 100644 --- a/sound/firewire/fireworks/fireworks.h +++ b/sound/firewire/fireworks/fireworks.h @@ -65,6 +65,9 @@ struct snd_efw { struct mutex mutex; spinlock_t lock; + bool registered; + struct delayed_work dwork; + /* for transaction */ u32 seqnum; bool resp_addr_changable; diff --git a/sound/firewire/lib.c b/sound/firewire/lib.c index f80aafa44c89..ca4dfcf43175 100644 --- a/sound/firewire/lib.c +++ b/sound/firewire/lib.c @@ -67,6 +67,38 @@ int snd_fw_transaction(struct fw_unit *unit, int tcode, } EXPORT_SYMBOL(snd_fw_transaction); +#define PROBE_DELAY_MS (2 * MSEC_PER_SEC) + +/** + * snd_fw_schedule_registration - schedule work for sound card registration + * @unit: an instance for unit on IEEE 1394 bus + * @dwork: delayed work with callback function + * + * This function is not designed for general purposes. When new unit is + * connected to IEEE 1394 bus, the bus is under bus-reset state because of + * topological change. In this state, units tend to fail both of asynchronous + * and isochronous communication. To avoid this problem, this function is used + * to postpone sound card registration after the state. 
The callers must + * set up instance of delayed work in advance. + */ +void snd_fw_schedule_registration(struct fw_unit *unit, + struct delayed_work *dwork) +{ + u64 now, delay; + + now = get_jiffies_64(); + delay = fw_parent_device(unit)->card->reset_jiffies + + msecs_to_jiffies(PROBE_DELAY_MS); + + if (time_after64(delay, now)) + delay -= now; + else + delay = 0; + + mod_delayed_work(system_wq, dwork, delay); +} +EXPORT_SYMBOL(snd_fw_schedule_registration); + static void async_midi_port_callback(struct fw_card *card, int rcode, void *data, size_t length, void *callback_data) diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h index f3f6f84c48d6..f6769312ebfc 100644 --- a/sound/firewire/lib.h +++ b/sound/firewire/lib.h @@ -22,6 +22,9 @@ static inline bool rcode_is_permanent_error(int rcode) return rcode == RCODE_TYPE_ERROR || rcode == RCODE_ADDRESS_ERROR; } +void snd_fw_schedule_registration(struct fw_unit *unit, + struct delayed_work *dwork); + struct snd_fw_async_midi_port; typedef int (*snd_fw_async_midi_port_fill)( struct snd_rawmidi_substream *substream, diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index abedc2207261..e629b88f7d93 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -118,15 +118,8 @@ end: return err; } -/* - * This module releases the FireWire unit data after all ALSA character devices - * are released by applications. This is for releasing stream data or finishing - * transactions safely. Thus at returning from .remove(), this module still keep - * references for the unit. - */ -static void oxfw_card_free(struct snd_card *card) +static void oxfw_free(struct snd_oxfw *oxfw) { - struct snd_oxfw *oxfw = card->private_data; unsigned int i; snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); @@ -144,6 +137,17 @@ static void oxfw_card_free(struct snd_card *card) mutex_destroy(&oxfw->mutex); } +/* + * This module releases the FireWire unit data after all ALSA character devices + * are released by applications. This is for releasing stream data or finishing + * transactions safely. Thus at returning from .remove(), this module still keep + * references for the unit. 
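[Editor's aside] snd_fw_schedule_registration() above derives the remaining probe delay with time_after64(), which stays correct across a wrap of the 64-bit jiffies counter because the comparison is a signed subtraction rather than a direct >. A self-contained demonstration of the idiom (the kernel macro additionally wraps both sides in typecheck()):

    #include <stdio.h>
    #include <stdint.h>

    /* kernel's time_after64(a, b): true if a is later than b,
     * implemented wrap-safely via signed subtraction */
    #define time_after64(a, b) ((int64_t)((b) - (a)) < 0)

    int main(void)
    {
        uint64_t now = UINT64_MAX - 5;  /* counter about to wrap */
        uint64_t deadline = now + 10;   /* wraps around to 4 */

        /* a naive 'deadline > now' claims the deadline already passed */
        printf("naive=%d wrap_safe=%d\n",
               deadline > now, time_after64(deadline, now));
        return 0;
    }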
+ */ +static void oxfw_card_free(struct snd_card *card) +{ + oxfw_free(card->private_data); +} + static int detect_quirks(struct snd_oxfw *oxfw) { struct fw_device *fw_dev = fw_parent_device(oxfw->unit); @@ -205,41 +209,39 @@ static int detect_quirks(struct snd_oxfw *oxfw) return 0; } -static int oxfw_probe(struct fw_unit *unit, - const struct ieee1394_device_id *entry) +static void do_registration(struct work_struct *work) { - struct snd_card *card; - struct snd_oxfw *oxfw; + struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work); int err; - if (entry->vendor_id == VENDOR_LOUD && !detect_loud_models(unit)) - return -ENODEV; + if (oxfw->registered) + return; - err = snd_card_new(&unit->device, -1, NULL, THIS_MODULE, - sizeof(*oxfw), &card); + err = snd_card_new(&oxfw->unit->device, -1, NULL, THIS_MODULE, 0, + &oxfw->card); if (err < 0) - return err; + return; - card->private_free = oxfw_card_free; - oxfw = card->private_data; - oxfw->card = card; - mutex_init(&oxfw->mutex); - oxfw->unit = fw_unit_get(unit); - oxfw->entry = entry; - spin_lock_init(&oxfw->lock); - init_waitqueue_head(&oxfw->hwdep_wait); + err = name_card(oxfw); + if (err < 0) + goto error; - err = snd_oxfw_stream_discover(oxfw); + err = detect_quirks(oxfw); if (err < 0) goto error; - err = name_card(oxfw); + err = snd_oxfw_stream_discover(oxfw); if (err < 0) goto error; - err = detect_quirks(oxfw); + err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->rx_stream); if (err < 0) goto error; + if (oxfw->has_output) { + err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->tx_stream); + if (err < 0) + goto error; + } err = snd_oxfw_create_pcm(oxfw); if (err < 0) @@ -255,54 +257,97 @@ static int oxfw_probe(struct fw_unit *unit, if (err < 0) goto error; - err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->rx_stream); + err = snd_card_register(oxfw->card); if (err < 0) goto error; - if (oxfw->has_output) { - err = snd_oxfw_stream_init_simplex(oxfw, &oxfw->tx_stream); - if (err < 0) - goto error; - } - err = snd_card_register(card); - if (err < 0) { - snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); - if (oxfw->has_output) - snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); - goto error; - } + /* + * After registered, oxfw instance can be released corresponding to + * releasing the sound card instance. + */ + oxfw->card->private_free = oxfw_card_free; + oxfw->card->private_data = oxfw; + oxfw->registered = true; + + return; +error: + snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); + if (oxfw->has_output) + snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); + snd_card_free(oxfw->card); + dev_info(&oxfw->unit->device, + "Sound card registration failed: %d\n", err); +} + +static int oxfw_probe(struct fw_unit *unit, + const struct ieee1394_device_id *entry) +{ + struct snd_oxfw *oxfw; + + if (entry->vendor_id == VENDOR_LOUD && !detect_loud_models(unit)) + return -ENODEV; + + /* Allocate this independent of sound card instance. */ + oxfw = kzalloc(sizeof(struct snd_oxfw), GFP_KERNEL); + if (oxfw == NULL) + return -ENOMEM; + + oxfw->entry = entry; + oxfw->unit = fw_unit_get(unit); dev_set_drvdata(&unit->device, oxfw); + mutex_init(&oxfw->mutex); + spin_lock_init(&oxfw->lock); + init_waitqueue_head(&oxfw->hwdep_wait); + + /* Allocate and register this sound card later. 
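[Editor's aside] Every converted do_registration() above follows the same error discipline: each init step that succeeds is undone on the error path in reverse order behind goto labels, ending with snd_card_free() and a dev_info() notice. The shape of that idiom, reduced to printf stubs with a deliberately failing final step:

    #include <stdio.h>

    static int step(const char *name, int ok)
    {
        printf("%s: %s\n", name, ok ? "ok" : "failed");
        return ok ? 0 : -1;
    }

    static int do_registration(void)
    {
        if (step("create card", 1) < 0)
            return -1;                      /* nothing to undo yet */
        if (step("init streams", 1) < 0)
            goto err_card;
        if (step("register card", 0) < 0)   /* fails in this run */
            goto err_streams;
        return 0;

    err_streams:
        printf("destroy streams\n");        /* undo in reverse order */
    err_card:
        printf("free card\n");
        return -1;
    }

    int main(void)
    {
        return do_registration() ? 1 : 0;
    }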
*/ + INIT_DEFERRABLE_WORK(&oxfw->dwork, do_registration); + snd_fw_schedule_registration(unit, &oxfw->dwork); + return 0; -error: - snd_card_free(card); - return err; } static void oxfw_bus_reset(struct fw_unit *unit) { struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device); + if (!oxfw->registered) + snd_fw_schedule_registration(unit, &oxfw->dwork); + fcp_bus_reset(oxfw->unit); - mutex_lock(&oxfw->mutex); + if (oxfw->registered) { + mutex_lock(&oxfw->mutex); - snd_oxfw_stream_update_simplex(oxfw, &oxfw->rx_stream); - if (oxfw->has_output) - snd_oxfw_stream_update_simplex(oxfw, &oxfw->tx_stream); + snd_oxfw_stream_update_simplex(oxfw, &oxfw->rx_stream); + if (oxfw->has_output) + snd_oxfw_stream_update_simplex(oxfw, &oxfw->tx_stream); - mutex_unlock(&oxfw->mutex); + mutex_unlock(&oxfw->mutex); - if (oxfw->entry->vendor_id == OUI_STANTON) - snd_oxfw_scs1x_update(oxfw); + if (oxfw->entry->vendor_id == OUI_STANTON) + snd_oxfw_scs1x_update(oxfw); + } } static void oxfw_remove(struct fw_unit *unit) { struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device); - /* No need to wait for releasing card object in this context. */ - snd_card_free_when_closed(oxfw->card); + /* + * Confirm to stop the work for registration before the sound card is + * going to be released. The work is not scheduled again because bus + * reset handler is not called anymore. + */ + cancel_delayed_work_sync(&oxfw->dwork); + + if (oxfw->registered) { + /* No need to wait for releasing card object in this context. */ + snd_card_free_when_closed(oxfw->card); + } else { + /* Don't forget this case. */ + oxfw_free(oxfw); + } } static const struct compat_info griffin_firewave = { diff --git a/sound/firewire/oxfw/oxfw.h b/sound/firewire/oxfw/oxfw.h index 9beecc214767..2047dcb27625 100644 --- a/sound/firewire/oxfw/oxfw.h +++ b/sound/firewire/oxfw/oxfw.h @@ -36,10 +36,12 @@ struct snd_oxfw { struct snd_card *card; struct fw_unit *unit; - const struct device_info *device_info; struct mutex mutex; spinlock_t lock; + bool registered; + struct delayed_work dwork; + bool wrong_dbs; bool has_output; u8 *tx_stream_formats[SND_OXFW_STREAM_FORMAT_ENTRIES]; diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 54babe1c0b16..6800e0c5a38f 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -118,22 +118,40 @@ int snd_hdac_get_display_clk(struct hdac_bus *bus) } EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk); -/* There is a fixed mapping between audio pin node and display port - * on current Intel platforms: +/* There is a fixed mapping between audio pin node and display port. 
+ * on SNB, IVY, HSW, BSW, SKL, BXT, KBL: * Pin Widget 5 - PORT B (port = 1 in i915 driver) * Pin Widget 6 - PORT C (port = 2 in i915 driver) * Pin Widget 7 - PORT D (port = 3 in i915 driver) + * + * on VLV, ILK: + * Pin Widget 4 - PORT B (port = 1 in i915 driver) + * Pin Widget 5 - PORT C (port = 2 in i915 driver) + * Pin Widget 6 - PORT D (port = 3 in i915 driver) */ -static int pin2port(hda_nid_t pin_nid) +static int pin2port(struct hdac_device *codec, hda_nid_t pin_nid) { - if (WARN_ON(pin_nid < 5 || pin_nid > 7)) + int base_nid; + + switch (codec->vendor_id) { + case 0x80860054: /* ILK */ + case 0x80862804: /* ILK */ + case 0x80862882: /* VLV */ + base_nid = 3; + break; + default: + base_nid = 4; + break; + } + + if (WARN_ON(pin_nid <= base_nid || pin_nid > base_nid + 3)) return -1; - return pin_nid - 4; + return pin_nid - base_nid; } /** * snd_hdac_sync_audio_rate - Set N/CTS based on the sample rate - * @bus: HDA core bus + * @codec: HDA codec * @nid: the pin widget NID * @rate: the sample rate to set * @@ -143,14 +161,15 @@ static int pin2port(hda_nid_t pin_nid) * This function sets N/CTS value based on the given sample rate. * Returns zero for success, or a negative error code. */ -int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate) +int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid, int rate) { + struct hdac_bus *bus = codec->bus; struct i915_audio_component *acomp = bus->audio_component; int port; if (!acomp || !acomp->ops || !acomp->ops->sync_audio_rate) return -ENODEV; - port = pin2port(nid); + port = pin2port(codec, nid); if (port < 0) return -EINVAL; return acomp->ops->sync_audio_rate(acomp->dev, port, rate); @@ -159,7 +178,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate); /** * snd_hdac_acomp_get_eld - Get the audio state and ELD via component - * @bus: HDA core bus + * @codec: HDA codec * @nid: the pin widget NID * @audio_enabled: the pointer to store the current audio state * @buffer: the buffer pointer to store ELD bytes @@ -177,16 +196,17 @@ EXPORT_SYMBOL_GPL(snd_hdac_sync_audio_rate); * thus it may be over @max_bytes. If it's over @max_bytes, it implies * that only a part of ELD bytes have been fetched. 
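[Editor's aside] pin2port() above stops hard-coding pin_nid - 4 and derives a base NID from the codec's vendor ID, since ILK/VLV HDMI codecs place their DP pin widgets one NID lower than HSW and later parts. A runnable condensation (WARN_ON() reduced to a plain range check; 0x80862807 below is simply a non-ILK/VLV Intel ID used to exercise the default branch):

    #include <stdio.h>

    static int pin2port(unsigned int vendor_id, int pin_nid)
    {
        int base_nid;

        switch (vendor_id) {
        case 0x80860054:    /* ILK */
        case 0x80862804:    /* ILK */
        case 0x80862882:    /* VLV */
            base_nid = 3;   /* pins 4..6 -> ports 1..3 */
            break;
        default:
            base_nid = 4;   /* pins 5..7 -> ports 1..3 */
            break;
        }

        if (pin_nid <= base_nid || pin_nid > base_nid + 3)
            return -1;
        return pin_nid - base_nid;
    }

    int main(void)
    {
        printf("default pin 5 -> port %d\n", pin2port(0x80862807, 5));
        printf("VLV pin 4 -> port %d\n", pin2port(0x80862882, 4));
        return 0;
    }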
*/ -int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid, +int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, bool *audio_enabled, char *buffer, int max_bytes) { + struct hdac_bus *bus = codec->bus; struct i915_audio_component *acomp = bus->audio_component; int port; if (!acomp || !acomp->ops || !acomp->ops->get_eld) return -ENODEV; - port = pin2port(nid); + port = pin2port(codec, nid); if (port < 0) return -EINVAL; return acomp->ops->get_eld(acomp->dev, port, audio_enabled, @@ -298,6 +318,9 @@ int snd_hdac_i915_init(struct hdac_bus *bus) struct i915_audio_component *acomp; int ret; + if (WARN_ON(hdac_acomp)) + return -EBUSY; + if (!i915_gfx_present()) return -ENODEV; @@ -331,6 +354,7 @@ out_master_del: out_err: kfree(acomp); bus->audio_component = NULL; + hdac_acomp = NULL; dev_info(dev, "failed to add i915 component master (%d)\n", ret); return ret; @@ -364,6 +388,7 @@ int snd_hdac_i915_exit(struct hdac_bus *bus) kfree(acomp); bus->audio_component = NULL; + hdac_acomp = NULL; return 0; } diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c index d7ec86263828..c6c75e7e0981 100644 --- a/sound/hda/hdmi_chmap.c +++ b/sound/hda/hdmi_chmap.c @@ -625,13 +625,30 @@ static void hdmi_cea_alloc_to_tlv_chmap(struct hdac_chmap *hchmap, WARN_ON(count != channels); } +static int spk_mask_from_spk_alloc(int spk_alloc) +{ + int i; + int spk_mask = eld_speaker_allocation_bits[0]; + + for (i = 0; i < ARRAY_SIZE(eld_speaker_allocation_bits); i++) { + if (spk_alloc & (1 << i)) + spk_mask |= eld_speaker_allocation_bits[i]; + } + + return spk_mask; +} + static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct snd_pcm_chmap *info = snd_kcontrol_chip(kcontrol); struct hdac_chmap *chmap = info->private_data; + int pcm_idx = kcontrol->private_value; unsigned int __user *dst; int chs, count = 0; + unsigned long max_chs; + int type; + int spk_alloc, spk_mask; if (size < 8) return -ENOMEM; @@ -639,40 +656,59 @@ static int hdmi_chmap_ctl_tlv(struct snd_kcontrol *kcontrol, int op_flag, return -EFAULT; size -= 8; dst = tlv + 2; - for (chs = 2; chs <= chmap->channels_max; chs++) { + + spk_alloc = chmap->ops.get_spk_alloc(chmap->hdac, pcm_idx); + spk_mask = spk_mask_from_spk_alloc(spk_alloc); + + max_chs = hweight_long(spk_mask); + + for (chs = 2; chs <= max_chs; chs++) { int i; struct hdac_cea_channel_speaker_allocation *cap; cap = channel_allocations; for (i = 0; i < ARRAY_SIZE(channel_allocations); i++, cap++) { int chs_bytes = chs * 4; - int type = chmap->ops.chmap_cea_alloc_validate_get_type( - chmap, cap, chs); unsigned int tlv_chmap[8]; - if (type < 0) + if (cap->channels != chs) + continue; + + if (!(cap->spk_mask == (spk_mask & cap->spk_mask))) continue; + + type = chmap->ops.chmap_cea_alloc_validate_get_type( + chmap, cap, chs); + if (type < 0) + return -ENODEV; if (size < 8) return -ENOMEM; + if (put_user(type, dst) || put_user(chs_bytes, dst + 1)) return -EFAULT; + dst += 2; size -= 8; count += 8; + if (size < chs_bytes) return -ENOMEM; + size -= chs_bytes; count += chs_bytes; chmap->ops.cea_alloc_to_tlv_chmap(chmap, cap, tlv_chmap, chs); + if (copy_to_user(dst, tlv_chmap, chs_bytes)) return -EFAULT; dst += chs; } } + if (put_user(count, tlv + 1)) return -EFAULT; + return 0; } diff --git a/sound/pci/ctxfi/cttimer.c b/sound/pci/ctxfi/cttimer.c index a5d460453d7b..8f945341720b 100644 --- a/sound/pci/ctxfi/cttimer.c +++ b/sound/pci/ctxfi/cttimer.c @@ -49,7 +49,7 @@ struct ct_timer { spinlock_t lock; /* 
global timer lock (for xfitimer) */ spinlock_t list_lock; /* lock for instance list */ struct ct_atc *atc; - struct ct_timer_ops *ops; + const struct ct_timer_ops *ops; struct list_head instance_head; struct list_head running_head; unsigned int wc; /* current wallclock */ @@ -128,7 +128,7 @@ static void ct_systimer_prepare(struct ct_timer_instance *ti) #define ct_systimer_free ct_systimer_prepare -static struct ct_timer_ops ct_systimer_ops = { +static const struct ct_timer_ops ct_systimer_ops = { .init = ct_systimer_init, .free_instance = ct_systimer_free, .prepare = ct_systimer_prepare, @@ -322,7 +322,7 @@ static void ct_xfitimer_free_global(struct ct_timer *atimer) ct_xfitimer_irq_stop(atimer); } -static struct ct_timer_ops ct_xfitimer_ops = { +static const struct ct_timer_ops ct_xfitimer_ops = { .prepare = ct_xfitimer_prepare, .start = ct_xfitimer_start, .stop = ct_xfitimer_stop, diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 5af372d01834..9452384d2000 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -114,6 +114,9 @@ struct hdmi_ops { int (*setup_stream)(struct hda_codec *codec, hda_nid_t cvt_nid, hda_nid_t pin_nid, u32 stream_tag, int format); + void (*pin_cvt_fixup)(struct hda_codec *codec, + struct hdmi_spec_per_pin *per_pin, + hda_nid_t cvt_nid); }; struct hdmi_pcm { @@ -684,7 +687,8 @@ static void hdmi_setup_audio_infoframe(struct hda_codec *codec, if (!channels) return; - if (is_haswell_plus(codec)) + /* some HW (e.g. HSW+) needs reprogramming the amp at each time */ + if (get_wcaps(codec, pin_nid) & AC_WCAP_OUT_AMP) snd_hda_codec_write(codec, pin_nid, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); @@ -864,9 +868,6 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, struct hdmi_spec *spec = codec->spec; int err; - if (is_haswell_plus(codec)) - haswell_verify_D0(codec, cvt_nid, pin_nid); - err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format)); if (err) { @@ -884,7 +885,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, * of the pin. */ static int hdmi_choose_cvt(struct hda_codec *codec, - int pin_idx, int *cvt_id, int *mux_id) + int pin_idx, int *cvt_id) { struct hdmi_spec *spec = codec->spec; struct hdmi_spec_per_pin *per_pin; @@ -925,8 +926,6 @@ static int hdmi_choose_cvt(struct hda_codec *codec, if (cvt_id) *cvt_id = cvt_idx; - if (mux_id) - *mux_id = mux_idx; return 0; } @@ -1019,9 +1018,6 @@ static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec, int mux_idx; struct hdmi_spec *spec = codec->spec; - if (!is_haswell_plus(codec) && !is_valleyview_plus(codec)) - return; - /* On Intel platform, the mapping of converter nid to * mux index of the pins are always the same. * The pin nid may be 0, this means all pins will not @@ -1032,6 +1028,17 @@ static void intel_not_share_assigned_cvt_nid(struct hda_codec *codec, intel_not_share_assigned_cvt(codec, pin_nid, mux_idx); } +/* skeleton caller of pin_cvt_fixup ops */ +static void pin_cvt_fixup(struct hda_codec *codec, + struct hdmi_spec_per_pin *per_pin, + hda_nid_t cvt_nid) +{ + struct hdmi_spec *spec = codec->spec; + + if (spec->ops.pin_cvt_fixup) + spec->ops.pin_cvt_fixup(codec, per_pin, cvt_nid); +} + /* called in hdmi_pcm_open when no pin is assigned to the PCM * in dyn_pcm_assign mode. 
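[Editor's aside] In the hdmi_chmap.c hunk above, the chmap TLV callback now sizes its list from the monitor's ELD: the CEA speaker_allocation byte is expanded into a speaker-position mask and the channel ceiling becomes hweight_long() of that mask. A sketch with a made-up two-entry bit table — the real eld_speaker_allocation_bits[] covers every CEA position, and FL/FR (entry 0) is always permitted, matching the kernel initializer:

    #include <stdio.h>

    /* illustrative stand-ins: the real table lives in hdmi_chmap.c */
    static const int eld_speaker_allocation_bits[] = {
        (1 << 0) | (1 << 1),    /* FL | FR */
        (1 << 2),               /* LFE */
    };

    static int spk_mask_from_spk_alloc(int spk_alloc)
    {
        int i, spk_mask = eld_speaker_allocation_bits[0]; /* FL/FR always */

        for (i = 0; i < 2; i++)     /* 2 == size of the toy table */
            if (spk_alloc & (1 << i))
                spk_mask |= eld_speaker_allocation_bits[i];
        return spk_mask;
    }

    int main(void)
    {
        int mask = spk_mask_from_spk_alloc(0x3);    /* FL/FR + LFE */

        /* __builtin_popcount() plays the role of hweight_long() */
        printf("mask=%#x max_chs=%d\n", mask, __builtin_popcount(mask));
        return 0;
    }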
*/ @@ -1049,7 +1056,7 @@ static int hdmi_pcm_open_no_pin(struct hda_pcm_stream *hinfo, if (pcm_idx < 0) return -EINVAL; - err = hdmi_choose_cvt(codec, -1, &cvt_idx, NULL); + err = hdmi_choose_cvt(codec, -1, &cvt_idx); if (err) return err; @@ -1057,7 +1064,7 @@ static int hdmi_pcm_open_no_pin(struct hda_pcm_stream *hinfo, per_cvt->assigned = 1; hinfo->nid = per_cvt->cvt_nid; - intel_not_share_assigned_cvt_nid(codec, 0, per_cvt->cvt_nid); + pin_cvt_fixup(codec, NULL, per_cvt->cvt_nid); set_bit(pcm_idx, &spec->pcm_in_use); /* todo: setup spdif ctls assign */ @@ -1089,7 +1096,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, { struct hdmi_spec *spec = codec->spec; struct snd_pcm_runtime *runtime = substream->runtime; - int pin_idx, cvt_idx, pcm_idx, mux_idx = 0; + int pin_idx, cvt_idx, pcm_idx; struct hdmi_spec_per_pin *per_pin; struct hdmi_eld *eld; struct hdmi_spec_per_cvt *per_cvt = NULL; @@ -1118,7 +1125,7 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, } } - err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx, &mux_idx); + err = hdmi_choose_cvt(codec, pin_idx, &cvt_idx); if (err < 0) { mutex_unlock(&spec->pcm_lock); return err; @@ -1135,11 +1142,10 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0, AC_VERB_SET_CONNECT_SEL, - mux_idx); + per_pin->mux_idx); /* configure unused pins to choose other converters */ - if (is_haswell_plus(codec) || is_valleyview_plus(codec)) - intel_not_share_assigned_cvt(codec, per_pin->pin_nid, mux_idx); + pin_cvt_fixup(codec, per_pin, 0); snd_hda_spdif_ctls_assign(codec, pcm_idx, per_cvt->cvt_nid); @@ -1372,12 +1378,7 @@ static void update_eld(struct hda_codec *codec, * and this can make HW reset converter selection on a pin. */ if (eld->eld_valid && !old_eld_valid && per_pin->setup) { - if (is_haswell_plus(codec) || is_valleyview_plus(codec)) { - intel_verify_pin_cvt_connect(codec, per_pin); - intel_not_share_assigned_cvt(codec, per_pin->pin_nid, - per_pin->mux_idx); - } - + pin_cvt_fixup(codec, per_pin, 0); hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm); } @@ -1485,7 +1486,7 @@ static void sync_eld_via_acomp(struct hda_codec *codec, mutex_lock(&per_pin->lock); eld->monitor_present = false; - size = snd_hdac_acomp_get_eld(&codec->bus->core, per_pin->pin_nid, + size = snd_hdac_acomp_get_eld(&codec->core, per_pin->pin_nid, &eld->monitor_present, eld->eld_buffer, ELD_MAX_SIZE); if (size > 0) { @@ -1712,7 +1713,7 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, * skip pin setup and return 0 to make audio playback * be ongoing */ - intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid); + pin_cvt_fixup(codec, NULL, cvt_nid); snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format); mutex_unlock(&spec->pcm_lock); @@ -1725,23 +1726,21 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, } per_pin = get_pin(spec, pin_idx); pin_nid = per_pin->pin_nid; - if (is_haswell_plus(codec) || is_valleyview_plus(codec)) { - /* Verify pin:cvt selections to avoid silent audio after S3. - * After S3, the audio driver restores pin:cvt selections - * but this can happen before gfx is ready and such selection - * is overlooked by HW. Thus multiple pins can share a same - * default convertor and mute control will affect each other, - * which can cause a resumed audio playback become silent - * after S3. 
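[Editor's aside] The theme running through these patch_hdmi.c hunks is replacing scattered is_haswell_plus()/is_valleyview_plus() tests with one optional ops hook: generic code calls a skeleton wrapper that no-ops when the callback is unset, and the Intel parsers install i915_pin_cvt_fixup later in the series. The dispatch pattern in isolation (names mirror the diff, bodies are stubs):

    #include <stdio.h>

    struct per_pin;     /* opaque here */

    struct hdmi_ops {
        /* optional: only the Intel parsers install it */
        void (*pin_cvt_fixup)(struct per_pin *per_pin, int cvt_nid);
    };

    /* skeleton caller: vendor-neutral code never tests the vendor */
    static void pin_cvt_fixup(struct hdmi_ops *ops,
                              struct per_pin *per_pin, int cvt_nid)
    {
        if (ops->pin_cvt_fixup)
            ops->pin_cvt_fixup(per_pin, cvt_nid);
    }

    static void i915_pin_cvt_fixup(struct per_pin *per_pin, int cvt_nid)
    {
        printf("re-verify pin:cvt connection (cvt %d)\n", cvt_nid);
    }

    int main(void)
    {
        struct hdmi_ops generic = { 0 };
        struct hdmi_ops intel = { .pin_cvt_fixup = i915_pin_cvt_fixup };

        pin_cvt_fixup(&generic, NULL, 2);   /* silently skipped */
        pin_cvt_fixup(&intel, NULL, 2);     /* runs the Intel quirk */
        return 0;
    }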
- */ - intel_verify_pin_cvt_connect(codec, per_pin); - intel_not_share_assigned_cvt(codec, pin_nid, per_pin->mux_idx); - } + + /* Verify pin:cvt selections to avoid silent audio after S3. + * After S3, the audio driver restores pin:cvt selections + * but this can happen before gfx is ready and such selection + * is overlooked by HW. Thus multiple pins can share a same + * default convertor and mute control will affect each other, + * which can cause a resumed audio playback become silent + * after S3. + */ + pin_cvt_fixup(codec, per_pin, 0); /* Call sync_audio_rate to set the N/CTS/M manually if necessary */ /* Todo: add DP1.2 MST audio support later */ if (codec_has_acomp(codec)) - snd_hdac_sync_audio_rate(&codec->bus->core, pin_nid, runtime->rate); + snd_hdac_sync_audio_rate(&codec->core, pin_nid, runtime->rate); non_pcm = check_non_pcm_per_cvt(codec, cvt_nid); mutex_lock(&per_pin->lock); @@ -1838,6 +1837,18 @@ static const struct hda_pcm_ops generic_ops = { .cleanup = generic_hdmi_playback_pcm_cleanup, }; +static int hdmi_get_spk_alloc(struct hdac_device *hdac, int pcm_idx) +{ + struct hda_codec *codec = container_of(hdac, struct hda_codec, core); + struct hdmi_spec *spec = codec->spec; + struct hdmi_spec_per_pin *per_pin = pcm_idx_to_pin(spec, pcm_idx); + + if (!per_pin) + return 0; + + return per_pin->sink_eld.info.spk_alloc; +} + static void hdmi_get_chmap(struct hdac_device *hdac, int pcm_idx, unsigned char *chmap) { @@ -2074,6 +2085,20 @@ static void hdmi_array_free(struct hdmi_spec *spec) snd_array_free(&spec->cvts); } +static void generic_spec_free(struct hda_codec *codec) +{ + struct hdmi_spec *spec = codec->spec; + + if (spec) { + if (spec->i915_bound) + snd_hdac_i915_exit(&codec->bus->core); + hdmi_array_free(spec); + kfree(spec); + codec->spec = NULL; + } + codec->dp_mst = false; +} + static void generic_hdmi_free(struct hda_codec *codec) { struct hdmi_spec *spec = codec->spec; @@ -2098,10 +2123,7 @@ static void generic_hdmi_free(struct hda_codec *codec) spec->pcm_rec[pcm_idx].jack = NULL; } - if (spec->i915_bound) - snd_hdac_i915_exit(&codec->bus->core); - hdmi_array_free(spec); - kfree(spec); + generic_spec_free(codec); } #ifdef CONFIG_PM @@ -2139,6 +2161,55 @@ static const struct hdmi_ops generic_standard_hdmi_ops = { .setup_stream = hdmi_setup_stream, }; +/* allocate codec->spec and assign/initialize generic parser ops */ +static int alloc_generic_hdmi(struct hda_codec *codec) +{ + struct hdmi_spec *spec; + + spec = kzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->ops = generic_standard_hdmi_ops; + mutex_init(&spec->pcm_lock); + snd_hdac_register_chmap_ops(&codec->core, &spec->chmap); + + spec->chmap.ops.get_chmap = hdmi_get_chmap; + spec->chmap.ops.set_chmap = hdmi_set_chmap; + spec->chmap.ops.is_pcm_attached = is_hdmi_pcm_attached; + spec->chmap.ops.get_spk_alloc = hdmi_get_spk_alloc, + + codec->spec = spec; + hdmi_array_init(spec, 4); + + codec->patch_ops = generic_hdmi_patch_ops; + + return 0; +} + +/* generic HDMI parser */ +static int patch_generic_hdmi(struct hda_codec *codec) +{ + int err; + + err = alloc_generic_hdmi(codec); + if (err < 0) + return err; + + err = hdmi_parse_codec(codec); + if (err < 0) { + generic_spec_free(codec); + return err; + } + + generic_hdmi_init_per_pins(codec); + return 0; +} + +/* + * Intel codec parsers and helpers + */ + static void intel_haswell_fixup_connect_list(struct hda_codec *codec, hda_nid_t nid) { @@ -2216,12 +2287,23 @@ static void haswell_set_power_state(struct hda_codec *codec, hda_nid_t fg, 
static void intel_pin_eld_notify(void *audio_ptr, int port) { struct hda_codec *codec = audio_ptr; - int pin_nid = port + 0x04; + int pin_nid; /* we assume only from port-B to port-D */ if (port < 1 || port > 3) return; + switch (codec->core.vendor_id) { + case 0x80860054: /* ILK */ + case 0x80862804: /* ILK */ + case 0x80862882: /* VLV */ + pin_nid = port + 0x03; + break; + default: + pin_nid = port + 0x04; + break; + } + /* skip notification during system suspend (but not in runtime PM); * the state will be updated at resume */ @@ -2234,93 +2316,159 @@ static void intel_pin_eld_notify(void *audio_ptr, int port) check_presence_and_report(codec, pin_nid); } -static int patch_generic_hdmi(struct hda_codec *codec) +/* register i915 component pin_eld_notify callback */ +static void register_i915_notifier(struct hda_codec *codec) { - struct hdmi_spec *spec; + struct hdmi_spec *spec = codec->spec; - spec = kzalloc(sizeof(*spec), GFP_KERNEL); - if (spec == NULL) - return -ENOMEM; + spec->use_acomp_notifier = true; + spec->i915_audio_ops.audio_ptr = codec; + /* intel_audio_codec_enable() or intel_audio_codec_disable() + * will call pin_eld_notify using the audio_ptr pointer; + * we need to make sure audio_ptr is fully set up first + */ + wmb(); + spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify; + snd_hdac_i915_register_notifier(&spec->i915_audio_ops); +} - spec->ops = generic_standard_hdmi_ops; - mutex_init(&spec->pcm_lock); - snd_hdac_register_chmap_ops(&codec->core, &spec->chmap); +/* setup_stream ops override for HSW+ */ +static int i915_hsw_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, + hda_nid_t pin_nid, u32 stream_tag, int format) +{ + haswell_verify_D0(codec, cvt_nid, pin_nid); + return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format); +} - spec->chmap.ops.get_chmap = hdmi_get_chmap; - spec->chmap.ops.set_chmap = hdmi_set_chmap; - spec->chmap.ops.is_pcm_attached = is_hdmi_pcm_attached; +/* pin_cvt_fixup ops override for HSW+ and VLV+ */ +static void i915_pin_cvt_fixup(struct hda_codec *codec, + struct hdmi_spec_per_pin *per_pin, + hda_nid_t cvt_nid) +{ + if (per_pin) { + intel_verify_pin_cvt_connect(codec, per_pin); + intel_not_share_assigned_cvt(codec, per_pin->pin_nid, + per_pin->mux_idx); + } else { + intel_not_share_assigned_cvt_nid(codec, 0, cvt_nid); + } +} - codec->spec = spec; - hdmi_array_init(spec, 4); +/* Intel Haswell and onwards; audio component with eld notifier */ +static int patch_i915_hsw_hdmi(struct hda_codec *codec) +{ + struct hdmi_spec *spec; + int err; -#ifdef CONFIG_SND_HDA_I915 - /* Try to bind with i915 for Intel HSW+ codecs (if not done yet) */ - if ((codec->core.vendor_id >> 16) == 0x8086 && - is_haswell_plus(codec)) { -#if 0 - /* on-demand binding leads to an unbalanced refcount when - * both i915 and hda drivers are probed concurrently; - * disabled temporarily for now - */ - if (!codec->bus->core.audio_component) - if (!snd_hdac_i915_init(&codec->bus->core)) - spec->i915_bound = true; -#endif - /* use i915 audio component notifier for hotplug */ - if (codec->bus->core.audio_component) - spec->use_acomp_notifier = true; + /* HSW+ requires i915 binding */ + if (!codec->bus->core.audio_component) { + codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n"); + return -ENODEV; } -#endif - if (is_haswell_plus(codec)) { - intel_haswell_enable_all_pins(codec, true); - intel_haswell_fixup_enable_dp12(codec); - } + err = alloc_generic_hdmi(codec); + if (err < 0) + return err; + spec = codec->spec; - /* For Valleyview/Cherryview, only
the display codec is in the display - * power well and can use link_power ops to request/release the power. - * For Haswell/Broadwell, the controller is also in the power well and + intel_haswell_enable_all_pins(codec, true); + intel_haswell_fixup_enable_dp12(codec); + + /* For Haswell/Broadwell, the controller is also in the power well and * can cover the codec power request, and so need not set this flag. - * For previous platforms, there is no such power well feature. */ - if (is_valleyview_plus(codec) || is_skylake(codec) || - is_broxton(codec)) + if (!is_haswell(codec) && !is_broadwell(codec)) codec->core.link_power_control = 1; - if (hdmi_parse_codec(codec) < 0) { - if (spec->i915_bound) - snd_hdac_i915_exit(&codec->bus->core); - codec->spec = NULL; - kfree(spec); - return -EINVAL; + codec->patch_ops.set_power_state = haswell_set_power_state; + codec->dp_mst = true; + codec->depop_delay = 0; + codec->auto_runtime_pm = 1; + + spec->ops.setup_stream = i915_hsw_setup_stream; + spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup; + + err = hdmi_parse_codec(codec); + if (err < 0) { + generic_spec_free(codec); + return err; } - codec->patch_ops = generic_hdmi_patch_ops; - if (is_haswell_plus(codec)) { - codec->patch_ops.set_power_state = haswell_set_power_state; - codec->dp_mst = true; + + generic_hdmi_init_per_pins(codec); + register_i915_notifier(codec); + return 0; +} + +/* Intel Baytrail and Braswell; with eld notifier */ +static int patch_i915_byt_hdmi(struct hda_codec *codec) +{ + struct hdmi_spec *spec; + int err; + + /* requires i915 binding */ + if (!codec->bus->core.audio_component) { + codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n"); + return -ENODEV; } - /* Enable runtime pm for HDMI audio codec of HSW/BDW/SKL/BYT/BSW */ - if (is_haswell_plus(codec) || is_valleyview_plus(codec)) - codec->auto_runtime_pm = 1; + err = alloc_generic_hdmi(codec); + if (err < 0) + return err; + spec = codec->spec; - generic_hdmi_init_per_pins(codec); + /* For Valleyview/Cherryview, only the display codec is in the display + * power well and can use link_power ops to request/release the power. 
+ */ + codec->core.link_power_control = 1; + codec->depop_delay = 0; + codec->auto_runtime_pm = 1; - if (codec_has_acomp(codec)) { - codec->depop_delay = 0; - spec->i915_audio_ops.audio_ptr = codec; - /* intel_audio_codec_enable() or intel_audio_codec_disable() - * will call pin_eld_notify with using audio_ptr pointer - * We need make sure audio_ptr is really setup - */ - wmb(); - spec->i915_audio_ops.pin_eld_notify = intel_pin_eld_notify; - snd_hdac_i915_register_notifier(&spec->i915_audio_ops); + spec->ops.pin_cvt_fixup = i915_pin_cvt_fixup; + + err = hdmi_parse_codec(codec); + if (err < 0) { + generic_spec_free(codec); + return err; } - WARN_ON(spec->dyn_pcm_assign && !codec_has_acomp(codec)); + generic_hdmi_init_per_pins(codec); + register_i915_notifier(codec); + return 0; +} + +/* Intel IronLake, SandyBridge and IvyBridge; with eld notifier */ +static int patch_i915_cpt_hdmi(struct hda_codec *codec) +{ + struct hdmi_spec *spec; + int err; + + /* no i915 component should have been bound before this */ + if (WARN_ON(codec->bus->core.audio_component)) + return -EBUSY; + + err = alloc_generic_hdmi(codec); + if (err < 0) + return err; + spec = codec->spec; + + /* Try to bind with i915 now */ + err = snd_hdac_i915_init(&codec->bus->core); + if (err < 0) + goto error; + spec->i915_bound = true; + + err = hdmi_parse_codec(codec); + if (err < 0) + goto error; + + generic_hdmi_init_per_pins(codec); + register_i915_notifier(codec); return 0; + + error: + generic_spec_free(codec); + return err; } /* @@ -3491,21 +3639,21 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP", patch_via_hdmi), HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP", patch_generic_hdmi), HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI", patch_i915_cpt_hdmi), HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862802, "Cantiga HDMI", patch_generic_hdmi), HDA_CODEC_ENTRY(0x80862803, "Eaglelake HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862804, "IbexPeak HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862805, "CougarPoint HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862806, "PantherPoint HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x80862804, "IbexPeak HDMI", patch_i915_cpt_hdmi), +HDA_CODEC_ENTRY(0x80862805, "CougarPoint HDMI", patch_i915_cpt_hdmi), +HDA_CODEC_ENTRY(0x80862806, "PantherPoint HDMI", patch_i915_cpt_hdmi), +HDA_CODEC_ENTRY(0x80862807, "Haswell HDMI", patch_i915_hsw_hdmi), +HDA_CODEC_ENTRY(0x80862808, "Broadwell HDMI", patch_i915_hsw_hdmi), +HDA_CODEC_ENTRY(0x80862809, "Skylake HDMI", patch_i915_hsw_hdmi), +HDA_CODEC_ENTRY(0x8086280a, "Broxton HDMI", patch_i915_hsw_hdmi), +HDA_CODEC_ENTRY(0x8086280b, "Kabylake HDMI", patch_i915_hsw_hdmi), HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_generic_hdmi), -HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_generic_hdmi), +HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi), +HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", 
patch_i915_byt_hdmi), HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI", patch_generic_hdmi), /* special ID for generic HDMI */ HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI", patch_generic_hdmi), diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c index 8151318a69a2..9720a30dbfff 100644 --- a/sound/pci/intel8x0.c +++ b/sound/pci/intel8x0.c @@ -42,12 +42,6 @@ #include <asm/pgtable.h> #include <asm/cacheflush.h> -#ifdef CONFIG_KVM_GUEST -#include <linux/kvm_para.h> -#else -#define kvm_para_available() (0) -#endif - MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Intel 82801AA,82901AB,i810,i820,i830,i840,i845,MX440; SiS 7012; Ali 5455"); MODULE_LICENSE("GPL"); @@ -2972,25 +2966,17 @@ static int snd_intel8x0_inside_vm(struct pci_dev *pci) goto fini; } - /* detect KVM and Parallels virtual environments */ - result = kvm_para_available(); -#ifdef X86_FEATURE_HYPERVISOR - result = result || boot_cpu_has(X86_FEATURE_HYPERVISOR); -#endif - if (!result) - goto fini; - /* check for known (emulated) devices */ + result = 0; if (pci->subsystem_vendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET && pci->subsystem_device == PCI_SUBDEVICE_ID_QEMU) { /* KVM emulated sound, PCI SSID: 1af4:1100 */ msg = "enable KVM"; + result = 1; } else if (pci->subsystem_vendor == 0x1ab8) { /* Parallels VM emulated sound, PCI SSID: 1ab8:xxxx */ msg = "enable Parallels VM"; - } else { - msg = "disable (unknown or VT-d) VM"; - result = 0; + result = 1; } fini: diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig index d14bf411515b..a452ad7cec40 100644 --- a/sound/usb/Kconfig +++ b/sound/usb/Kconfig @@ -15,7 +15,6 @@ config SND_USB_AUDIO select SND_RAWMIDI select SND_PCM select BITREVERSE - select SND_USB_AUDIO_USE_MEDIA_CONTROLLER if MEDIA_CONTROLLER && (MEDIA_SUPPORT=y || MEDIA_SUPPORT=SND_USB_AUDIO) help Say Y here to include support for USB audio and USB MIDI devices. @@ -23,9 +22,6 @@ config SND_USB_AUDIO To compile this driver as a module, choose M here: the module will be called snd-usb-audio. -config SND_USB_AUDIO_USE_MEDIA_CONTROLLER - bool - config SND_USB_UA101 tristate "Edirol UA-101/UA-1000 driver" select SND_PCM diff --git a/sound/usb/Makefile b/sound/usb/Makefile index 8dca3c407f5a..2d2d122b069f 100644 --- a/sound/usb/Makefile +++ b/sound/usb/Makefile @@ -15,8 +15,6 @@ snd-usb-audio-objs := card.o \ quirks.o \ stream.o -snd-usb-audio-$(CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER) += media.o - snd-usbmidi-lib-objs := midi.o # Toplevel Module Dependency diff --git a/sound/usb/card.c b/sound/usb/card.c index 63244bbba8c7..3fc63583a537 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -66,7 +66,6 @@ #include "format.h" #include "power.h" #include "stream.h" -#include "media.h" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("USB Audio"); @@ -612,11 +611,6 @@ static int usb_audio_probe(struct usb_interface *intf, if (err < 0) goto __error; - if (quirk->media_device) { - /* don't want to fail when media_snd_device_create() fails */ - media_snd_device_create(chip, intf); - } - usb_chip[chip->index] = chip; chip->num_interfaces++; usb_set_intfdata(intf, chip); @@ -673,14 +667,6 @@ static void usb_audio_disconnect(struct usb_interface *intf) list_for_each(p, &chip->midi_list) { snd_usbmidi_disconnect(p); } - /* - * Nice to check quirk && quirk->media_device - * need some special handlings. 
Doesn't look like - * we have access to quirk here - * Acceses mixer_list - */ - media_snd_device_delete(chip); - /* release mixer resources */ list_for_each_entry(mixer, &chip->mixer_list, list) { snd_usb_mixer_disconnect(mixer); diff --git a/sound/usb/card.h b/sound/usb/card.h index 34a0898e2238..71778ca4b26a 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -105,8 +105,6 @@ struct snd_usb_endpoint { struct list_head list; }; -struct media_ctl; - struct snd_usb_substream { struct snd_usb_stream *stream; struct usb_device *dev; @@ -158,7 +156,6 @@ struct snd_usb_substream { } dsd_dop; bool trigger_tstamp_pending_update; /* trigger timestamp being updated from initial estimate */ - struct media_ctl *media_ctl; }; struct snd_usb_stream { diff --git a/sound/usb/media.c b/sound/usb/media.c deleted file mode 100644 index 93a50d01490c..000000000000 --- a/sound/usb/media.c +++ /dev/null @@ -1,318 +0,0 @@ -/* - * media.c - Media Controller specific ALSA driver code - * - * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com> - * Copyright (c) 2016 Samsung Electronics Co., Ltd. - * - * This file is released under the GPLv2. - */ - -/* - * This file adds Media Controller support to ALSA driver - * to use the Media Controller API to share tuner with DVB - * and V4L2 drivers that control media device. Media device - * is created based on existing quirks framework. Using this - * approach, the media controller API usage can be added for - * a specific device. -*/ - -#include <linux/init.h> -#include <linux/list.h> -#include <linux/mutex.h> -#include <linux/slab.h> -#include <linux/usb.h> - -#include <sound/pcm.h> -#include <sound/core.h> - -#include "usbaudio.h" -#include "card.h" -#include "mixer.h" -#include "media.h" - -static int media_snd_enable_source(struct media_ctl *mctl) -{ - if (mctl && mctl->media_dev->enable_source) - return mctl->media_dev->enable_source(&mctl->media_entity, - &mctl->media_pipe); - return 0; -} - -static void media_snd_disable_source(struct media_ctl *mctl) -{ - if (mctl && mctl->media_dev->disable_source) - mctl->media_dev->disable_source(&mctl->media_entity); -} - -int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm, - int stream) -{ - struct media_device *mdev; - struct media_ctl *mctl; - struct device *pcm_dev = &pcm->streams[stream].dev; - u32 intf_type; - int ret = 0; - u16 mixer_pad; - struct media_entity *entity; - - mdev = subs->stream->chip->media_dev; - if (!mdev) - return -ENODEV; - - if (subs->media_ctl) - return 0; - - /* allocate media_ctl */ - mctl = kzalloc(sizeof(*mctl), GFP_KERNEL); - if (!mctl) - return -ENOMEM; - - mctl->media_dev = mdev; - if (stream == SNDRV_PCM_STREAM_PLAYBACK) { - intf_type = MEDIA_INTF_T_ALSA_PCM_PLAYBACK; - mctl->media_entity.function = MEDIA_ENT_F_AUDIO_PLAYBACK; - mctl->media_pad.flags = MEDIA_PAD_FL_SOURCE; - mixer_pad = 1; - } else { - intf_type = MEDIA_INTF_T_ALSA_PCM_CAPTURE; - mctl->media_entity.function = MEDIA_ENT_F_AUDIO_CAPTURE; - mctl->media_pad.flags = MEDIA_PAD_FL_SINK; - mixer_pad = 2; - } - mctl->media_entity.name = pcm->name; - media_entity_pads_init(&mctl->media_entity, 1, &mctl->media_pad); - ret = media_device_register_entity(mctl->media_dev, - &mctl->media_entity); - if (ret) - goto free_mctl; - - mctl->intf_devnode = media_devnode_create(mdev, intf_type, 0, - MAJOR(pcm_dev->devt), - MINOR(pcm_dev->devt)); - if (!mctl->intf_devnode) { - ret = -ENOMEM; - goto unregister_entity; - } - mctl->intf_link = media_create_intf_link(&mctl->media_entity, - &mctl->intf_devnode->intf, 
- MEDIA_LNK_FL_ENABLED); - if (!mctl->intf_link) { - ret = -ENOMEM; - goto devnode_remove; - } - - /* create link between mixer and audio */ - media_device_for_each_entity(entity, mdev) { - switch (entity->function) { - case MEDIA_ENT_F_AUDIO_MIXER: - ret = media_create_pad_link(entity, mixer_pad, - &mctl->media_entity, 0, - MEDIA_LNK_FL_ENABLED); - if (ret) - goto remove_intf_link; - break; - } - } - - subs->media_ctl = mctl; - return 0; - -remove_intf_link: - media_remove_intf_link(mctl->intf_link); -devnode_remove: - media_devnode_remove(mctl->intf_devnode); -unregister_entity: - media_device_unregister_entity(&mctl->media_entity); -free_mctl: - kfree(mctl); - return ret; -} - -void media_snd_stream_delete(struct snd_usb_substream *subs) -{ - struct media_ctl *mctl = subs->media_ctl; - - if (mctl && mctl->media_dev) { - struct media_device *mdev; - - mdev = subs->stream->chip->media_dev; - if (mdev && media_devnode_is_registered(&mdev->devnode)) { - media_devnode_remove(mctl->intf_devnode); - media_device_unregister_entity(&mctl->media_entity); - media_entity_cleanup(&mctl->media_entity); - } - kfree(mctl); - subs->media_ctl = NULL; - } -} - -int media_snd_start_pipeline(struct snd_usb_substream *subs) -{ - struct media_ctl *mctl = subs->media_ctl; - - if (mctl) - return media_snd_enable_source(mctl); - return 0; -} - -void media_snd_stop_pipeline(struct snd_usb_substream *subs) -{ - struct media_ctl *mctl = subs->media_ctl; - - if (mctl) - media_snd_disable_source(mctl); -} - -int media_snd_mixer_init(struct snd_usb_audio *chip) -{ - struct device *ctl_dev = &chip->card->ctl_dev; - struct media_intf_devnode *ctl_intf; - struct usb_mixer_interface *mixer; - struct media_device *mdev = chip->media_dev; - struct media_mixer_ctl *mctl; - u32 intf_type = MEDIA_INTF_T_ALSA_CONTROL; - int ret; - - if (!mdev) - return -ENODEV; - - ctl_intf = chip->ctl_intf_media_devnode; - if (!ctl_intf) { - ctl_intf = media_devnode_create(mdev, intf_type, 0, - MAJOR(ctl_dev->devt), - MINOR(ctl_dev->devt)); - if (!ctl_intf) - return -ENOMEM; - chip->ctl_intf_media_devnode = ctl_intf; - } - - list_for_each_entry(mixer, &chip->mixer_list, list) { - - if (mixer->media_mixer_ctl) - continue; - - /* allocate media_mixer_ctl */ - mctl = kzalloc(sizeof(*mctl), GFP_KERNEL); - if (!mctl) - return -ENOMEM; - - mctl->media_dev = mdev; - mctl->media_entity.function = MEDIA_ENT_F_AUDIO_MIXER; - mctl->media_entity.name = chip->card->mixername; - mctl->media_pad[0].flags = MEDIA_PAD_FL_SINK; - mctl->media_pad[1].flags = MEDIA_PAD_FL_SOURCE; - mctl->media_pad[2].flags = MEDIA_PAD_FL_SOURCE; - media_entity_pads_init(&mctl->media_entity, MEDIA_MIXER_PAD_MAX, - mctl->media_pad); - ret = media_device_register_entity(mctl->media_dev, - &mctl->media_entity); - if (ret) { - kfree(mctl); - return ret; - } - - mctl->intf_link = media_create_intf_link(&mctl->media_entity, - &ctl_intf->intf, - MEDIA_LNK_FL_ENABLED); - if (!mctl->intf_link) { - media_device_unregister_entity(&mctl->media_entity); - media_entity_cleanup(&mctl->media_entity); - kfree(mctl); - return -ENOMEM; - } - mctl->intf_devnode = ctl_intf; - mixer->media_mixer_ctl = mctl; - } - return 0; -} - -static void media_snd_mixer_delete(struct snd_usb_audio *chip) -{ - struct usb_mixer_interface *mixer; - struct media_device *mdev = chip->media_dev; - - if (!mdev) - return; - - list_for_each_entry(mixer, &chip->mixer_list, list) { - struct media_mixer_ctl *mctl; - - mctl = mixer->media_mixer_ctl; - if (!mixer->media_mixer_ctl) - continue; - - if 
(media_devnode_is_registered(&mdev->devnode)) { - media_device_unregister_entity(&mctl->media_entity); - media_entity_cleanup(&mctl->media_entity); - } - kfree(mctl); - mixer->media_mixer_ctl = NULL; - } - if (media_devnode_is_registered(&mdev->devnode)) - media_devnode_remove(chip->ctl_intf_media_devnode); - chip->ctl_intf_media_devnode = NULL; -} - -int media_snd_device_create(struct snd_usb_audio *chip, - struct usb_interface *iface) -{ - struct media_device *mdev; - struct usb_device *usbdev = interface_to_usbdev(iface); - int ret; - - mdev = media_device_get_devres(&usbdev->dev); - if (!mdev) - return -ENOMEM; - if (!mdev->dev) { - /* register media device */ - mdev->dev = &usbdev->dev; - if (usbdev->product) - strlcpy(mdev->model, usbdev->product, - sizeof(mdev->model)); - if (usbdev->serial) - strlcpy(mdev->serial, usbdev->serial, - sizeof(mdev->serial)); - strcpy(mdev->bus_info, usbdev->devpath); - mdev->hw_revision = le16_to_cpu(usbdev->descriptor.bcdDevice); - media_device_init(mdev); - } - if (!media_devnode_is_registered(&mdev->devnode)) { - ret = media_device_register(mdev); - if (ret) { - dev_err(&usbdev->dev, - "Couldn't register media device. Error: %d\n", - ret); - return ret; - } - } - - /* save media device - avoid lookups */ - chip->media_dev = mdev; - - /* Create media entities for mixer and control dev */ - ret = media_snd_mixer_init(chip); - if (ret) { - dev_err(&usbdev->dev, - "Couldn't create media mixer entities. Error: %d\n", - ret); - - /* clear saved media_dev */ - chip->media_dev = NULL; - - return ret; - } - return 0; -} - -void media_snd_device_delete(struct snd_usb_audio *chip) -{ - struct media_device *mdev = chip->media_dev; - - media_snd_mixer_delete(chip); - - if (mdev) { - if (media_devnode_is_registered(&mdev->devnode)) - media_device_unregister(mdev); - chip->media_dev = NULL; - } -} diff --git a/sound/usb/media.h b/sound/usb/media.h deleted file mode 100644 index 1dcdcdc5f7aa..000000000000 --- a/sound/usb/media.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * media.h - Media Controller specific ALSA driver code - * - * Copyright (c) 2016 Shuah Khan <shuahkh@osg.samsung.com> - * Copyright (c) 2016 Samsung Electronics Co., Ltd. - * - * This file is released under the GPLv2. - */ - -/* - * This file adds Media Controller support to ALSA driver - * to use the Media Controller API to share tuner with DVB - * and V4L2 drivers that control media device. Media device - * is created based on existing quirks framework. Using this - * approach, the media controller API usage can be added for - * a specific device. -*/ -#ifndef __MEDIA_H - -#ifdef CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER - -#include <media/media-device.h> -#include <media/media-entity.h> -#include <sound/asound.h> - -struct media_ctl { - struct media_device *media_dev; - struct media_entity media_entity; - struct media_intf_devnode *intf_devnode; - struct media_link *intf_link; - struct media_pad media_pad; - struct media_pipeline media_pipe; -}; - -/* - * One source pad each for SNDRV_PCM_STREAM_CAPTURE and - * SNDRV_PCM_STREAM_PLAYBACK. 
One for sink pad to link - * to AUDIO Source -*/ -#define MEDIA_MIXER_PAD_MAX (SNDRV_PCM_STREAM_LAST + 2) - -struct media_mixer_ctl { - struct media_device *media_dev; - struct media_entity media_entity; - struct media_intf_devnode *intf_devnode; - struct media_link *intf_link; - struct media_pad media_pad[MEDIA_MIXER_PAD_MAX]; - struct media_pipeline media_pipe; -}; - -int media_snd_device_create(struct snd_usb_audio *chip, - struct usb_interface *iface); -void media_snd_device_delete(struct snd_usb_audio *chip); -int media_snd_stream_init(struct snd_usb_substream *subs, struct snd_pcm *pcm, - int stream); -void media_snd_stream_delete(struct snd_usb_substream *subs); -int media_snd_start_pipeline(struct snd_usb_substream *subs); -void media_snd_stop_pipeline(struct snd_usb_substream *subs); -#else -static inline int media_snd_device_create(struct snd_usb_audio *chip, - struct usb_interface *iface) - { return 0; } -static inline void media_snd_device_delete(struct snd_usb_audio *chip) { } -static inline int media_snd_stream_init(struct snd_usb_substream *subs, - struct snd_pcm *pcm, int stream) - { return 0; } -static inline void media_snd_stream_delete(struct snd_usb_substream *subs) { } -static inline int media_snd_start_pipeline(struct snd_usb_substream *subs) - { return 0; } -static inline void media_snd_stop_pipeline(struct snd_usb_substream *subs) { } -#endif -#endif /* __MEDIA_H */ diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 4f85757009b3..2f8c388ef84f 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -45,6 +45,7 @@ #include <linux/bitops.h> #include <linux/init.h> #include <linux/list.h> +#include <linux/log2.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/usb.h> @@ -1378,6 +1379,71 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc, snd_usb_mixer_add_control(&cval->head, kctl); } +static int parse_clock_source_unit(struct mixer_build *state, int unitid, + void *_ftr) +{ + struct uac_clock_source_descriptor *hdr = _ftr; + struct usb_mixer_elem_info *cval; + struct snd_kcontrol *kctl; + char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; + int ret; + + if (state->mixer->protocol != UAC_VERSION_2) + return -EINVAL; + + if (hdr->bLength != sizeof(*hdr)) { + usb_audio_dbg(state->chip, + "Bogus clock source descriptor length of %d, ignoring.\n", + hdr->bLength); + return 0; + } + + /* + * The only property of this unit we are interested in is the + * clock source validity. If that isn't readable, just bail out. 
+ */ + if (!uac2_control_is_readable(hdr->bmControls, + ilog2(UAC2_CS_CONTROL_CLOCK_VALID))) + return 0; + + cval = kzalloc(sizeof(*cval), GFP_KERNEL); + if (!cval) + return -ENOMEM; + + snd_usb_mixer_elem_init_std(&cval->head, state->mixer, hdr->bClockID); + + cval->min = 0; + cval->max = 1; + cval->channels = 1; + cval->val_type = USB_MIXER_BOOLEAN; + cval->control = UAC2_CS_CONTROL_CLOCK_VALID; + + if (uac2_control_is_writeable(hdr->bmControls, + ilog2(UAC2_CS_CONTROL_CLOCK_VALID))) + kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval); + else { + cval->master_readonly = 1; + kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval); + } + + if (!kctl) { + kfree(cval); + return -ENOMEM; + } + + kctl->private_free = snd_usb_mixer_elem_free; + ret = snd_usb_copy_string_desc(state, hdr->iClockSource, + name, sizeof(name)); + if (ret > 0) + snprintf(kctl->id.name, sizeof(kctl->id.name), + "%s Validity", name); + else + snprintf(kctl->id.name, sizeof(kctl->id.name), + "Clock Source %d Validity", hdr->bClockID); + + return snd_usb_mixer_add_control(&cval->head, kctl); +} + /* * parse a feature unit * @@ -2126,10 +2192,11 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) switch (p1[2]) { case UAC_INPUT_TERMINAL: - case UAC2_CLOCK_SOURCE: return 0; /* NOP */ case UAC_MIXER_UNIT: return parse_audio_mixer_unit(state, unitid, p1); + case UAC2_CLOCK_SOURCE: + return parse_clock_source_unit(state, unitid, p1); case UAC_SELECTOR_UNIT: case UAC2_CLOCK_SELECTOR: return parse_audio_selector_unit(state, unitid, p1); @@ -2307,6 +2374,7 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, __u8 unitid = (index >> 8) & 0xff; __u8 control = (value >> 8) & 0xff; __u8 channel = value & 0xff; + unsigned int count = 0; if (channel >= MAX_CHANNELS) { usb_audio_dbg(mixer->chip, @@ -2315,6 +2383,12 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, return; } + for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) + count++; + + if (count == 0) + return; + for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) { struct usb_mixer_elem_info *info; @@ -2322,7 +2396,7 @@ static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer, continue; info = (struct usb_mixer_elem_info *)list; - if (info->control != control) + if (count > 1 && info->control != control) continue; switch (attribute) { diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index f3789446ab9c..3417ef347e40 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h @@ -3,8 +3,6 @@ #include <sound/info.h> -struct media_mixer_ctl; - struct usb_mixer_interface { struct snd_usb_audio *chip; struct usb_host_interface *hostif; @@ -24,7 +22,6 @@ struct usb_mixer_interface { struct urb *rc_urb; struct usb_ctrlrequest *rc_setup_packet; u8 rc_buffer[6]; - struct media_mixer_ctl *media_mixer_ctl; }; #define MAX_CHANNELS 16 /* max logical channels */ diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 0e4e0640c504..44d178ee9177 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -35,7 +35,6 @@ #include "pcm.h" #include "clock.h" #include "power.h" -#include "media.h" #define SUBSTREAM_FLAG_DATA_EP_STARTED 0 #define SUBSTREAM_FLAG_SYNC_EP_STARTED 1 @@ -718,14 +717,10 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, struct audioformat *fmt; int ret; - ret = media_snd_start_pipeline(subs); - if (ret) - return ret; - ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (ret < 0) - goto err_ret; + return ret; 
subs->pcm_format = params_format(hw_params); subs->period_bytes = params_period_bytes(hw_params); @@ -739,27 +734,22 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, dev_dbg(&subs->dev->dev, "cannot set format: format = %#x, rate = %d, channels = %d\n", subs->pcm_format, subs->cur_rate, subs->channels); - ret = -EINVAL; - goto err_ret; + return -EINVAL; } ret = snd_usb_lock_shutdown(subs->stream->chip); if (ret < 0) - goto err_ret; + return ret; ret = set_format(subs, fmt); snd_usb_unlock_shutdown(subs->stream->chip); if (ret < 0) - goto err_ret; + return ret; subs->interface = fmt->iface; subs->altset_idx = fmt->altset_idx; subs->need_setup_ep = true; return 0; - -err_ret: - media_snd_stop_pipeline(subs); - return ret; } /* @@ -771,7 +761,6 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) { struct snd_usb_substream *subs = substream->runtime->private_data; - media_snd_stop_pipeline(subs); subs->cur_audiofmt = NULL; subs->cur_rate = 0; subs->period_bytes = 0; @@ -1232,7 +1221,6 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction) struct snd_usb_stream *as = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_usb_substream *subs = &as->substream[direction]; - int ret; subs->interface = -1; subs->altset_idx = 0; @@ -1246,12 +1234,7 @@ static int snd_usb_pcm_open(struct snd_pcm_substream *substream, int direction) subs->dsd_dop.channel = 0; subs->dsd_dop.marker = 1; - ret = setup_hw_info(runtime, subs); - if (ret == 0) - ret = media_snd_stream_init(subs, as->pcm, direction); - if (ret) - snd_usb_autosuspend(subs->stream->chip); - return ret; + return setup_hw_info(runtime, subs); } static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) @@ -1260,7 +1243,6 @@ static int snd_usb_pcm_close(struct snd_pcm_substream *substream, int direction) struct snd_usb_substream *subs = &as->substream[direction]; stop_endpoints(subs, true); - media_snd_stop_pipeline(subs); if (subs->interface >= 0 && !snd_usb_lock_shutdown(subs->stream->chip)) { diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 9d087b19c70c..c60a776e815d 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -2886,7 +2886,6 @@ YAMAHA_DEVICE(0x7010, "UB99"), .product_name = pname, \ .ifnum = QUIRK_ANY_INTERFACE, \ .type = QUIRK_AUDIO_ALIGN_TRANSFER, \ - .media_device = 1, \ } \ } diff --git a/sound/usb/stream.c b/sound/usb/stream.c index 6fe7f210bd4e..8e9548bc1f1a 100644 --- a/sound/usb/stream.c +++ b/sound/usb/stream.c @@ -36,7 +36,6 @@ #include "format.h" #include "clock.h" #include "stream.h" -#include "media.h" /* * free a substream @@ -53,7 +52,6 @@ static void free_substream(struct snd_usb_substream *subs) kfree(fp); } kfree(subs->rate_list.list); - media_snd_stream_delete(subs); } diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index a161c7c1b126..b665d85555cb 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h @@ -30,9 +30,6 @@ * */ -struct media_device; -struct media_intf_devnode; - struct snd_usb_audio { int index; struct usb_device *dev; @@ -63,8 +60,6 @@ struct snd_usb_audio { bool autoclock; /* from the 'autoclock' module param */ struct usb_host_interface *ctrl_intf; /* the audio control interface */ - struct media_device *media_dev; - struct media_intf_devnode *ctl_intf_media_devnode; }; #define usb_audio_err(chip, fmt, args...) 
\ @@ -115,7 +110,6 @@ struct snd_usb_audio_quirk { const char *product_name; int16_t ifnum; uint16_t type; - bool media_device; const void *data; }; diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 20a257a12ea5..acbf7ff2ee6e 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -66,6 +66,8 @@ unsigned int do_slm_cstates; unsigned int use_c1_residency_msr; unsigned int has_aperf; unsigned int has_epb; +unsigned int do_irtl_snb; +unsigned int do_irtl_hsw; unsigned int units = 1000000; /* MHz etc */ unsigned int genuine_intel; unsigned int has_invariant_tsc; @@ -187,7 +189,7 @@ struct pkg_data { unsigned long long pkg_any_core_c0; unsigned long long pkg_any_gfxe_c0; unsigned long long pkg_both_core_gfxe_c0; - unsigned long long gfx_rc6_ms; + long long gfx_rc6_ms; unsigned int gfx_mhz; unsigned int package_id; unsigned int energy_pkg; /* MSR_PKG_ENERGY_STATUS */ @@ -621,8 +623,14 @@ int format_counters(struct thread_data *t, struct core_data *c, outp += sprintf(outp, "%8d", p->pkg_temp_c); /* GFXrc6 */ - if (do_gfx_rc6_ms) - outp += sprintf(outp, "%8.2f", 100.0 * p->gfx_rc6_ms / 1000.0 / interval_float); + if (do_gfx_rc6_ms) { + if (p->gfx_rc6_ms == -1) { /* detect counter reset */ + outp += sprintf(outp, " ***.**"); + } else { + outp += sprintf(outp, "%8.2f", + p->gfx_rc6_ms / 10.0 / interval_float); + } + } /* GFXMHz */ if (do_gfx_mhz) @@ -766,7 +774,12 @@ delta_package(struct pkg_data *new, struct pkg_data *old) old->pc10 = new->pc10 - old->pc10; old->pkg_temp_c = new->pkg_temp_c; - old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; + /* flag an error when rc6 counter resets/wraps */ + if (old->gfx_rc6_ms > new->gfx_rc6_ms) + old->gfx_rc6_ms = -1; + else + old->gfx_rc6_ms = new->gfx_rc6_ms - old->gfx_rc6_ms; + old->gfx_mhz = new->gfx_mhz; DELTA_WRAP32(new->energy_pkg, old->energy_pkg); @@ -1296,6 +1309,7 @@ int hsw_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL__3, PCL__6, PCL__7, PCL_7S, int slv_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCLRSV, PCLRSV, PCL__4, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int amt_pkg_cstate_limits[16] = {PCL__0, PCL__1, PCL__2, PCLRSV, PCLRSV, PCLRSV, PCL__6, PCL__7, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; int phi_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCL_6N, PCL_6R, PCLRSV, PCLRSV, PCLRSV, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; +int bxt_pkg_cstate_limits[16] = {PCL__0, PCL__2, PCLUNL, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV, PCLRSV}; static void @@ -1579,6 +1593,47 @@ dump_config_tdp(void) fprintf(outf, " lock=%d", (unsigned int)(msr >> 31) & 1); fprintf(outf, ")\n"); } + +unsigned int irtl_time_units[] = {1, 32, 1024, 32768, 1048576, 33554432, 0, 0 }; + +void print_irtl(void) +{ + unsigned long long msr; + + get_msr(base_cpu, MSR_PKGC3_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC3_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + + get_msr(base_cpu, MSR_PKGC6_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC6_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? 
"" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + + get_msr(base_cpu, MSR_PKGC7_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC7_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + + if (!do_irtl_hsw) + return; + + get_msr(base_cpu, MSR_PKGC8_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC8_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + + get_msr(base_cpu, MSR_PKGC9_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC9_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + + get_msr(base_cpu, MSR_PKGC10_IRTL, &msr); + fprintf(outf, "cpu%d: MSR_PKGC10_IRTL: 0x%08llx (", base_cpu, msr); + fprintf(outf, "%svalid, %lld ns)\n", msr & (1 << 15) ? "" : "NOT", + (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]); + +} void free_fd_percpu(void) { int i; @@ -2144,6 +2199,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case 0x56: /* BDX-DE */ case 0x4E: /* SKL */ case 0x5E: /* SKL */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ + case 0x55: /* SKX */ pkg_cstate_limits = hsw_pkg_cstate_limits; break; case 0x37: /* BYT */ @@ -2156,6 +2214,9 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) case 0x57: /* PHI */ pkg_cstate_limits = phi_pkg_cstate_limits; break; + case 0x5C: /* BXT */ + pkg_cstate_limits = bxt_pkg_cstate_limits; + break; default: return 0; } @@ -2248,6 +2309,9 @@ int has_config_tdp(unsigned int family, unsigned int model) case 0x56: /* BDX-DE */ case 0x4E: /* SKL */ case 0x5E: /* SKL */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ + case 0x55: /* SKX */ case 0x57: /* Knights Landing */ return 1; @@ -2585,13 +2649,19 @@ void rapl_probe(unsigned int family, unsigned int model) case 0x47: /* BDW */ do_rapl = RAPL_PKG | RAPL_CORES | RAPL_CORE_POLICY | RAPL_GFX | RAPL_PKG_POWER_INFO; break; + case 0x5C: /* BXT */ + do_rapl = RAPL_PKG | RAPL_PKG_POWER_INFO; + break; case 0x4E: /* SKL */ case 0x5E: /* SKL */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; break; case 0x3F: /* HSX */ case 0x4F: /* BDX */ case 0x56: /* BDX-DE */ + case 0x55: /* SKX */ case 0x57: /* KNL */ do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO; break; @@ -2871,6 +2941,10 @@ int has_snb_msrs(unsigned int family, unsigned int model) case 0x56: /* BDX-DE */ case 0x4E: /* SKL */ case 0x5E: /* SKL */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ + case 0x55: /* SKX */ + case 0x5C: /* BXT */ return 1; } return 0; @@ -2879,9 +2953,14 @@ int has_snb_msrs(unsigned int family, unsigned int model) /* * HSW adds support for additional MSRs: * - * MSR_PKG_C8_RESIDENCY 0x00000630 - * MSR_PKG_C9_RESIDENCY 0x00000631 - * MSR_PKG_C10_RESIDENCY 0x00000632 + * MSR_PKG_C8_RESIDENCY 0x00000630 + * MSR_PKG_C9_RESIDENCY 0x00000631 + * MSR_PKG_C10_RESIDENCY 0x00000632 + * + * MSR_PKGC8_IRTL 0x00000633 + * MSR_PKGC9_IRTL 0x00000634 + * MSR_PKGC10_IRTL 0x00000635 + * */ int has_hsw_msrs(unsigned int family, unsigned int model) { @@ -2893,6 +2972,9 @@ int has_hsw_msrs(unsigned int family, unsigned int model) case 0x3D: /* BDW */ case 0x4E: /* SKL */ case 0x5E: /* SKL */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ 
+ case 0x5C: /* BXT */ return 1; } return 0; @@ -2914,6 +2996,8 @@ int has_skl_msrs(unsigned int family, unsigned int model) switch (model) { case 0x4E: /* SKL */ case 0x5E: /* SKL */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ return 1; } return 0; @@ -3187,7 +3271,7 @@ void process_cpuid() if (debug) decode_misc_enable_msr(); - if (max_level >= 0x7) { + if (max_level >= 0x7 && debug) { int has_sgx; ecx = 0; @@ -3221,7 +3305,15 @@ void process_cpuid() switch(model) { case 0x4E: /* SKL */ case 0x5E: /* SKL */ - crystal_hz = 24000000; /* 24 MHz */ + case 0x8E: /* KBL */ + case 0x9E: /* KBL */ + crystal_hz = 24000000; /* 24.0 MHz */ + break; + case 0x55: /* SKX */ + crystal_hz = 25000000; /* 25.0 MHz */ + break; + case 0x5C: /* BXT */ + crystal_hz = 19200000; /* 19.2 MHz */ break; default: crystal_hz = 0; @@ -3254,11 +3346,13 @@ void process_cpuid() do_nhm_platform_info = do_nhm_cstates = do_smi = probe_nhm_msrs(family, model); do_snb_cstates = has_snb_msrs(family, model); + do_irtl_snb = has_snb_msrs(family, model); do_pc2 = do_snb_cstates && (pkg_cstate_limit >= PCL__2); do_pc3 = (pkg_cstate_limit >= PCL__3); do_pc6 = (pkg_cstate_limit >= PCL__6); do_pc7 = do_snb_cstates && (pkg_cstate_limit >= PCL__7); do_c8_c9_c10 = has_hsw_msrs(family, model); + do_irtl_hsw = has_hsw_msrs(family, model); do_skl_residency = has_skl_msrs(family, model); do_slm_cstates = is_slm(family, model); do_knl_cstates = is_knl(family, model); @@ -3564,6 +3658,9 @@ void turbostat_init() if (debug) for_all_cpus(print_thermal, ODD_COUNTERS); + + if (debug && do_irtl_snb) + print_irtl(); } int fork_it(char **argv) @@ -3629,7 +3726,7 @@ int get_and_dump_counters(void) } void print_version() { - fprintf(outf, "turbostat version 4.11 27 Feb 2016" + fprintf(outf, "turbostat version 4.12 5 Apr 2016" " - Len Brown <lenb@kernel.org>\n"); } diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index b9453b838162..150829dd7998 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c @@ -1497,15 +1497,15 @@ TEST_F(TRACE_syscall, syscall_dropped) #define SECCOMP_SET_MODE_FILTER 1 #endif -#ifndef SECCOMP_FLAG_FILTER_TSYNC -#define SECCOMP_FLAG_FILTER_TSYNC 1 +#ifndef SECCOMP_FILTER_FLAG_TSYNC +#define SECCOMP_FILTER_FLAG_TSYNC 1 #endif #ifndef seccomp -int seccomp(unsigned int op, unsigned int flags, struct sock_fprog *filter) +int seccomp(unsigned int op, unsigned int flags, void *args) { errno = 0; - return syscall(__NR_seccomp, op, flags, filter); + return syscall(__NR_seccomp, op, flags, args); } #endif @@ -1613,7 +1613,7 @@ TEST(TSYNC_first) TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); @@ -1831,7 +1831,7 @@ TEST_F(TSYNC, two_siblings_with_ancestor) self->sibling_count++; } - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret) { TH_LOG("Could install filter on all threads!"); @@ -1892,7 +1892,7 @@ TEST_F(TSYNC, two_siblings_with_no_filter) TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_NE(ENOSYS, 
errno) { TH_LOG("Kernel does not support seccomp syscall!"); @@ -1940,7 +1940,7 @@ TEST_F(TSYNC, two_siblings_with_one_divergence) self->sibling_count++; } - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(self->sibling[0].system_tid, ret) { TH_LOG("Did not fail on diverged sibling."); @@ -1992,7 +1992,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(ret, self->sibling[0].system_tid) { TH_LOG("Did not fail on diverged sibling."); @@ -2021,7 +2021,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) /* Switch to the remaining sibling */ sib = !sib; - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret) { TH_LOG("Expected the remaining sibling to sync"); @@ -2044,7 +2044,7 @@ TEST_F(TSYNC, two_siblings_not_under_filter) while (!kill(self->sibling[sib].system_tid, 0)) sleep(0.1); - ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FLAG_FILTER_TSYNC, + ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret); /* just us chickens */ }
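-------------------------------------------------------------------------------

The HDMI rework above funnels the old is_haswell_plus()/is_valleyview_plus()
special cases through a per-codec hook, spec->ops.pin_cvt_fixup, which the
Intel patch functions override with i915_pin_cvt_fixup(). The generic
wrapper's body is not part of these hunks, so the NULL-tolerant dispatch
below is an assumption, and all types are simplified stand-ins. A minimal
sketch of the pattern:

#include <stdio.h>

typedef unsigned short hda_nid_t;		/* illustrative stand-ins */
struct hda_codec;
struct hdmi_spec_per_pin { hda_nid_t pin_nid; int mux_idx; };

struct hdmi_ops {
	/* optional platform hook; NULL means "no fixup needed" (assumed) */
	void (*pin_cvt_fixup)(struct hda_codec *codec,
			      struct hdmi_spec_per_pin *per_pin,
			      hda_nid_t cvt_nid);
};

struct hdmi_spec { struct hdmi_ops ops; };
struct hda_codec { struct hdmi_spec *spec; };

/* generic call site: dispatch only if a platform installed a hook */
static void pin_cvt_fixup(struct hda_codec *codec,
			  struct hdmi_spec_per_pin *per_pin,
			  hda_nid_t cvt_nid)
{
	struct hdmi_spec *spec = codec->spec;

	if (spec->ops.pin_cvt_fixup)
		spec->ops.pin_cvt_fixup(codec, per_pin, cvt_nid);
}

/* platform override, in the spirit of i915_pin_cvt_fixup() */
static void demo_pin_cvt_fixup(struct hda_codec *codec,
			       struct hdmi_spec_per_pin *per_pin,
			       hda_nid_t cvt_nid)
{
	(void)codec;
	if (per_pin)	/* pin known: verify and protect its pin:cvt route */
		printf("verify pin 0x%x, mux %d\n",
		       per_pin->pin_nid, per_pin->mux_idx);
	else		/* no pin yet: keep other pins off this converter */
		printf("avoid sharing cvt 0x%x\n", cvt_nid);
}

int main(void)
{
	struct hdmi_spec spec = { .ops = { .pin_cvt_fixup = demo_pin_cvt_fixup } };
	struct hda_codec codec = { .spec = &spec };
	struct hdmi_spec_per_pin pin = { .pin_nid = 0x05, .mux_idx = 1 };

	pin_cvt_fixup(&codec, &pin, 0);		/* pin path */
	pin_cvt_fixup(&codec, NULL, 0x02);	/* no-pin path */
	return 0;
}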
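hdmi_get_spk_alloc() receives the embedded struct hdac_device and recovers
the enclosing struct hda_codec with container_of(). A self-contained
illustration of how that macro walks back from a member pointer to its
container (userspace re-implementation; the kernel's version adds type
checking, and the structs here are trimmed to the relevant fields):

#include <stdio.h>
#include <stddef.h>

/* userspace re-implementation of the kernel macro, for illustration */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hdac_device { int dev_id; };

struct hda_codec {
	int vendor_id;
	struct hdac_device core;	/* embedded, as in the kernel */
};

int main(void)
{
	struct hda_codec codec = { .vendor_id = 0x8086, .core = { .dev_id = 3 } };
	struct hdac_device *hdac = &codec.core;	/* what the callback gets */

	/* subtract the member offset to reach the containing object */
	struct hda_codec *back = container_of(hdac, struct hda_codec, core);

	printf("recovered vendor_id: 0x%x\n", back->vendor_id);	/* 0x8086 */
	return 0;
}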
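The GFXrc6 change in turbostat replaces 100.0 * ms / 1000.0 / interval with
ms / 10.0 / interval, which is algebraically identical (100/1000 == 1/10),
and makes gfx_rc6_ms signed so -1 can flag a counter reset, rendered as
***.**. For example, 4500 ms of RC6 residency over a 5 s interval prints
4500 / 10 / 5 = 90.00 (percent). A compilable sketch of the same logic:

#include <stdio.h>

/* GFXrc6 column: ms of RC6 residency over an interval in seconds */
static void print_rc6(long long rc6_ms, double interval_s)
{
	if (rc6_ms == -1)	/* sentinel: counter reset detected */
		printf("  ***.**\n");
	else
		printf("%8.2f\n", rc6_ms / 10.0 / interval_s);
}

int main(void)
{
	print_rc6(4500, 5.0);	/* prints   90.00 */
	print_rc6(-1, 5.0);	/* prints  ***.** */
	return 0;
}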
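print_irtl() decodes the package C-state interrupt response time limit MSRs:
bits 9:0 carry the count, the field starting at bit 10 indexes
irtl_time_units[], and bit 15 is the valid flag. Taking msr = 0x8842 as a
hypothetical register image: bit 15 is set (valid), (0x8842 >> 10) & 0x3 = 2
selects 1024 ns, and 0x8842 & 0x3FF = 66, giving 66 * 1024 = 67584 ns. A
standalone decoder using the same masking as the hunk (the 8-entry unit
table hints at a wider field; the two-bit mask is kept as shown above):

#include <stdio.h>

static const unsigned long long irtl_time_units[] = {
	1, 32, 1024, 32768, 1048576, 33554432, 0, 0 };

/* decode an IRTL MSR image: bits 9:0 count, unit selected above bit 10,
 * bit 15 = valid */
static void decode_irtl(unsigned long long msr)
{
	printf("0x%08llx: %svalid, %llu ns\n", msr,
	       msr & (1 << 15) ? "" : "NOT ",
	       (msr & 0x3FF) * irtl_time_units[(msr >> 10) & 0x3]);
}

int main(void)
{
	decode_irtl(0x8842);	/* valid, 66 * 1024 = 67584 ns */
	return 0;
}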
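The seccomp selftest fixes the TSYNC flag to its real UAPI name,
SECCOMP_FILTER_FLAG_TSYNC, and widens the wrapper's third parameter to
void * since the argument type depends on the operation. A minimal,
self-contained sketch of installing an always-allow filter across all
threads; with TSYNC, seccomp() returns 0 on success or the TID of a thread
whose filter state diverged, which is exactly what the TSYNC tests assert:

#include <errno.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int sys_seccomp(unsigned int op, unsigned int flags, void *args)
{
	errno = 0;
	return syscall(__NR_seccomp, op, flags, args);
}

int main(void)
{
	struct sock_filter insns[] = {
		/* unconditionally allow every syscall */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = { .len = 1, .filter = insns };
	int ret;

	/* required before installing a filter without CAP_SYS_ADMIN */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return 1;

	ret = sys_seccomp(SECCOMP_SET_MODE_FILTER,
			  SECCOMP_FILTER_FLAG_TSYNC, &prog);
	if (ret == 0)
		printf("filter synced to all threads\n");
	else if (ret > 0)
		printf("thread %d has a diverged filter\n", ret);
	else
		perror("seccomp");
	return ret != 0;
}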