459 files changed, 3848 insertions, 2270 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-ioatdma b/Documentation/ABI/stable/sysfs-driver-dma-ioatdma index 420c1d09e42f..3a4e2cd0ddcc 100644 --- a/Documentation/ABI/stable/sysfs-driver-dma-ioatdma +++ b/Documentation/ABI/stable/sysfs-driver-dma-ioatdma @@ -1,29 +1,29 @@ -What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap +What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/cap Date: December 3, 2009 KernelVersion: 2.6.32 Contact: dmaengine@vger.kernel.org Description: Capabilities the DMA supports.Currently there are DMA_PQ, DMA_PQ_VAL, DMA_XOR,DMA_XOR_VAL,DMA_INTERRUPT. -What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active +What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_active Date: December 3, 2009 KernelVersion: 2.6.32 Contact: dmaengine@vger.kernel.org Description: The number of descriptors active in the ring. -What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size +What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/ring_size Date: December 3, 2009 KernelVersion: 2.6.32 Contact: dmaengine@vger.kernel.org Description: Descriptor ring size, total number of descriptors available. -What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version +What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/version Date: December 3, 2009 KernelVersion: 2.6.32 Contact: dmaengine@vger.kernel.org Description: Version of ioatdma device. -What: sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce +What: /sys/devices/pciXXXX:XX/0000:XX:XX.X/dma/dma<n>chan<n>/quickdata/intr_coalesce Date: August 8, 2017 KernelVersion: 4.14 Contact: dmaengine@vger.kernel.org diff --git a/Documentation/ABI/testing/sysfs-class-net b/Documentation/ABI/testing/sysfs-class-net index 7670012ae9b6..1f2002df5ba2 100644 --- a/Documentation/ABI/testing/sysfs-class-net +++ b/Documentation/ABI/testing/sysfs-class-net @@ -152,7 +152,7 @@ Description: When an interface is under test, it cannot be expected to pass packets as normal. -What: /sys/clas/net/<iface>/duplex +What: /sys/class/net/<iface>/duplex Date: October 2009 KernelVersion: 2.6.33 Contact: netdev@vger.kernel.org diff --git a/Documentation/Makefile b/Documentation/Makefile index 6a59a13d3c53..61a7310b49e0 100644 --- a/Documentation/Makefile +++ b/Documentation/Makefile @@ -26,6 +26,10 @@ BUILDDIR = $(obj)/output PDFLATEX = xelatex LATEXOPTS = -interaction=batchmode +ifeq ($(KBUILD_VERBOSE),0) +SPHINXOPTS += "-q" +endif + # User-friendly check for sphinx-build HAVE_SPHINX := $(shell if which $(SPHINXBUILD) >/dev/null 2>&1; then echo 1; else echo 0; fi) diff --git a/Documentation/admin-guide/LSM/SafeSetID.rst b/Documentation/admin-guide/LSM/SafeSetID.rst index 17996c9070e2..0ec34863c674 100644 --- a/Documentation/admin-guide/LSM/SafeSetID.rst +++ b/Documentation/admin-guide/LSM/SafeSetID.rst @@ -107,7 +107,7 @@ for a UID/GID will prevent that UID/GID from obtaining auxiliary setid privileges, such as allowing a user to set up user namespace UID/GID mappings. Note on GID policies and setgroups() -================== +==================================== In v5.9 we are adding support for limiting CAP_SETGID privileges as was done previously for CAP_SETUID. 
However, for compatibility with common sandboxing related code conventions in userspace, we currently allow arbitrary diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst index 37940a0584ec..10fde58d0869 100644 --- a/Documentation/admin-guide/pm/cpuidle.rst +++ b/Documentation/admin-guide/pm/cpuidle.rst @@ -478,7 +478,7 @@ order to ask the hardware to enter that state. Also, for each statistics of the given idle state. That information is exposed by the kernel via ``sysfs``. -For each CPU in the system, there is a :file:`/sys/devices/system/cpu<N>/cpuidle/` +For each CPU in the system, there is a :file:`/sys/devices/system/cpu/cpu<N>/cpuidle/` directory in ``sysfs``, where the number ``<N>`` is assigned to the given CPU at the initialization time. That directory contains a set of subdirectories called :file:`state0`, :file:`state1` and so on, up to the number of idle state @@ -494,7 +494,7 @@ object corresponding to it, as follows: residency. ``below`` - Total number of times this idle state had been asked for, but cerainly + Total number of times this idle state had been asked for, but certainly a deeper idle state would have been a better match for the observed idle duration. diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst index 57fd6ce68fe0..f2ab8a5b6a4b 100644 --- a/Documentation/admin-guide/sysctl/net.rst +++ b/Documentation/admin-guide/sysctl/net.rst @@ -300,6 +300,7 @@ Note: 0: 0 1 2 3 4 5 6 7 RSS hash key: 84:50:f4:00:a8:15:d1:a7:e9:7f:1d:60:35:c7:47:25:42:97:74:ca:56:bb:b6:a1:d8:43:e3:c9:0c:fd:17:55:c2:3a:4d:69:ed:f1:42:89 + netdev_tstamp_prequeue ---------------------- diff --git a/Documentation/arm/sunxi.rst b/Documentation/arm/sunxi.rst index 62b533d0ba94..0c536ae1d7c2 100644 --- a/Documentation/arm/sunxi.rst +++ b/Documentation/arm/sunxi.rst @@ -148,3 +148,13 @@ SunXi family * User Manual http://dl.linux-sunxi.org/A64/Allwinner%20A64%20User%20Manual%20v1.0.pdf + + - Allwinner H6 + + * Datasheet + + https://linux-sunxi.org/images/5/5c/Allwinner_H6_V200_Datasheet_V1.1.pdf + + * User Manual + + https://linux-sunxi.org/images/4/46/Allwinner_H6_V200_User_Manual_V1.1.pdf diff --git a/Documentation/conf.py b/Documentation/conf.py index 1e44983853a2..ed2b43ec7754 100644 --- a/Documentation/conf.py +++ b/Documentation/conf.py @@ -51,7 +51,7 @@ if major >= 3: support for Sphinx v3.0 and above is brand new. Be prepared for possible issues in the generated output. ''') - if minor > 0 or patch >= 2: + if (major > 3) or (minor > 0 or patch >= 2): # Sphinx c function parser is more pedantic with regards to type # checking. Due to that, having macros at c:function cause problems. 
# Those needed to be scaped by using c_id_attributes[] array diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst index c09c9ca2ff1c..2b68addaadcd 100644 --- a/Documentation/dev-tools/kasan.rst +++ b/Documentation/dev-tools/kasan.rst @@ -295,11 +295,13 @@ print the number of the test and the status of the test: pass:: ok 28 - kmalloc_double_kzfree + or, if kmalloc failed:: # kmalloc_large_oob_right: ASSERTION FAILED at lib/test_kasan.c:163 Expected ptr is not null, but is not ok 4 - kmalloc_large_oob_right + or, if a KASAN report was expected, but not found:: # kmalloc_double_kzfree: EXPECTATION FAILED at lib/test_kasan.c:629 diff --git a/Documentation/dev-tools/kunit/start.rst b/Documentation/dev-tools/kunit/start.rst index d23385e3e159..454f307813ea 100644 --- a/Documentation/dev-tools/kunit/start.rst +++ b/Documentation/dev-tools/kunit/start.rst @@ -197,7 +197,7 @@ Now add the following to ``drivers/misc/Kconfig``: config MISC_EXAMPLE_TEST bool "Test for my example" - depends on MISC_EXAMPLE && KUNIT + depends on MISC_EXAMPLE && KUNIT=y and the following to ``drivers/misc/Makefile``: diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst index 961d3ea3ca19..62142a47488c 100644 --- a/Documentation/dev-tools/kunit/usage.rst +++ b/Documentation/dev-tools/kunit/usage.rst @@ -561,6 +561,11 @@ Once the kernel is built and installed, a simple ...will run the tests. +.. note:: + Note that you should make sure your test depends on ``KUNIT=y`` in Kconfig + if the test does not support module build. Otherwise, it will trigger + compile errors if ``CONFIG_KUNIT`` is ``m``. + Writing new tests for other architectures ----------------------------------------- diff --git a/Documentation/devicetree/bindings/clock/hi6220-clock.txt b/Documentation/devicetree/bindings/clock/hi6220-clock.txt index ef3deb7b86ea..17ac4a3dd26a 100644 --- a/Documentation/devicetree/bindings/clock/hi6220-clock.txt +++ b/Documentation/devicetree/bindings/clock/hi6220-clock.txt @@ -4,7 +4,7 @@ Clock control registers reside in different Hi6220 system controllers, please refer the following document to know more about the binding rules for these system controllers: -Documentation/devicetree/bindings/arm/hisilicon/hisilicon.txt +Documentation/devicetree/bindings/arm/hisilicon/hisilicon.yaml Required Properties: diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml index f6c3fcc4bdfd..b5af12011499 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-inta.yaml @@ -32,6 +32,11 @@ description: | | | vint | bit | | 0 |.....|63| vintx | | +--------------+ +------------+ | | | + | Unmap | + | +--------------+ | + Unmapped events ---->| | umapidx |-------------------------> Globalevents + | +--------------+ | + | | +-----------------------------------------+ Configuration of these Intmap registers that maps global events to vint is @@ -70,6 +75,11 @@ properties: - description: | "limit" specifies the limit for translation + ti,unmapped-event-sources: + $ref: /schemas/types.yaml#definitions/phandle-array + description: + Array of phandles to DMA controllers where the unmapped events originate. 
+ required: - compatible - reg diff --git a/Documentation/devicetree/bindings/net/can/can-controller.yaml b/Documentation/devicetree/bindings/net/can/can-controller.yaml new file mode 100644 index 000000000000..9cf2ae097156 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/can-controller.yaml @@ -0,0 +1,18 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/can/can-controller.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: CAN Controller Generic Binding + +maintainers: + - Marc Kleine-Budde <mkl@pengutronix.de> + +properties: + $nodename: + pattern: "^can(@.*)?$" + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml new file mode 100644 index 000000000000..43df15ba8fa4 --- /dev/null +++ b/Documentation/devicetree/bindings/net/can/fsl,flexcan.yaml @@ -0,0 +1,135 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/can/fsl,flexcan.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: + Flexcan CAN controller on Freescale's ARM and PowerPC system-on-a-chip (SOC). + +maintainers: + - Marc Kleine-Budde <mkl@pengutronix.de> + +allOf: + - $ref: can-controller.yaml# + +properties: + compatible: + oneOf: + - enum: + - fsl,imx8qm-flexcan + - fsl,imx8mp-flexcan + - fsl,imx6q-flexcan + - fsl,imx53-flexcan + - fsl,imx35-flexcan + - fsl,imx28-flexcan + - fsl,imx25-flexcan + - fsl,p1010-flexcan + - fsl,vf610-flexcan + - fsl,ls1021ar2-flexcan + - fsl,lx2160ar1-flexcan + - items: + - enum: + - fsl,imx7d-flexcan + - fsl,imx6ul-flexcan + - fsl,imx6sx-flexcan + - const: fsl,imx6q-flexcan + - items: + - enum: + - fsl,ls1028ar1-flexcan + - const: fsl,lx2160ar1-flexcan + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + clocks: + maxItems: 2 + + clock-names: + items: + - const: ipg + - const: per + + clock-frequency: + description: | + The oscillator frequency driving the flexcan device, filled in by the + boot loader. This property should only be used the used operating system + doesn't support the clocks and clock-names property. + + xceiver-supply: + description: Regulator that powers the CAN transceiver. + + big-endian: + $ref: /schemas/types.yaml#/definitions/flag + description: | + This means the registers of FlexCAN controller are big endian. This is + optional property.i.e. if this property is not present in device tree + node then controller is assumed to be little endian. If this property is + present then controller is assumed to be big endian. + + fsl,stop-mode: + description: | + Register bits of stop mode control. + + The format should be as follows: + <gpr req_gpr req_bit> + gpr is the phandle to general purpose register node. + req_gpr is the gpr register offset of CAN stop request. + req_bit is the bit offset of CAN stop request. + $ref: /schemas/types.yaml#/definitions/phandle-array + items: + - description: The 'gpr' is the phandle to general purpose register node. + - description: The 'req_gpr' is the gpr register offset of CAN stop request. + maximum: 0xff + - description: The 'req_bit' is the bit offset of CAN stop request. + maximum: 0x1f + + fsl,clk-source: + description: | + Select the clock source to the CAN Protocol Engine (PE). It's SoC + implementation dependent. Refer to RM for detailed definition. 
If this + property is not set in device tree node then driver selects clock source 1 + by default. + 0: clock source 0 (oscillator clock) + 1: clock source 1 (peripheral clock) + $ref: /schemas/types.yaml#/definitions/uint32 + default: 1 + minimum: 0 + maximum: 1 + + wakeup-source: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enable CAN remote wakeup. + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + can@1c000 { + compatible = "fsl,p1010-flexcan"; + reg = <0x1c000 0x1000>; + interrupts = <48 0x2>; + interrupt-parent = <&mpic>; + clock-frequency = <200000000>; + fsl,clk-source = <0>; + }; + - | + #include <dt-bindings/interrupt-controller/irq.h> + + can@2090000 { + compatible = "fsl,imx6q-flexcan"; + reg = <0x02090000 0x4000>; + interrupts = <0 110 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&clks 1>, <&clks 2>; + clock-names = "ipg", "per"; + fsl,stop-mode = <&gpr 0x34 28>; + }; diff --git a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt b/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt deleted file mode 100644 index e10b6eb955e1..000000000000 --- a/Documentation/devicetree/bindings/net/can/fsl-flexcan.txt +++ /dev/null @@ -1,57 +0,0 @@ -Flexcan CAN controller on Freescale's ARM and PowerPC system-on-a-chip (SOC). - -Required properties: - -- compatible : Should be "fsl,<processor>-flexcan" - - where <processor> is imx8qm, imx6q, imx28, imx53, imx35, imx25, p1010, - vf610, ls1021ar2, lx2160ar1, ls1028ar1. - - The ls1028ar1 must be followed by lx2160ar1, e.g. - - "fsl,ls1028ar1-flexcan", "fsl,lx2160ar1-flexcan" - - An implementation should also claim any of the following compatibles - that it is fully backwards compatible with: - - - fsl,p1010-flexcan - -- reg : Offset and length of the register set for this device -- interrupts : Interrupt tuple for this device - -Optional properties: - -- clock-frequency : The oscillator frequency driving the flexcan device - -- xceiver-supply: Regulator that powers the CAN transceiver - -- big-endian: This means the registers of FlexCAN controller are big endian. - This is optional property.i.e. if this property is not present in - device tree node then controller is assumed to be little endian. - if this property is present then controller is assumed to be big - endian. - -- fsl,stop-mode: register bits of stop mode control, the format is - <&gpr req_gpr req_bit>. - gpr is the phandle to general purpose register node. - req_gpr is the gpr register offset of CAN stop request. - req_bit is the bit offset of CAN stop request. - -- fsl,clk-source: Select the clock source to the CAN Protocol Engine (PE). - It's SoC Implementation dependent. Refer to RM for detailed - definition. If this property is not set in device tree node - then driver selects clock source 1 by default. - 0: clock source 0 (oscillator clock) - 1: clock source 1 (peripheral clock) - -- wakeup-source: enable CAN remote wakeup - -Example: - - can@1c000 { - compatible = "fsl,p1010-flexcan"; - reg = <0x1c000 0x1000>; - interrupts = <48 0x2>; - interrupt-parent = <&mpic>; - clock-frequency = <200000000>; // filled in by bootloader - fsl,clk-source = <0>; // select clock source 0 for PE - }; diff --git a/Documentation/filesystems/api-summary.rst b/Documentation/filesystems/api-summary.rst index bbb0c1c0e5cf..a94f17d9b836 100644 --- a/Documentation/filesystems/api-summary.rst +++ b/Documentation/filesystems/api-summary.rst @@ -86,9 +86,6 @@ Other Functions .. kernel-doc:: fs/dax.c :export: -.. 
kernel-doc:: fs/direct-io.c - :export: - .. kernel-doc:: fs/libfs.c :export: diff --git a/Documentation/gpu/amdgpu.rst b/Documentation/gpu/amdgpu.rst index 1f9ea8221f80..2062a6023678 100644 --- a/Documentation/gpu/amdgpu.rst +++ b/Documentation/gpu/amdgpu.rst @@ -83,10 +83,6 @@ AMDGPU XGMI Support =================== .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c - :doc: AMDGPU XGMI Support - -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c - :internal: AMDGPU RAS Support ================== @@ -124,9 +120,6 @@ RAS VRAM Bad Pages sysfs Interface .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c :doc: AMDGPU RAS sysfs gpu_vram_bad_pages Interface -.. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c - :internal: - Sample Code ----------- Sample code for testing error injection can be found here: diff --git a/Documentation/hwmon/adm1266.rst b/Documentation/hwmon/adm1266.rst index 9257f8a48650..2b877011cfdf 100644 --- a/Documentation/hwmon/adm1266.rst +++ b/Documentation/hwmon/adm1266.rst @@ -20,7 +20,7 @@ ADM1266 is a sequencer that features voltage readback from 17 channels via an integrated 12 bit SAR ADC, accessed using a PMBus interface. The driver is a client driver to the core PMBus driver. Please see -Documentation/hwmon/pmbus for details on PMBus client drivers. +Documentation/hwmon/pmbus.rst for details on PMBus client drivers. Sysfs entries diff --git a/Documentation/hwmon/index.rst b/Documentation/hwmon/index.rst index e6b91ab12978..b797db738225 100644 --- a/Documentation/hwmon/index.rst +++ b/Documentation/hwmon/index.rst @@ -132,6 +132,7 @@ Hardware Monitoring Kernel Drivers mcp3021 menf21bmc mlxreg-fan + mp2975 nct6683 nct6775 nct7802 diff --git a/Documentation/hwmon/mp2975.rst b/Documentation/hwmon/mp2975.rst index 5b0609c62f48..81d816b71490 100644 --- a/Documentation/hwmon/mp2975.rst +++ b/Documentation/hwmon/mp2975.rst @@ -20,6 +20,7 @@ This driver implements support for Monolithic Power Systems, Inc. (MPS) vendor dual-loop, digital, multi-phase controller MP2975. This device: + - Supports up to two power rail. - Provides 8 pulse-width modulations (PWMs), and can be configured up to 8-phase operation for rail 1 and up to 4-phase operation for rail @@ -32,10 +33,12 @@ This device: 10-mV DAC, IMVP9 mode with 5-mV DAC. Device supports: + - SVID interface. - AVSBus interface. Device complaint with: + - PMBus rev 1.3 interface. Device supports direct format for reading output current, output voltage, @@ -45,11 +48,14 @@ Device supports VID and direct formats for reading output voltage. The below VID modes are supported: VR12, VR13, IMVP9. The driver provides the next attributes for the current: + - for current in: input, maximum alarm; - for current out input, maximum alarm and highest values; - for phase current: input and label. -attributes. + attributes. 
+ The driver exports the following attributes via the 'sysfs' files, where + - 'n' is number of telemetry pages (from 1 to 2); - 'k' is number of configured phases (from 1 to 8); - indexes 1, 1*n for "iin"; @@ -65,11 +71,14 @@ The driver exports the following attributes via the 'sysfs' files, where **curr[1-{2n+k}]_label** The driver provides the next attributes for the voltage: + - for voltage in: input, high critical threshold, high critical alarm, all only from page 0; - for voltage out: input, low and high critical thresholds, low and high critical alarms, from pages 0 and 1; + The driver exports the following attributes via the 'sysfs' files, where + - 'n' is number of telemetry pages (from 1 to 2); - indexes 1 for "iin"; - indexes n+1, n+2 for "vout"; @@ -87,9 +96,12 @@ The driver exports the following attributes via the 'sysfs' files, where **in[2-{n+1}1_lcrit_alarm** The driver provides the next attributes for the power: + - for power in alarm and input. - for power out: highest and input. + The driver exports the following attributes via the 'sysfs' files, where + - 'n' is number of telemetry pages (from 1 to 2); - indexes 1 for "pin"; - indexes n+1, n+2 for "pout"; diff --git a/Documentation/leds/index.rst b/Documentation/leds/index.rst index 53e6090454af..e5d63b940045 100644 --- a/Documentation/leds/index.rst +++ b/Documentation/leds/index.rst @@ -25,3 +25,4 @@ LEDs leds-lp5562 leds-lp55xx leds-mlxcpld + leds-sc27xx diff --git a/Documentation/locking/lockdep-design.rst b/Documentation/locking/lockdep-design.rst index cec03bd1294a..9f3cfca9f8a4 100644 --- a/Documentation/locking/lockdep-design.rst +++ b/Documentation/locking/lockdep-design.rst @@ -42,6 +42,7 @@ The validator tracks lock-class usage history and divides the usage into (4 usages * n STATEs + 1) categories: where the 4 usages can be: + - 'ever held in STATE context' - 'ever held as readlock in STATE context' - 'ever held with STATE enabled' @@ -49,10 +50,12 @@ where the 4 usages can be: where the n STATEs are coded in kernel/locking/lockdep_states.h and as of now they include: + - hardirq - softirq where the last 1 category is: + - 'ever used' [ == !unused ] When locking rules are violated, these usage bits are presented in the @@ -96,9 +99,9 @@ exact case is for the lock as of the reporting time. +--------------+-------------+--------------+ | | irq enabled | irq disabled | +--------------+-------------+--------------+ - | ever in irq | ? | - | + | ever in irq | '?' | '-' | +--------------+-------------+--------------+ - | never in irq | + | . | + | never in irq | '+' | '.' | +--------------+-------------+--------------+ The character '-' suggests irq is disabled because if otherwise the @@ -216,7 +219,7 @@ looks like this:: BD_MUTEX_PARTITION }; -mutex_lock_nested(&bdev->bd_contains->bd_mutex, BD_MUTEX_PARTITION); + mutex_lock_nested(&bdev->bd_contains->bd_mutex, BD_MUTEX_PARTITION); In this case the locking is done on a bdev object that is known to be a partition. @@ -334,7 +337,7 @@ Troubleshooting: ---------------- The validator tracks a maximum of MAX_LOCKDEP_KEYS number of lock classes. -Exceeding this number will trigger the following lockdep warning: +Exceeding this number will trigger the following lockdep warning:: (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) @@ -420,7 +423,8 @@ the critical section of another reader of the same lock instance. 
The difference between recursive readers and non-recursive readers is because: recursive readers get blocked only by a write lock *holder*, while non-recursive -readers could get blocked by a write lock *waiter*. Considering the follow example: +readers could get blocked by a write lock *waiter*. Considering the follow +example:: TASK A: TASK B: @@ -448,20 +452,22 @@ There are simply four block conditions: Block condition matrix, Y means the row blocks the column, and N means otherwise. - | E | r | R | +---+---+---+---+ - E | Y | Y | Y | + | | E | r | R | + +---+---+---+---+ + | E | Y | Y | Y | + +---+---+---+---+ + | r | Y | Y | N | +---+---+---+---+ - r | Y | Y | N | + | R | Y | Y | N | +---+---+---+---+ - R | Y | Y | N | (W: writers, r: non-recursive readers, R: recursive readers) acquired recursively. Unlike non-recursive read locks, recursive read locks only get blocked by current write lock *holders* other than write lock -*waiters*, for example: +*waiters*, for example:: TASK A: TASK B: @@ -491,7 +497,7 @@ Recursive locks don't block each other, while non-recursive locks do (this is even true for two non-recursive read locks). A non-recursive lock can block the corresponding recursive lock, and vice versa. -A deadlock case with recursive locks involved is as follow: +A deadlock case with recursive locks involved is as follow:: TASK A: TASK B: @@ -510,7 +516,7 @@ because there are 3 types for lockers, there are, in theory, 9 types of lock dependencies, but we can show that 4 types of lock dependencies are enough for deadlock detection. -For each lock dependency: +For each lock dependency:: L1 -> L2 @@ -525,20 +531,25 @@ same types). With the above combination for simplification, there are 4 types of dependency edges in the lockdep graph: -1) -(ER)->: exclusive writer to recursive reader dependency, "X -(ER)-> Y" means +1) -(ER)->: + exclusive writer to recursive reader dependency, "X -(ER)-> Y" means X -> Y and X is a writer and Y is a recursive reader. -2) -(EN)->: exclusive writer to non-recursive locker dependency, "X -(EN)-> Y" means +2) -(EN)->: + exclusive writer to non-recursive locker dependency, "X -(EN)-> Y" means X -> Y and X is a writer and Y is either a writer or non-recursive reader. -3) -(SR)->: shared reader to recursive reader dependency, "X -(SR)-> Y" means +3) -(SR)->: + shared reader to recursive reader dependency, "X -(SR)-> Y" means X -> Y and X is a reader (recursive or not) and Y is a recursive reader. -4) -(SN)->: shared reader to non-recursive locker dependency, "X -(SN)-> Y" means +4) -(SN)->: + shared reader to non-recursive locker dependency, "X -(SN)-> Y" means X -> Y and X is a reader (recursive or not) and Y is either a writer or non-recursive reader. -Note that given two locks, they may have multiple dependencies between them, for example: +Note that given two locks, they may have multiple dependencies between them, +for example:: TASK A: @@ -592,11 +603,11 @@ circles that won't cause deadlocks. Proof for sufficiency (Lemma 1): -Let's say we have a strong circle: +Let's say we have a strong circle:: L1 -> L2 ... -> Ln -> L1 -, which means we have dependencies: +, which means we have dependencies:: L1 -> L2 L2 -> L3 @@ -633,7 +644,7 @@ a lock held by P2, and P2 is waiting for a lock held by P3, ... and Pn is waitin for a lock held by P1. Let's name the lock Px is waiting as Lx, so since P1 is waiting for L1 and holding Ln, so we will have Ln -> L1 in the dependency graph. 
Similarly, we have L1 -> L2, L2 -> L3, ..., Ln-1 -> Ln in the dependency graph, which means we -have a circle: +have a circle:: Ln -> L1 -> L2 -> ... -> Ln diff --git a/Documentation/misc-devices/index.rst b/Documentation/misc-devices/index.rst index 46072ce3d7ef..64420b3314fe 100644 --- a/Documentation/misc-devices/index.rst +++ b/Documentation/misc-devices/index.rst @@ -24,7 +24,6 @@ fit into other categories. isl29003 lis3lv02d max6875 - mic/index pci-endpoint-test spear-pcie-gadget uacce diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst index b165181d5d4d..a432dc419fa4 100644 --- a/Documentation/networking/devlink/ice.rst +++ b/Documentation/networking/devlink/ice.rst @@ -70,6 +70,7 @@ The ``ice`` driver reports the following versions that both the name (as reported by ``fw.app.name``) and version are required to uniquely identify the package. * - ``fw.app.bundle_id`` + - running - 0xc0000001 - Unique identifier for the DDP package loaded in the device. Also referred to as the DDP Track ID. Can be used to uniquely identify diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst index f5be243d250a..0a4b73b03b99 100644 --- a/Documentation/networking/j1939.rst +++ b/Documentation/networking/j1939.rst @@ -10,9 +10,9 @@ Overview / What Is J1939 SAE J1939 defines a higher layer protocol on CAN. It implements a more sophisticated addressing scheme and extends the maximum packet size above 8 bytes. Several derived specifications exist, which differ from the original -J1939 on the application level, like MilCAN A, NMEA2000 and especially +J1939 on the application level, like MilCAN A, NMEA2000, and especially ISO-11783 (ISOBUS). This last one specifies the so-called ETP (Extended -Transport Protocol) which is has been included in this implementation. This +Transport Protocol), which has been included in this implementation. This results in a maximum packet size of ((2 ^ 24) - 1) * 7 bytes == 111 MiB. Specifications used @@ -32,15 +32,15 @@ sockets, we found some reasons to justify a kernel implementation for the addressing and transport methods used by J1939. * **Addressing:** when a process on an ECU communicates via J1939, it should - not necessarily know its source address. Although at least one process per + not necessarily know its source address. Although, at least one process per ECU should know the source address. Other processes should be able to reuse that address. This way, address parameters for different processes cooperating for the same ECU, are not duplicated. This way of working is - closely related to the UNIX concept where programs do just one thing, and do + closely related to the UNIX concept, where programs do just one thing and do it well. * **Dynamic addressing:** Address Claiming in J1939 is time critical. - Furthermore data transport should be handled properly during the address + Furthermore, data transport should be handled properly during the address negotiation. Putting this functionality in the kernel eliminates it as a requirement for _every_ user space process that communicates via J1939. This results in a consistent J1939 bus with proper addressing. @@ -58,7 +58,7 @@ Therefore, these parts are left to user space. The J1939 sockets operate on CAN network devices (see SocketCAN). Any J1939 user space library operating on CAN raw sockets will still operate properly. 
-Since such library does not communicate with the in-kernel implementation, care +Since such a library does not communicate with the in-kernel implementation, care must be taken that these two do not interfere. In practice, this means they cannot share ECU addresses. A single ECU (or virtual ECU) address is used by the library exclusively, or by the in-kernel system exclusively. @@ -77,13 +77,13 @@ is composed as follows: 8 bits : PS (PDU Specific) In J1939-21 distinction is made between PDU1 format (where PF < 240) and PDU2 -format (where PF >= 240). Furthermore, when using PDU2 format, the PS-field +format (where PF >= 240). Furthermore, when using the PDU2 format, the PS-field contains a so-called Group Extension, which is part of the PGN. When using PDU2 format, the Group Extension is set in the PS-field. On the other hand, when using PDU1 format, the PS-field contains a so-called Destination Address, which is _not_ part of the PGN. When communicating a PGN -from user space to kernel (or visa versa) and PDU2 format is used, the PS-field +from user space to kernel (or vice versa) and PDU2 format is used, the PS-field of the PGN shall be set to zero. The Destination Address shall be set elsewhere. @@ -96,15 +96,15 @@ Addressing Both static and dynamic addressing methods can be used. -For static addresses, no extra checks are made by the kernel, and provided +For static addresses, no extra checks are made by the kernel and provided addresses are considered right. This responsibility is for the OEM or system integrator. For dynamic addressing, so-called Address Claiming, extra support is foreseen -in the kernel. In J1939 any ECU is known by it's 64-bit NAME. At the moment of +in the kernel. In J1939 any ECU is known by its 64-bit NAME. At the moment of a successful address claim, the kernel keeps track of both NAME and source address being claimed. This serves as a base for filter schemes. By default, -packets with a destination that is not locally, will be rejected. +packets with a destination that is not locally will be rejected. Mixed mode packets (from a static to a dynamic address or vice versa) are allowed. The BSD sockets define separate API calls for getting/setting the @@ -131,31 +131,31 @@ API Calls --------- On CAN, you first need to open a socket for communicating over a CAN network. -To use J1939, #include <linux/can/j1939.h>. From there, <linux/can.h> will be +To use J1939, ``#include <linux/can/j1939.h>``. From there, ``<linux/can.h>`` will be included too. To open a socket, use: .. code-block:: C s = socket(PF_CAN, SOCK_DGRAM, CAN_J1939); -J1939 does use SOCK_DGRAM sockets. In the J1939 specification, connections are +J1939 does use ``SOCK_DGRAM`` sockets. In the J1939 specification, connections are mentioned in the context of transport protocol sessions. These still deliver -packets to the other end (using several CAN packets). SOCK_STREAM is not +packets to the other end (using several CAN packets). ``SOCK_STREAM`` is not supported. -After the successful creation of the socket, you would normally use the bind(2) -and/or connect(2) system call to bind the socket to a CAN interface. After -binding and/or connecting the socket, you can read(2) and write(2) from/to the -socket or use send(2), sendto(2), sendmsg(2) and the recv*() counterpart +After the successful creation of the socket, you would normally use the ``bind(2)`` +and/or ``connect(2)`` system call to bind the socket to a CAN interface. 
After +binding and/or connecting the socket, you can ``read(2)`` and ``write(2)`` from/to the +socket or use ``send(2)``, ``sendto(2)``, ``sendmsg(2)`` and the ``recv*()`` counterpart operations on the socket as usual. There are also J1939 specific socket options described below. -In order to send data, a bind(2) must have been successful. bind(2) assigns a +In order to send data, a ``bind(2)`` must have been successful. ``bind(2)`` assigns a local address to a socket. -Different from CAN is that the payload data is just the data that get send, -without it's header info. The header info is derived from the sockaddr supplied -to bind(2), connect(2), sendto(2) and recvfrom(2). A write(2) with size 4 will +Different from CAN is that the payload data is just the data that get sends, +without its header info. The header info is derived from the sockaddr supplied +to ``bind(2)``, ``connect(2)``, ``sendto(2)`` and ``recvfrom(2)``. A ``write(2)`` with size 4 will result in a packet with 4 bytes. The sockaddr structure has extensions for use with J1939 as specified below: @@ -180,47 +180,47 @@ The sockaddr structure has extensions for use with J1939 as specified below: } can_addr; } -can_family & can_ifindex serve the same purpose as for other SocketCAN sockets. +``can_family`` & ``can_ifindex`` serve the same purpose as for other SocketCAN sockets. -can_addr.j1939.pgn specifies the PGN (max 0x3ffff). Individual bits are +``can_addr.j1939.pgn`` specifies the PGN (max 0x3ffff). Individual bits are specified above. -can_addr.j1939.name contains the 64-bit J1939 NAME. +``can_addr.j1939.name`` contains the 64-bit J1939 NAME. -can_addr.j1939.addr contains the address. +``can_addr.j1939.addr`` contains the address. -The bind(2) system call assigns the local address, i.e. the source address when -sending packages. If a PGN during bind(2) is set, it's used as a RX filter. -I.e. only packets with a matching PGN are received. If an ADDR or NAME is set +The ``bind(2)`` system call assigns the local address, i.e. the source address when +sending packages. If a PGN during ``bind(2)`` is set, it's used as a RX filter. +I.e. only packets with a matching PGN are received. If an ADDR or NAME is set it is used as a receive filter, too. It will match the destination NAME or ADDR of the incoming packet. The NAME filter will work only if appropriate Address Claiming for this name was done on the CAN bus and registered/cached by the kernel. -On the other hand connect(2) assigns the remote address, i.e. the destination -address. The PGN from connect(2) is used as the default PGN when sending +On the other hand ``connect(2)`` assigns the remote address, i.e. the destination +address. The PGN from ``connect(2)`` is used as the default PGN when sending packets. If ADDR or NAME is set it will be used as the default destination ADDR -or NAME. Further a set ADDR or NAME during connect(2) is used as a receive +or NAME. Further a set ADDR or NAME during ``connect(2)`` is used as a receive filter. It will match the source NAME or ADDR of the incoming packet. -Both write(2) and send(2) will send a packet with local address from bind(2) and -the remote address from connect(2). Use sendto(2) to overwrite the destination +Both ``write(2)`` and ``send(2)`` will send a packet with local address from ``bind(2)`` and the +remote address from ``connect(2)``. Use ``sendto(2)`` to overwrite the destination address. -If can_addr.j1939.name is set (!= 0) the NAME is looked up by the kernel and -the corresponding ADDR is used. 
If can_addr.j1939.name is not set (== 0), -can_addr.j1939.addr is used. +If ``can_addr.j1939.name`` is set (!= 0) the NAME is looked up by the kernel and +the corresponding ADDR is used. If ``can_addr.j1939.name`` is not set (== 0), +``can_addr.j1939.addr`` is used. When creating a socket, reasonable defaults are set. Some options can be -modified with setsockopt(2) & getsockopt(2). +modified with ``setsockopt(2)`` & ``getsockopt(2)``. RX path related options: -- SO_J1939_FILTER - configure array of filters -- SO_J1939_PROMISC - disable filters set by bind(2) and connect(2) +- ``SO_J1939_FILTER`` - configure array of filters +- ``SO_J1939_PROMISC`` - disable filters set by ``bind(2)`` and ``connect(2)`` By default no broadcast packets can be send or received. To enable sending or -receiving broadcast packets use the socket option SO_BROADCAST: +receiving broadcast packets use the socket option ``SO_BROADCAST``: .. code-block:: C @@ -261,26 +261,26 @@ The following diagram illustrates the RX path: +---------------------------+ TX path related options: -SO_J1939_SEND_PRIO - change default send priority for the socket +``SO_J1939_SEND_PRIO`` - change default send priority for the socket Message Flags during send() and Related System Calls ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -send(2), sendto(2) and sendmsg(2) take a 'flags' argument. Currently +``send(2)``, ``sendto(2)`` and ``sendmsg(2)`` take a 'flags' argument. Currently supported flags are: -* MSG_DONTWAIT, i.e. non-blocking operation. +* ``MSG_DONTWAIT``, i.e. non-blocking operation. recvmsg(2) ^^^^^^^^^^ -In most cases recvmsg(2) is needed if you want to extract more information than -recvfrom(2) can provide. For example package priority and timestamp. The +In most cases ``recvmsg(2)`` is needed if you want to extract more information than +``recvfrom(2)`` can provide. For example package priority and timestamp. The Destination Address, name and packet priority (if applicable) are attached to -the msghdr in the recvmsg(2) call. They can be extracted using cmsg(3) macros, -with cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR, -SCM_J1939_DEST_NAME or SCM_J1939_PRIO. The returned data is a uint8_t for -priority and dst_addr, and uint64_t for dst_name. +the msghdr in the ``recvmsg(2)`` call. They can be extracted using ``cmsg(3)`` macros, +with ``cmsg_level == SOL_J1939 && cmsg_type == SCM_J1939_DEST_ADDR``, +``SCM_J1939_DEST_NAME`` or ``SCM_J1939_PRIO``. The returned data is a ``uint8_t`` for +``priority`` and ``dst_addr``, and ``uint64_t`` for ``dst_name``. .. code-block:: C @@ -305,12 +305,12 @@ Dynamic Addressing Distinction has to be made between using the claimed address and doing an address claim. To use an already claimed address, one has to fill in the -j1939.name member and provide it to bind(2). If the name had claimed an address +``j1939.name`` member and provide it to ``bind(2)``. If the name had claimed an address earlier, all further messages being sent will use that address. And the -j1939.addr member will be ignored. +``j1939.addr`` member will be ignored. An exception on this is PGN 0x0ee00. This is the "Address Claim/Cannot Claim -Address" message and the kernel will use the j1939.addr member for that PGN if +Address" message and the kernel will use the ``j1939.addr`` member for that PGN if necessary. To claim an address following code example can be used: @@ -371,12 +371,12 @@ NAME can send packets. If another ECU claims the address, the kernel will mark the NAME-SA expired. 
No socket bound to the NAME can send packets (other than address claims). To -claim another address, some socket bound to NAME, must bind(2) again, but with -only j1939.addr changed to the new SA, and must then send a valid address claim +claim another address, some socket bound to NAME, must ``bind(2)`` again, but with +only ``j1939.addr`` changed to the new SA, and must then send a valid address claim packet. This restarts the state machine in the kernel (and any other participant on the bus) for this NAME. -can-utils also include the jacd tool, so it can be used as code example or as +``can-utils`` also include the ``j1939acd`` tool, so it can be used as code example or as default Address Claiming daemon. Send Examples @@ -403,8 +403,8 @@ Bind: bind(sock, (struct sockaddr *)&baddr, sizeof(baddr)); -Now, the socket 'sock' is bound to the SA 0x20. Since no connect(2) was called, -at this point we can use only sendto(2) or sendmsg(2). +Now, the socket 'sock' is bound to the SA 0x20. Since no ``connect(2)`` was called, +at this point we can use only ``sendto(2)`` or ``sendmsg(2)``. Send: @@ -414,8 +414,8 @@ Send: .can_family = AF_CAN, .can_addr.j1939 = { .name = J1939_NO_NAME; - .pgn = 0x30, - .addr = 0x12300, + .addr = 0x30, + .pgn = 0x12300, }, }; diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst index 8e15bc98830b..234abedc29b2 100644 --- a/Documentation/networking/statistics.rst +++ b/Documentation/networking/statistics.rst @@ -175,5 +175,4 @@ The following structures are internal to the kernel, their members are translated to netlink attributes when dumped. Drivers must not overwrite the statistics they don't report with 0. -.. kernel-doc:: include/linux/ethtool.h - :identifiers: ethtool_pause_stats +- ethtool_pause_stats() diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py index 409dbc4100de..3e81ebab26ed 100644 --- a/Documentation/sphinx/automarkup.py +++ b/Documentation/sphinx/automarkup.py @@ -16,28 +16,36 @@ import re from itertools import chain # +# Python 2 lacks re.ASCII... +# +try: + ascii_p3 = re.ASCII +except AttributeError: + ascii_p3 = 0 + +# # Regex nastiness. Of course. # Try to identify "function()" that's not already marked up some # other way. Sphinx doesn't like a lot of stuff right after a # :c:func: block (i.e. ":c:func:`mmap()`s" flakes out), so the last # bit tries to restrict matches to things that won't create trouble. # -RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=re.ASCII) +RE_function = re.compile(r'\b(([a-zA-Z_]\w+)\(\))', flags=ascii_p3) # # Sphinx 2 uses the same :c:type role for struct, union, enum and typedef # RE_generic_type = re.compile(r'\b(struct|union|enum|typedef)\s+([a-zA-Z_]\w+)', - flags=re.ASCII) + flags=ascii_p3) # # Sphinx 3 uses a different C role for each one of struct, union, enum and # typedef # -RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=re.ASCII) -RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=re.ASCII) -RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=re.ASCII) -RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=re.ASCII) +RE_struct = re.compile(r'\b(struct)\s+([a-zA-Z_]\w+)', flags=ascii_p3) +RE_union = re.compile(r'\b(union)\s+([a-zA-Z_]\w+)', flags=ascii_p3) +RE_enum = re.compile(r'\b(enum)\s+([a-zA-Z_]\w+)', flags=ascii_p3) +RE_typedef = re.compile(r'\b(typedef)\s+([a-zA-Z_]\w+)', flags=ascii_p3) # # Detects a reference to a documentation page of the form Documentation/... 
with diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst index 69fc5167e648..acd2cc2a538d 100644 --- a/Documentation/userspace-api/index.rst +++ b/Documentation/userspace-api/index.rst @@ -22,6 +22,7 @@ place where this information is gathered. spec_ctrl accelerators/ocxl ioctl/index + iommu media/index .. only:: subproject and html diff --git a/MAINTAINERS b/MAINTAINERS index 56051ace219b..0bd560eb5101 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -978,7 +978,7 @@ M: Michael Hennerich <Michael.Hennerich@analog.com> L: linux-iio@vger.kernel.org S: Supported W: http://ez.analog.com/community/linux-device-drivers -F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.txt +F: Documentation/devicetree/bindings/iio/adc/adi,ad7768-1.yaml F: drivers/iio/adc/ad7768-1.c ANALOG DEVICES INC AD7780 DRIVER @@ -3857,7 +3857,7 @@ M: Roger Quadros <rogerq@ti.com> L: linux-usb@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git -F: Documentation/devicetree/bindings/usb/cdns-usb3.txt +F: Documentation/devicetree/bindings/usb/cdns,usb3.yaml F: drivers/usb/cdns3/ CADET FM/AM RADIO RECEIVER DRIVER @@ -7923,7 +7923,7 @@ HISILICON LPC BUS DRIVER M: john.garry@huawei.com S: Maintained W: http://www.hisilicon.com -F: Documentation/devicetree/bindings/arm/hisilicon/hisilicon-low-pin-count.txt +F: Documentation/devicetree/bindings/arm/hisilicon/low-pin-count.yaml F: drivers/bus/hisi_lpc.c HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3) @@ -11170,7 +11170,7 @@ F: Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt F: drivers/input/touchscreen/melfas_mip4.c MELLANOX BLUEFIELD I2C DRIVER -M: Khalil Blaiech <kblaiech@mellanox.com> +M: Khalil Blaiech <kblaiech@nvidia.com> L: linux-i2c@vger.kernel.org S: Supported F: drivers/i2c/busses/i2c-mlxbf.c @@ -14534,6 +14534,14 @@ F: Documentation/devicetree/bindings/mailbox/qcom-ipcc.yaml F: drivers/mailbox/qcom-ipcc.c F: include/dt-bindings/mailbox/qcom-ipcc.h +QUALCOMM IPQ4019 VQMMC REGULATOR DRIVER +M: Robert Marko <robert.marko@sartura.hr> +M: Luka Perkov <luka.perkov@sartura.hr> +L: linux-arm-msm@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/regulator/vqmmc-ipq4019-regulator.yaml +F: drivers/regulator/vqmmc-ipq4019-regulator.c + QUALCOMM RMNET DRIVER M: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org> M: Sean Tranchetti <stranche@codeaurora.org> @@ -14889,7 +14897,6 @@ RENESAS ETHERNET DRIVERS R: Sergei Shtylyov <sergei.shtylyov@gmail.com> L: netdev@vger.kernel.org L: linux-renesas-soc@vger.kernel.org -F: Documentation/devicetree/bindings/net/renesas,*.txt F: Documentation/devicetree/bindings/net/renesas,*.yaml F: drivers/net/ethernet/renesas/ F: include/linux/sh_eth.h @@ -18090,7 +18097,7 @@ M: Yu Chen <chenyu56@huawei.com> M: Binghui Wang <wangbinghui@hisilicon.com> L: linux-usb@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt +F: Documentation/devicetree/bindings/phy/hisilicon,hi3660-usb3.yaml F: drivers/phy/hisilicon/phy-hi3660-usb3.c USB ISP116X DRIVER @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = -rc2 +EXTRAVERSION = -rc3 NAME = Kleptomaniac Octopus # *DOCUMENTATION* diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 17fd1ed700cc..9152782444b5 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S @@ -67,7 +67,22 @@ sr r5, [ARC_REG_LPB_CTRL] 1: #endif /* CONFIG_ARC_LPB_DISABLE */ -#endif + + /* On HSDK, CCMs need to remapped super early */ 
+#ifdef CONFIG_ARC_SOC_HSDK + mov r6, 0x60000000 + lr r5, [ARC_REG_ICCM_BUILD] + breq r5, 0, 1f + sr r6, [ARC_REG_AUX_ICCM] +1: + lr r5, [ARC_REG_DCCM_BUILD] + breq r5, 0, 2f + sr r6, [ARC_REG_AUX_DCCM] +2: +#endif /* CONFIG_ARC_SOC_HSDK */ + +#endif /* CONFIG_ISA_ARCV2 */ + ; Config DSP_CTRL properly, so kernel may use integer multiply, ; multiply-accumulate, and divide operations DSP_EARLY_INIT diff --git a/arch/arc/kernel/stacktrace.c b/arch/arc/kernel/stacktrace.c index feba91c9d969..b23986f98450 100644 --- a/arch/arc/kernel/stacktrace.c +++ b/arch/arc/kernel/stacktrace.c @@ -112,7 +112,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, int (*consumer_fn) (unsigned int, void *), void *arg) { #ifdef CONFIG_ARC_DW2_UNWIND - int ret = 0; + int ret = 0, cnt = 0; unsigned int address; struct unwind_frame_info frame_info; @@ -132,6 +132,11 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs, break; frame_info.regs.r63 = frame_info.regs.r31; + + if (cnt++ > 128) { + printk("unwinder looping too long, aborting !\n"); + return 0; + } } return address; /* return the last address it saw */ diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index 0b63fc095b99..b3ea1fa11f87 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c @@ -17,22 +17,6 @@ int arc_hsdk_axi_dmac_coherent __section(".data") = 0; #define ARC_CCM_UNUSED_ADDR 0x60000000 -static void __init hsdk_init_per_cpu(unsigned int cpu) -{ - /* - * By default ICCM is mapped to 0x7z while this area is used for - * kernel virtual mappings, so move it to currently unused area. - */ - if (cpuinfo_arc700[cpu].iccm.sz) - write_aux_reg(ARC_REG_AUX_ICCM, ARC_CCM_UNUSED_ADDR); - - /* - * By default DCCM is mapped to 0x8z while this area is used by kernel, - * so move it to currently unused area. 
- */ - if (cpuinfo_arc700[cpu].dccm.sz) - write_aux_reg(ARC_REG_AUX_DCCM, ARC_CCM_UNUSED_ADDR); -} #define ARC_PERIPHERAL_BASE 0xf0000000 #define CREG_BASE (ARC_PERIPHERAL_BASE + 0x1000) @@ -339,5 +323,4 @@ static const char *hsdk_compat[] __initconst = { MACHINE_START(SIMULATION, "hsdk") .dt_compat = hsdk_compat, .init_early = hsdk_init_early, - .init_per_cpu = hsdk_init_per_cpu, MACHINE_END diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d57112a276f5..c23dbf8bebee 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -354,8 +354,8 @@ static void __init free_highpages(void) /* set highmem page free */ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &range_start, &range_end, NULL) { - unsigned long start = PHYS_PFN(range_start); - unsigned long end = PHYS_PFN(range_end); + unsigned long start = PFN_UP(range_start); + unsigned long end = PFN_DOWN(range_end); /* Ignore complete lowmem entries */ if (end <= max_low) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 1d466addb078..1515f6f153a0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1002,7 +1002,7 @@ config NUMA config NODES_SHIFT int "Maximum NUMA Nodes (as a power of 2)" range 1 10 - default "2" + default "4" depends on NEED_MULTIPLE_NODES help Specify the maximum number of NUMA Nodes available on the target diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h index e3d47b52161d..ec7720dbe2c8 100644 --- a/arch/arm64/include/asm/brk-imm.h +++ b/arch/arm64/include/asm/brk-imm.h @@ -10,6 +10,7 @@ * #imm16 values used for BRK instruction generation * 0x004: for installing kprobes * 0x005: for installing uprobes + * 0x006: for kprobe software single-step * Allowed values for kgdb are 0x400 - 0x7ff * 0x100: for triggering a fault on purpose (reserved) * 0x400: for dynamic BRK instruction @@ -19,6 +20,7 @@ */ #define KPROBES_BRK_IMM 0x004 #define UPROBES_BRK_IMM 0x005 +#define KPROBES_BRK_SS_IMM 0x006 #define FAULT_BRK_IMM 0x100 #define KGDB_DYN_DBG_BRK_IMM 0x400 #define KGDB_COMPILED_DBG_BRK_IMM 0x401 diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index 0b298f48f5bf..657c921fd784 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -53,6 +53,7 @@ /* kprobes BRK opcodes with ESR encoding */ #define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5)) +#define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5)) /* uprobes BRK opcodes with ESR encoding */ #define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5)) diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h index 97e511d645a2..8699ce30f587 100644 --- a/arch/arm64/include/asm/kprobes.h +++ b/arch/arm64/include/asm/kprobes.h @@ -16,7 +16,7 @@ #include <linux/percpu.h> #define __ARCH_WANT_KPROBES_INSN_SLOT -#define MAX_INSN_SIZE 1 +#define MAX_INSN_SIZE 2 #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c index af9987c154ca..66adee8b5fc8 100644 --- a/arch/arm64/kernel/kexec_image.c +++ b/arch/arm64/kernel/kexec_image.c @@ -43,7 +43,7 @@ static void *image_load(struct kimage *image, u64 flags, value; bool be_image, be_kernel; struct kexec_buf kbuf; - unsigned long text_offset; + unsigned long text_offset, kernel_segment_number; struct kexec_segment *kernel_segment; int ret; @@ -88,11 +88,37 @@ static void *image_load(struct kimage *image, /* 
Adjust kernel segment with TEXT_OFFSET */ kbuf.memsz += text_offset; - ret = kexec_add_buffer(&kbuf); - if (ret) + kernel_segment_number = image->nr_segments; + + /* + * The location of the kernel segment may make it impossible to satisfy + * the other segment requirements, so we try repeatedly to find a + * location that will work. + */ + while ((ret = kexec_add_buffer(&kbuf)) == 0) { + /* Try to load additional data */ + kernel_segment = &image->segment[kernel_segment_number]; + ret = load_other_segments(image, kernel_segment->mem, + kernel_segment->memsz, initrd, + initrd_len, cmdline); + if (!ret) + break; + + /* + * We couldn't find space for the other segments; erase the + * kernel segment and try the next available hole. + */ + image->nr_segments -= 1; + kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; + } + + if (ret) { + pr_err("Could not find any suitable kernel location!"); return ERR_PTR(ret); + } - kernel_segment = &image->segment[image->nr_segments - 1]; + kernel_segment = &image->segment[kernel_segment_number]; kernel_segment->mem += text_offset; kernel_segment->memsz -= text_offset; image->start = kernel_segment->mem; @@ -101,12 +127,7 @@ static void *image_load(struct kimage *image, kernel_segment->mem, kbuf.bufsz, kernel_segment->memsz); - /* Load additional data */ - ret = load_other_segments(image, - kernel_segment->mem, kernel_segment->memsz, - initrd, initrd_len, cmdline); - - return ERR_PTR(ret); + return 0; } #ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 5b0e67b93cdc..03210f644790 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -240,6 +240,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz) return ret; } +/* + * Tries to add the initrd and DTB to the image. If it is not possible to find + * valid locations, this function will undo changes to the image and return non + * zero. 
+ */ int load_other_segments(struct kimage *image, unsigned long kernel_load_addr, unsigned long kernel_size, @@ -248,7 +253,8 @@ int load_other_segments(struct kimage *image, { struct kexec_buf kbuf; void *headers, *dtb = NULL; - unsigned long headers_sz, initrd_load_addr = 0, dtb_len; + unsigned long headers_sz, initrd_load_addr = 0, dtb_len, + orig_segments = image->nr_segments; int ret = 0; kbuf.image = image; @@ -334,6 +340,7 @@ int load_other_segments(struct kimage *image, return 0; out_err: + image->nr_segments = orig_segments; vfree(dtb); return ret; } diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index deba738142ed..f11a1a1f7026 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -36,25 +36,16 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); static void __kprobes post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); -static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) -{ - void *addrs[1]; - u32 insns[1]; - - addrs[0] = addr; - insns[0] = opcode; - - return aarch64_insn_patch_text(addrs, insns, 1); -} - static void __kprobes arch_prepare_ss_slot(struct kprobe *p) { + kprobe_opcode_t *addr = p->ainsn.api.insn; + void *addrs[] = {addr, addr + 1}; + u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS}; + /* prepare insn slot */ - patch_text(p->ainsn.api.insn, p->opcode); + aarch64_insn_patch_text(addrs, insns, 2); - flush_icache_range((uintptr_t) (p->ainsn.api.insn), - (uintptr_t) (p->ainsn.api.insn) + - MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE)); /* * Needs restoring of return address after stepping xol. @@ -128,13 +119,18 @@ void *alloc_insn_page(void) /* arm kprobe: install breakpoint in text */ void __kprobes arch_arm_kprobe(struct kprobe *p) { - patch_text(p->addr, BRK64_OPCODE_KPROBES); + void *addr = p->addr; + u32 insn = BRK64_OPCODE_KPROBES; + + aarch64_insn_patch_text(&addr, &insn, 1); } /* disarm kprobe: remove breakpoint from text */ void __kprobes arch_disarm_kprobe(struct kprobe *p) { - patch_text(p->addr, p->opcode); + void *addr = p->addr; + + aarch64_insn_patch_text(&addr, &p->opcode, 1); } void __kprobes arch_remove_kprobe(struct kprobe *p) @@ -163,20 +159,15 @@ static void __kprobes set_current_kprobe(struct kprobe *p) } /* - * Interrupts need to be disabled before single-step mode is set, and not - * reenabled until after single-step mode ends. - * Without disabling interrupt on local CPU, there is a chance of - * interrupt occurrence in the period of exception return and start of - * out-of-line single-step, that result in wrongly single stepping - * into the interrupt handler. + * Mask all of DAIF while executing the instruction out-of-line, to keep things + * simple and avoid nesting exceptions. Interrupts do have to be disabled since + * the kprobe state is per-CPU and doesn't get migrated. */ static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, struct pt_regs *regs) { kcb->saved_irqflag = regs->pstate & DAIF_MASK; - regs->pstate |= PSR_I_BIT; - /* Unmask PSTATE.D for enabling software step exceptions. */ - regs->pstate &= ~PSR_D_BIT; + regs->pstate |= DAIF_MASK; } static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, @@ -219,10 +210,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, slot = (unsigned long)p->ainsn.api.insn; set_ss_context(kcb, slot); /* mark pending ss */ - - /* IRQs and single stepping do not mix well. 
*/ kprobes_save_local_irqflag(kcb, regs); - kernel_enable_single_step(regs); instruction_pointer_set(regs, slot); } else { /* insn simulation */ @@ -273,12 +261,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) } /* call post handler */ kcb->kprobe_status = KPROBE_HIT_SSDONE; - if (cur->post_handler) { - /* post_handler can hit breakpoint and single step - * again, so we enable D-flag for recursive exception. - */ + if (cur->post_handler) cur->post_handler(cur, regs, 0); - } reset_current_kprobe(); } @@ -302,8 +286,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) if (!instruction_pointer(regs)) BUG(); - kernel_disable_single_step(); - if (kcb->kprobe_status == KPROBE_REENTER) restore_previous_kprobe(kcb); else @@ -365,10 +347,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs) * pre-handler and it returned non-zero, it will * modify the execution path and no need to single * stepping. Let's just reset current kprobe and exit. - * - * pre_handler can hit a breakpoint and can step thru - * before return, keep PSTATE D-flag enabled until - * pre_handler return back. */ if (!p->pre_handler || !p->pre_handler(p, regs)) { setup_singlestep(p, regs, kcb, 0); @@ -399,7 +377,7 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr) } static int __kprobes -kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) +kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); int retval; @@ -409,16 +387,15 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) if (retval == DBG_HOOK_HANDLED) { kprobes_restore_local_irqflag(kcb, regs); - kernel_disable_single_step(); - post_kprobe_handler(kcb, regs); } return retval; } -static struct step_hook kprobes_step_hook = { - .fn = kprobe_single_step_handler, +static struct break_hook kprobes_break_ss_hook = { + .imm = KPROBES_BRK_SS_IMM, + .fn = kprobe_breakpoint_ss_handler, }; static int __kprobes @@ -486,7 +463,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) int __init arch_init_kprobes(void) { register_kernel_break_hook(&kprobes_break_hook); - register_kernel_step_hook(&kprobes_step_hook); + register_kernel_break_hook(&kprobes_break_ss_hook); return 0; } diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h index 85ed2390fb99..567cdc557402 100644 --- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h @@ -63,7 +63,7 @@ static inline void restore_user_access(unsigned long flags) static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) { - return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xf0000000), + return WARN(!((regs->kuap ^ MD_APG_KUAP) & 0xff000000), "Bug: fault blocked by AP register !"); } diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h index 1d9ac0f9c794..0bd1b144eb76 100644 --- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -33,19 +33,18 @@ * respectively NA for All or X for Supervisor and no access for User. * Then we use the APG to say whether accesses are according to Page rules or * "all Supervisor" rules (Access to all) - * Therefore, we define 2 APG groups. 
lsb is _PMD_USER - * 0 => Kernel => 01 (all accesses performed according to page definition) - * 1 => User => 00 (all accesses performed as supervisor iaw page definition) - * 2-15 => Not Used - */ -#define MI_APG_INIT 0x40000000 - -/* - * 0 => Kernel => 01 (all accesses performed according to page definition) - * 1 => User => 10 (all accesses performed according to swaped page definition) - * 2-15 => Not Used - */ -#define MI_APG_KUEP 0x60000000 + * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say + * "all User" rules, that will lead to NA for all. + * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED + * 0 => Kernel => 11 (all accesses performed according as user iaw page definition) + * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition) + * 2 => User => 11 (all accesses performed according as user iaw page definition) + * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT + * => 10 (all accesses performed according to swaped page definition) for KUEP + * 4-15 => Not Used + */ +#define MI_APG_INIT 0xdc000000 +#define MI_APG_KUEP 0xde000000 /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MI_RPN is written, bits in @@ -106,25 +105,9 @@ #define MD_Ks 0x80000000 /* Should not be set */ #define MD_Kp 0x40000000 /* Should always be set */ -/* - * All pages' PP data bits are set to either 000 or 011 or 001, which means - * respectively RW for Supervisor and no access for User, or RO for - * Supervisor and no access for user and NA for ALL. - * Then we use the APG to say whether accesses are according to Page rules or - * "all Supervisor" rules (Access to all) - * Therefore, we define 2 APG groups. lsb is _PMD_USER - * 0 => Kernel => 01 (all accesses performed according to page definition) - * 1 => User => 00 (all accesses performed as supervisor iaw page definition) - * 2-15 => Not Used - */ -#define MD_APG_INIT 0x40000000 - -/* - * 0 => No user => 01 (all accesses performed according to page definition) - * 1 => User => 10 (all accesses performed according to swaped page definition) - * 2-15 => Not Used - */ -#define MD_APG_KUAP 0x60000000 +/* See explanation above at the definition of MI_APG_INIT */ +#define MD_APG_INIT 0xdc000000 +#define MD_APG_KUAP 0xde000000 /* The effective page number register. When read, contains the information * about the last instruction TLB miss. When MD_RPN is written, bits in diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h index 66f403a7da44..1581204467e1 100644 --- a/arch/powerpc/include/asm/nohash/32/pte-8xx.h +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -39,9 +39,9 @@ * into the TLB. 
*/ #define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */ -#define _PAGE_SPECIAL 0x0020 /* SW entry */ +#define _PAGE_ACCESSED 0x0020 /* Copied to L1 APG 1 entry in I/DTLB */ #define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */ -#define _PAGE_ACCESSED 0x0080 /* software: page referenced */ +#define _PAGE_SPECIAL 0x0080 /* SW entry */ #define _PAGE_NA 0x0200 /* Supervisor NA, User no access */ #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */ @@ -59,11 +59,12 @@ #define _PMD_PRESENT 0x0001 #define _PMD_PRESENT_MASK _PMD_PRESENT -#define _PMD_BAD 0x0fd0 +#define _PMD_BAD 0x0f90 #define _PMD_PAGE_MASK 0x000c #define _PMD_PAGE_8M 0x000c #define _PMD_PAGE_512K 0x0004 -#define _PMD_USER 0x0020 /* APG 1 */ +#define _PMD_ACCESSED 0x0020 /* APG 1 */ +#define _PMD_USER 0x0040 /* APG 2 */ #define _PTE_NONE_MASK 0 diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index 8728590f514a..3beeb030cd78 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h @@ -6,6 +6,7 @@ struct device; struct device_node; +struct drmem_lmb; #ifdef CONFIG_NUMA @@ -61,6 +62,9 @@ static inline int early_cpu_to_node(int cpu) */ return (nid < 0) ? 0 : nid; } + +int of_drconf_to_nid_single(struct drmem_lmb *lmb); + #else static inline int early_cpu_to_node(int cpu) { return 0; } @@ -84,10 +88,12 @@ static inline int cpu_distance(__be32 *cpu1_assoc, __be32 *cpu2_assoc) return 0; } -#endif /* CONFIG_NUMA */ +static inline int of_drconf_to_nid_single(struct drmem_lmb *lmb) +{ + return first_online_node; +} -struct drmem_lmb; -int of_drconf_to_nid_single(struct drmem_lmb *lmb); +#endif /* CONFIG_NUMA */ #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) extern int find_and_online_cpu_nid(int cpu); diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index ef5bbb705c08..501c9a79038c 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -178,7 +178,7 @@ do { \ * are no aliasing issues. */ #define __put_user_asm_goto(x, addr, label, op) \ - asm volatile goto( \ + asm_volatile_goto( \ "1: " op "%U1%X1 %0,%1 # put_user\n" \ EX_TABLE(1b, %l2) \ : \ @@ -191,7 +191,7 @@ do { \ __put_user_asm_goto(x, ptr, label, "std") #else /* __powerpc64__ */ #define __put_user_asm2_goto(x, addr, label) \ - asm volatile goto( \ + asm_volatile_goto( \ "1: stw%X1 %0, %1\n" \ "2: stw%X1 %L0, %L1\n" \ EX_TABLE(1b, %l2) \ diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c index 6b50bf15d8c1..bf3270426d82 100644 --- a/arch/powerpc/kernel/eeh_cache.c +++ b/arch/powerpc/kernel/eeh_cache.c @@ -264,8 +264,9 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v) { struct pci_io_addr_range *piar; struct rb_node *n; + unsigned long flags; - spin_lock(&pci_io_addr_cache_root.piar_lock); + spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); for (n = rb_first(&pci_io_addr_cache_root.rb_root); n; n = rb_next(n)) { piar = rb_entry(n, struct pci_io_addr_range, rb_node); @@ -273,7 +274,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v) (piar->flags & IORESOURCE_IO) ? 
"i/o" : "mem", &piar->addr_lo, &piar->addr_hi, pci_name(piar->pcidev)); } - spin_unlock(&pci_io_addr_cache_root.piar_lock); + spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); return 0; } diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 44c9018aed1b..a1ae00689e0f 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -284,11 +284,7 @@ _ENTRY(saved_ksp_limit) rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ lwz r11, 0(r11) /* Get Linux PTE */ -#ifdef CONFIG_SWAP li r9, _PAGE_PRESENT | _PAGE_ACCESSED -#else - li r9, _PAGE_PRESENT -#endif andc. r9, r9, r11 /* Check permission */ bne 5f @@ -369,11 +365,7 @@ _ENTRY(saved_ksp_limit) rlwimi r11, r10, 22, 20, 29 /* Compute PTE address */ lwz r11, 0(r11) /* Get Linux PTE */ -#ifdef CONFIG_SWAP li r9, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC -#else - li r9, _PAGE_PRESENT | _PAGE_EXEC -#endif andc. r9, r9, r11 /* Check permission */ bne 5f diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 9f359d3fba74..ee0bfebc375f 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S @@ -202,9 +202,7 @@ SystemCall: InstructionTLBMiss: mtspr SPRN_SPRG_SCRATCH0, r10 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS) mtspr SPRN_SPRG_SCRATCH1, r11 -#endif /* If we are faulting a kernel address, we have to use the * kernel page tables. @@ -224,25 +222,13 @@ InstructionTLBMiss: 3: mtcr r11 #endif -#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT) lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ mtspr SPRN_MD_TWC, r11 -#else - lwz r10, (swapper_pg_dir-PAGE_OFFSET)@l(r10) /* Get level 1 entry */ - mtspr SPRN_MI_TWC, r10 /* Set segment attributes */ - mtspr SPRN_MD_TWC, r10 -#endif mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ -#if defined(CONFIG_HUGETLBFS) || !defined(CONFIG_PIN_TLB_TEXT) + rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K mtspr SPRN_MI_TWC, r11 -#endif -#ifdef CONFIG_SWAP - rlwinm r11, r10, 32-5, _PAGE_PRESENT - and r11, r11, r10 - rlwimi r10, r11, 0, _PAGE_PRESENT -#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 20 and 23 must be clear. * Software indicator bits 22, 24, 25, 26, and 27 must be @@ -256,9 +242,7 @@ InstructionTLBMiss: /* Restore registers */ 0: mfspr r10, SPRN_SPRG_SCRATCH0 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) || defined(CONFIG_HUGETLBFS) mfspr r11, SPRN_SPRG_SCRATCH1 -#endif rfi patch_site 0b, patch__itlbmiss_exit_1 @@ -268,9 +252,7 @@ InstructionTLBMiss: addi r10, r10, 1 stw r10, (itlb_miss_counter - PAGE_OFFSET)@l(0) mfspr r10, SPRN_SPRG_SCRATCH0 -#if defined(ITLB_MISS_KERNEL) || defined(CONFIG_SWAP) mfspr r11, SPRN_SPRG_SCRATCH1 -#endif rfi #endif @@ -297,30 +279,16 @@ DataStoreTLBMiss: mfspr r10, SPRN_MD_TWC lwz r10, 0(r10) /* Get the pte */ - /* Insert the Guarded flag into the TWC from the Linux PTE. + /* Insert Guarded and Accessed flags into the TWC from the Linux PTE. * It is bit 27 of both the Linux PTE and the TWC (at least * I got that right :-). It will be better when we can put * this into the Linux pgd/pmd and load it in the operation * above. */ - rlwimi r11, r10, 0, _PAGE_GUARDED + rlwimi r11, r10, 0, _PAGE_GUARDED | _PAGE_ACCESSED rlwimi r11, r10, 32 - 9, _PMD_PAGE_512K mtspr SPRN_MD_TWC, r11 - /* Both _PAGE_ACCESSED and _PAGE_PRESENT has to be set. 
- * We also need to know if the insn is a load/store, so: - * Clear _PAGE_PRESENT and load that which will - * trap into DTLB Error with store bit set accordinly. - */ - /* PRESENT=0x1, ACCESSED=0x20 - * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5)); - * r10 = (r10 & ~PRESENT) | r11; - */ -#ifdef CONFIG_SWAP - rlwinm r11, r10, 32-5, _PAGE_PRESENT - and r11, r11, r10 - rlwimi r10, r11, 0, _PAGE_PRESENT -#endif /* The Linux PTE won't go exactly into the MMU TLB. * Software indicator bits 24, 25, 26, and 27 must be * set. All other Linux PTE bits control the behavior @@ -711,7 +679,7 @@ initial_mmu: li r9, 4 /* up to 4 pages of 8M */ mtctr r9 lis r9, KERNELBASE@h /* Create vaddr for TLB */ - li r10, MI_PS8MEG | MI_SVALID /* Set 8M byte page */ + li r10, MI_PS8MEG | _PMD_ACCESSED | MI_SVALID li r11, MI_BOOTINIT /* Create RPN for address 0 */ 1: mtspr SPRN_MI_CTR, r8 /* Set instruction MMU control */ @@ -775,7 +743,7 @@ _GLOBAL(mmu_pin_tlb) #ifdef CONFIG_PIN_TLB_TEXT LOAD_REG_IMMEDIATE(r5, 28 << 8) LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) - LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG) + LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT) LOAD_REG_ADDR(r9, _sinittext) li r0, 4 @@ -797,7 +765,7 @@ _GLOBAL(mmu_pin_tlb) LOAD_REG_IMMEDIATE(r5, 28 << 8 | MD_TWAM) #ifdef CONFIG_PIN_TLB_DATA LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET) - LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG) + LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED) #ifdef CONFIG_PIN_TLB_IMMR li r0, 3 #else @@ -834,7 +802,7 @@ _GLOBAL(mmu_pin_tlb) #endif #ifdef CONFIG_PIN_TLB_IMMR LOAD_REG_IMMEDIATE(r0, VIRT_IMMR_BASE | MD_EVALID) - LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED) + LOAD_REG_IMMEDIATE(r7, MD_SVALID | MD_PS512K | MD_GUARDED | _PMD_ACCESSED) mfspr r8, SPRN_IMMR rlwinm r8, r8, 0, 0xfff80000 ori r8, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | \ diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S index 5eb9eedac920..2aa16d5368e1 100644 --- a/arch/powerpc/kernel/head_book3s_32.S +++ b/arch/powerpc/kernel/head_book3s_32.S @@ -457,11 +457,7 @@ InstructionTLBMiss: cmplw 0,r1,r3 #endif mfspr r2, SPRN_SPRG_PGDIR -#ifdef CONFIG_SWAP li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC -#else - li r1,_PAGE_PRESENT | _PAGE_EXEC -#endif #if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ @@ -523,11 +519,7 @@ DataLoadTLBMiss: lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR -#ifdef CONFIG_SWAP li r1, _PAGE_PRESENT | _PAGE_ACCESSED -#else - li r1, _PAGE_PRESENT -#endif bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ @@ -603,11 +595,7 @@ DataStoreTLBMiss: lis r1, TASK_SIZE@h /* check if kernel address */ cmplw 0,r1,r3 mfspr r2, SPRN_SPRG_PGDIR -#ifdef CONFIG_SWAP li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED -#else - li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT -#endif bgt- 112f lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */ addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */ diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 3c6b9822f978..8c2857cbd960 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -1393,13 +1393,14 @@ static void add_cpu_to_masks(int cpu) /* 
Activate a secondary processor. */ void start_secondary(void *unused) { - unsigned int cpu = smp_processor_id(); + unsigned int cpu = raw_smp_processor_id(); mmgrab(&init_mm); current->active_mm = &init_mm; smp_store_cpu_info(cpu); set_dec(tb_ticks_per_jiffy); + rcu_cpu_starting(cpu); preempt_disable(); cpu_callin_map[cpu] = 1; diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index c47e6b35c551..824b2c9da75b 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -476,7 +476,7 @@ do { \ do { \ long __kr_err; \ \ - __put_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \ + __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \ if (unlikely(__kr_err)) \ goto err_label; \ } while (0) diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 99e12faa5498..765b62434f30 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Linaro Limited * Author: AKASHI Takahiro <takahiro.akashi@linaro.org> diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 11e2a4fe66e0..7e849797c9c3 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -35,6 +35,10 @@ ENTRY(_start) .word 0 #endif .balign 8 +#ifdef CONFIG_RISCV_M_MODE + /* Image load offset (0MB) from start of RAM for M-mode */ + .dword 0 +#else #if __riscv_xlen == 64 /* Image load offset(2MB) from start of RAM */ .dword 0x200000 @@ -42,6 +46,7 @@ ENTRY(_start) /* Image load offset(4MB) from start of RAM */ .dword 0x400000 #endif +#endif /* Effective size of kernel image */ .dword _end - _start .dword __HEAD_FLAGS diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore index 11ebee9e4c1d..3a19def868ec 100644 --- a/arch/riscv/kernel/vdso/.gitignore +++ b/arch/riscv/kernel/vdso/.gitignore @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only vdso.lds *.tmp +vdso-syms.S diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 7d6a94d45ec9..cb8f9e4cfcbf 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -43,19 +43,14 @@ $(obj)/vdso.o: $(obj)/vdso.so SYSCFLAGS_vdso.so.dbg = $(c_flags) $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + -Wl,--build-id -Wl,--hash-style=both # We also create a special relocatable object that should mirror the symbol # table and layout of the linked DSO. With ld --just-symbols we can then # refer to these symbols in the kernel code rather than hand-coded addresses. - -SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ - -Wl,--build-id=sha1 -Wl,--hash-style=both -$(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE - $(call if_changed,vdsold) - -LDFLAGS_vdso-syms.o := -r --just-symbols -$(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE - $(call if_changed,ld) +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) # strip rule for the .so file $(obj)/%.so: OBJCOPYFLAGS := -S @@ -73,6 +68,11 @@ quiet_cmd_vdsold = VDSOLD $@ $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ rm $@.tmp +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. 
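# Illustrative check of the so2s conversion described above (not part of the
# patch); the symbol name and offset are invented for the example and GNU sed
# is assumed for the \n escape in the replacement. From arch/riscv/kernel/vdso/:
#
#   printf '00000000000007a0 T __vdso_gettimeofday@@LINUX_4.15\n' | sh so2s.sh
#
# prints assembly that pins the same symbol at the same offset:
#
#   .global __vdso_gettimeofday
#   .set __vdso_gettimeofday,0x00000000000007a0
#
# The trailing grep '^\.' in so2s.sh (added below) drops any nm output lines
# that the substitution did not touch.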
+quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + # install commands for the unstripped file quiet_cmd_vdso_install = INSTALL $@ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh new file mode 100755 index 000000000000..e64cb6d9440e --- /dev/null +++ b/arch/riscv/kernel/vdso/so2s.sh @@ -0,0 +1,6 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ +# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com> + +sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \ +| grep '^\.' diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 1359e21c0c62..3c8b9e433c67 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -86,6 +86,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a pmd_t *pmd, *pmd_k; pte_t *pte_k; int index; + unsigned long pfn; /* User mode accesses just cause a SIGSEGV */ if (user_mode(regs)) @@ -100,7 +101,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a * of a task switch. */ index = pgd_index(addr); - pgd = (pgd_t *)pfn_to_virt(csr_read(CSR_SATP)) + index; + pfn = csr_read(CSR_SATP) & SATP_PPN; + pgd = (pgd_t *)pfn_to_virt(pfn) + index; pgd_k = init_mm.pgd + index; if (!pgd_present(*pgd_k)) { diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index ea933b789a88..8e577f14f120 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -154,9 +154,8 @@ disable: void __init setup_bootmem(void) { - phys_addr_t mem_size = 0; - phys_addr_t total_mem = 0; - phys_addr_t mem_start, start, end = 0; + phys_addr_t mem_start = 0; + phys_addr_t start, end = 0; phys_addr_t vmlinux_end = __pa_symbol(&_end); phys_addr_t vmlinux_start = __pa_symbol(&_start); u64 i; @@ -164,21 +163,18 @@ void __init setup_bootmem(void) /* Find the memory region containing the kernel */ for_each_mem_range(i, &start, &end) { phys_addr_t size = end - start; - if (!total_mem) + if (!mem_start) mem_start = start; if (start <= vmlinux_start && vmlinux_end <= end) BUG_ON(size == 0); - total_mem = total_mem + size; } /* - * Remove memblock from the end of usable area to the - * end of region + * The maximal physical memory size is -PAGE_OFFSET. + * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is removed + * as it is unusable by kernel. 
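 * (Worked example, assuming the 64-bit sv39 default PAGE_OFFSET of
 * 0xffffffe000000000: taken as an unsigned value, -PAGE_OFFSET evaluates to
 * 0x2000000000 bytes, i.e. 128GiB, so that is the upper bound on usable
 * physical memory referred to above.)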
*/ - mem_size = min(total_mem, (phys_addr_t)-PAGE_OFFSET); - if (mem_start + mem_size < end) - memblock_remove(mem_start + mem_size, - end - mem_start - mem_size); + memblock_enforce_memory_limit(mem_start - PAGE_OFFSET); /* Reserve from the start of the kernel to the end of the kernel */ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start); @@ -297,6 +293,7 @@ pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss; #define NUM_EARLY_PMDS (1UL + MAX_EARLY_MAPPING_SIZE / PGDIR_SIZE) #endif pmd_t early_pmd[PTRS_PER_PMD * NUM_EARLY_PMDS] __initdata __aligned(PAGE_SIZE); +pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE); static pmd_t *__init get_pmd_virt_early(phys_addr_t pa) { @@ -494,6 +491,18 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) load_pa + (va - PAGE_OFFSET), map_size, PAGE_KERNEL_EXEC); +#ifndef __PAGETABLE_PMD_FOLDED + /* Setup early PMD for DTB */ + create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, + (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE); + /* Create two consecutive PMD mappings for FDT early scan */ + pa = dtb_pa & ~(PMD_SIZE - 1); + create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA, + pa, PMD_SIZE, PAGE_KERNEL); + create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE, + pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL); + dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1)); +#else /* Create two consecutive PGD mappings for FDT early scan */ pa = dtb_pa & ~(PGDIR_SIZE - 1); create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA, @@ -501,6 +510,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa) create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE, pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL); dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1)); +#endif dtb_early_pa = dtb_pa; /* diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 0784bf3caf43..a4d3c578fbd8 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -93,9 +93,10 @@ CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y CONFIG_CMA_DEBUG=y CONFIG_CMA_DEBUGFS=y +CONFIG_CMA_AREAS=7 CONFIG_MEM_SOFT_DIRTY=y CONFIG_ZSWAP=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_IDLE_PAGE_TRACKING=y @@ -378,7 +379,6 @@ CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m -# CONFIG_NET_DROP_MONITOR is not set CONFIG_PCI=y # CONFIG_PCIEASPM is not set CONFIG_PCI_DEBUG=y @@ -386,7 +386,7 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_S390=y CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=y -CONFIG_ZRAM=m +CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m @@ -689,6 +689,7 @@ CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECRDSA=m +CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_CHACHA20POLY1305=m @@ -709,7 +710,6 @@ CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m @@ -753,6 +753,7 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y +CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CORDIC=m CONFIG_CRC32_SELFTEST=y CONFIG_CRC4=m @@ -829,6 +830,7 @@ CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m CONFIG_FAULT_INJECTION=y CONFIG_FAILSLAB=y CONFIG_FAIL_PAGE_ALLOC=y +CONFIG_FAULT_INJECTION_USERCOPY=y CONFIG_FAIL_MAKE_REQUEST=y CONFIG_FAIL_IO_TIMEOUT=y CONFIG_FAIL_FUTEX=y diff --git 
a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 905bc8c4cfaf..17d5df2c1eff 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -87,9 +87,10 @@ CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y +CONFIG_CMA_AREAS=7 CONFIG_MEM_SOFT_DIRTY=y CONFIG_ZSWAP=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC=y CONFIG_ZSMALLOC_STAT=y CONFIG_DEFERRED_STRUCT_PAGE_INIT=y CONFIG_IDLE_PAGE_TRACKING=y @@ -371,7 +372,6 @@ CONFIG_NETLINK_DIAG=m CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_NET_PKTGEN=m -# CONFIG_NET_DROP_MONITOR is not set CONFIG_PCI=y # CONFIG_PCIEASPM is not set CONFIG_HOTPLUG_PCI=y @@ -379,7 +379,7 @@ CONFIG_HOTPLUG_PCI_S390=y CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_CONNECTOR=y -CONFIG_ZRAM=m +CONFIG_ZRAM=y CONFIG_BLK_DEV_LOOP=m CONFIG_BLK_DEV_CRYPTOLOOP=m CONFIG_BLK_DEV_DRBD=m @@ -680,6 +680,7 @@ CONFIG_CRYPTO_TEST=m CONFIG_CRYPTO_DH=m CONFIG_CRYPTO_ECDH=m CONFIG_CRYPTO_ECRDSA=m +CONFIG_CRYPTO_SM2=m CONFIG_CRYPTO_CURVE25519=m CONFIG_CRYPTO_GCM=y CONFIG_CRYPTO_CHACHA20POLY1305=m @@ -701,7 +702,6 @@ CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_RMD256=m CONFIG_CRYPTO_RMD320=m CONFIG_CRYPTO_SHA3=m -CONFIG_CRYPTO_SM3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES_TI=m @@ -745,6 +745,7 @@ CONFIG_CRYPTO_DES_S390=m CONFIG_CRYPTO_AES_S390=m CONFIG_CRYPTO_GHASH_S390=m CONFIG_CRYPTO_CRC32_S390=y +CONFIG_CRYPTO_DEV_VIRTIO=m CONFIG_CORDIC=m CONFIG_PRIME_NUMBERS=m CONFIG_CRC4=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index 8f67c55625f9..a302630341ef 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -17,11 +17,11 @@ CONFIG_HZ_100=y # CONFIG_CHSC_SCH is not set # CONFIG_SCM_BUS is not set CONFIG_CRASH_DUMP=y -# CONFIG_SECCOMP is not set # CONFIG_PFAULT is not set # CONFIG_S390_HYPFS_FS is not set # CONFIG_VIRTUALIZATION is not set # CONFIG_S390_GUEST is not set +# CONFIG_SECCOMP is not set CONFIG_PARTITION_ADVANCED=y CONFIG_IBM_PARTITION=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 6b8d8c69b1a1..b5dbae78969b 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -692,16 +692,6 @@ static inline int pud_large(pud_t pud) return !!(pud_val(pud) & _REGION3_ENTRY_LARGE); } -static inline unsigned long pud_pfn(pud_t pud) -{ - unsigned long origin_mask; - - origin_mask = _REGION_ENTRY_ORIGIN; - if (pud_large(pud)) - origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; - return (pud_val(pud) & origin_mask) >> PAGE_SHIFT; -} - #define pmd_leaf pmd_large static inline int pmd_large(pmd_t pmd) { @@ -747,16 +737,6 @@ static inline int pmd_none(pmd_t pmd) return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY; } -static inline unsigned long pmd_pfn(pmd_t pmd) -{ - unsigned long origin_mask; - - origin_mask = _SEGMENT_ENTRY_ORIGIN; - if (pmd_large(pmd)) - origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; - return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT; -} - #define pmd_write pmd_write static inline int pmd_write(pmd_t pmd) { @@ -1238,11 +1218,39 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) #define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1)) -#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) -#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) #define p4d_deref(pud) (p4d_val(pud) & _REGION_ENTRY_ORIGIN) #define 
pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) +static inline unsigned long pmd_deref(pmd_t pmd) +{ + unsigned long origin_mask; + + origin_mask = _SEGMENT_ENTRY_ORIGIN; + if (pmd_large(pmd)) + origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE; + return pmd_val(pmd) & origin_mask; +} + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return pmd_deref(pmd) >> PAGE_SHIFT; +} + +static inline unsigned long pud_deref(pud_t pud) +{ + unsigned long origin_mask; + + origin_mask = _REGION_ENTRY_ORIGIN; + if (pud_large(pud)) + origin_mask = _REGION3_ENTRY_ORIGIN_LARGE; + return pud_val(pud) & origin_mask; +} + +static inline unsigned long pud_pfn(pud_t pud) +{ + return pud_deref(pud) >> PAGE_SHIFT; +} + /* * The pgd_offset function *always* adds the index for the top-level * region/segment table. This is done to get a sequence like the diff --git a/arch/s390/include/asm/vdso/vdso.h b/arch/s390/include/asm/vdso/vdso.h deleted file mode 100644 index e69de29bb2d1..000000000000 --- a/arch/s390/include/asm/vdso/vdso.h +++ /dev/null diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index ece58f2217cb..2012c1cf0853 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c @@ -61,14 +61,6 @@ int main(void) BLANK(); OFFSET(__VDSO_GETCPU_VAL, vdso_per_cpu_data, getcpu_val); BLANK(); - /* constants used by the vdso */ - DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME); - DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC); - DEFINE(__CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE); - DEFINE(__CLOCK_MONOTONIC_COARSE, CLOCK_MONOTONIC_COARSE); - DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID); - DEFINE(__CLOCK_COARSE_RES, LOW_RES_NSEC); - BLANK(); /* idle data offsets */ OFFSET(__CLOCK_IDLE_ENTER, s390_idle_data, clock_idle_enter); OFFSET(__CLOCK_IDLE_EXIT, s390_idle_data, clock_idle_exit); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index ebfe86d097f0..390d97daa2b3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -855,13 +855,14 @@ void __init smp_detect_cpus(void) static void smp_init_secondary(void) { - int cpu = smp_processor_id(); + int cpu = raw_smp_processor_id(); S390_lowcore.last_update_clock = get_tod_clock(); restore_access_regs(S390_lowcore.access_regs_save_area); set_cpu_flag(CIF_ASCE_PRIMARY); set_cpu_flag(CIF_ASCE_SECONDARY); cpu_init(); + rcu_cpu_starting(cpu); preempt_disable(); init_cpu_timer(); vtime_init(); diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index d33f21545dfd..9a6bae503fe6 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c @@ -101,6 +101,10 @@ static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf) if (ret) break; + /* the PCI function will be scanned once function 0 appears */ + if (!zdev->zbus->bus) + break; + pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn); if (!pdev) break; diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index a5e5db6ada3c..39b2eded7bc2 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -164,6 +164,7 @@ void initialize_identity_maps(void *rmode) add_identity_map(cmdline, cmdline + COMMAND_LINE_SIZE); /* Load the new page-table. 
*/ + sev_verify_cbit(top_level_pgt); write_cr3(top_level_pgt); } diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index dd07e7b41b11..aa561795efd1 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -68,6 +68,9 @@ SYM_FUNC_START(get_sev_encryption_bit) SYM_FUNC_END(get_sev_encryption_bit) .code64 + +#include "../../kernel/sev_verify_cbit.S" + SYM_FUNC_START(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp @@ -81,6 +84,19 @@ SYM_FUNC_START(set_sev_encryption_mask) bts %rax, sme_me_mask(%rip) /* Create the encryption mask */ + /* + * Read MSR_AMD64_SEV again and store it to sev_status. Can't do this in + * get_sev_encryption_bit() because this function is 32-bit code and + * shared between 64-bit and 32-bit boot path. + */ + movl $MSR_AMD64_SEV, %ecx /* Read the SEV MSR */ + rdmsr + + /* Store MSR value in sev_status */ + shlq $32, %rdx + orq %rdx, %rax + movq %rax, sev_status(%rip) + .Lno_sev_mask: movq %rbp, %rsp /* Restore original stack pointer */ @@ -96,5 +112,7 @@ SYM_FUNC_END(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT .balign 8 -SYM_DATA(sme_me_mask, .quad 0) +SYM_DATA(sme_me_mask, .quad 0) +SYM_DATA(sev_status, .quad 0) +SYM_DATA(sev_check_data, .quad 0) #endif diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 6d31f1b4c4d1..d9a631c5973c 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -159,4 +159,6 @@ void boot_page_fault(void); void boot_stage1_vc(void); void boot_stage2_vc(void); +unsigned long sev_verify_cbit(unsigned long cr3); + #endif /* BOOT_COMPRESSED_MISC_H */ diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c index 40e0e322161d..284e73661a18 100644 --- a/arch/x86/hyperv/hv_apic.c +++ b/arch/x86/hyperv/hv_apic.c @@ -273,11 +273,15 @@ void __init hv_apic_init(void) pr_info("Hyper-V: Using enlightened APIC (%s mode)", x2apic_enabled() ? "x2apic" : "xapic"); /* - * With x2apic, architectural x2apic MSRs are equivalent to the - * respective synthetic MSRs, so there's no need to override - * the apic accessors. The only exception is - * hv_apic_eoi_write, because it benefits from lazy EOI when - * available, but it works for both xapic and x2apic modes. + * When in x2apic mode, don't use the Hyper-V specific APIC + * accessors since the field layout in the ICR register is + * different in x2apic mode. Furthermore, the architectural + * x2apic MSRs function just as well as the Hyper-V + * synthetic APIC MSRs, so there's no benefit in having + * separate Hyper-V accessors for x2apic mode. The only + * exception is hv_apic_eoi_write, because it benefits from + * lazy EOI when available, but the same accessor works for + * both xapic and x2apic because the field layout is the same. 
*/ apic_set_eoi_write(hv_apic_eoi_write); if (!x2apic_enabled()) { diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 714233cee0b5..3115caa7d7d0 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -290,6 +290,9 @@ static void __init uv_stringify(int len, char *to, char *from) { /* Relies on 'to' being NULL chars so result will be NULL terminated */ strncpy(to, from, len-1); + + /* Trim trailing spaces */ + (void)strim(to); } /* Find UV arch type entry in UVsystab */ @@ -366,7 +369,7 @@ static int __init early_get_arch_type(void) return ret; } -static int __init uv_set_system_type(char *_oem_id) +static int __init uv_set_system_type(char *_oem_id, char *_oem_table_id) { /* Save OEM_ID passed from ACPI MADT */ uv_stringify(sizeof(oem_id), oem_id, _oem_id); @@ -386,13 +389,23 @@ static int __init uv_set_system_type(char *_oem_id) /* (Not hubless), not a UV */ return 0; + /* Is UV hubless system */ + uv_hubless_system = 0x01; + + /* UV5 Hubless */ + if (strncmp(uv_archtype, "NSGI5", 5) == 0) + uv_hubless_system |= 0x20; + /* UV4 Hubless: CH */ - if (strncmp(uv_archtype, "NSGI4", 5) == 0) - uv_hubless_system = 0x11; + else if (strncmp(uv_archtype, "NSGI4", 5) == 0) + uv_hubless_system |= 0x10; /* UV3 Hubless: UV300/MC990X w/o hub */ else - uv_hubless_system = 0x9; + uv_hubless_system |= 0x8; + + /* Copy APIC type */ + uv_stringify(sizeof(oem_table_id), oem_table_id, _oem_table_id); pr_info("UV: OEM IDs %s/%s, SystemType %d, HUBLESS ID %x\n", oem_id, oem_table_id, uv_system_type, uv_hubless_system); @@ -456,7 +469,7 @@ static int __init uv_acpi_madt_oem_check(char *_oem_id, char *_oem_table_id) uv_cpu_info->p_uv_hub_info = &uv_hub_info_node0; /* If not UV, return. */ - if (likely(uv_set_system_type(_oem_id) == 0)) + if (uv_set_system_type(_oem_id, _oem_table_id) == 0) return 0; /* Save and Decode OEM Table ID */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d3f0db463f96..581fb7223ad0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1254,6 +1254,14 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) return 0; } +static bool is_spec_ib_user_controlled(void) +{ + return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || + spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || + spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; +} + static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) { switch (ctrl) { @@ -1261,16 +1269,26 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return 0; + /* - * Indirect branch speculation is always disabled in strict - * mode. It can neither be enabled if it was force-disabled - * by a previous prctl call. + * With strict mode for both IBPB and STIBP, the instruction + * code paths avoid checking this task flag and instead, + * unconditionally run the instruction. However, STIBP and IBPB + * are independent and either can be set to conditionally + * enabled regardless of the mode of the other. + * + * If either is set to conditional, allow the task flag to be + * updated, unless it was force-disabled by a previous prctl + * call. Currently, this is possible on an AMD CPU which has the + * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. 
In this case, if the + * kernel is booted with 'spectre_v2_user=seccomp', then + * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and + * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. */ - if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED || + if (!is_spec_ib_user_controlled() || task_spec_ib_force_disable(task)) return -EPERM; + task_clear_spec_ib_disable(task); task_update_spec_tif(task); break; @@ -1283,10 +1301,10 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return -EPERM; - if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + + if (!is_spec_ib_user_controlled()) return 0; + task_set_spec_ib_disable(task); if (ctrl == PR_SPEC_FORCE_DISABLE) task_set_spec_ib_force_disable(task); @@ -1351,20 +1369,17 @@ static int ib_prctl_get(struct task_struct *task) if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) return PR_SPEC_ENABLE; - else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || - spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) - return PR_SPEC_DISABLE; - else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || - spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || - spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || - spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) { + else if (is_spec_ib_user_controlled()) { if (task_spec_ib_force_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; if (task_spec_ib_disable(task)) return PR_SPEC_PRCTL | PR_SPEC_DISABLE; return PR_SPEC_PRCTL | PR_SPEC_ENABLE; - } else + } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || + spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) + return PR_SPEC_DISABLE; + else return PR_SPEC_NOT_AFFECTED; } diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 7eb2a1c87969..3c417734790f 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -161,6 +161,21 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) /* Setup early boot stage 4-/5-level pagetables. */ addq phys_base(%rip), %rax + + /* + * For SEV guests: Verify that the C-bit is correct. A malicious + * hypervisor could lie about the C-bit position to perform a ROP + * attack on the guest by writing to the unencrypted stack and wait for + * the next RET instruction. + * %rsi carries pointer to realmode data and is callee-clobbered. Save + * and restore it. 
+ */ + pushq %rsi + movq %rax, %rdi + call sev_verify_cbit + popq %rsi + + /* Switch to new page-table */ movq %rax, %cr3 /* Ensure I am executing from virtual addresses */ @@ -279,6 +294,7 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) SYM_CODE_END(secondary_startup_64) #include "verify_cpu.S" +#include "sev_verify_cbit.S" #ifdef CONFIG_HOTPLUG_CPU /* diff --git a/arch/x86/kernel/sev-es-shared.c b/arch/x86/kernel/sev-es-shared.c index 5f83ccaab877..7d04b356d44d 100644 --- a/arch/x86/kernel/sev-es-shared.c +++ b/arch/x86/kernel/sev-es-shared.c @@ -178,6 +178,32 @@ void __init do_vc_no_ghcb(struct pt_regs *regs, unsigned long exit_code) goto fail; regs->dx = val >> 32; + /* + * This is a VC handler and the #VC is only raised when SEV-ES is + * active, which means SEV must be active too. Do sanity checks on the + * CPUID results to make sure the hypervisor does not trick the kernel + * into the no-sev path. This could map sensitive data unencrypted and + * make it accessible to the hypervisor. + * + * In particular, check for: + * - Hypervisor CPUID bit + * - Availability of CPUID leaf 0x8000001f + * - SEV CPUID bit. + * + * The hypervisor might still report the wrong C-bit position, but this + * can't be checked here. + */ + + if ((fn == 1 && !(regs->cx & BIT(31)))) + /* Hypervisor bit */ + goto fail; + else if (fn == 0x80000000 && (regs->ax < 0x8000001f)) + /* SEV leaf check */ + goto fail; + else if ((fn == 0x8000001f && !(regs->ax & BIT(1)))) + /* SEV bit */ + goto fail; + /* Skip over the CPUID two-byte opcode */ regs->ip += 2; diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c index 4a96726fbaf8..0bd1a0fc587e 100644 --- a/arch/x86/kernel/sev-es.c +++ b/arch/x86/kernel/sev-es.c @@ -374,8 +374,8 @@ fault: return ES_EXCEPTION; } -static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, - unsigned long vaddr, phys_addr_t *paddr) +static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, + unsigned long vaddr, phys_addr_t *paddr) { unsigned long va = (unsigned long)vaddr; unsigned int level; @@ -394,15 +394,19 @@ static bool vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt, if (user_mode(ctxt->regs)) ctxt->fi.error_code |= X86_PF_USER; - return false; + return ES_EXCEPTION; } + if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC)) + /* Emulated MMIO to/from encrypted memory not supported */ + return ES_UNSUPPORTED; + pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; pa |= va & ~page_level_mask(level); *paddr = pa; - return true; + return ES_OK; } /* Include code shared with pre-decompression boot stage */ @@ -731,6 +735,7 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, { u64 exit_code, exit_info_1, exit_info_2; unsigned long ghcb_pa = __pa(ghcb); + enum es_result res; phys_addr_t paddr; void __user *ref; @@ -740,11 +745,12 @@ static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt, exit_code = read ? 
SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE; - if (!vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr)) { - if (!read) + res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr); + if (res != ES_OK) { + if (res == ES_EXCEPTION && !read) ctxt->fi.error_code |= X86_PF_WRITE; - return ES_EXCEPTION; + return res; } exit_info_1 = paddr; diff --git a/arch/x86/kernel/sev_verify_cbit.S b/arch/x86/kernel/sev_verify_cbit.S new file mode 100644 index 000000000000..ee04941a6546 --- /dev/null +++ b/arch/x86/kernel/sev_verify_cbit.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * sev_verify_cbit.S - Code for verification of the C-bit position reported + * by the Hypervisor when running with SEV enabled. + * + * Copyright (c) 2020 Joerg Roedel (jroedel@suse.de) + * + * sev_verify_cbit() is called before switching to a new long-mode page-table + * at boot. + * + * Verify that the C-bit position is correct by writing a random value to + * an encrypted memory location while on the current page-table. Then it + * switches to the new page-table to verify the memory content is still the + * same. After that it switches back to the current page-table and when the + * check succeeded it returns. If the check failed the code invalidates the + * stack pointer and goes into a hlt loop. The stack-pointer is invalidated to + * make sure no interrupt or exception can get the CPU out of the hlt loop. + * + * New page-table pointer is expected in %rdi (first parameter) + * + */ +SYM_FUNC_START(sev_verify_cbit) +#ifdef CONFIG_AMD_MEM_ENCRYPT + /* First check if a C-bit was detected */ + movq sme_me_mask(%rip), %rsi + testq %rsi, %rsi + jz 3f + + /* sme_me_mask != 0 could mean SME or SEV - Check also for SEV */ + movq sev_status(%rip), %rsi + testq %rsi, %rsi + jz 3f + + /* Save CR4 in %rsi */ + movq %cr4, %rsi + + /* Disable Global Pages */ + movq %rsi, %rdx + andq $(~X86_CR4_PGE), %rdx + movq %rdx, %cr4 + + /* + * Verified that running under SEV - now get a random value using + * RDRAND. This instruction is mandatory when running as an SEV guest. + * + * Don't bail out of the loop if RDRAND returns errors. It is better to + * prevent forward progress than to work with a non-random value here. + */ +1: rdrand %rdx + jnc 1b + + /* Store value to memory and keep it in %rdx */ + movq %rdx, sev_check_data(%rip) + + /* Backup current %cr3 value to restore it later */ + movq %cr3, %rcx + + /* Switch to new %cr3 - This might unmap the stack */ + movq %rdi, %cr3 + + /* + * Compare value in %rdx with memory location. If C-bit is incorrect + * this would read the encrypted data and make the check fail. + */ + cmpq %rdx, sev_check_data(%rip) + + /* Restore old %cr3 */ + movq %rcx, %cr3 + + /* Restore previous CR4 */ + movq %rsi, %cr4 + + /* Check CMPQ result */ + je 3f + + /* + * The check failed, prevent any forward progress to prevent ROP + * attacks, invalidate the stack and go into a hlt loop. + */ + xorq %rsp, %rsp + subq $0x1000, %rsp +2: hlt + jmp 2b +3: +#endif + /* Return page-table pointer */ + movq %rdi, %rax + ret +SYM_FUNC_END(sev_verify_cbit) diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S index 037faac46b0c..1e299ac73c86 100644 --- a/arch/x86/lib/memcpy_64.S +++ b/arch/x86/lib/memcpy_64.S @@ -16,8 +16,6 @@ * to a jmp to memcpy_erms which does the REP; MOVSB mem copy. */ -.weak memcpy - /* * memcpy - Copy a memory block. 
* @@ -30,7 +28,7 @@ * rax original destination */ SYM_FUNC_START_ALIAS(__memcpy) -SYM_FUNC_START_LOCAL(memcpy) +SYM_FUNC_START_WEAK(memcpy) ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \ "jmp memcpy_erms", X86_FEATURE_ERMS diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S index 7ff00ea64e4f..41902fe8b859 100644 --- a/arch/x86/lib/memmove_64.S +++ b/arch/x86/lib/memmove_64.S @@ -24,9 +24,7 @@ * Output: * rax: dest */ -.weak memmove - -SYM_FUNC_START_ALIAS(memmove) +SYM_FUNC_START_WEAK(memmove) SYM_FUNC_START(__memmove) mov %rdi, %rax diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S index 9ff15ee404a4..0bfd26e4ca9e 100644 --- a/arch/x86/lib/memset_64.S +++ b/arch/x86/lib/memset_64.S @@ -6,8 +6,6 @@ #include <asm/alternative-asm.h> #include <asm/export.h> -.weak memset - /* * ISO C memset - set a memory block to a byte value. This function uses fast * string to get better performance than the original function. The code is @@ -19,7 +17,7 @@ * * rax original destination */ -SYM_FUNC_START_ALIAS(memset) +SYM_FUNC_START_WEAK(memset) SYM_FUNC_START(__memset) /* * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index efbb3de472df..bc0833713be9 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -39,6 +39,7 @@ */ u64 sme_me_mask __section(".data") = 0; u64 sev_status __section(".data") = 0; +u64 sev_check_data __section(".data") = 0; EXPORT_SYMBOL(sme_me_mask); DEFINE_STATIC_KEY_FALSE(sev_enable_key); EXPORT_SYMBOL_GPL(sev_enable_key); diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index c6fc83efee0c..8731b7ad9308 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -89,8 +89,8 @@ static void __init free_highpages(void) /* set highmem page free */ for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &range_start, &range_end, NULL) { - unsigned long start = PHYS_PFN(range_start); - unsigned long end = PHYS_PFN(range_end); + unsigned long start = PFN_UP(range_start); + unsigned long end = PFN_DOWN(range_end); /* Ignore complete lowmem entries */ if (end <= max_low) diff --git a/drivers/base/core.c b/drivers/base/core.c index 78114ddac755..d661ada1518f 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -773,8 +773,7 @@ static void __device_link_del(struct kref *kref) dev_dbg(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_drop_link(link->consumer); + pm_runtime_drop_link(link); list_del_rcu(&link->s_node); list_del_rcu(&link->c_node); @@ -788,8 +787,7 @@ static void __device_link_del(struct kref *kref) dev_info(link->consumer, "Dropping the link to %s\n", dev_name(link->supplier)); - if (link->flags & DL_FLAG_PM_RUNTIME) - pm_runtime_drop_link(link->consumer); + pm_runtime_drop_link(link); list_del(&link->s_node); list_del(&link->c_node); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index b42229b74fd6..148e81969e04 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -1117,6 +1117,8 @@ static void __device_release_driver(struct device *dev, struct device *parent) drv = dev->driver; if (drv) { + pm_runtime_get_sync(dev); + while (device_links_busy(dev)) { __device_driver_unlock(dev, parent); @@ -1128,13 +1130,12 @@ static void __device_release_driver(struct device *dev, struct device *parent) * have released the driver successfully while this one * was waiting, so check for that. 
*/ - if (dev->driver != drv) + if (dev->driver != drv) { + pm_runtime_put(dev); return; + } } - pm_runtime_get_sync(dev); - pm_runtime_clean_up_links(dev); - driver_sysfs_remove(dev); if (dev->bus) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 6f605f7820bb..bfda153b1a41 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1643,42 +1643,6 @@ void pm_runtime_remove(struct device *dev) } /** - * pm_runtime_clean_up_links - Prepare links to consumers for driver removal. - * @dev: Device whose driver is going to be removed. - * - * Check links from this device to any consumers and if any of them have active - * runtime PM references to the device, drop the usage counter of the device - * (as many times as needed). - * - * Links with the DL_FLAG_MANAGED flag unset are ignored. - * - * Since the device is guaranteed to be runtime-active at the point this is - * called, nothing else needs to be done here. - * - * Moreover, this is called after device_links_busy() has returned 'false', so - * the status of each link is guaranteed to be DL_STATE_SUPPLIER_UNBIND and - * therefore rpm_active can't be manipulated concurrently. - */ -void pm_runtime_clean_up_links(struct device *dev) -{ - struct device_link *link; - int idx; - - idx = device_links_read_lock(); - - list_for_each_entry_rcu(link, &dev->links.consumers, s_node, - device_links_read_lock_held()) { - if (!(link->flags & DL_FLAG_MANAGED)) - continue; - - while (refcount_dec_not_one(&link->rpm_active)) - pm_runtime_put_noidle(dev); - } - - device_links_read_unlock(idx); -} - -/** * pm_runtime_get_suppliers - Resume and reference-count supplier devices. * @dev: Consumer device. */ @@ -1729,7 +1693,7 @@ void pm_runtime_new_link(struct device *dev) spin_unlock_irq(&dev->power.lock); } -void pm_runtime_drop_link(struct device *dev) +static void pm_runtime_drop_link_count(struct device *dev) { spin_lock_irq(&dev->power.lock); WARN_ON(dev->power.links_count == 0); @@ -1737,6 +1701,25 @@ void pm_runtime_drop_link(struct device *dev) spin_unlock_irq(&dev->power.lock); } +/** + * pm_runtime_drop_link - Prepare for device link removal. + * @link: Device link going away. + * + * Drop the link count of the consumer end of @link and decrement the supplier + * device's runtime PM usage counter as many times as needed to drop all of the + * PM runtime reference to it from the consumer. 
+ */ +void pm_runtime_drop_link(struct device_link *link) +{ + if (!(link->flags & DL_FLAG_PM_RUNTIME)) + return; + + pm_runtime_drop_link_count(link->consumer); + + while (refcount_dec_not_one(&link->rpm_active)) + pm_runtime_put(link->supplier); +} + static bool pm_runtime_need_not_resume(struct device *dev) { return atomic_read(&dev->power.usage_count) <= 1 && diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h index cfd00ad40355..c24d9b5ad81a 100644 --- a/drivers/block/null_blk.h +++ b/drivers/block/null_blk.h @@ -47,7 +47,7 @@ struct nullb_device { unsigned int nr_zones_closed; struct blk_zone *zones; sector_t zone_size_sects; - spinlock_t zone_dev_lock; + spinlock_t zone_lock; unsigned long *zone_locks; unsigned long size; /* device size in MB */ diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c index 8775acbb4f8f..beb34b4f76b0 100644 --- a/drivers/block/null_blk_zoned.c +++ b/drivers/block/null_blk_zoned.c @@ -46,11 +46,20 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) if (!dev->zones) return -ENOMEM; - spin_lock_init(&dev->zone_dev_lock); - dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL); - if (!dev->zone_locks) { - kvfree(dev->zones); - return -ENOMEM; + /* + * With memory backing, the zone_lock spinlock needs to be temporarily + * released to avoid scheduling in atomic context. To guarantee zone + * information protection, use a bitmap to lock zones with + * wait_on_bit_lock_io(). Sleeping on the lock is OK as memory backing + * implies that the queue is marked with BLK_MQ_F_BLOCKING. + */ + spin_lock_init(&dev->zone_lock); + if (dev->memory_backed) { + dev->zone_locks = bitmap_zalloc(dev->nr_zones, GFP_KERNEL); + if (!dev->zone_locks) { + kvfree(dev->zones); + return -ENOMEM; + } } if (dev->zone_nr_conv >= dev->nr_zones) { @@ -137,12 +146,17 @@ void null_free_zoned_dev(struct nullb_device *dev) static inline void null_lock_zone(struct nullb_device *dev, unsigned int zno) { - wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE); + if (dev->memory_backed) + wait_on_bit_lock_io(dev->zone_locks, zno, TASK_UNINTERRUPTIBLE); + spin_lock_irq(&dev->zone_lock); } static inline void null_unlock_zone(struct nullb_device *dev, unsigned int zno) { - clear_and_wake_up_bit(zno, dev->zone_locks); + spin_unlock_irq(&dev->zone_lock); + + if (dev->memory_backed) + clear_and_wake_up_bit(zno, dev->zone_locks); } int null_report_zones(struct gendisk *disk, sector_t sector, @@ -322,7 +336,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); null_lock_zone(dev, zno); - spin_lock(&dev->zone_dev_lock); switch (zone->cond) { case BLK_ZONE_COND_FULL: @@ -375,9 +388,17 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, if (zone->cond != BLK_ZONE_COND_EXP_OPEN) zone->cond = BLK_ZONE_COND_IMP_OPEN; - spin_unlock(&dev->zone_dev_lock); + /* + * Memory backing allocation may sleep: release the zone_lock spinlock + * to avoid scheduling in atomic context. Zone operation atomicity is + * still guaranteed through the zone_locks bitmap. 
+ */ + if (dev->memory_backed) + spin_unlock_irq(&dev->zone_lock); ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors); - spin_lock(&dev->zone_dev_lock); + if (dev->memory_backed) + spin_lock_irq(&dev->zone_lock); + if (ret != BLK_STS_OK) goto unlock; @@ -392,7 +413,6 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector, ret = BLK_STS_OK; unlock: - spin_unlock(&dev->zone_dev_lock); null_unlock_zone(dev, zno); return ret; @@ -516,9 +536,7 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, null_lock_zone(dev, i); zone = &dev->zones[i]; if (zone->cond != BLK_ZONE_COND_EMPTY) { - spin_lock(&dev->zone_dev_lock); null_reset_zone(dev, zone); - spin_unlock(&dev->zone_dev_lock); trace_nullb_zone_op(cmd, i, zone->cond); } null_unlock_zone(dev, i); @@ -530,7 +548,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, zone = &dev->zones[zone_no]; null_lock_zone(dev, zone_no); - spin_lock(&dev->zone_dev_lock); switch (op) { case REQ_OP_ZONE_RESET: @@ -550,8 +567,6 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op, break; } - spin_unlock(&dev->zone_dev_lock); - if (ret == BLK_STS_OK) trace_nullb_zone_op(cmd, zone_no, zone->cond); diff --git a/drivers/char/tpm/eventlog/efi.c b/drivers/char/tpm/eventlog/efi.c index 6bb023de17f1..35229e5143ca 100644 --- a/drivers/char/tpm/eventlog/efi.c +++ b/drivers/char/tpm/eventlog/efi.c @@ -41,6 +41,11 @@ int tpm_read_log_efi(struct tpm_chip *chip) log_size = log_tbl->size; memunmap(log_tbl); + if (!log_size) { + pr_warn("UEFI TPM log area empty\n"); + return -EIO; + } + log_tbl = memremap(efi.tpm_log, sizeof(*log_tbl) + log_size, MEMREMAP_WB); if (!log_tbl) { diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 0b214963539d..4ed6e660273a 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -27,6 +27,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/kernel.h> +#include <linux/dmi.h> #include "tpm.h" #include "tpm_tis_core.h" @@ -49,8 +50,8 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da return container_of(data, struct tpm_tis_tcg_phy, priv); } -static bool interrupts = true; -module_param(interrupts, bool, 0444); +static int interrupts = -1; +module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); static bool itpm; @@ -63,6 +64,28 @@ module_param(force, bool, 0444); MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry"); #endif +static int tpm_tis_disable_irq(const struct dmi_system_id *d) +{ + if (interrupts == -1) { + pr_notice("tpm_tis: %s detected: disabling interrupts.\n", d->ident); + interrupts = 0; + } + + return 0; +} + +static const struct dmi_system_id tpm_tis_dmi_table[] = { + { + .callback = tpm_tis_disable_irq, + .ident = "ThinkPad T490s", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T490s"), + }, + }, + {} +}; + #if defined(CONFIG_PNP) && defined(CONFIG_ACPI) static int has_hid(struct acpi_device *dev, const char *hid) { @@ -192,6 +215,8 @@ static int tpm_tis_init(struct device *dev, struct tpm_info *tpm_info) int irq = -1; int rc; + dmi_check_system(tpm_tis_dmi_table); + rc = check_acpi_tpm2(dev); if (rc) return rc; diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c index fa2f1b4fad7b..a94bf28f858a 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c +++ 
b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-hash.c @@ -7,7 +7,7 @@ * * This file add support for MD5 and SHA1/SHA224/SHA256/SHA384/SHA512. * - * You could find the datasheet in Documentation/arm/sunxi/README + * You could find the datasheet in Documentation/arm/sunxi.rst */ #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c index 78503006949c..cfde9ee4356b 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-prng.c @@ -7,7 +7,7 @@ * * This file handle the PRNG * - * You could find a link for the datasheet in Documentation/arm/sunxi/README + * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ #include "sun8i-ce.h" #include <linux/dma-mapping.h> diff --git a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c index 654328160d19..5b7af4498bd5 100644 --- a/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c +++ b/drivers/crypto/allwinner/sun8i-ce/sun8i-ce-trng.c @@ -7,7 +7,7 @@ * * This file handle the TRNG * - * You could find a link for the datasheet in Documentation/arm/sunxi/README + * You could find a link for the datasheet in Documentation/arm/sunxi.rst */ #include "sun8i-ce.h" #include <linux/dma-mapping.h> diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h index 33128ebbeb9f..24346f1d7dd0 100644 --- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h +++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h @@ -217,6 +217,10 @@ enum { #ifndef ASICREV_IS_VANGOGH #define ASICREV_IS_VANGOGH(eChipRev) ((eChipRev >= VANGOGH_A0) && (eChipRev < VANGOGH_UNKNOWN)) #endif +#define GREEN_SARDINE_A0 0xA1 +#ifndef ASICREV_IS_GREEN_SARDINE +#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF)) +#endif /* * ASIC chip ID diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 7c90a63c273d..fcce6909f201 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -509,21 +509,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, return -ENOENT; /* - * Already in the desired write domain? Nothing for us to do! - * - * We apply a little bit of cunning here to catch a broader set of - * no-ops. If obj->write_domain is set, we must be in the same - * obj->read_domains, and only that domain. Therefore, if that - * obj->write_domain matches the request read_domains, we are - * already in the same read/write domain and can skip the operation, - * without having to further check the requested write_domain. - */ - if (READ_ONCE(obj->write_domain) == read_domains) { - err = 0; - goto out; - } - - /* * Try to flush the object off the GPU without holding the lock. * We will repeat the flush holding the lock in the normal manner * to catch cases where we are gazumped. @@ -560,6 +545,19 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, if (err) goto out; + /* + * Already in the desired write domain? Nothing for us to do! + * + * We apply a little bit of cunning here to catch a broader set of + * no-ops. If obj->write_domain is set, we must be in the same + * obj->read_domains, and only that domain. 
Therefore, if that + * obj->write_domain matches the request read_domains, we are + * already in the same read/write domain and can skip the operation, + * without having to further check the requested write_domain. + */ + if (READ_ONCE(obj->write_domain) == read_domains) + goto out_unpin; + err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out_unpin; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 7c3a1012e702..760fefdfe392 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -245,22 +245,14 @@ static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u } static inline u32 * -__gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1) +__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1) { - /* We're using qword write, offset should be aligned to 8 bytes. */ - GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - - /* w/a for post sync ops following a GPGPU operation we - * need a prior CS_STALL, which is emitted by the flush - * following the batch. - */ *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0; - *cs++ = flags1 | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB; - *cs++ = gtt_offset; + *cs++ = flags1 | PIPE_CONTROL_QW_WRITE; + *cs++ = offset; *cs++ = 0; *cs++ = value; - /* We're thrashing one dword of HWS. */ - *cs++ = 0; + *cs++ = 0; /* We're thrashing one extra dword. */ return cs; } @@ -268,13 +260,38 @@ __gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 f static inline u32* gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) { - return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, 0, flags); + /* We're using qword write, offset should be aligned to 8 bytes. */ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + return __gen8_emit_write_rcs(cs, + value, + gtt_offset, + 0, + flags | PIPE_CONTROL_GLOBAL_GTT_IVB); } static inline u32* gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1) { - return __gen8_emit_ggtt_write_rcs(cs, value, gtt_offset, flags0, flags1); + /* We're using qword write, offset should be aligned to 8 bytes. 
*/ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + return __gen8_emit_write_rcs(cs, + value, + gtt_offset, + flags0, + flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB); +} + +static inline u32 * +__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags) +{ + *cs++ = (MI_FLUSH_DW + 1) | flags; + *cs++ = gtt_offset; + *cs++ = 0; + *cs++ = value; + + return cs; } static inline u32 * @@ -285,12 +302,10 @@ gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) /* Offset should be aligned to 8 bytes for both (QW/DW) write types */ GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags; - *cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT; - *cs++ = 0; - *cs++ = value; - - return cs; + return __gen8_emit_flush_dw(cs, + value, + gtt_offset | MI_FLUSH_DW_USE_GTT, + flags | MI_FLUSH_DW_OP_STOREDW); } static inline void __intel_engine_reset(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index a32aabce7901..f82c6dd1de18 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3547,6 +3547,19 @@ static const struct intel_context_ops execlists_context_ops = { .destroy = execlists_context_destroy, }; +static u32 hwsp_offset(const struct i915_request *rq) +{ + const struct intel_timeline_cacheline *cl; + + /* Before the request is executed, the timeline/cachline is fixed */ + + cl = rcu_dereference_protected(rq->hwsp_cacheline, 1); + if (cl) + return cl->ggtt_offset; + + return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset; +} + static int gen8_emit_init_breadcrumb(struct i915_request *rq) { u32 *cs; @@ -3569,7 +3582,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) *cs++ = MI_NOOP; *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_request_timeline(rq)->hwsp_offset; + *cs++ = hwsp_offset(rq); *cs++ = 0; *cs++ = rq->fence.seqno - 1; @@ -4886,11 +4899,9 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) return gen8_emit_wa_tail(request, cs); } -static u32 *emit_xcs_breadcrumb(struct i915_request *request, u32 *cs) +static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs) { - u32 addr = i915_request_active_timeline(request)->hwsp_offset; - - return gen8_emit_ggtt_write(cs, request->fence.seqno, addr, 0); + return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0); } static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) @@ -4909,7 +4920,7 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, - i915_request_active_timeline(request)->hwsp_offset, + hwsp_offset(request), PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL); @@ -4921,7 +4932,7 @@ gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, - i915_request_active_timeline(request)->hwsp_offset, + hwsp_offset(request), PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TILE_CACHE_FLUSH | PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | @@ -4983,7 +4994,9 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) { - return gen12_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); + /* XXX Stalling flush before seqno write; post-sync not */ + cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0)); + 
return gen12_emit_fini_breadcrumb_tail(rq, cs); } static u32 * @@ -4991,7 +5004,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen12_emit_ggtt_write_rcs(cs, request->fence.seqno, - i915_request_active_timeline(request)->hwsp_offset, + hwsp_offset(request), PIPE_CONTROL0_HDC_PIPELINE_FLUSH, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TILE_CACHE_FLUSH | diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index a2f74cefe4c3..7ea94d201fe6 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -188,10 +188,14 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) return cl; } -static void cacheline_acquire(struct intel_timeline_cacheline *cl) +static void cacheline_acquire(struct intel_timeline_cacheline *cl, + u32 ggtt_offset) { - if (cl) - i915_active_acquire(&cl->active); + if (!cl) + return; + + cl->ggtt_offset = ggtt_offset; + i915_active_acquire(&cl->active); } static void cacheline_release(struct intel_timeline_cacheline *cl) @@ -340,7 +344,7 @@ int intel_timeline_pin(struct intel_timeline *tl, struct i915_gem_ww_ctx *ww) GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", tl->fence_context, tl->hwsp_offset); - cacheline_acquire(tl->hwsp_cacheline); + cacheline_acquire(tl->hwsp_cacheline, tl->hwsp_offset); if (atomic_fetch_inc(&tl->pin_count)) { cacheline_release(tl->hwsp_cacheline); __i915_vma_unpin(tl->hwsp_ggtt); @@ -515,7 +519,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, GT_TRACE(tl->gt, "timeline:%llx using HWSP offset:%x\n", tl->fence_context, tl->hwsp_offset); - cacheline_acquire(cl); + cacheline_acquire(cl, tl->hwsp_offset); tl->hwsp_cacheline = cl; *seqno = timeline_advance(tl); @@ -573,9 +577,7 @@ int intel_timeline_read_hwsp(struct i915_request *from, if (err) goto out; - *hwsp = i915_ggtt_offset(cl->hwsp->vma) + - ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES; - + *hwsp = cl->ggtt_offset; out: i915_active_release(&cl->active); return err; diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index 02181c5020db..4474f487f589 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -94,6 +94,8 @@ struct intel_timeline_cacheline { struct intel_timeline_hwsp *hwsp; void *vaddr; + u32 ggtt_offset; + struct rcu_head rcu; }; diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index c7cf15fe9ef6..ce93079cf933 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1489,7 +1489,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset, const struct intel_engine_cs *engine = intel_gvt_render_mmio_to_engine(vgpu->gvt, offset); - if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { + if (value != 0 && + !intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) { gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n", offset, value); return -EINVAL; @@ -1650,6 +1651,34 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, return 0; } +/** + * FixMe: + * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did: + * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.) + * Due to the missing flush of bb filled by VM vCPU, host GPU hangs on executing + * these MI_BATCH_BUFFER. 
+ * Temporarily workaround this by setting SNOOP bit for PAT3 used by PPGTT + * PML4 PTE: PAT(0) PCD(1) PWT(1). + * The performance is still expected to be low, will need further improvement. + */ +static int bxt_ppat_low_write(struct intel_vgpu *vgpu, unsigned int offset, + void *p_data, unsigned int bytes) +{ + u64 pat = + GEN8_PPAT(0, CHV_PPAT_SNOOP) | + GEN8_PPAT(1, 0) | + GEN8_PPAT(2, 0) | + GEN8_PPAT(3, CHV_PPAT_SNOOP) | + GEN8_PPAT(4, CHV_PPAT_SNOOP) | + GEN8_PPAT(5, CHV_PPAT_SNOOP) | + GEN8_PPAT(6, CHV_PPAT_SNOOP) | + GEN8_PPAT(7, CHV_PPAT_SNOOP); + + vgpu_vreg(vgpu, offset) = lower_32_bits(pat); + + return 0; +} + static int guc_status_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) @@ -2812,7 +2841,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt) MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); - MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); + MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS & ~D_BXT); MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); MMIO_D(GAMTARBMODE, D_BDW_PLUS); @@ -3139,7 +3168,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) NULL, NULL); MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); - MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS); + MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); return 0; } @@ -3313,9 +3342,21 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) MMIO_D(GEN8_PUSHBUS_SHIFT, D_BXT); MMIO_D(GEN6_GFXPAUSE, D_BXT); MMIO_DFH(GEN8_L3SQCREG1, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(GEN8_L3CNTLREG, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DFH(_MMIO(0x20D8), D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(RENDER_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(GEN6_BSD_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(BLT_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); + MMIO_F(GEN8_RING_CS_GPR(VEBOX_RING_BASE, 0), 0x40, F_CMD_ACCESS, + 0, 0, D_BXT, NULL, NULL); MMIO_DFH(GEN9_CTX_PREEMPT_REG, D_BXT, F_CMD_ACCESS, NULL, NULL); + MMIO_DH(GEN8_PRIVATE_PAT_LO, D_BXT, NULL, bxt_ppat_low_write); + return 0; } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1570eb8aa978..aed2ef6466a2 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1277,7 +1277,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu) i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm)); for_each_engine(engine, vgpu->gvt->gt, id) - intel_context_unpin(s->shadow[id]); + intel_context_put(s->shadow[id]); kmem_cache_destroy(s->workloads); } @@ -1369,11 +1369,6 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) ce->ring = __intel_context_ring_size(ring_size); } - ret = intel_context_pin(ce); - intel_context_put(ce); - if (ret) - goto out_shadow_ctx; - s->shadow[i] = ce; } @@ -1405,7 +1400,6 @@ out_shadow_ctx: if (IS_ERR(s->shadow[i])) break; - intel_context_unpin(s->shadow[i]); intel_context_put(s->shadow[i]); } i915_vm_put(&ppgtt->vm); @@ -1479,6 +1473,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu_submission *s = &workload->vgpu->submission; + intel_context_unpin(s->shadow[workload->engine->id]); release_shadow_batch_buffer(workload); release_shadow_wa_ctx(&workload->wa_ctx); @@ -1724,6 +1719,12 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, return ERR_PTR(ret); } + ret = intel_context_pin(s->shadow[engine->id]); + if (ret) { + 
intel_vgpu_destroy_workload(workload); + return ERR_PTR(ret); + } + return workload; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index ffb5287e055a..caa9b041616b 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -314,8 +314,10 @@ static void __vma_release(struct dma_fence_work *work) { struct i915_vma_work *vw = container_of(work, typeof(*vw), base); - if (vw->pinned) + if (vw->pinned) { __i915_gem_object_unpin_pages(vw->pinned); + i915_gem_object_put(vw->pinned); + } i915_vm_free_pt_stash(vw->vm, &vw->stash); i915_vm_put(vw->vm); @@ -431,7 +433,7 @@ int i915_vma_bind(struct i915_vma *vma, if (vma->obj) { __i915_gem_object_pin_pages(vma->obj); - work->pinned = vma->obj; + work->pinned = i915_gem_object_get(vma->obj); } } else { vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags); diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index 71d84c7a5378..d07b39b8afd2 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -111,10 +111,6 @@ static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi) return 0; } -static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder) -{ -} - static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder) { struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder); @@ -140,7 +136,6 @@ static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder, static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { .enable = dw_hdmi_imx_encoder_enable, - .disable = dw_hdmi_imx_encoder_disable, .atomic_check = dw_hdmi_imx_atomic_check, }; @@ -219,15 +214,9 @@ static int dw_hdmi_imx_bind(struct device *dev, struct device *master, hdmi->dev = &pdev->dev; encoder = &hdmi->encoder; - encoder->possible_crtcs = drm_of_find_possible_crtcs(drm, dev->of_node); - /* - * If we failed to find the CRTC(s) which this encoder is - * supposed to be connected to, it's because the CRTC has - * not been registered yet. Defer probing, and hope that - * the required CRTC is added later. 
- */ - if (encoder->possible_crtcs == 0) - return -EPROBE_DEFER; + ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node); + if (ret) + return ret; ret = dw_hdmi_imx_parse_dt(hdmi); if (ret < 0) diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index 7d00c49fd5a5..9bf5ad6d18a2 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -20,6 +20,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> @@ -212,7 +213,9 @@ static int imx_drm_bind(struct device *dev) drm->mode_config.allow_fb_modifiers = true; drm->mode_config.normalize_zpos = true; - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + return ret; ret = drm_vblank_init(drm, MAX_CRTC); if (ret) @@ -251,7 +254,6 @@ err_poll_fini: drm_kms_helper_poll_fini(drm); component_unbind_all(drm->dev, drm); err_kms: - drm_mode_config_cleanup(drm); drm_dev_put(drm); return ret; @@ -267,11 +269,9 @@ static void imx_drm_unbind(struct device *dev) component_unbind_all(drm->dev, drm); - drm_mode_config_cleanup(drm); + drm_dev_put(drm); dev_set_drvdata(dev, NULL); - - drm_dev_put(drm); } static const struct component_master_ops imx_drm_ops = { diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index af757d1e21fe..41e2978cb1eb 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -62,7 +62,6 @@ struct imx_ldb_channel { struct i2c_adapter *ddc; int chno; void *edid; - int edid_len; struct drm_display_mode mode; int mode_valid; u32 bus_format; @@ -536,15 +535,14 @@ static int imx_ldb_panel_ddc(struct device *dev, } if (!channel->ddc) { + int edid_len; + /* if no DDC available, fallback to hardcoded EDID */ dev_dbg(dev, "no ddc available\n"); - edidp = of_get_property(child, "edid", - &channel->edid_len); + edidp = of_get_property(child, "edid", &edid_len); if (edidp) { - channel->edid = kmemdup(edidp, - channel->edid_len, - GFP_KERNEL); + channel->edid = kmemdup(edidp, edid_len, GFP_KERNEL); } else if (!channel->panel) { /* fallback to display-timings node */ ret = of_get_drm_display_mode(child, diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 813bb6156a68..2a8d2e32e7b4 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -13,7 +13,6 @@ #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> -#include <linux/spinlock.h> #include <linux/videodev2.h> #include <video/imx-ipu-v3.h> @@ -104,8 +103,6 @@ struct imx_tve { struct drm_connector connector; struct drm_encoder encoder; struct device *dev; - spinlock_t lock; /* register lock */ - bool enabled; int mode; int di_hsync_pin; int di_vsync_pin; @@ -129,30 +126,10 @@ static inline struct imx_tve *enc_to_tve(struct drm_encoder *e) return container_of(e, struct imx_tve, encoder); } -static void tve_lock(void *__tve) -__acquires(&tve->lock) -{ - struct imx_tve *tve = __tve; - - spin_lock(&tve->lock); -} - -static void tve_unlock(void *__tve) -__releases(&tve->lock) -{ - struct imx_tve *tve = __tve; - - spin_unlock(&tve->lock); -} - static void tve_enable(struct imx_tve *tve) { - if (!tve->enabled) { - tve->enabled = true; - clk_prepare_enable(tve->clk); - regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, - TVE_EN, TVE_EN); - } + clk_prepare_enable(tve->clk); + 
regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, TVE_EN); /* clear interrupt status register */ regmap_write(tve->regmap, TVE_STAT_REG, 0xffffffff); @@ -169,11 +146,8 @@ static void tve_enable(struct imx_tve *tve) static void tve_disable(struct imx_tve *tve) { - if (tve->enabled) { - tve->enabled = false; - regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0); - clk_disable_unprepare(tve->clk); - } + regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_EN, 0); + clk_disable_unprepare(tve->clk); } static int tve_setup_tvout(struct imx_tve *tve) @@ -500,8 +474,7 @@ static struct regmap_config tve_regmap_config = { .readable_reg = imx_tve_readable_reg, - .lock = tve_lock, - .unlock = tve_unlock, + .fast_io = true, .max_register = 0xdc, }; @@ -511,7 +484,7 @@ static const char * const imx_tve_modes[] = { [TVE_MODE_VGA] = "vga", }; -static const int of_get_tve_mode(struct device_node *np) +static int of_get_tve_mode(struct device_node *np) { const char *bm; int ret, i; @@ -544,7 +517,6 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) memset(tve, 0, sizeof(*tve)); tve->dev = dev; - spin_lock_init(&tve->lock); ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); if (ddc_node) { diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 8232f512b9ed..2eb8df4697df 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -28,7 +28,6 @@ struct imx_parallel_display { struct drm_bridge bridge; struct device *dev; void *edid; - int edid_len; u32 bus_format; u32 bus_flags; struct drm_display_mode mode; @@ -41,11 +40,6 @@ static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) return container_of(c, struct imx_parallel_display, connector); } -static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e) -{ - return container_of(e, struct imx_parallel_display, encoder); -} - static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) { return container_of(b, struct imx_parallel_display, bridge); @@ -310,6 +304,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) struct device_node *np = dev->of_node; const u8 *edidp; struct imx_parallel_display *imxpd; + int edid_len; int ret; u32 bus_format = 0; const char *fmt; @@ -323,9 +318,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) if (ret && ret != -ENODEV) return ret; - edidp = of_get_property(np, "edid", &imxpd->edid_len); + edidp = of_get_property(np, "edid", &edid_len); if (edidp) - imxpd->edid = kmemdup(edidp, imxpd->edid_len, GFP_KERNEL); + imxpd->edid = devm_kmemdup(dev, edidp, edid_len, GFP_KERNEL); ret = of_property_read_string(np, "interface-pix-fmt", &fmt); if (!ret) { @@ -349,17 +344,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) return 0; } -static void imx_pd_unbind(struct device *dev, struct device *master, - void *data) -{ - struct imx_parallel_display *imxpd = dev_get_drvdata(dev); - - kfree(imxpd->edid); -} - static const struct component_ops imx_pd_ops = { .bind = imx_pd_bind, - .unbind = imx_pd_unbind, }; static int imx_pd_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 6e5dedacb777..689be734ed20 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -628,6 +628,7 @@ static int panfrost_probe(struct platform_device 
*pdev) err_out1: pm_runtime_disable(pfdev->dev); panfrost_device_fini(pfdev); + pm_runtime_set_suspended(pfdev->dev); err_out0: drm_dev_put(ddev); return err; @@ -642,9 +643,9 @@ static int panfrost_remove(struct platform_device *pdev) panfrost_gem_shrinker_cleanup(ddev); pm_runtime_get_sync(pfdev->dev); - panfrost_device_fini(pfdev); - pm_runtime_put_sync_suspend(pfdev->dev); pm_runtime_disable(pfdev->dev); + panfrost_device_fini(pfdev); + pm_runtime_set_suspended(pfdev->dev); drm_dev_put(ddev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index fb9f7334ce18..57a31dd0ffed 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -105,14 +105,12 @@ void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) kref_put(&mapping->refcount, panfrost_gem_mapping_release); } -void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo) { struct panfrost_gem_mapping *mapping; - mutex_lock(&bo->mappings.lock); list_for_each_entry(mapping, &bo->mappings.list, node) panfrost_gem_teardown_mapping(mapping); - mutex_unlock(&bo->mappings.lock); } int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index b3517ff9630c..8088d5fd8480 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -82,7 +82,7 @@ struct panfrost_gem_mapping * panfrost_gem_mapping_get(struct panfrost_gem_object *bo, struct panfrost_file_priv *priv); void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); -void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); +void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo); void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 288e46c40673..1b9f68d8e9aa 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -40,18 +40,26 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); struct panfrost_gem_object *bo = to_panfrost_bo(obj); + bool ret = false; if (atomic_read(&bo->gpu_usecount)) return false; - if (!mutex_trylock(&shmem->pages_lock)) + if (!mutex_trylock(&bo->mappings.lock)) return false; - panfrost_gem_teardown_mappings(bo); + if (!mutex_trylock(&shmem->pages_lock)) + goto unlock_mappings; + + panfrost_gem_teardown_mappings_locked(bo); drm_gem_shmem_purge_locked(obj); + ret = true; mutex_unlock(&shmem->pages_lock); - return true; + +unlock_mappings: + mutex_unlock(&bo->mappings.lock); + return ret; } static unsigned long diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index f432278173cd..c2dead3b0733 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -468,7 +468,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, } if (IS_ERR(cma_obj)) { - struct drm_printer p = drm_info_printer(vc4->dev->dev); + struct drm_printer p = drm_info_printer(vc4->base.dev); DRM_ERROR("Failed to allocate from CMA:\n"); vc4_bo_stats_print(&p, vc4); return ERR_PTR(-ENOMEM); @@ -609,7 +609,7 @@ static void 
vc4_bo_cache_time_work(struct work_struct *work) { struct vc4_dev *vc4 = container_of(work, struct vc4_dev, bo_cache.time_work); - struct drm_device *dev = vc4->dev; + struct drm_device *dev = &vc4->base; mutex_lock(&vc4->bo_lock); vc4_bo_cache_free_old(dev); @@ -1024,6 +1024,7 @@ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, return 0; } +static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused); int vc4_bo_cache_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -1052,10 +1053,10 @@ int vc4_bo_cache_init(struct drm_device *dev) INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work); timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0); - return 0; + return drmm_add_action_or_reset(dev, vc4_bo_cache_destroy, NULL); } -void vc4_bo_cache_destroy(struct drm_device *dev) +static void vc4_bo_cache_destroy(struct drm_device *dev, void *unused) { struct vc4_dev *vc4 = to_vc4_dev(dev); int i; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 8f10f609e4f8..2cd97a39c286 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -245,37 +245,37 @@ static int vc4_drm_bind(struct device *dev) dev->coherent_dma_mask = DMA_BIT_MASK(32); - vc4 = devm_kzalloc(dev, sizeof(*vc4), GFP_KERNEL); - if (!vc4) - return -ENOMEM; - /* If VC4 V3D is missing, don't advertise render nodes. */ node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL); if (!node || !of_device_is_available(node)) vc4_drm_driver.driver_features &= ~DRIVER_RENDER; of_node_put(node); - drm = drm_dev_alloc(&vc4_drm_driver, dev); - if (IS_ERR(drm)) - return PTR_ERR(drm); + vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base); + if (IS_ERR(vc4)) + return PTR_ERR(vc4); + + drm = &vc4->base; platform_set_drvdata(pdev, drm); - vc4->dev = drm; - drm->dev_private = vc4; INIT_LIST_HEAD(&vc4->debugfs_list); mutex_init(&vc4->bin_bo_lock); ret = vc4_bo_cache_init(drm); if (ret) - goto dev_put; + return ret; - drm_mode_config_init(drm); + ret = drmm_mode_config_init(drm); + if (ret) + return ret; - vc4_gem_init(drm); + ret = vc4_gem_init(drm); + if (ret) + return ret; ret = component_bind_all(dev, drm); if (ret) - goto gem_destroy; + return ret; ret = vc4_plane_create_additional_planes(drm); if (ret) @@ -300,30 +300,17 @@ static int vc4_drm_bind(struct device *dev) unbind_all: component_unbind_all(dev, drm); -gem_destroy: - vc4_gem_destroy(drm); - drm_mode_config_cleanup(drm); - vc4_bo_cache_destroy(drm); -dev_put: - drm_dev_put(drm); + return ret; } static void vc4_drm_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct vc4_dev *vc4 = to_vc4_dev(drm); drm_dev_unregister(drm); drm_atomic_helper_shutdown(drm); - - drm_mode_config_cleanup(drm); - - drm_atomic_private_obj_fini(&vc4->load_tracker); - drm_atomic_private_obj_fini(&vc4->ctm_manager); - - drm_dev_put(drm); } static const struct component_master_ops vc4_drm_ops = { diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 5d3d8ed0b775..40f1192fff02 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -14,6 +14,7 @@ #include <drm/drm_device.h> #include <drm/drm_encoder.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_mm.h> #include <drm/drm_modeset_lock.h> @@ -71,7 +72,7 @@ struct vc4_perfmon { }; struct vc4_dev { - struct drm_device *dev; + struct drm_device base; struct vc4_hvs *hvs; struct vc4_v3d *v3d; @@ -234,7 +235,7 @@ 
struct vc4_dev { static inline struct vc4_dev * to_vc4_dev(struct drm_device *dev) { - return (struct vc4_dev *)dev->dev_private; + return container_of(dev, struct vc4_dev, base); } struct vc4_bo { @@ -808,7 +809,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt); void *vc4_prime_vmap(struct drm_gem_object *obj); int vc4_bo_cache_init(struct drm_device *dev); -void vc4_bo_cache_destroy(struct drm_device *dev); int vc4_bo_inc_usecnt(struct vc4_bo *bo); void vc4_bo_dec_usecnt(struct vc4_bo *bo); void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo); @@ -873,8 +873,7 @@ extern struct platform_driver vc4_dsi_driver; extern const struct dma_fence_ops vc4_fence_ops; /* vc4_gem.c */ -void vc4_gem_init(struct drm_device *dev); -void vc4_gem_destroy(struct drm_device *dev); +int vc4_gem_init(struct drm_device *dev); int vc4_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 9f01ddd5b932..b641252939d8 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -314,16 +314,16 @@ vc4_reset_work(struct work_struct *work) struct vc4_dev *vc4 = container_of(work, struct vc4_dev, hangcheck.reset_work); - vc4_save_hang_state(vc4->dev); + vc4_save_hang_state(&vc4->base); - vc4_reset(vc4->dev); + vc4_reset(&vc4->base); } static void vc4_hangcheck_elapsed(struct timer_list *t) { struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer); - struct drm_device *dev = vc4->dev; + struct drm_device *dev = &vc4->base; uint32_t ct0ca, ct1ca; unsigned long irqflags; struct vc4_exec_info *bin_exec, *render_exec; @@ -1000,7 +1000,7 @@ vc4_job_handle_completed(struct vc4_dev *vc4) list_del(&exec->head); spin_unlock_irqrestore(&vc4->job_lock, irqflags); - vc4_complete_exec(vc4->dev, exec); + vc4_complete_exec(&vc4->base, exec); spin_lock_irqsave(&vc4->job_lock, irqflags); } @@ -1258,13 +1258,13 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, return 0; fail: - vc4_complete_exec(vc4->dev, exec); + vc4_complete_exec(&vc4->base, exec); return ret; } -void -vc4_gem_init(struct drm_device *dev) +static void vc4_gem_destroy(struct drm_device *dev, void *unused); +int vc4_gem_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -1285,10 +1285,11 @@ vc4_gem_init(struct drm_device *dev) INIT_LIST_HEAD(&vc4->purgeable.list); mutex_init(&vc4->purgeable.lock); + + return drmm_add_action_or_reset(dev, vc4_gem_destroy, NULL); } -void -vc4_gem_destroy(struct drm_device *dev) +static void vc4_gem_destroy(struct drm_device *dev, void *unused) { struct vc4_dev *vc4 = to_vc4_dev(dev); diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 0bd5ea435120..cccd341e5d67 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -562,7 +562,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = drm->dev_private; + struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_hvs *hvs = NULL; int ret; u32 dispctrl; @@ -681,7 +681,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = drm->dev_private; + struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_hvs *hvs = vc4->hvs; if 
(drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter)) diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 149825ff5df8..2b951cae04ad 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -51,7 +51,7 @@ static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state, struct drm_private_obj *manager) { struct drm_device *dev = state->dev; - struct vc4_dev *vc4 = dev->dev_private; + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_private_state *priv_state; int ret; @@ -93,6 +93,29 @@ static const struct drm_private_state_funcs vc4_ctm_state_funcs = { .atomic_destroy_state = vc4_ctm_destroy_state, }; +static void vc4_ctm_obj_fini(struct drm_device *dev, void *unused) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + drm_atomic_private_obj_fini(&vc4->ctm_manager); +} + +static int vc4_ctm_obj_init(struct vc4_dev *vc4) +{ + struct vc4_ctm_state *ctm_state; + + drm_modeset_lock_init(&vc4->ctm_state_lock); + + ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); + if (!ctm_state) + return -ENOMEM; + + drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base, + &vc4_ctm_state_funcs); + + return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL); +} + /* Converts a DRM S31.32 value to the HW S0.9 format. */ static u16 vc4_ctm_s31_32_to_s0_9(u64 in) { @@ -609,6 +632,34 @@ static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = { .atomic_destroy_state = vc4_load_tracker_destroy_state, }; +static void vc4_load_tracker_obj_fini(struct drm_device *dev, void *unused) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + if (!vc4->load_tracker_available) + return; + + drm_atomic_private_obj_fini(&vc4->load_tracker); +} + +static int vc4_load_tracker_obj_init(struct vc4_dev *vc4) +{ + struct vc4_load_tracker_state *load_state; + + if (!vc4->load_tracker_available) + return 0; + + load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); + if (!load_state) + return -ENOMEM; + + drm_atomic_private_obj_init(&vc4->base, &vc4->load_tracker, + &load_state->base, + &vc4_load_tracker_state_funcs); + + return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL); +} + #define NUM_OUTPUTS 6 #define NUM_CHANNELS 3 @@ -711,8 +762,6 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = { int vc4_kms_load(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_ctm_state *ctm_state; - struct vc4_load_tracker_state *load_state; bool is_vc5 = of_device_is_compatible(dev->dev->of_node, "brcm,bcm2711-vc5"); int ret; @@ -751,26 +800,13 @@ int vc4_kms_load(struct drm_device *dev) dev->mode_config.async_page_flip = true; dev->mode_config.allow_fb_modifiers = true; - drm_modeset_lock_init(&vc4->ctm_state_lock); - - ctm_state = kzalloc(sizeof(*ctm_state), GFP_KERNEL); - if (!ctm_state) - return -ENOMEM; - - drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base, - &vc4_ctm_state_funcs); - - if (vc4->load_tracker_available) { - load_state = kzalloc(sizeof(*load_state), GFP_KERNEL); - if (!load_state) { - drm_atomic_private_obj_fini(&vc4->ctm_manager); - return -ENOMEM; - } + ret = vc4_ctm_obj_init(vc4); + if (ret) + return ret; - drm_atomic_private_obj_init(dev, &vc4->load_tracker, - &load_state->base, - &vc4_load_tracker_state_funcs); - } + ret = vc4_load_tracker_obj_init(vc4); + if (ret) + return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index f7ab979721b3..65d0dac69b0b 100644 --- 
a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -168,7 +168,7 @@ static void vc4_v3d_init_hw(struct drm_device *dev) int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) { - struct drm_device *dev = vc4->dev; + struct drm_device *dev = &vc4->base; unsigned long irqflags; int slot; uint64_t seqno = 0; @@ -246,7 +246,7 @@ static int bin_bo_alloc(struct vc4_dev *vc4) INIT_LIST_HEAD(&list); while (true) { - struct vc4_bo *bo = vc4_bo_create(vc4->dev, size, true, + struct vc4_bo *bo = vc4_bo_create(&vc4->base, size, true, VC4_BO_TYPE_BIN); if (IS_ERR(bo)) { @@ -361,7 +361,7 @@ static int vc4_v3d_runtime_suspend(struct device *dev) struct vc4_v3d *v3d = dev_get_drvdata(dev); struct vc4_dev *vc4 = v3d->vc4; - vc4_irq_uninstall(vc4->dev); + vc4_irq_uninstall(&vc4->base); clk_disable_unprepare(v3d->clk); @@ -378,11 +378,11 @@ static int vc4_v3d_runtime_resume(struct device *dev) if (ret != 0) return ret; - vc4_v3d_init_hw(vc4->dev); + vc4_v3d_init_hw(&vc4->base); /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */ - enable_irq(vc4->dev->irq); - vc4_irq_postinstall(vc4->dev); + enable_irq(vc4->base.irq); + vc4_irq_postinstall(&vc4->base); return 0; } diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index b3dae9ec1a38..d166ee262ce4 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c @@ -133,73 +133,6 @@ enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat) } EXPORT_SYMBOL_GPL(ipu_pixelformat_to_colorspace); -bool ipu_pixelformat_is_planar(u32 pixelformat) -{ - switch (pixelformat) { - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YVU420: - case V4L2_PIX_FMT_YUV422P: - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - return true; - } - - return false; -} -EXPORT_SYMBOL_GPL(ipu_pixelformat_is_planar); - -enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code) -{ - switch (mbus_code & 0xf000) { - case 0x1000: - return IPUV3_COLORSPACE_RGB; - case 0x2000: - return IPUV3_COLORSPACE_YUV; - default: - return IPUV3_COLORSPACE_UNKNOWN; - } -} -EXPORT_SYMBOL_GPL(ipu_mbus_code_to_colorspace); - -int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat) -{ - switch (pixelformat) { - case V4L2_PIX_FMT_YUV420: - case V4L2_PIX_FMT_YVU420: - case V4L2_PIX_FMT_YUV422P: - case V4L2_PIX_FMT_NV12: - case V4L2_PIX_FMT_NV21: - case V4L2_PIX_FMT_NV16: - case V4L2_PIX_FMT_NV61: - /* - * for the planar YUV formats, the stride passed to - * cpmem must be the stride in bytes of the Y plane. - * And all the planar YUV formats have an 8-bit - * Y component. - */ - return (8 * pixel_stride) >> 3; - case V4L2_PIX_FMT_RGB565: - case V4L2_PIX_FMT_YUYV: - case V4L2_PIX_FMT_UYVY: - return (16 * pixel_stride) >> 3; - case V4L2_PIX_FMT_BGR24: - case V4L2_PIX_FMT_RGB24: - return (24 * pixel_stride) >> 3; - case V4L2_PIX_FMT_BGR32: - case V4L2_PIX_FMT_RGB32: - case V4L2_PIX_FMT_XBGR32: - case V4L2_PIX_FMT_XRGB32: - return (32 * pixel_stride) >> 3; - default: - break; - } - - return -EINVAL; -} -EXPORT_SYMBOL_GPL(ipu_stride_to_bytes); - int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, bool hflip, bool vflip) { diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index b64d2efbefe7..eb56e09ae15f 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -1275,7 +1275,7 @@ static void balloon_up(struct work_struct *dummy) /* Refuse to balloon below the floor. 
*/ if (avail_pages < num_pages || avail_pages - num_pages < floor) { - pr_warn("Balloon request will be partially fulfilled. %s\n", + pr_info("Balloon request will be partially fulfilled. %s\n", avail_pages < num_pages ? "Not enough memory." : "Balloon floor reached."); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index a4f473ef4e5c..a97a9d058198 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -733,7 +733,7 @@ config I2C_LPC2K config I2C_MLXBF tristate "Mellanox BlueField I2C controller" - depends on ARM64 + depends on MELLANOX_PLATFORM && ARM64 help Enabling this option will add I2C SMBus support for Mellanox BlueField system. diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c index 44974b53a626..0d15f4c1e9f7 100644 --- a/drivers/i2c/busses/i2c-designware-slave.c +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -159,7 +159,6 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) u32 raw_stat, stat, enabled, tmp; u8 val = 0, slave_activity; - regmap_read(dev->map, DW_IC_INTR_STAT, &stat); regmap_read(dev->map, DW_IC_ENABLE, &enabled); regmap_read(dev->map, DW_IC_RAW_INTR_STAT, &raw_stat); regmap_read(dev->map, DW_IC_STATUS, &tmp); @@ -168,32 +167,30 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY) || !dev->slave) return 0; + stat = i2c_dw_read_clear_intrbits_slave(dev); dev_dbg(dev->dev, "%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n", enabled, slave_activity, raw_stat, stat); - if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET)) - i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); + if (stat & DW_IC_INTR_RX_FULL) { + if (dev->status != STATUS_WRITE_IN_PROGRESS) { + dev->status = STATUS_WRITE_IN_PROGRESS; + i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, + &val); + } + + regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + val = tmp; + if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, + &val)) + dev_vdbg(dev->dev, "Byte %X acked!", val); + } if (stat & DW_IC_INTR_RD_REQ) { if (slave_activity) { - if (stat & DW_IC_INTR_RX_FULL) { - regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); - val = tmp; - - if (!i2c_slave_event(dev->slave, - I2C_SLAVE_WRITE_RECEIVED, - &val)) { - dev_vdbg(dev->dev, "Byte %X acked!", - val); - } - regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); - stat = i2c_dw_read_clear_intrbits_slave(dev); - } else { - regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); - regmap_read(dev->map, DW_IC_CLR_RX_UNDER, &tmp); - stat = i2c_dw_read_clear_intrbits_slave(dev); - } + regmap_read(dev->map, DW_IC_CLR_RD_REQ, &tmp); + + dev->status = STATUS_READ_IN_PROGRESS; if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_REQUESTED, &val)) @@ -205,21 +202,11 @@ static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, &val)) regmap_read(dev->map, DW_IC_CLR_RX_DONE, &tmp); - - i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); - stat = i2c_dw_read_clear_intrbits_slave(dev); - return 1; } - if (stat & DW_IC_INTR_RX_FULL) { - regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); - val = tmp; - if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, - &val)) - dev_vdbg(dev->dev, "Byte %X acked!", val); - } else { + if (stat & DW_IC_INTR_STOP_DET) { + dev->status = STATUS_IDLE; i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); - stat = i2c_dw_read_clear_intrbits_slave(dev); } return 1; @@ -230,7 +217,6 @@ static irqreturn_t 
i2c_dw_isr_slave(int this_irq, void *dev_id) struct dw_i2c_dev *dev = dev_id; int ret; - i2c_dw_read_clear_intrbits_slave(dev); ret = i2c_dw_irq_handler_slave(dev); if (ret > 0) complete(&dev->cmd_complete); diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index ee59e0da082d..33574d40ea9c 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -62,10 +62,8 @@ * Master. Default value is set to 400MHz. */ #define MLXBF_I2C_TYU_PLL_OUT_FREQ (400 * 1000 * 1000) -/* Reference clock for Bluefield 1 - 156 MHz. */ -#define MLXBF_I2C_TYU_PLL_IN_FREQ (156 * 1000 * 1000) -/* Reference clock for BlueField 2 - 200 MHz. */ -#define MLXBF_I2C_YU_PLL_IN_FREQ (200 * 1000 * 1000) +/* Reference clock for Bluefield - 156 MHz. */ +#define MLXBF_I2C_PLL_IN_FREQ (156 * 1000 * 1000) /* Constant used to determine the PLL frequency. */ #define MLNXBF_I2C_COREPLL_CONST 16384 @@ -489,44 +487,6 @@ static struct mutex mlxbf_i2c_bus_lock; #define MLXBF_I2C_FREQUENCY_1GHZ 1000000000 -static void mlxbf_i2c_write(void __iomem *io, int reg, u32 val) -{ - writel(val, io + reg); -} - -static u32 mlxbf_i2c_read(void __iomem *io, int reg) -{ - return readl(io + reg); -} - -/* - * This function is used to read data from Master GW Data Descriptor. - * Data bytes in the Master GW Data Descriptor are shifted left so the - * data starts at the MSB of the descriptor registers as set by the - * underlying hardware. TYU_READ_DATA enables byte swapping while - * reading data bytes, and MUST be called by the SMBus read routines - * to copy data from the 32 * 32-bit HW Data registers a.k.a Master GW - * Data Descriptor. - */ -static u32 mlxbf_i2c_read_data(void __iomem *io, int reg) -{ - return (u32)be32_to_cpu(mlxbf_i2c_read(io, reg)); -} - -/* - * This function is used to write data to the Master GW Data Descriptor. - * Data copied to the Master GW Data Descriptor MUST be shifted left so - * the data starts at the MSB of the descriptor registers as required by - * the underlying hardware. TYU_WRITE_DATA enables byte swapping when - * writing data bytes, and MUST be called by the SMBus write routines to - * copy data to the 32 * 32-bit HW Data registers a.k.a Master GW Data - * Descriptor. - */ -static void mlxbf_i2c_write_data(void __iomem *io, int reg, u32 val) -{ - mlxbf_i2c_write(io, reg, (u32)cpu_to_be32(val)); -} - /* * Function to poll a set of bits at a specific address; it checks whether * the bits are equal to zero when eq_zero is set to 'true', and not equal @@ -541,7 +501,7 @@ static u32 mlxbf_smbus_poll(void __iomem *io, u32 addr, u32 mask, timeout = (timeout / MLXBF_I2C_POLL_FREQ_IN_USEC) + 1; do { - bits = mlxbf_i2c_read(io, addr) & mask; + bits = readl(io + addr) & mask; if (eq_zero ? bits == 0 : bits != 0) return eq_zero ? 1 : bits; udelay(MLXBF_I2C_POLL_FREQ_IN_USEC); @@ -609,16 +569,16 @@ static int mlxbf_i2c_smbus_check_status(struct mlxbf_i2c_priv *priv) MLXBF_I2C_SMBUS_TIMEOUT); /* Read cause status bits. */ - cause_status_bits = mlxbf_i2c_read(priv->mst_cause->io, - MLXBF_I2C_CAUSE_ARBITER); + cause_status_bits = readl(priv->mst_cause->io + + MLXBF_I2C_CAUSE_ARBITER); cause_status_bits &= MLXBF_I2C_CAUSE_MASTER_ARBITER_BITS_MASK; /* * Parse both Cause and Master GW bits, then return transaction status. 
*/ - master_status_bits = mlxbf_i2c_read(priv->smbus->io, - MLXBF_I2C_SMBUS_MASTER_STATUS); + master_status_bits = readl(priv->smbus->io + + MLXBF_I2C_SMBUS_MASTER_STATUS); master_status_bits &= MLXBF_I2C_SMBUS_MASTER_STATUS_MASK; if (mlxbf_i2c_smbus_transaction_success(master_status_bits, @@ -649,10 +609,17 @@ static void mlxbf_i2c_smbus_write_data(struct mlxbf_i2c_priv *priv, aligned_length = round_up(length, 4); - /* Copy data bytes from 4-byte aligned source buffer. */ + /* + * Copy data bytes from 4-byte aligned source buffer. + * Data copied to the Master GW Data Descriptor MUST be shifted + * left so the data starts at the MSB of the descriptor registers + * as required by the underlying hardware. Enable byte swapping + * when writing data bytes to the 32 * 32-bit HW Data registers + * a.k.a Master GW Data Descriptor. + */ for (offset = 0; offset < aligned_length; offset += sizeof(u32)) { data32 = *((u32 *)(data + offset)); - mlxbf_i2c_write_data(priv->smbus->io, addr + offset, data32); + iowrite32be(data32, priv->smbus->io + addr + offset); } } @@ -664,15 +631,23 @@ static void mlxbf_i2c_smbus_read_data(struct mlxbf_i2c_priv *priv, mask = sizeof(u32) - 1; + /* + * Data bytes in the Master GW Data Descriptor are shifted left + * so the data starts at the MSB of the descriptor registers as + * set by the underlying hardware. Enable byte swapping while + * reading data bytes from the 32 * 32-bit HW Data registers + * a.k.a Master GW Data Descriptor. + */ + for (offset = 0; offset < (length & ~mask); offset += sizeof(u32)) { - data32 = mlxbf_i2c_read_data(priv->smbus->io, addr + offset); + data32 = ioread32be(priv->smbus->io + addr + offset); *((u32 *)(data + offset)) = data32; } if (!(length & mask)) return; - data32 = mlxbf_i2c_read_data(priv->smbus->io, addr + offset); + data32 = ioread32be(priv->smbus->io + addr + offset); for (byte = 0; byte < (length & mask); byte++) { data[offset + byte] = data32 & GENMASK(7, 0); @@ -698,16 +673,16 @@ static int mlxbf_i2c_smbus_enable(struct mlxbf_i2c_priv *priv, u8 slave, command |= rol32(pec_en, MLXBF_I2C_MASTER_SEND_PEC_SHIFT); /* Clear status bits. */ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_STATUS, 0x0); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_STATUS); /* Set the cause data. */ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_CAUSE_OR_CLEAR, ~0x0); + writel(~0x0, priv->smbus->io + MLXBF_I2C_CAUSE_OR_CLEAR); /* Zero PEC byte. */ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_PEC, 0x0); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_PEC); /* Zero byte count. */ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_RS_BYTES, 0x0); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_RS_BYTES); /* GW activation. */ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_GW, command); + writel(command, priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_GW); /* * Poll master status and check status bits. An ACK is sent when @@ -823,8 +798,8 @@ mlxbf_i2c_smbus_start_transaction(struct mlxbf_i2c_priv *priv, * needs to be 'manually' reset. This should be removed in * next tag integration. 
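A note on the accessor conversion running through this driver: on the little-endian BlueField SoC, the removed byte-swapping wrappers around writel()/readl() and the standard big-endian MMIO helpers should behave identically, roughly as sketched below (hypothetical helper names, illustration only):

#include <linux/io.h>

/*
 * Old style byte-swapped the value by hand and used plain writel()/readl();
 * the big-endian accessors perform the same bus access directly.
 */
static void descriptor_write(void __iomem *io, int reg, u32 val)
{
	/* old (removed): writel((u32)cpu_to_be32(val), io + reg); */
	iowrite32be(val, io + reg);
}

static u32 descriptor_read(void __iomem *io, int reg)
{
	/* old (removed): return be32_to_cpu(readl(io + reg)); */
	return ioread32be(io + reg);
}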
*/ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_MASTER_FSM, - MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK); + writel(MLXBF_I2C_SMBUS_MASTER_FSM_PS_STATE_MASK, + priv->smbus->io + MLXBF_I2C_SMBUS_MASTER_FSM); } return ret; @@ -1113,8 +1088,8 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv, timer |= mlxbf_i2c_set_timer(priv, timings->scl_low, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH, - timer); + writel(timer, priv->smbus->io + + MLXBF_I2C_SMBUS_TIMER_SCL_LOW_SCL_HIGH); timer = mlxbf_i2c_set_timer(priv, timings->sda_rise, false, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_0); @@ -1124,37 +1099,34 @@ static void mlxbf_i2c_set_timings(struct mlxbf_i2c_priv *priv, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_16); timer |= mlxbf_i2c_set_timer(priv, timings->scl_fall, false, MLXBF_I2C_MASK_8, MLXBF_I2C_SHIFT_24); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE, - timer); + writel(timer, priv->smbus->io + + MLXBF_I2C_SMBUS_TIMER_FALL_RISE_SPIKE); timer = mlxbf_i2c_set_timer(priv, timings->hold_start, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->hold_data, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_THOLD, timer); + writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_THOLD); timer = mlxbf_i2c_set_timer(priv, timings->setup_start, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->setup_stop, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); - mlxbf_i2c_write(priv->smbus->io, - MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP, timer); + writel(timer, priv->smbus->io + + MLXBF_I2C_SMBUS_TIMER_TSETUP_START_STOP); timer = mlxbf_i2c_set_timer(priv, timings->setup_data, true, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA, - timer); + writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_TIMER_TSETUP_DATA); timer = mlxbf_i2c_set_timer(priv, timings->buf, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_0); timer |= mlxbf_i2c_set_timer(priv, timings->thigh_max, false, MLXBF_I2C_MASK_16, MLXBF_I2C_SHIFT_16); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_THIGH_MAX_TBUF, - timer); + writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_THIGH_MAX_TBUF); timer = timings->timeout; - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT, - timer); + writel(timer, priv->smbus->io + MLXBF_I2C_SMBUS_SCL_LOW_TIMEOUT); } enum mlxbf_i2c_timings_config { @@ -1426,19 +1398,15 @@ static int mlxbf_i2c_init_master(struct platform_device *pdev, * platform firmware; disabling the bus might compromise the system * functionality. 
*/ - config_reg = mlxbf_i2c_read(gpio_res->io, - MLXBF_I2C_GPIO_0_FUNC_EN_0); + config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0); config_reg = MLXBF_I2C_GPIO_SMBUS_GW_ASSERT_PINS(priv->bus, config_reg); - mlxbf_i2c_write(gpio_res->io, MLXBF_I2C_GPIO_0_FUNC_EN_0, - config_reg); + writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FUNC_EN_0); - config_reg = mlxbf_i2c_read(gpio_res->io, - MLXBF_I2C_GPIO_0_FORCE_OE_EN); + config_reg = readl(gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN); config_reg = MLXBF_I2C_GPIO_SMBUS_GW_RESET_PINS(priv->bus, config_reg); - mlxbf_i2c_write(gpio_res->io, MLXBF_I2C_GPIO_0_FORCE_OE_EN, - config_reg); + writel(config_reg, gpio_res->io + MLXBF_I2C_GPIO_0_FORCE_OE_EN); mutex_unlock(gpio_res->lock); @@ -1452,10 +1420,9 @@ static u64 mlxbf_calculate_freq_from_tyu(struct mlxbf_i2c_resource *corepll_res) u32 corepll_val; u16 core_f; - pad_frequency = MLXBF_I2C_TYU_PLL_IN_FREQ; + pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_val = mlxbf_i2c_read(corepll_res->io, - MLXBF_I2C_CORE_PLL_REG1); + corepll_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); /* Get Core PLL configuration bits. */ core_f = rol32(corepll_val, MLXBF_I2C_COREPLL_CORE_F_TYU_SHIFT) & @@ -1488,12 +1455,10 @@ static u64 mlxbf_calculate_freq_from_yu(struct mlxbf_i2c_resource *corepll_res) u8 core_od, core_r; u32 core_f; - pad_frequency = MLXBF_I2C_YU_PLL_IN_FREQ; + pad_frequency = MLXBF_I2C_PLL_IN_FREQ; - corepll_reg1_val = mlxbf_i2c_read(corepll_res->io, - MLXBF_I2C_CORE_PLL_REG1); - corepll_reg2_val = mlxbf_i2c_read(corepll_res->io, - MLXBF_I2C_CORE_PLL_REG2); + corepll_reg1_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG1); + corepll_reg2_val = readl(corepll_res->io + MLXBF_I2C_CORE_PLL_REG2); /* Get Core PLL configuration bits */ core_f = rol32(corepll_reg1_val, MLXBF_I2C_COREPLL_CORE_F_YU_SHIFT) & @@ -1585,7 +1550,7 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr) * (7-bit address, 1 status bit (1 if enabled, 0 if not)). */ for (reg = 0; reg < reg_cnt; reg++) { - slave_reg = mlxbf_i2c_read(priv->smbus->io, + slave_reg = readl(priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4); /* * Each register holds 4 slave addresses. So, we have to keep @@ -1643,8 +1608,8 @@ static int mlxbf_slave_enable(struct mlxbf_i2c_priv *priv, u8 addr) /* Enable the slave address and update the register. */ slave_reg |= (1 << MLXBF_I2C_SMBUS_SLAVE_ADDR_EN_BIT) << (byte * 8); - mlxbf_i2c_write(priv->smbus->io, - MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4, slave_reg); + writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + + reg * 0x4); return 0; } @@ -1668,7 +1633,7 @@ static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv) * (7-bit address, 1 status bit (1 if enabled, 0 if not)). */ for (reg = 0; reg < reg_cnt; reg++) { - slave_reg = mlxbf_i2c_read(priv->smbus->io, + slave_reg = readl(priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4); /* Check whether the address slots are empty. */ @@ -1708,8 +1673,8 @@ static int mlxbf_slave_disable(struct mlxbf_i2c_priv *priv) /* Cleanup the slave address slot. */ slave_reg &= ~(GENMASK(7, 0) << (slave_byte * 8)); - mlxbf_i2c_write(priv->smbus->io, - MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + reg * 0x4, slave_reg); + writel(slave_reg, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG + + reg * 0x4); return 0; } @@ -1801,7 +1766,7 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev, int ret; /* Reset FSM. 
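Editor's note: the slave enable/disable hunks above walk MLXBF_I2C_SMBUS_SLAVE_ADDR_CFG registers in which each 32-bit word carries four slots, one byte per slave (7-bit address plus an enable flag). The standalone C below shows that packing; the enable-bit position (bit 7) and the helper names are assumptions for illustration, not taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

#define SLAVE_ADDR_EN_BIT	7	/* assumed position of the enable flag */

static uint32_t slot_set(uint32_t reg, unsigned int byte, uint8_t addr7)
{
	reg &= ~(0xffu << (byte * 8));			/* clear the slot      */
	reg |= (uint32_t)(addr7 & 0x7f) << (byte * 8);	/* 7-bit address       */
	reg |= (1u << SLAVE_ADDR_EN_BIT) << (byte * 8);	/* mark slot as active */
	return reg;
}

static int slot_enabled(uint32_t reg, unsigned int byte)
{
	return !!(reg & ((1u << SLAVE_ADDR_EN_BIT) << (byte * 8)));
}

int main(void)
{
	uint32_t reg = slot_set(0, 2, 0x50);	/* put address 0x50 in slot 2 */

	printf("reg=0x%08x slot2 enabled=%d\n", reg, slot_enabled(reg, 2));
	return 0;
}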
*/ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_FSM, 0); + writel(0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_FSM); /* * Enable slave cause interrupt bits. Drive @@ -1810,15 +1775,13 @@ static int mlxbf_i2c_init_slave(struct platform_device *pdev, * masters issue a Read and Write, respectively. But, clear all * interrupts first. */ - mlxbf_i2c_write(priv->slv_cause->io, - MLXBF_I2C_CAUSE_OR_CLEAR, ~0); + writel(~0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); int_reg = MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE; int_reg |= MLXBF_I2C_CAUSE_WRITE_SUCCESS; - mlxbf_i2c_write(priv->slv_cause->io, - MLXBF_I2C_CAUSE_OR_EVTEN0, int_reg); + writel(int_reg, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_EVTEN0); /* Finally, set the 'ready' bit to start handling transactions. */ - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1); + writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY); /* Initialize the cause coalesce resource. */ ret = mlxbf_i2c_init_coalesce(pdev, priv); @@ -1844,23 +1807,21 @@ static bool mlxbf_i2c_has_coalesce(struct mlxbf_i2c_priv *priv, bool *read, MLXBF_I2C_CAUSE_YU_SLAVE_BIT : priv->bus + MLXBF_I2C_CAUSE_TYU_SLAVE_BIT; - coalesce0_reg = mlxbf_i2c_read(priv->coalesce->io, - MLXBF_I2C_CAUSE_COALESCE_0); + coalesce0_reg = readl(priv->coalesce->io + MLXBF_I2C_CAUSE_COALESCE_0); is_set = coalesce0_reg & (1 << slave_shift); if (!is_set) return false; /* Check the source of the interrupt, i.e. whether a Read or Write. */ - cause_reg = mlxbf_i2c_read(priv->slv_cause->io, - MLXBF_I2C_CAUSE_ARBITER); + cause_reg = readl(priv->slv_cause->io + MLXBF_I2C_CAUSE_ARBITER); if (cause_reg & MLXBF_I2C_CAUSE_READ_WAIT_FW_RESPONSE) *read = true; else if (cause_reg & MLXBF_I2C_CAUSE_WRITE_SUCCESS) *write = true; /* Clear cause bits. */ - mlxbf_i2c_write(priv->slv_cause->io, MLXBF_I2C_CAUSE_OR_CLEAR, ~0x0); + writel(~0x0, priv->slv_cause->io + MLXBF_I2C_CAUSE_OR_CLEAR); return true; } @@ -1900,8 +1861,8 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes) * address, if supplied. */ if (recv_bytes > 0) { - data32 = mlxbf_i2c_read_data(priv->smbus->io, - MLXBF_I2C_SLAVE_DATA_DESC_ADDR); + data32 = ioread32be(priv->smbus->io + + MLXBF_I2C_SLAVE_DATA_DESC_ADDR); /* Parse the received bytes. */ switch (recv_bytes) { @@ -1966,7 +1927,7 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes) control32 |= rol32(write_size, MLXBF_I2C_SLAVE_WRITE_BYTES_SHIFT); control32 |= rol32(pec_en, MLXBF_I2C_SLAVE_SEND_PEC_SHIFT); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_GW, control32); + writel(control32, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_GW); /* * Wait until the transfer is completed; the driver will wait @@ -1975,10 +1936,9 @@ static int mlxbf_smbus_irq_send(struct mlxbf_i2c_priv *priv, u8 recv_bytes) mlxbf_smbus_slave_wait_for_idle(priv, MLXBF_I2C_SMBUS_TIMEOUT); /* Release the Slave GW. */ - mlxbf_i2c_write(priv->smbus->io, - MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES, 0x0); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_PEC, 0x0); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC); + writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY); return 0; } @@ -2023,10 +1983,9 @@ static int mlxbf_smbus_irq_recv(struct mlxbf_i2c_priv *priv, u8 recv_bytes) i2c_slave_event(slave, I2C_SLAVE_STOP, &value); /* Release the Slave GW. 
*/ - mlxbf_i2c_write(priv->smbus->io, - MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES, 0x0); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_PEC, 0x0); - mlxbf_i2c_write(priv->smbus->io, MLXBF_I2C_SMBUS_SLAVE_READY, 0x1); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); + writel(0x0, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_PEC); + writel(0x1, priv->smbus->io + MLXBF_I2C_SMBUS_SLAVE_READY); return ret; } @@ -2061,8 +2020,8 @@ static irqreturn_t mlxbf_smbus_irq(int irq, void *ptr) * slave, if the higher 8 bits are sent then the slave expect N bytes * from the master. */ - rw_bytes_reg = mlxbf_i2c_read(priv->smbus->io, - MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); + rw_bytes_reg = readl(priv->smbus->io + + MLXBF_I2C_SMBUS_SLAVE_RS_MASTER_BYTES); recv_bytes = (rw_bytes_reg >> 8) & GENMASK(7, 0); /* @@ -2264,6 +2223,7 @@ static const struct of_device_id mlxbf_i2c_dt_ids[] = { MODULE_DEVICE_TABLE(of, mlxbf_i2c_dt_ids); +#ifdef CONFIG_ACPI static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = { { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] }, { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] }, @@ -2305,6 +2265,12 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) return ret; } +#else +static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) +{ + return -ENOENT; +} +#endif /* CONFIG_ACPI */ static int mlxbf_i2c_of_probe(struct device *dev, struct mlxbf_i2c_priv *priv) { @@ -2473,7 +2439,9 @@ static struct platform_driver mlxbf_i2c_driver = { .driver = { .name = "i2c-mlxbf", .of_match_table = mlxbf_i2c_dt_ids, +#ifdef CONFIG_ACPI .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), +#endif /* CONFIG_ACPI */ }, }; @@ -2502,5 +2470,5 @@ static void __exit mlxbf_i2c_exit(void) module_exit(mlxbf_i2c_exit); MODULE_DESCRIPTION("Mellanox BlueField I2C bus driver"); -MODULE_AUTHOR("Khalil Blaiech <kblaiech@mellanox.com>"); +MODULE_AUTHOR("Khalil Blaiech <kblaiech@nvidia.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 0cbdfbe605b5..33de99b7bc20 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -475,6 +475,10 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) { u16 control_reg; + writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); + udelay(50); + writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); + mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET); /* Set ioconfig */ @@ -529,10 +533,6 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c) mtk_i2c_writew(i2c, control_reg, OFFSET_CONTROL); mtk_i2c_writew(i2c, I2C_DELAY_LEN, OFFSET_DELAY_LEN); - - writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST); - udelay(50); - writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST); } static const struct i2c_spec_values *mtk_i2c_get_spec(unsigned int speed) diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index cab725559999..bdd60770779a 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -129,6 +129,7 @@ struct sh_mobile_i2c_data { int sr; bool send_stop; bool stop_after_dma; + bool atomic_xfer; struct resource *res; struct dma_chan *dma_tx; @@ -330,13 +331,15 @@ static unsigned char i2c_op(struct sh_mobile_i2c_data *pd, enum sh_mobile_i2c_op ret = iic_rd(pd, ICDR); break; case OP_RX_STOP: /* enable DTE interrupt, issue stop */ - iic_wr(pd, ICIC, - ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + if (!pd->atomic_xfer) 
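Editor's note: the CONFIG_ACPI guard added above follows a common pattern: the ACPI ID table and probe helper only exist when ACPI support is built in, and a stub returning -ENOENT lets the shared probe code fall through to the OF path. A generic restatement, with "foo_*" placeholder names that are not part of the driver:

#include <linux/device.h>
#include <linux/errno.h>

struct foo_priv;

#ifdef CONFIG_ACPI
static int foo_acpi_probe(struct device *dev, struct foo_priv *priv)
{
	/* real ACPI companion lookup would go here */
	return 0;
}
#else
static int foo_acpi_probe(struct device *dev, struct foo_priv *priv)
{
	return -ENOENT;	/* no ACPI: caller tries the DT path instead */
}
#endif /* CONFIG_ACPI */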
+ iic_wr(pd, ICIC, + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; case OP_RX_STOP_DATA: /* enable DTE interrupt, read data, issue stop */ - iic_wr(pd, ICIC, - ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); + if (!pd->atomic_xfer) + iic_wr(pd, ICIC, + ICIC_DTEE | ICIC_WAITE | ICIC_ALE | ICIC_TACKE); ret = iic_rd(pd, ICDR); iic_wr(pd, ICCR, ICCR_ICE | ICCR_RACK); break; @@ -429,7 +432,8 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id) if (wakeup) { pd->sr |= SW_DONE; - wake_up(&pd->wait); + if (!pd->atomic_xfer) + wake_up(&pd->wait); } /* defeat write posting to avoid spurious WAIT interrupts */ @@ -581,6 +585,9 @@ static void start_ch(struct sh_mobile_i2c_data *pd, struct i2c_msg *usr_msg, pd->pos = -1; pd->sr = 0; + if (pd->atomic_xfer) + return; + pd->dma_buf = i2c_get_dma_safe_msg_buf(pd->msg, 8); if (pd->dma_buf) sh_mobile_i2c_xfer_dma(pd); @@ -637,15 +644,13 @@ static int poll_busy(struct sh_mobile_i2c_data *pd) return i ? 0 : -ETIMEDOUT; } -static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, - struct i2c_msg *msgs, - int num) +static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd, + struct i2c_msg *msgs, int num) { - struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); struct i2c_msg *msg; int err = 0; int i; - long timeout; + long time_left; /* Wake up device and enable clock */ pm_runtime_get_sync(pd->dev); @@ -662,15 +667,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, if (do_start) i2c_op(pd, OP_START); - /* The interrupt handler takes care of the rest... */ - timeout = wait_event_timeout(pd->wait, - pd->sr & (ICSR_TACK | SW_DONE), - adapter->timeout); - - /* 'stop_after_dma' tells if DMA transfer was complete */ - i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, pd->stop_after_dma); + if (pd->atomic_xfer) { + unsigned long j = jiffies + pd->adap.timeout; + + time_left = time_before_eq(jiffies, j); + while (time_left && + !(pd->sr & (ICSR_TACK | SW_DONE))) { + unsigned char sr = iic_rd(pd, ICSR); + + if (sr & (ICSR_AL | ICSR_TACK | + ICSR_WAIT | ICSR_DTE)) { + sh_mobile_i2c_isr(0, pd); + udelay(150); + } else { + cpu_relax(); + } + time_left = time_before_eq(jiffies, j); + } + } else { + /* The interrupt handler takes care of the rest... 
*/ + time_left = wait_event_timeout(pd->wait, + pd->sr & (ICSR_TACK | SW_DONE), + pd->adap.timeout); + + /* 'stop_after_dma' tells if DMA xfer was complete */ + i2c_put_dma_safe_msg_buf(pd->dma_buf, pd->msg, + pd->stop_after_dma); + } - if (!timeout) { + if (!time_left) { dev_err(pd->dev, "Transfer request timed out\n"); if (pd->dma_direction != DMA_NONE) sh_mobile_i2c_cleanup_dma(pd); @@ -696,14 +721,35 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, return err ?: num; } +static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); + + pd->atomic_xfer = false; + return sh_mobile_xfer(pd, msgs, num); +} + +static int sh_mobile_i2c_xfer_atomic(struct i2c_adapter *adapter, + struct i2c_msg *msgs, + int num) +{ + struct sh_mobile_i2c_data *pd = i2c_get_adapdata(adapter); + + pd->atomic_xfer = true; + return sh_mobile_xfer(pd, msgs, num); +} + static u32 sh_mobile_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING; } static const struct i2c_algorithm sh_mobile_i2c_algorithm = { - .functionality = sh_mobile_i2c_func, - .master_xfer = sh_mobile_i2c_xfer, + .functionality = sh_mobile_i2c_func, + .master_xfer = sh_mobile_i2c_xfer, + .master_xfer_atomic = sh_mobile_i2c_xfer_atomic, }; static const struct i2c_adapter_quirks sh_mobile_i2c_quirks = { diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h index f0e5ffba2d51..97ed8f952f6e 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.h @@ -176,7 +176,7 @@ struct pvrdma_port_attr { u8 subnet_timeout; u8 init_type_reply; u8 active_width; - u16 active_speed; + u8 active_speed; u8 phys_state; u8 reserved[2]; }; diff --git a/drivers/infiniband/sw/rdmavt/vt.c b/drivers/infiniband/sw/rdmavt/vt.c index 52218684ad4a..670a9623b46e 100644 --- a/drivers/infiniband/sw/rdmavt/vt.c +++ b/drivers/infiniband/sw/rdmavt/vt.c @@ -524,6 +524,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb) int rvt_register_device(struct rvt_dev_info *rdi) { int ret = 0, i; + u64 dma_mask; if (!rdi) return -EINVAL; @@ -580,8 +581,10 @@ int rvt_register_device(struct rvt_dev_info *rdi) /* DMA Operations */ rdi->ibdev.dev.dma_parms = rdi->ibdev.dev.parent->dma_parms; - dma_set_coherent_mask(&rdi->ibdev.dev, - rdi->ibdev.dev.parent->coherent_dma_mask); + dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32); + ret = dma_coerce_mask_and_coherent(&rdi->ibdev.dev, dma_mask); + if (ret) + goto bail_wss; /* Protection Domain */ spin_lock_init(&rdi->n_pds_lock); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 1fc022362fbe..f9c832e82552 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -1118,6 +1118,7 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) int err; struct ib_device *dev = &rxe->ib_dev; struct crypto_shash *tfm; + u64 dma_mask; strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc)); @@ -1130,7 +1131,10 @@ int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name) rxe->ndev->dev_addr); dev->dev.dma_parms = &rxe->dma_parms; dma_set_max_seg_size(&dev->dev, UINT_MAX); - dma_set_coherent_mask(&dev->dev, dma_get_required_mask(&dev->dev)); + dma_mask = IS_ENABLED(CONFIG_64BIT) ? 
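Editor's note: the sh_mobile atomic path above avoids sleeping by polling the controller until a jiffies deadline passes, invoking the ISR by hand when a status bit shows up. Below is a generic sketch of that kind of polled wait; the callback indirection is an invention of this example, not the driver's structure.

#include <linux/jiffies.h>
#include <linux/processor.h>
#include <linux/types.h>

static bool poll_until_done(bool (*hw_done)(void *ctx), void *ctx,
			    unsigned long timeout_jiffies)
{
	unsigned long deadline = jiffies + timeout_jiffies;

	while (time_before_eq(jiffies, deadline)) {
		if (hw_done(ctx))
			return true;
		cpu_relax();	/* no sleeping: may run with IRQs off */
	}
	return hw_done(ctx);	/* one last check after the deadline */
}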
DMA_BIT_MASK(64) : DMA_BIT_MASK(32); + err = dma_coerce_mask_and_coherent(&dev->dev, dma_mask); + if (err) + return err; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index ca8bc7296867..181e06c1c43d 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -306,6 +306,7 @@ static struct siw_device *siw_device_create(struct net_device *netdev) struct siw_device *sdev = NULL; struct ib_device *base_dev; struct device *parent = netdev->dev.parent; + u64 dma_mask; int rv; if (!parent) { @@ -384,8 +385,10 @@ static struct siw_device *siw_device_create(struct net_device *netdev) base_dev->dev.parent = parent; base_dev->dev.dma_parms = &sdev->dma_parms; dma_set_max_seg_size(&base_dev->dev, UINT_MAX); - dma_set_coherent_mask(&base_dev->dev, - dma_get_required_mask(&base_dev->dev)); + dma_mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32); + if (dma_coerce_mask_and_coherent(&base_dev->dev, dma_mask)) + goto error; + base_dev->num_comp_vectors = num_possible_cpus(); xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 0065eb17ae36..53a8becac827 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -622,10 +622,11 @@ static int srpt_refresh_port(struct srpt_port *sport) /** * srpt_unregister_mad_agent - unregister MAD callback functions * @sdev: SRPT HCA pointer. + * @port_cnt: number of ports with registered MAD * * Note: It is safe to call this function more than once for the same device. */ -static void srpt_unregister_mad_agent(struct srpt_device *sdev) +static void srpt_unregister_mad_agent(struct srpt_device *sdev, int port_cnt) { struct ib_port_modify port_modify = { .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP, @@ -633,7 +634,7 @@ static void srpt_unregister_mad_agent(struct srpt_device *sdev) struct srpt_port *sport; int i; - for (i = 1; i <= sdev->device->phys_port_cnt; i++) { + for (i = 1; i <= port_cnt; i++) { sport = &sdev->port[i - 1]; WARN_ON(sport->port != i); if (sport->mad_agent) { @@ -3185,7 +3186,8 @@ static int srpt_add_one(struct ib_device *device) if (ret) { pr_err("MAD registration failed for %s-%d.\n", dev_name(&sdev->device->dev), i); - goto err_event; + i--; + goto err_port; } } @@ -3197,7 +3199,8 @@ static int srpt_add_one(struct ib_device *device) pr_debug("added %s.\n", dev_name(&device->dev)); return 0; -err_event: +err_port: + srpt_unregister_mad_agent(sdev, i); ib_unregister_event_handler(&sdev->event_handler); err_cm: if (sdev->cm_id) @@ -3221,7 +3224,7 @@ static void srpt_remove_one(struct ib_device *device, void *client_data) struct srpt_device *sdev = client_data; int i; - srpt_unregister_mad_agent(sdev); + srpt_unregister_mad_agent(sdev, sdev->device->phys_port_cnt); ib_unregister_event_handler(&sdev->event_handler); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index 41435a699b53..bdeb010efee6 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h @@ -256,6 +256,7 @@ enum rdma_ch_state { * @rdma_cm: See below. * @rdma_cm.cm_id: RDMA CM ID associated with the channel. * @cq: IB completion queue for this channel. + * @cq_size: Number of CQEs in @cq. * @zw_cqe: Zero-length write CQE. * @rcu: RCU head. * @kref: kref for this channel. 
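Editor's note: the rvt, rxe and siw hunks all switch to dma_coerce_mask_and_coherent() with a mask chosen by CONFIG_64BIT, and start checking the result. The same idiom in isolation (the device pointer name is arbitrary):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int set_soft_rdma_dma_mask(struct device *ibdev)
{
	u64 mask = IS_ENABLED(CONFIG_64BIT) ? DMA_BIT_MASK(64)
					    : DMA_BIT_MASK(32);

	/* points dev->dma_mask at a usable mask, then sets both masks */
	return dma_coerce_mask_and_coherent(ibdev, mask);
}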
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index f696ac7c5f89..89647700bab2 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -409,7 +409,11 @@ extern bool amd_iommu_np_cache; /* Only true if all IOMMUs support device IOTLBs */ extern bool amd_iommu_iotlb_sup; -#define MAX_IRQS_PER_TABLE 256 +/* + * AMD IOMMU hardware only support 512 IRTEs despite + * the architectural limitation of 2048 entries. + */ +#define MAX_IRQS_PER_TABLE 512 #define IRQ_TABLE_ALIGNMENT 128 struct irq_remap_table { diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 8651f6d4dfa0..1b1ca63e6bbe 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2525,6 +2525,9 @@ struct dmar_domain *find_domain(struct device *dev) { struct device_domain_info *info; + if (unlikely(!dev || !dev->iommu)) + return NULL; + if (unlikely(attach_deferred(dev))) return NULL; diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index f1861fa3d0e4..3242ebd0bca3 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -279,6 +279,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct intel_svm_dev *sdev = NULL; struct dmar_domain *dmar_domain; + struct device_domain_info *info; struct intel_svm *svm = NULL; int ret = 0; @@ -310,6 +311,10 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, if (data->hpasid <= 0 || data->hpasid >= PASID_MAX) return -EINVAL; + info = get_domain_info(dev); + if (!info) + return -EINVAL; + dmar_domain = to_dmar_domain(domain); mutex_lock(&pasid_mutex); @@ -357,6 +362,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, goto out; } sdev->dev = dev; + sdev->sid = PCI_DEVID(info->bus, info->devfn); /* Only count users if device has aux domains */ if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) @@ -1029,7 +1035,7 @@ no_pasid: resp.qw0 = QI_PGRP_PASID(req->pasid) | QI_PGRP_DID(req->rid) | QI_PGRP_PASID_P(req->pasid_present) | - QI_PGRP_PDP(req->pasid_present) | + QI_PGRP_PDP(req->priv_data_present) | QI_PGRP_RESP_CODE(result) | QI_PGRP_RESP_TYPE; resp.qw1 = QI_PGRP_IDX(req->prg_index) | diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8c470f451a32..b53446bb8c6b 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2071,7 +2071,7 @@ EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate); static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data) { - u32 mask; + u64 mask; int i; if (data->version != IOMMU_GPASID_BIND_VERSION_1) diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index c6098eee0c7c..2aa79c32ee22 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -180,7 +180,6 @@ config IRQ_MIPS_CPU select GENERIC_IRQ_CHIP select GENERIC_IRQ_IPI if SYS_SUPPORTS_MULTITHREADING select IRQ_DOMAIN - select IRQ_DOMAIN_HIERARCHY if GENERIC_IRQ_IPI select GENERIC_IRQ_EFFECTIVE_AFF_MASK config CLPS711X_IRQCHIP @@ -315,7 +314,6 @@ config KEYSTONE_IRQ config MIPS_GIC bool select GENERIC_IRQ_IPI - select IRQ_DOMAIN_HIERARCHY select MIPS_CM config INGENIC_IRQ @@ -591,6 +589,7 @@ config LOONGSON_PCH_MSI config MST_IRQ bool "MStar Interrupt Controller" + depends on ARCH_MEDIATEK || ARCH_MSTARV7 || COMPILE_TEST default ARCH_MEDIATEK select IRQ_DOMAIN select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index 
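Editor's note: the one-character type change in iommu_check_bind_data() (u32 to u64 for the scratch mask) matters because the UAPI flags field is 64 bits wide: with a 32-bit mask, ~mask can never cover bits 32-63, so an unsupported high flag would slip through a "flags & ~mask" check. A standalone illustration; the bit positions are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t flags  = 1ULL << 40;	/* hypothetical unsupported high bit */
	uint32_t mask32 = 0x7;		/* supported bits, 32-bit scratch    */
	uint64_t mask64 = 0x7;		/* same bits, 64-bit scratch         */

	int rejected32 = (flags & ~mask32) != 0;  /* ~mask32 stays 32 bits */
	int rejected64 = (flags & ~mask64) != 0;

	printf("32-bit mask rejects the bad flag: %s\n", rejected32 ? "yes" : "no");
	printf("64-bit mask rejects the bad flag: %s\n", rejected64 ? "yes" : "no");
	return 0;
}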
97838eb705f9..cbc7c740e4dc 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -244,7 +244,7 @@ static int bcm2836_cpu_dying(unsigned int cpu) #define BITS_PER_MBOX 32 -static void bcm2836_arm_irqchip_smp_init(void) +static void __init bcm2836_arm_irqchip_smp_init(void) { struct irq_fwspec ipi_fwspec = { .fwnode = intc.domain->fwnode, diff --git a/drivers/irqchip/irq-mst-intc.c b/drivers/irqchip/irq-mst-intc.c index 4be077591898..143657b0cf28 100644 --- a/drivers/irqchip/irq-mst-intc.c +++ b/drivers/irqchip/irq-mst-intc.c @@ -154,8 +154,8 @@ static const struct irq_domain_ops mst_intc_domain_ops = { .free = irq_domain_free_irqs_common, }; -int __init -mst_intc_of_init(struct device_node *dn, struct device_node *parent) +static int __init mst_intc_of_init(struct device_node *dn, + struct device_node *parent) { struct irq_domain *domain, *domain_parent; struct mst_intc_chip_data *cd; diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 3819185bfd02..cb7f60b3b4a9 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -71,8 +71,7 @@ struct intc_irqpin_priv { }; struct intc_irqpin_config { - unsigned int irlm_bit; - unsigned needs_irlm:1; + int irlm_bit; /* -1 if non-existent */ }; static unsigned long intc_irqpin_read32(void __iomem *iomem) @@ -349,11 +348,10 @@ static const struct irq_domain_ops intc_irqpin_irq_domain_ops = { static const struct intc_irqpin_config intc_irqpin_irlm_r8a777x = { .irlm_bit = 23, /* ICR0.IRLM0 */ - .needs_irlm = 1, }; static const struct intc_irqpin_config intc_irqpin_rmobile = { - .needs_irlm = 0, + .irlm_bit = -1, }; static const struct of_device_id intc_irqpin_dt_ids[] = { @@ -470,7 +468,7 @@ static int intc_irqpin_probe(struct platform_device *pdev) } /* configure "individual IRQ mode" where needed */ - if (config && config->needs_irlm) { + if (config && config->irlm_bit >= 0) { if (io[INTC_IRQPIN_REG_IRLM]) intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_IRLM, config->irlm_bit, 1, 1); diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index eaa3e9fe54e9..6f432d2a5ceb 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -99,7 +99,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask, struct irq_data *d, int enable) { int cpu; - struct plic_priv *priv = irq_get_chip_data(d->irq); + struct plic_priv *priv = irq_data_get_irq_chip_data(d); writel(enable, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); for_each_cpu(cpu, mask) { @@ -115,7 +115,7 @@ static void plic_irq_unmask(struct irq_data *d) { struct cpumask amask; unsigned int cpu; - struct plic_priv *priv = irq_get_chip_data(d->irq); + struct plic_priv *priv = irq_data_get_irq_chip_data(d); cpumask_and(&amask, &priv->lmask, cpu_online_mask); cpu = cpumask_any_and(irq_data_get_affinity_mask(d), @@ -127,7 +127,7 @@ static void plic_irq_unmask(struct irq_data *d) static void plic_irq_mask(struct irq_data *d) { - struct plic_priv *priv = irq_get_chip_data(d->irq); + struct plic_priv *priv = irq_data_get_irq_chip_data(d); plic_irq_toggle(&priv->lmask, d, 0); } @@ -138,7 +138,7 @@ static int plic_set_affinity(struct irq_data *d, { unsigned int cpu; struct cpumask amask; - struct plic_priv *priv = irq_get_chip_data(d->irq); + struct plic_priv *priv = irq_data_get_irq_chip_data(d); cpumask_and(&amask, &priv->lmask, mask_val); @@ -151,7 +151,7 @@ static int plic_set_affinity(struct irq_data *d, 
return -EINVAL; plic_irq_toggle(&priv->lmask, d, 0); - plic_irq_toggle(cpumask_of(cpu), d, 1); + plic_irq_toggle(cpumask_of(cpu), d, !irqd_irq_masked(d)); irq_data_update_effective_affinity(d, cpumask_of(cpu)); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 0c2c61db26b4..8662d7b7b262 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -195,6 +195,10 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = { { .exti = 25, .irq_parent = 107, .chip = &stm32_exti_h_chip_direct }, { .exti = 30, .irq_parent = 52, .chip = &stm32_exti_h_chip_direct }, { .exti = 47, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct }, + { .exti = 48, .irq_parent = 138, .chip = &stm32_exti_h_chip_direct }, + { .exti = 50, .irq_parent = 139, .chip = &stm32_exti_h_chip_direct }, + { .exti = 52, .irq_parent = 140, .chip = &stm32_exti_h_chip_direct }, + { .exti = 53, .irq_parent = 141, .chip = &stm32_exti_h_chip_direct }, { .exti = 54, .irq_parent = 135, .chip = &stm32_exti_h_chip_direct }, { .exti = 61, .irq_parent = 100, .chip = &stm32_exti_h_chip_direct }, { .exti = 65, .irq_parent = 144, .chip = &stm32_exti_h_chip }, diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c index e0cceb81c648..b2ab8db439d9 100644 --- a/drivers/irqchip/irq-ti-sci-inta.c +++ b/drivers/irqchip/irq-ti-sci-inta.c @@ -85,6 +85,17 @@ struct ti_sci_inta_vint_desc { * @base: Base address of the memory mapped IO registers * @pdev: Pointer to platform device. * @ti_sci_id: TI-SCI device identifier + * @unmapped_cnt: Number of @unmapped_dev_ids entries + * @unmapped_dev_ids: Pointer to an array of TI-SCI device identifiers of + * unmapped event sources. + * Unmapped Events are not part of the Global Event Map and + * they are converted to Global event within INTA to be + * received by the same INTA to generate an interrupt. + * In case an interrupt request comes for a device which is + * generating Unmapped Event, we must use the INTA's TI-SCI + * device identifier in place of the source device + * identifier to let sysfw know where it has to program the + * Global Event number. */ struct ti_sci_inta_irq_domain { const struct ti_sci_handle *sci; @@ -96,11 +107,37 @@ struct ti_sci_inta_irq_domain { void __iomem *base; struct platform_device *pdev; u32 ti_sci_id; + + int unmapped_cnt; + u16 *unmapped_dev_ids; }; #define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \ events[i]) +static u16 ti_sci_inta_get_dev_id(struct ti_sci_inta_irq_domain *inta, u32 hwirq) +{ + u16 dev_id = HWIRQ_TO_DEVID(hwirq); + int i; + + if (inta->unmapped_cnt == 0) + return dev_id; + + /* + * For devices sending Unmapped Events we must use the INTA's TI-SCI + * device identifier number to be able to convert it to a Global Event + * and map it to an interrupt. 
+ */ + for (i = 0; i < inta->unmapped_cnt; i++) { + if (dev_id == inta->unmapped_dev_ids[i]) { + dev_id = inta->ti_sci_id; + break; + } + } + + return dev_id; +} + /** * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs * @desc: Pointer to irq_desc corresponding to the irq @@ -251,7 +288,7 @@ static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta u16 dev_id, dev_index; int err; - dev_id = HWIRQ_TO_DEVID(hwirq); + dev_id = ti_sci_inta_get_dev_id(inta, hwirq); dev_index = HWIRQ_TO_IRQID(hwirq); event_desc = &vint_desc->events[free_bit]; @@ -352,14 +389,15 @@ static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc, { struct ti_sci_inta_vint_desc *vint_desc; struct ti_sci_inta_irq_domain *inta; + u16 dev_id; vint_desc = to_vint_desc(event_desc, event_desc->vint_bit); inta = vint_desc->domain->host_data; + dev_id = ti_sci_inta_get_dev_id(inta, hwirq); /* free event irq */ mutex_lock(&inta->vint_mutex); inta->sci->ops.rm_irq_ops.free_event_map(inta->sci, - HWIRQ_TO_DEVID(hwirq), - HWIRQ_TO_IRQID(hwirq), + dev_id, HWIRQ_TO_IRQID(hwirq), inta->ti_sci_id, vint_desc->vint_id, event_desc->global_event, @@ -574,6 +612,41 @@ static struct msi_domain_info ti_sci_inta_msi_domain_info = { .chip = &ti_sci_inta_msi_irq_chip, }; +static int ti_sci_inta_get_unmapped_sources(struct ti_sci_inta_irq_domain *inta) +{ + struct device *dev = &inta->pdev->dev; + struct device_node *node = dev_of_node(dev); + struct of_phandle_iterator it; + int count, err, ret, i; + + count = of_count_phandle_with_args(node, "ti,unmapped-event-sources", NULL); + if (count <= 0) + return 0; + + inta->unmapped_dev_ids = devm_kcalloc(dev, count, + sizeof(*inta->unmapped_dev_ids), + GFP_KERNEL); + if (!inta->unmapped_dev_ids) + return -ENOMEM; + + i = 0; + of_for_each_phandle(&it, err, node, "ti,unmapped-event-sources", NULL, 0) { + u32 dev_id; + + ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure for %pOFf\n", it.node); + of_node_put(it.node); + return ret; + } + inta->unmapped_dev_ids[i++] = dev_id; + } + + inta->unmapped_cnt = count; + + return 0; +} + static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) { struct irq_domain *parent_domain, *domain, *msi_domain; @@ -629,6 +702,10 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev) if (IS_ERR(inta->base)) return PTR_ERR(inta->base); + ret = ti_sci_inta_get_unmapped_sources(inta); + if (ret) + return ret; + domain = irq_domain_add_linear(dev_of_node(dev), ti_sci_get_num_resources(inta->vint), &ti_sci_inta_irq_domain_ops, inta); diff --git a/drivers/mtd/nand/raw/fsl_ifc_nand.c b/drivers/mtd/nand/raw/fsl_ifc_nand.c index 0e7a9b64301e..e345f9d9f8e8 100644 --- a/drivers/mtd/nand/raw/fsl_ifc_nand.c +++ b/drivers/mtd/nand/raw/fsl_ifc_nand.c @@ -707,6 +707,30 @@ static int fsl_ifc_attach_chip(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); + struct fsl_ifc_ctrl *ctrl = priv->ctrl; + struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; + u32 csor; + + csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); + + /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */ + if (csor & CSOR_NAND_ECC_DEC_EN) { + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; + mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops); + + /* Hardware generates ECC per 512 Bytes */ + chip->ecc.size = 512; + if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) { + 
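Editor's note: for reference, the general shape of the "ti,unmapped-event-sources" parsing added above — count the phandles, size an array, then read "ti,sci-dev-id" from each target node — is sketched here with placeholder names and plain kcalloc() instead of the devm variant; note the of_node_put() on the early-exit path, which the iterator otherwise handles itself.

#include <linux/of.h>
#include <linux/slab.h>

static int collect_unmapped_dev_ids(struct device_node *np,
				    u16 **ids_out, int *cnt_out)
{
	struct of_phandle_iterator it;
	int count, err, ret, i = 0;
	u16 *ids;

	count = of_count_phandle_with_args(np, "ti,unmapped-event-sources", NULL);
	if (count <= 0)
		return 0;	/* property absent: nothing to translate */

	ids = kcalloc(count, sizeof(*ids), GFP_KERNEL);
	if (!ids)
		return -ENOMEM;

	of_for_each_phandle(&it, err, np, "ti,unmapped-event-sources", NULL, 0) {
		u32 dev_id;

		ret = of_property_read_u32(it.node, "ti,sci-dev-id", &dev_id);
		if (ret) {
			of_node_put(it.node);	/* iterator holds a reference */
			kfree(ids);
			return ret;
		}
		ids[i++] = dev_id;
	}

	*ids_out = ids;
	*cnt_out = count;
	return 0;
}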
chip->ecc.bytes = 8; + chip->ecc.strength = 4; + } else { + chip->ecc.bytes = 16; + chip->ecc.strength = 8; + } + } else { + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + chip->ecc.algo = NAND_ECC_ALGO_HAMMING; + } dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__, nanddev_ntargets(&chip->base)); @@ -910,25 +934,6 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) return -ENODEV; } - /* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */ - if (csor & CSOR_NAND_ECC_DEC_EN) { - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops); - - /* Hardware generates ECC per 512 Bytes */ - chip->ecc.size = 512; - if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) { - chip->ecc.bytes = 8; - chip->ecc.strength = 4; - } else { - chip->ecc.bytes = 16; - chip->ecc.strength = 8; - } - } else { - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; - } - ret = fsl_ifc_sram_init(priv); if (ret) return ret; diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index d4200eb2ad32..684c51e5e60d 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -1681,6 +1681,11 @@ static int mxcnd_attach_chip(struct nand_chip *chip) struct mxc_nand_host *host = nand_get_controller_data(chip); struct device *dev = mtd->dev.parent; + chip->ecc.bytes = host->devtype_data->eccbytes; + host->eccsize = host->devtype_data->eccsize; + chip->ecc.size = 512; + mtd_set_ooblayout(mtd, host->devtype_data->ooblayout); + switch (chip->ecc.engine_type) { case NAND_ECC_ENGINE_TYPE_ON_HOST: chip->ecc.read_page = mxc_nand_read_page; @@ -1836,19 +1841,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (host->devtype_data->axi_offset) host->regs_axi = host->base + host->devtype_data->axi_offset; - this->ecc.bytes = host->devtype_data->eccbytes; - host->eccsize = host->devtype_data->eccsize; - this->legacy.select_chip = host->devtype_data->select_chip; - this->ecc.size = 512; - mtd_set_ooblayout(mtd, host->devtype_data->ooblayout); - - if (host->pdata.hw_ecc) { - this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - } else { - this->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - this->ecc.algo = NAND_ECC_ALGO_HAMMING; - } /* NAND bus width determines access functions used by upper layer */ if (host->pdata.width == 2) diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index b31a5818234d..550bda4d1415 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -1708,6 +1708,13 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) return -EINVAL; } + /* Default ECC settings in case they are not set in the device tree */ + if (!chip->ecc.size) + chip->ecc.size = FMC2_ECC_STEP_SIZE; + + if (!chip->ecc.strength) + chip->ecc.strength = FMC2_ECC_BCH8; + ret = nand_ecc_choose_conf(chip, &stm32_fmc2_nfc_ecc_caps, mtd->oobsize - FMC2_BBM_LEN); if (ret) { @@ -1727,8 +1734,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) mtd_set_ooblayout(mtd, &stm32_fmc2_nfc_ooblayout_ops); - if (chip->options & NAND_BUSWIDTH_16) - stm32_fmc2_nfc_set_buswidth_16(nfc, true); + stm32_fmc2_nfc_setup(chip); return 0; } @@ -1952,11 +1958,6 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev) chip->options |= NAND_BUSWIDTH_AUTO | NAND_NO_SUBPAGE_WRITE | NAND_USES_DMA; - /* Default ECC settings */ - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST; - chip->ecc.size = 
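Editor's note: the fsl_ifc and stm32_fmc2 hunks both move ECC configuration into the ->attach_chip() hook, and the stm32 one fills in only the values the device tree left at zero. A stripped-down sketch of that "default only if unset" shape; the step size and strength numbers are placeholders:

#include <linux/mtd/rawnand.h>

static int example_attach_chip(struct nand_chip *chip)
{
	if (!chip->ecc.size)
		chip->ecc.size = 512;	/* placeholder step size */
	if (!chip->ecc.strength)
		chip->ecc.strength = 8;	/* placeholder strength  */

	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT)
		chip->ecc.algo = NAND_ECC_ALGO_HAMMING;

	return 0;
}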
FMC2_ECC_STEP_SIZE; - chip->ecc.strength = FMC2_ECC_BCH8; - /* Scan to find existence of the device */ ret = nand_scan(chip, nand->ncs); if (ret) diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 0369d98b2d12..f0ae7a01703a 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2701,11 +2701,10 @@ static void spi_nor_sfdp_init_params(struct spi_nor *nor) memcpy(&sfdp_params, nor->params, sizeof(sfdp_params)); - if (spi_nor_parse_sfdp(nor, &sfdp_params)) { + if (spi_nor_parse_sfdp(nor, nor->params)) { + memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); nor->addr_width = 0; nor->flags &= ~SNOR_F_4B_OPCODES; - } else { - memcpy(nor->params, &sfdp_params, sizeof(*nor->params)); } } @@ -3009,13 +3008,15 @@ static int spi_nor_set_addr_width(struct spi_nor *nor) /* already configured from SFDP */ } else if (nor->info->addr_width) { nor->addr_width = nor->info->addr_width; - } else if (nor->mtd.size > 0x1000000) { - /* enable 4-byte addressing if the device exceeds 16MiB */ - nor->addr_width = 4; } else { nor->addr_width = 3; } + if (nor->addr_width == 3 && nor->mtd.size > 0x1000000) { + /* enable 4-byte addressing if the device exceeds 16MiB */ + nor->addr_width = 4; + } + if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) { dev_dbg(nor->dev, "address width is too large: %u\n", nor->addr_width); diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index b70ded3760f2..6dee4f8f2024 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -512,9 +512,13 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) */ struct sk_buff *skb = priv->echo_skb[idx]; struct canfd_frame *cf = (struct canfd_frame *)skb->data; - u8 len = cf->len; - *len_ptr = len; + /* get the real payload length for netdev statistics */ + if (cf->can_id & CAN_RTR_FLAG) + *len_ptr = 0; + else + *len_ptr = cf->len; + priv->echo_skb[idx] = NULL; return skb; @@ -538,7 +542,11 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) if (!skb) return 0; - netif_rx(skb); + skb_get(skb); + if (netif_rx(skb) == NET_RX_SUCCESS) + dev_consume_skb_any(skb); + else + dev_kfree_skb_any(skb); return len; } diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 4d594e977497..881799bd9c5e 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c @@ -217,7 +217,7 @@ * MX8MP FlexCAN3 03.00.17.01 yes yes no yes yes yes * VF610 FlexCAN3 ? no yes no yes yes? no * LS1021A FlexCAN2 03.00.04.00 no yes no no yes no - * LX2160A FlexCAN3 03.00.23.00 no yes no no yes yes + * LX2160A FlexCAN3 03.00.23.00 no yes no yes yes yes * * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. 
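Editor's note: the can_get_echo_skb() change hands the looped-back skb to the stack while keeping a private reference, so the local release can always go through the any-context helpers and delivered frames are not counted as drops. The same dance in isolation, with a hypothetical wrapper name:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void hand_up_echo_skb(struct sk_buff *skb)
{
	skb_get(skb);				/* hold our own reference */
	if (netif_rx(skb) == NET_RX_SUCCESS)
		dev_consume_skb_any(skb);	/* delivered: not a drop  */
	else
		dev_kfree_skb_any(skb);		/* backlog full: a drop   */
}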
*/ @@ -400,19 +400,19 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | - FLEXCAN_QUIRK_BROKEN_PERR_STATE, + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC, }; static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | - FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, + FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, }; static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | - FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD, + FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_SUPPORT_FD | + FLEXCAN_QUIRK_SUPPORT_ECC, }; static const struct can_bittiming_const flexcan_bittiming_const = { @@ -2062,6 +2062,8 @@ static int flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); + device_set_wakeup_enable(&pdev->dev, false); + device_set_wakeup_capable(&pdev->dev, false); unregister_flexcandev(dev); pm_runtime_disable(&pdev->dev); free_candev(dev); diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 10aa3e457c33..40c33b8a5fda 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -262,8 +262,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, cf_len = get_can_dlc(pucan_msg_get_dlc(msg)); /* if this frame is an echo, */ - if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) && - !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) { + if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) { unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); @@ -277,7 +276,13 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, netif_wake_queue(priv->ndev); spin_unlock_irqrestore(&priv->echo_lock, flags); - return 0; + + /* if this frame is only an echo, stop here. Otherwise, + * continue to push this application self-received frame into + * its own rx queue. 
+ */ + if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) + return 0; } /* otherwise, it should be pushed into rx fifo */ diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c index 3b180269a92d..6e95193b215b 100644 --- a/drivers/net/can/rx-offload.c +++ b/drivers/net/can/rx-offload.c @@ -245,7 +245,7 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload, if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { - kfree_skb(skb); + dev_kfree_skb_any(skb); return -ENOBUFS; } @@ -290,7 +290,7 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload, { if (skb_queue_len(&offload->skb_queue) > offload->skb_queue_len_max) { - kfree_skb(skb); + dev_kfree_skb_any(skb); return -ENOBUFS; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index c3f49543ff26..9c215f7c5f81 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -75,11 +75,11 @@ static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) { switch (model) { case MCP251XFD_MODEL_MCP2517FD: - return "MCP2517FD"; break; + return "MCP2517FD"; case MCP251XFD_MODEL_MCP2518FD: - return "MCP2518FD"; break; + return "MCP2518FD"; case MCP251XFD_MODEL_MCP251XFD: - return "MCP251xFD"; break; + return "MCP251xFD"; } return "<unknown>"; @@ -95,21 +95,21 @@ static const char *mcp251xfd_get_mode_str(const u8 mode) { switch (mode) { case MCP251XFD_REG_CON_MODE_MIXED: - return "Mixed (CAN FD/CAN 2.0)"; break; + return "Mixed (CAN FD/CAN 2.0)"; case MCP251XFD_REG_CON_MODE_SLEEP: - return "Sleep"; break; + return "Sleep"; case MCP251XFD_REG_CON_MODE_INT_LOOPBACK: - return "Internal Loopback"; break; + return "Internal Loopback"; case MCP251XFD_REG_CON_MODE_LISTENONLY: - return "Listen Only"; break; + return "Listen Only"; case MCP251XFD_REG_CON_MODE_CONFIG: - return "Configuration"; break; + return "Configuration"; case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK: - return "External Loopback"; break; + return "External Loopback"; case MCP251XFD_REG_CON_MODE_CAN2_0: - return "CAN 2.0"; break; + return "CAN 2.0"; case MCP251XFD_REG_CON_MODE_RESTRICTED: - return "Restricted Operation"; break; + return "Restricted Operation"; } return "<unknown>"; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index ba25902dd78c..314f868b3465 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -173,7 +173,7 @@ mcp251xfd_regmap_nocrc_read(void *context, memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd)); if (MCP251XFD_SANITIZE_SPI) memset(buf_tx->data, 0x0, val_len); - }; + } err = spi_sync(spi, &msg); if (err) @@ -330,17 +330,17 @@ mcp251xfd_regmap_crc_read(void *context, goto out; } - netdev_dbg(priv->ndev, - "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n", - reg, val_len, (int)val_len, buf_rx->data, - get_unaligned_be16(buf_rx->data + val_len)); - } - - if (err) { netdev_info(priv->ndev, - "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n", + "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n", reg, val_len, (int)val_len, buf_rx->data, get_unaligned_be16(buf_rx->data + val_len)); + } + + if (err) { + netdev_err(priv->ndev, + "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n", + reg, val_len, (int)val_len, buf_rx->data, + get_unaligned_be16(buf_rx->data + val_len)); 
return err; } diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index 1d63006c97bc..9913f5458279 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -933,7 +933,7 @@ static int ti_hecc_probe(struct platform_device *pdev) err = clk_prepare_enable(priv->clk); if (err) { dev_err(&pdev->dev, "clk_prepare_enable() failed\n"); - goto probe_exit_clk; + goto probe_exit_release_clk; } priv->offload.mailbox_read = ti_hecc_mailbox_read; @@ -942,7 +942,7 @@ static int ti_hecc_probe(struct platform_device *pdev) err = can_rx_offload_add_timestamp(ndev, &priv->offload); if (err) { dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); - goto probe_exit_clk; + goto probe_exit_disable_clk; } err = register_candev(ndev); @@ -960,7 +960,9 @@ static int ti_hecc_probe(struct platform_device *pdev) probe_exit_offload: can_rx_offload_del(&priv->offload); -probe_exit_clk: +probe_exit_disable_clk: + clk_disable_unprepare(priv->clk); +probe_exit_release_clk: clk_put(priv->clk); probe_exit_candev: free_candev(ndev); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index d91df34e7fa8..c2764799f9ef 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -130,14 +130,55 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) /* protect from getting time before setting now */ if (ktime_to_ns(time_ref->tv_host)) { u64 delta_us; + s64 delta_ts = 0; + + /* General case: dev_ts_1 < dev_ts_2 < ts, with: + * + * - dev_ts_1 = previous sync timestamp + * - dev_ts_2 = last sync timestamp + * - ts = event timestamp + * - ts_period = known sync period (theoretical) + * ~ dev_ts2 - dev_ts1 + * *but*: + * + * - time counters wrap (see adapter->ts_used_bits) + * - sometimes, dev_ts_1 < ts < dev_ts2 + * + * "normal" case (sync time counters increase): + * must take into account case when ts wraps (tsw) + * + * < ts_period > < > + * | | | + * ---+--------+----+-------0-+--+--> + * ts_dev_1 | ts_dev_2 | + * ts tsw + */ + if (time_ref->ts_dev_1 < time_ref->ts_dev_2) { + /* case when event time (tsw) wraps */ + if (ts < time_ref->ts_dev_1) + delta_ts = 1 << time_ref->adapter->ts_used_bits; + + /* Otherwise, sync time counter (ts_dev_2) has wrapped: + * handle case when event time (tsn) hasn't. 
+ * + * < ts_period > < > + * | | | + * ---+--------+--0-+---------+--+--> + * ts_dev_1 | ts_dev_2 | + * tsn ts + */ + } else if (time_ref->ts_dev_1 < ts) { + delta_ts = -(1 << time_ref->adapter->ts_used_bits); + } - delta_us = ts - time_ref->ts_dev_2; - if (ts < time_ref->ts_dev_2) - delta_us &= (1 << time_ref->adapter->ts_used_bits) - 1; + /* add delay between last sync and event timestamps */ + delta_ts += (signed int)(ts - time_ref->ts_dev_2); - delta_us += time_ref->ts_total; + /* add time from beginning to last sync */ + delta_ts += time_ref->ts_total; - delta_us *= time_ref->adapter->us_per_ts_scale; + /* convert ticks number into microseconds */ + delta_us = delta_ts * time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; *time = ktime_add_us(time_ref->tv_host_0, delta_us); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index ab63fd9eb982..d29d20525588 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -468,12 +468,18 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_msg_get_channel(rm)]; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct canfd_frame *cfd; struct sk_buff *skb; const u16 rx_msg_flags = le16_to_cpu(rm->flags); + if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) + return -ENOMEM; + + dev = usb_if->dev[pucan_msg_get_channel(rm)]; + netdev = dev->netdev; + if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { /* CANFD frame case */ skb = alloc_canfd_skb(netdev, &cfd); @@ -519,15 +525,21 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; - struct pcan_usb_fd_device *pdev = - container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_usb_fd_device *pdev; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; enum can_state rx_state, tx_state; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; + if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) + return -ENOMEM; + + dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; + pdev = container_of(dev, struct pcan_usb_fd_device, dev); + netdev = dev->netdev; + /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; @@ -579,9 +591,14 @@ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pucan_ermsg_get_channel(er)]; - struct pcan_usb_fd_device *pdev = - container_of(dev, struct pcan_usb_fd_device, dev); + struct pcan_usb_fd_device *pdev; + struct peak_usb_device *dev; + + if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) + return -EINVAL; + + dev = usb_if->dev[pucan_ermsg_get_channel(er)]; + pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* keep a trace of tx and rx error counters for later use */ pdev->bec.txerr = er->tx_err_cnt; @@ -595,11 +612,17 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ovr_msg *ov = (struct 
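Editor's note: the rewritten peak_usb_get_ts_time() turns the old modulo trick into an explicit signed delta with a wrap-around correction keyed off the two sync timestamps. The arithmetic can be checked in plain C; the 24-bit counter width and the sample values below are only for illustration.

#include <stdint.h>
#include <stdio.h>

static int64_t event_delta_ticks(uint32_t ts_dev_1, uint32_t ts_dev_2,
				 uint32_t ts, unsigned int used_bits)
{
	int64_t delta = 0;

	if (ts_dev_1 < ts_dev_2) {
		/* counter did not wrap between syncs; the event may have */
		if (ts < ts_dev_1)
			delta = (int64_t)1 << used_bits;
	} else if (ts_dev_1 < ts) {
		/* sync counter wrapped, event timestamp did not */
		delta = -((int64_t)1 << used_bits);
	}

	/* signed distance between event and last sync, then unwrap */
	delta += (int32_t)(ts - ts_dev_2);
	return delta;
}

int main(void)
{
	/* event shortly after a 24-bit counter wrapped past the last sync */
	printf("%lld\n", (long long)event_delta_ticks(0xfff000, 0xfff800,
						      0x000010, 24));
	return 0;
}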
pcan_ufd_ovr_msg *)rx_msg; - struct peak_usb_device *dev = usb_if->dev[pufd_omsg_get_channel(ov)]; - struct net_device *netdev = dev->netdev; + struct peak_usb_device *dev; + struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; + if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) + return -EINVAL; + + dev = usb_if->dev[pufd_omsg_get_channel(ov)]; + netdev = dev->netdev; + /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); if (!skb) @@ -716,6 +739,9 @@ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, u16 tx_msg_size, tx_msg_flags; u8 can_dlc; + if (cfd->len > CANFD_MAX_DLEN) + return -EINVAL; + tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); tx_msg->size = cpu_to_le16(tx_msg_size); tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 6c4d00d2dbdc..48d746e18f30 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -1395,7 +1395,7 @@ static int xcan_open(struct net_device *ndev) if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); - return ret; + goto err; } ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, @@ -1479,6 +1479,7 @@ static int xcan_get_berr_counter(const struct net_device *ndev, if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); + pm_runtime_put(priv->dev); return ret; } @@ -1793,7 +1794,7 @@ static int xcan_probe(struct platform_device *pdev) if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); - goto err_pmdisable; + goto err_disableclks; } if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) { @@ -1828,7 +1829,6 @@ static int xcan_probe(struct platform_device *pdev) err_disableclks: pm_runtime_put(priv->dev); -err_pmdisable: pm_runtime_disable(&pdev->dev); err_free: free_candev(ndev); diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 53064e0e1618..5bdac669a339 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -1219,8 +1219,8 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) priv->port_mtu[port] = new_mtu; for (i = 0; i < QCA8K_NUM_PORTS; i++) - if (priv->port_mtu[port] > mtu) - mtu = priv->port_mtu[port]; + if (priv->port_mtu[i] > mtu) + mtu = priv->port_mtu[i]; /* Include L2 header / FCS length */ qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, mtu + ETH_HLEN + ETH_FCS_LEN); diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 883e47c5b1a7..286f0341bdf8 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -1929,7 +1929,8 @@ static inline int macb_clear_csum(struct sk_buff *skb) static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev) { - bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb); + bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) || + skb_is_nonlinear(*skb); int padlen = ETH_ZLEN - (*skb)->len; int headroom = skb_headroom(*skb); int tailroom = skb_tailroom(*skb); diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index d581c4e623f8..96d561653496 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -212,7 +212,7 @@ static struct sk_buff *alloc_ctrl_skb(struct sk_buff *skb, int len) { if (likely(skb && !skb_shared(skb) 
&& !skb_cloned(skb))) { __skb_trim(skb, 0); - refcount_add(2, &skb->users); + refcount_inc(&skb->users); } else { skb = alloc_skb(len, GFP_KERNEL | __GFP_NOFAIL); } diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c index f1820aca0d33..62c829023da5 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_hw.c @@ -383,6 +383,9 @@ int chtls_setkey(struct chtls_sock *csk, u32 keylen, if (ret) goto out_notcb; + if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN))) + goto out_notcb; + set_wr_txq(skb, CPL_PRIORITY_DATA, csk->tlshws.txqid); csk->wr_credits -= DIV_ROUND_UP(len, 16); csk->wr_unacked += DIV_ROUND_UP(len, 16); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 06cc863f4dd6..d9c285948fc2 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -174,12 +174,17 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); #define DPAA_PARSE_RESULTS_SIZE sizeof(struct fman_prs_result) #define DPAA_TIME_STAMP_SIZE 8 #define DPAA_HASH_RESULTS_SIZE 8 +#define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \ + + DPAA_HASH_RESULTS_SIZE) +#define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \ + dpaa_rx_extra_headroom) #ifdef CONFIG_DPAA_ERRATUM_A050385 -#define DPAA_RX_PRIV_DATA_SIZE (DPAA_A050385_ALIGN - (DPAA_PARSE_RESULTS_SIZE\ - + DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE)) +#define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE) +#define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \ + DPAA_RX_PRIV_DATA_A050385_SIZE : \ + DPAA_RX_PRIV_DATA_DEFAULT_SIZE) #else -#define DPAA_RX_PRIV_DATA_SIZE (u16)(DPAA_TX_PRIV_DATA_SIZE + \ - dpaa_rx_extra_headroom) +#define DPAA_RX_PRIV_DATA_SIZE DPAA_RX_PRIV_DATA_DEFAULT_SIZE #endif #define DPAA_ETH_PCD_RXQ_NUM 128 @@ -2840,7 +2845,8 @@ out_error: return err; } -static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) +static u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl, + enum port_type port) { u16 headroom; @@ -2854,10 +2860,12 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl) * * Also make sure the headroom is a multiple of data_align bytes */ - headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE + - DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE); + headroom = (u16)(bl[port].priv_data_size + DPAA_HWA_SIZE); - return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); + if (port == RX) + return ALIGN(headroom, DPAA_FD_RX_DATA_ALIGNMENT); + else + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT); } static int dpaa_eth_probe(struct platform_device *pdev) @@ -3025,8 +3033,8 @@ static int dpaa_eth_probe(struct platform_device *pdev) goto free_dpaa_fqs; } - priv->tx_headroom = dpaa_get_headroom(&priv->buf_layout[TX]); - priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]); + priv->tx_headroom = dpaa_get_headroom(priv->buf_layout, TX); + priv->rx_headroom = dpaa_get_headroom(priv->buf_layout, RX); /* All real interfaces need their ports initialized */ err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs, diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 832a2175636d..c527f4ee1d3a 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -456,6 +456,12 @@ struct bufdesc_ex { */ #define FEC_QUIRK_HAS_FRREG (1 << 
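Editor's note: the dpaa_eth headroom rework boils down to "private data plus the fixed hardware annotation area (parse results + timestamp + hash), rounded up to the port-specific alignment". A toy version of that arithmetic; the sizes and alignments below are examples, not the real platform values.

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int priv_data = 16;	/* per-port private data, example   */
	unsigned int hwa = 16 + 8 + 8;	/* parse + timestamp + hash results */
	unsigned int rx_align = 64, tx_align = 16;	/* example alignments */

	printf("rx headroom: %u\n", ALIGN_UP(priv_data + hwa, rx_align));
	printf("tx headroom: %u\n", ALIGN_UP(priv_data + hwa, tx_align));
	return 0;
}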
16) +/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid + * the generation of an MII event. This must be avoided in the older + * FEC blocks where it will stop MII events being generated. + */ +#define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17) + struct bufdesc_prop { int qid; /* Address of Rx and Tx buffers */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 8f7eca1e7716..d7919555250d 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -100,14 +100,14 @@ static const struct fec_devinfo fec_imx27_info = { static const struct fec_devinfo fec_imx28_info = { .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_FRREG, + FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII, }; static const struct fec_devinfo fec_imx6q_info = { .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | - FEC_QUIRK_HAS_RACC, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII, }; static const struct fec_devinfo fec_mvf600_info = { @@ -119,7 +119,8 @@ static const struct fec_devinfo fec_imx6x_info = { FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | - FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII, }; static const struct fec_devinfo fec_imx6ul_info = { @@ -127,7 +128,7 @@ static const struct fec_devinfo fec_imx6ul_info = { FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_COALESCE, + FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII, }; static struct platform_device_id fec_devtype[] = { @@ -2134,15 +2135,17 @@ static int fec_enet_mii_init(struct platform_device *pdev) if (suppress_preamble) fep->phy_speed |= BIT(7); - /* Clear MMFR to avoid to generate MII event by writing MSCR. - * MII event generation condition: - * - writing MSCR: - * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & - * mscr_reg_data_in[7:0] != 0 - * - writing MMFR: - * - mscr[7:0]_not_zero - */ - writel(0, fep->hwp + FEC_MII_DATA); + if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { + /* Clear MMFR to avoid to generate MII event by writing MSCR. 
+ * MII event generation condition: + * - writing MSCR: + * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & + * mscr_reg_data_in[7:0] != 0 + * - writing MMFR: + * - mscr[7:0]_not_zero + */ + writel(0, fep->hwp + FEC_MII_DATA); + } writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 41dd3d0f3452..d391a45cebb6 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -1829,20 +1829,12 @@ static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN; /* make space for additional header when fcb is needed */ - if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) { - struct sk_buff *skb_new; - - skb_new = skb_realloc_headroom(skb, fcb_len); - if (!skb_new) { + if (fcb_len) { + if (unlikely(skb_cow_head(skb, fcb_len))) { dev->stats.tx_errors++; dev_kfree_skb_any(skb); return NETDEV_TX_OK; } - - if (skb->sk) - skb_set_owner_w(skb_new, skb->sk); - dev_consume_skb_any(skb); - skb = skb_new; } /* total number of fragments in the SKB */ @@ -3380,7 +3372,7 @@ static int gfar_probe(struct platform_device *ofdev) if (dev->features & NETIF_F_IP_CSUM || priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) - dev->needed_headroom = GMAC_FCB_LEN; + dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN; /* Initializing some of the rx/tx queue level parameters */ for (i = 0; i < priv->num_tx_queues; i++) { diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index af4dfbe28d56..da15913879f8 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1185,18 +1185,27 @@ static int ibmvnic_open(struct net_device *netdev) if (adapter->state != VNIC_CLOSED) { rc = ibmvnic_login(netdev); if (rc) - return rc; + goto out; rc = init_resources(adapter); if (rc) { netdev_err(netdev, "failed to initialize resources\n"); release_resources(adapter); - return rc; + goto out; } } rc = __ibmvnic_open(netdev); +out: + /* + * If open fails due to a pending failover, set device state and + * return. Device operation will be handled by reset routine. + */ + if (rc && adapter->failover_pending) { + adapter->state = VNIC_OPEN; + rc = 0; + } return rc; } @@ -1922,6 +1931,13 @@ static int do_reset(struct ibmvnic_adapter *adapter, rwi->reset_reason); rtnl_lock(); + /* + * Now that we have the rtnl lock, clear any pending failover. + * This will ensure ibmvnic_open() has either completed or will + * block until failover is complete. + */ + if (rwi->reset_reason == VNIC_RESET_FAILOVER) + adapter->failover_pending = false; netif_carrier_off(netdev); adapter->reset_reason = rwi->reset_reason; @@ -2202,6 +2218,13 @@ static void __ibmvnic_reset(struct work_struct *work) /* CHANGE_PARAM requestor holds rtnl_lock */ rc = do_change_param_reset(adapter, rwi, reset_state); } else if (adapter->force_reset_recovery) { + /* + * Since we are doing a hard reset now, clear the + * failover_pending flag so we don't ignore any + * future MOBILITY or other resets. + */ + adapter->failover_pending = false; + /* Transport event occurred during previous reset */ if (adapter->wait_for_reset) { /* Previous was CHANGE_PARAM; caller locked */ @@ -2266,9 +2289,15 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter, unsigned long flags; int ret; + /* + * If failover is pending don't schedule any other reset. + * Instead let the failover complete. 
If there is already + * a failover reset scheduled, we will detect and drop the + * duplicate reset when walking the ->rwi_list below. + */ if (adapter->state == VNIC_REMOVING || adapter->state == VNIC_REMOVED || - adapter->failover_pending) { + (adapter->failover_pending && reason != VNIC_RESET_FAILOVER)) { ret = EBUSY; netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n"); goto err; @@ -4713,7 +4742,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, case IBMVNIC_CRQ_INIT: dev_info(dev, "Partner initialized\n"); adapter->from_passive_init = true; - adapter->failover_pending = false; if (!completion_done(&adapter->init_done)) { complete(&adapter->init_done); adapter->init_done_rc = -EIO; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index ed9808fc743b..35c72d4a78b3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -126,6 +126,11 @@ static int ionic_get_link_ksettings(struct net_device *netdev, ethtool_link_ksettings_zero_link_mode(ks, supported); + if (!idev->port_info) { + netdev_err(netdev, "port_info not initialized\n"); + return -EOPNOTSUPP; + } + /* The port_info data is found in a DMA space that the NIC keeps * up-to-date, so there's no need to request the data from the * NIC, we already have it in our memory space. diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 00f13805c6f7..7766d73823eb 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4080,9 +4080,17 @@ err_out: return -EIO; } -static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp) { - return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_60: + case RTL_GIGA_MAC_VER_61: + case RTL_GIGA_MAC_VER_63: + return true; + default: + return false; + } } static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) @@ -4154,7 +4162,7 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, opts[1] |= transport_offset << TCPHO_SHIFT; } else { - if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp))) return !eth_skb_pad(skb); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index b6e5e3e36b63..81ee0a071b4e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -625,13 +625,6 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, if (ret) return ret; - if (plat->eee_usecs_rate > 0) { - u32 tx_lpi_usec; - - tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; - writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); - } - ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); if (ret < 0) return ret; @@ -641,6 +634,13 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, res.wol_irq = pci_irq_vector(pdev, 0); res.irq = pci_irq_vector(pdev, 0); + if (plat->eee_usecs_rate > 0) { + u32 tx_lpi_usec; + + tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1; + writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER); + } + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { pci_free_irq_vectors(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c 
b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 220626a8d499..d833908b660a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4757,6 +4757,7 @@ static void stmmac_napi_add(struct net_device *dev) ch->priv_data = priv; ch->index = queue; + spin_lock_init(&ch->lock); if (queue < priv->plat->rx_queues_to_use) { netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 4d02c5135611..4619c3a950b0 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -728,7 +728,6 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) (1 << HWTSTAMP_TX_ON); info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); return 0; } diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c index 51cc29f39038..31c5e36ff706 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.c +++ b/drivers/net/ethernet/ti/cpsw_priv.c @@ -639,13 +639,10 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_NTP_ALL: - return -ERANGE; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; - break; + return -ERANGE; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index 1d18c10e8f82..34aa196b7465 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c @@ -2389,7 +2389,8 @@ static int sfp_probe(struct platform_device *pdev) continue; sfp->gpio_irq[i] = gpiod_to_irq(sfp->gpio[i]); - if (!sfp->gpio_irq[i]) { + if (sfp->gpio_irq[i] < 0) { + sfp->gpio_irq[i] = 0; sfp->need_poll = true; continue; } diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index a322f51873d0..581ed51abb53 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1309,6 +1309,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1230, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 376096bfc54a..40ca71b29bb9 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4582,8 +4582,7 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) } EXPORT_SYMBOL_GPL(nvme_start_queues); - -void nvme_sync_queues(struct nvme_ctrl *ctrl) +void nvme_sync_io_queues(struct nvme_ctrl *ctrl) { struct nvme_ns *ns; @@ -4591,7 +4590,12 @@ void nvme_sync_queues(struct nvme_ctrl *ctrl) list_for_each_entry(ns, &ctrl->namespaces, list) blk_sync_queue(ns->queue); up_read(&ctrl->namespaces_rwsem); +} +EXPORT_SYMBOL_GPL(nvme_sync_io_queues); +void nvme_sync_queues(struct nvme_ctrl *ctrl) +{ + nvme_sync_io_queues(ctrl); if (ctrl->admin_q) blk_sync_queue(ctrl->admin_q); } diff --git 
a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index cc111136a981..bc330bf0d3bd 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -602,6 +602,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl); void nvme_start_queues(struct nvme_ctrl *ctrl); void nvme_kill_queues(struct nvme_ctrl *ctrl); void nvme_sync_queues(struct nvme_ctrl *ctrl); +void nvme_sync_io_queues(struct nvme_ctrl *ctrl); void nvme_unfreeze(struct nvme_ctrl *ctrl); void nvme_wait_freeze(struct nvme_ctrl *ctrl); int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index df8f3612107f..0578ff253c47 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -198,6 +198,7 @@ struct nvme_queue { u32 q_depth; u16 cq_vector; u16 sq_tail; + u16 last_sq_tail; u16 cq_head; u16 qid; u8 cq_phase; @@ -455,11 +456,24 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set) return 0; } -static inline void nvme_write_sq_db(struct nvme_queue *nvmeq) +/* + * Write sq tail if we are asked to, or if the next command would wrap. + */ +static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq) { + if (!write_sq) { + u16 next_tail = nvmeq->sq_tail + 1; + + if (next_tail == nvmeq->q_depth) + next_tail = 0; + if (next_tail != nvmeq->last_sq_tail) + return; + } + if (nvme_dbbuf_update_and_check_event(nvmeq->sq_tail, nvmeq->dbbuf_sq_db, nvmeq->dbbuf_sq_ei)) writel(nvmeq->sq_tail, nvmeq->q_db); + nvmeq->last_sq_tail = nvmeq->sq_tail; } /** @@ -476,8 +490,7 @@ static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, cmd, sizeof(*cmd)); if (++nvmeq->sq_tail == nvmeq->q_depth) nvmeq->sq_tail = 0; - if (write_sq) - nvme_write_sq_db(nvmeq); + nvme_write_sq_db(nvmeq, write_sq); spin_unlock(&nvmeq->sq_lock); } @@ -486,7 +499,8 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx) struct nvme_queue *nvmeq = hctx->driver_data; spin_lock(&nvmeq->sq_lock); - nvme_write_sq_db(nvmeq); + if (nvmeq->sq_tail != nvmeq->last_sq_tail) + nvme_write_sq_db(nvmeq, true); spin_unlock(&nvmeq->sq_lock); } @@ -1496,6 +1510,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) struct nvme_dev *dev = nvmeq->dev; nvmeq->sq_tail = 0; + nvmeq->last_sq_tail = 0; nvmeq->cq_head = 0; nvmeq->cq_phase = 1; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 541b0cba6d80..65e3d0ef36e1 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -122,7 +122,6 @@ struct nvme_rdma_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; - struct mutex teardown_lock; bool use_inline_data; u32 io_queues[HCTX_MAX_TYPES]; }; @@ -1010,8 +1009,8 @@ out_free_io_queues: static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, bool remove) { - mutex_lock(&ctrl->teardown_lock); blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + blk_sync_queue(ctrl->ctrl.admin_q); nvme_rdma_stop_queue(&ctrl->queues[0]); if (ctrl->ctrl.admin_tagset) { blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, @@ -1021,16 +1020,15 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, if (remove) blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl, remove); - mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { - mutex_lock(&ctrl->teardown_lock); if (ctrl->ctrl.queue_count > 1) { nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); + 
nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); if (ctrl->ctrl.tagset) { blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, @@ -1041,7 +1039,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, nvme_start_queues(&ctrl->ctrl); nvme_rdma_destroy_io_queues(ctrl, remove); } - mutex_unlock(&ctrl->teardown_lock); } static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) @@ -1976,16 +1973,12 @@ static void nvme_rdma_complete_timed_out(struct request *rq) { struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); struct nvme_rdma_queue *queue = req->queue; - struct nvme_rdma_ctrl *ctrl = queue->ctrl; - /* fence other contexts that may complete the command */ - mutex_lock(&ctrl->teardown_lock); nvme_rdma_stop_queue(queue); - if (!blk_mq_request_completed(rq)) { + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; blk_mq_complete_request(rq); } - mutex_unlock(&ctrl->teardown_lock); } static enum blk_eh_timer_return @@ -2320,7 +2313,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, return ERR_PTR(-ENOMEM); ctrl->ctrl.opts = opts; INIT_LIST_HEAD(&ctrl->list); - mutex_init(&ctrl->teardown_lock); if (!(opts->mask & NVMF_OPT_TRSVCID)) { opts->trsvcid = diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index d6a3e1487354..c0c33320fe65 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -124,7 +124,6 @@ struct nvme_tcp_ctrl { struct sockaddr_storage src_addr; struct nvme_ctrl ctrl; - struct mutex teardown_lock; struct work_struct err_work; struct delayed_work connect_work; struct nvme_tcp_request async_req; @@ -1886,8 +1885,8 @@ out_free_queue: static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, bool remove) { - mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); blk_mq_quiesce_queue(ctrl->admin_q); + blk_sync_queue(ctrl->admin_q); nvme_tcp_stop_queue(ctrl, 0); if (ctrl->admin_tagset) { blk_mq_tagset_busy_iter(ctrl->admin_tagset, @@ -1897,18 +1896,17 @@ static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl, if (remove) blk_mq_unquiesce_queue(ctrl->admin_q); nvme_tcp_destroy_admin_queue(ctrl, remove); - mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); } static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, bool remove) { - mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); if (ctrl->queue_count <= 1) - goto out; + return; blk_mq_quiesce_queue(ctrl->admin_q); nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); + nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); if (ctrl->tagset) { blk_mq_tagset_busy_iter(ctrl->tagset, @@ -1918,8 +1916,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, if (remove) nvme_start_queues(ctrl); nvme_tcp_destroy_io_queues(ctrl, remove); -out: - mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); } static void nvme_tcp_reconnect_or_remove(struct nvme_ctrl *ctrl) @@ -2171,14 +2167,11 @@ static void nvme_tcp_complete_timed_out(struct request *rq) struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; - /* fence other contexts that may complete the command */ - mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock); nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue)); - if (!blk_mq_request_completed(rq)) { + if (blk_mq_request_started(rq) && !blk_mq_request_completed(rq)) { nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD; blk_mq_complete_request(rq); } - mutex_unlock(&to_tcp_ctrl(ctrl)->teardown_lock); } static enum blk_eh_timer_return @@ -2455,7 
+2448,6 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, nvme_tcp_reconnect_ctrl_work); INIT_WORK(&ctrl->err_work, nvme_tcp_error_recovery_work); INIT_WORK(&ctrl->ctrl.reset_work, nvme_reset_ctrl_work); - mutex_init(&ctrl->teardown_lock); if (!(opts->mask & NVMF_OPT_TRSVCID)) { opts->trsvcid = diff --git a/drivers/of/device.c b/drivers/of/device.c index 3a469c79e6b0..aedfaaafd3e7 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -112,7 +112,7 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, u64 dma_end = 0; /* Determine the overall bounds of all DMA regions */ - for (dma_start = ~0ULL; r->size; r++) { + for (dma_start = ~0; r->size; r++) { /* Take lower and upper limits */ if (r->dma_start < dma_start) dma_start = r->dma_start; diff --git a/drivers/opp/core.c b/drivers/opp/core.c index 2483e765318a..0e0a5269dc82 100644 --- a/drivers/opp/core.c +++ b/drivers/opp/core.c @@ -1181,6 +1181,10 @@ static void _opp_table_kref_release(struct kref *kref) struct opp_device *opp_dev, *temp; int i; + /* Drop the lock as soon as we can */ + list_del(&opp_table->node); + mutex_unlock(&opp_table_lock); + _of_clear_opp_table(opp_table); /* Release clk */ @@ -1208,10 +1212,7 @@ static void _opp_table_kref_release(struct kref *kref) mutex_destroy(&opp_table->genpd_virt_dev_lock); mutex_destroy(&opp_table->lock); - list_del(&opp_table->node); kfree(opp_table); - - mutex_unlock(&opp_table_lock); } void dev_pm_opp_put_opp_table(struct opp_table *opp_table) @@ -1930,7 +1931,7 @@ struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, return ERR_PTR(-EINVAL); opp_table = dev_pm_opp_get_opp_table(dev); - if (!IS_ERR(opp_table)) + if (IS_ERR(opp_table)) return opp_table; /* This should be called before OPPs are initialized */ diff --git a/drivers/opp/of.c b/drivers/opp/of.c index 874b58756220..9faeb83e4b32 100644 --- a/drivers/opp/of.c +++ b/drivers/opp/of.c @@ -944,6 +944,8 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table) nr -= 2; } + return 0; + remove_static_opp: _opp_remove_all_static(opp_table); diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 674f32db85ca..44c2a6572199 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -586,8 +586,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) * ATU, so we should not program the ATU here. */ if (pp->bridge->child_ops == &dw_child_pcie_ops) { - struct resource_entry *entry = - resource_list_first_type(&pp->bridge->windows, IORESOURCE_MEM); + struct resource_entry *tmp, *entry = NULL; + + /* Get last memory resource entry */ + resource_list_for_each_entry(tmp, &pp->bridge->windows) + if (resource_type(tmp->res) == IORESOURCE_MEM) + entry = tmp; dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, PCIE_ATU_TYPE_MEM, entry->res->start, diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index eee82838f4ba..ed13e81cd691 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -958,25 +958,16 @@ static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) } /* - * We can't use devm_of_pci_get_host_bridge_resources() because we - * need to parse our special DT properties encoding the MEM and IO - * apertures. 
+ * devm_of_pci_get_host_bridge_resources() only sets up translateable resources, + * so we need extra resource setup parsing our special DT properties encoding + * the MEM and IO apertures. */ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) { struct device *dev = &pcie->pdev->dev; - struct device_node *np = dev->of_node; struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); int ret; - /* Get the bus range */ - ret = of_pci_parse_bus_range(np, &pcie->busn); - if (ret) { - dev_err(dev, "failed to parse bus-range property: %d\n", ret); - return ret; - } - pci_add_resource(&bridge->windows, &pcie->busn); - /* Get the PCIe memory aperture */ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); if (resource_size(&pcie->mem) == 0) { @@ -986,6 +977,9 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) pcie->mem.name = "PCI MEM"; pci_add_resource(&bridge->windows, &pcie->mem); + ret = devm_request_resource(dev, &iomem_resource, &pcie->mem); + if (ret) + return ret; /* Get the PCIe IO aperture */ mvebu_mbus_get_pcie_io_aperture(&pcie->io); @@ -999,9 +993,12 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) pcie->realio.name = "PCI I/O"; pci_add_resource(&bridge->windows, &pcie->realio); + ret = devm_request_resource(dev, &ioport_resource, &pcie->realio); + if (ret) + return ret; } - return devm_request_pci_bus_resources(dev, &bridge->windows); + return 0; } /* diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6d4d5a2f923d..e578d34095e9 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -3516,8 +3516,13 @@ void pci_acs_init(struct pci_dev *dev) { dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (dev->acs_cap) - pci_enable_acs(dev); + /* + * Attempt to enable ACS regardless of capability because some Root + * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have + * the standard ACS capability but still support ACS via those + * quirks. 
+ */ + pci_enable_acs(dev); } /** diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 0b2830efc574..70d6d52bc1e2 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -620,7 +620,7 @@ static u64 rapl_unit_xlate(struct rapl_domain *rd, enum unit_type type, case ARBITRARY_UNIT: default: return value; - }; + } if (to_raw) return div64_u64(value, units) * scale; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index a4ffd71696da..a5ad553da8cd 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -4165,6 +4165,8 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev) ret = rdev->desc->fixed_uV; } else if (rdev->supply) { ret = regulator_get_voltage_rdev(rdev->supply->rdev); + } else if (rdev->supply_name) { + return -EPROBE_DEFER; } else { return -EINVAL; } diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 485cbfcbf06e..ef738b42a092 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -680,7 +680,10 @@ static int ap_device_probe(struct device *dev) { struct ap_device *ap_dev = to_ap_dev(dev); struct ap_driver *ap_drv = to_ap_drv(dev->driver); - int card, queue, devres, drvres, rc; + int card, queue, devres, drvres, rc = -ENODEV; + + if (!get_device(dev)) + return rc; if (is_queue_dev(dev)) { /* @@ -697,7 +700,7 @@ static int ap_device_probe(struct device *dev) mutex_unlock(&ap_perms_mutex); drvres = ap_drv->flags & AP_DRIVER_FLAG_DEFAULT; if (!!devres != !!drvres) - return -ENODEV; + goto out; } /* Add queue/card to list of active queues/cards */ @@ -718,6 +721,9 @@ static int ap_device_probe(struct device *dev) ap_dev->drv = NULL; } +out: + if (rc) + put_device(dev); return rc; } @@ -744,6 +750,8 @@ static int ap_device_remove(struct device *dev) hash_del(&to_ap_queue(dev)->hnode); spin_unlock_bh(&ap_queues_lock); + put_device(dev); + return 0; } @@ -1371,6 +1379,8 @@ static inline void ap_scan_domains(struct ap_card *ac) __func__, ac->id, dom); goto put_dev_and_continue; } + /* get it and thus adjust reference counter */ + get_device(dev); if (decfg) AP_DBF_INFO("%s(%d,%d) new (decfg) queue device created\n", __func__, ac->id, dom); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index 99cb60ea663d..dd84995049b9 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -35,9 +35,6 @@ MODULE_DESCRIPTION("s390 protected key interface"); #define PROTKEYBLOBBUFSIZE 256 /* protected key buffer size used internal */ #define MAXAPQNSINLIST 64 /* max 64 apqns within a apqn list */ -/* mask of available pckmo subfunctions, fetched once at module init */ -static cpacf_mask_t pckmo_functions; - /* * debug feature data and functions */ @@ -91,6 +88,9 @@ static int pkey_clr2protkey(u32 keytype, const struct pkey_clrkey *clrkey, struct pkey_protkey *protkey) { + /* mask of available pckmo subfunctions */ + static cpacf_mask_t pckmo_functions; + long fc; int keysize; u8 paramblock[64]; @@ -114,11 +114,13 @@ static int pkey_clr2protkey(u32 keytype, return -EINVAL; } - /* - * Check if the needed pckmo subfunction is available. - * These subfunctions can be enabled/disabled by customers - * in the LPAR profile or may even change on the fly. - */ + /* Did we already check for PCKMO ? 
*/ + if (!pckmo_functions.bytes[0]) { + /* no, so check now */ + if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) + return -ENODEV; + } + /* check for the pckmo subfunction we need now */ if (!cpacf_test_func(&pckmo_functions, fc)) { DEBUG_ERR("%s pckmo functions not available\n", __func__); return -ENODEV; @@ -2058,7 +2060,7 @@ static struct miscdevice pkey_dev = { */ static int __init pkey_init(void) { - cpacf_mask_t kmc_functions; + cpacf_mask_t func_mask; /* * The pckmo instruction should be available - even if we don't @@ -2066,15 +2068,15 @@ static int __init pkey_init(void) * is also the minimum level for the kmc instructions which * are able to work with protected keys. */ - if (!cpacf_query(CPACF_PCKMO, &pckmo_functions)) + if (!cpacf_query(CPACF_PCKMO, &func_mask)) return -ENODEV; /* check for kmc instructions available */ - if (!cpacf_query(CPACF_KMC, &kmc_functions)) + if (!cpacf_query(CPACF_KMC, &func_mask)) return -ENODEV; - if (!cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_128) || - !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_192) || - !cpacf_test_func(&kmc_functions, CPACF_KMC_PAES_256)) + if (!cpacf_test_func(&func_mask, CPACF_KMC_PAES_128) || + !cpacf_test_func(&func_mask, CPACF_KMC_PAES_192) || + !cpacf_test_func(&func_mask, CPACF_KMC_PAES_256)) return -ENODEV; pkey_debug_init(); diff --git a/drivers/s390/crypto/zcrypt_card.c b/drivers/s390/crypto/zcrypt_card.c index e342eb86acd1..33b23884b133 100644 --- a/drivers/s390/crypto/zcrypt_card.c +++ b/drivers/s390/crypto/zcrypt_card.c @@ -157,11 +157,6 @@ int zcrypt_card_register(struct zcrypt_card *zc) { int rc; - rc = sysfs_create_group(&zc->card->ap_dev.device.kobj, - &zcrypt_card_attr_group); - if (rc) - return rc; - spin_lock(&zcrypt_list_lock); list_add_tail(&zc->list, &zcrypt_card_list); spin_unlock(&zcrypt_list_lock); @@ -170,6 +165,14 @@ int zcrypt_card_register(struct zcrypt_card *zc) ZCRYPT_DBF(DBF_INFO, "card=%02x register online=1\n", zc->card->id); + rc = sysfs_create_group(&zc->card->ap_dev.device.kobj, + &zcrypt_card_attr_group); + if (rc) { + spin_lock(&zcrypt_list_lock); + list_del_init(&zc->list); + spin_unlock(&zcrypt_list_lock); + } + return rc; } EXPORT_SYMBOL(zcrypt_card_register); diff --git a/drivers/s390/crypto/zcrypt_queue.c b/drivers/s390/crypto/zcrypt_queue.c index 3c207066313c..5062eae73d4a 100644 --- a/drivers/s390/crypto/zcrypt_queue.c +++ b/drivers/s390/crypto/zcrypt_queue.c @@ -180,7 +180,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq) &zcrypt_queue_attr_group); if (rc) goto out; - get_device(&zq->queue->ap_dev.device); if (zq->ops->rng) { rc = zcrypt_rng_device_add(); @@ -192,7 +191,6 @@ int zcrypt_queue_register(struct zcrypt_queue *zq) out_unregister: sysfs_remove_group(&zq->queue->ap_dev.device.kobj, &zcrypt_queue_attr_group); - put_device(&zq->queue->ap_dev.device); out: spin_lock(&zcrypt_list_lock); list_del_init(&zq->list); @@ -220,12 +218,10 @@ void zcrypt_queue_unregister(struct zcrypt_queue *zq) list_del_init(&zq->list); zcrypt_device_count--; spin_unlock(&zcrypt_list_lock); - zcrypt_card_put(zc); if (zq->ops->rng) zcrypt_rng_device_remove(); sysfs_remove_group(&zq->queue->ap_dev.device.kobj, &zcrypt_queue_attr_group); - put_device(&zq->queue->ap_dev.device); - zcrypt_queue_put(zq); + zcrypt_card_put(zc); } EXPORT_SYMBOL(zcrypt_queue_unregister); diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index f32da0ca529e..308bda2e9c00 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ 
b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -658,8 +658,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) rcu_read_lock(); list_for_each_entry_rcu(h, &tmp_pg->dh_list, node) { - /* h->sdev should always be valid */ - BUG_ON(!h->sdev); + if (!h->sdev) + continue; h->sdev->access_state = desc[0]; } rcu_read_unlock(); @@ -705,7 +705,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) pg->expiry = 0; rcu_read_lock(); list_for_each_entry_rcu(h, &pg->dh_list, node) { - BUG_ON(!h->sdev); + if (!h->sdev) + continue; h->sdev->access_state = (pg->state & SCSI_ACCESS_STATE_MASK); if (pg->pref) @@ -1147,7 +1148,6 @@ static void alua_bus_detach(struct scsi_device *sdev) spin_lock(&h->pg_lock); pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock)); rcu_assign_pointer(h->pg, NULL); - h->sdev = NULL; spin_unlock(&h->pg_lock); if (pg) { spin_lock_irq(&pg->lock); @@ -1156,6 +1156,7 @@ static void alua_bus_detach(struct scsi_device *sdev) kref_put(&pg->kref, release_port_group); } sdev->handler_data = NULL; + synchronize_rcu(); kfree(h); } diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 83ce4f11a589..8df70c92911d 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -8855,7 +8855,7 @@ reinit_after_soft_reset: /* hook into SCSI subsystem */ rc = hpsa_scsi_add_host(h); if (rc) - goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ + goto clean8; /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ /* Monitor the controller for firmware lockups */ h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; @@ -8870,6 +8870,8 @@ reinit_after_soft_reset: HPSA_EVENT_MONITOR_INTERVAL); return 0; +clean8: /* lastlogicals, perf, sg, cmd, irq, shost, pci, lu, aer/h */ + kfree(h->lastlogicals); clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */ hpsa_free_performant_mode(h); h->access.set_intr_mask(h, HPSA_INTR_OFF); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 93230cd1982f..e4cc92bc4d94 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -1740,6 +1740,13 @@ _base_irqpoll(struct irq_poll *irqpoll, int budget) reply_q->irq_poll_scheduled = false; reply_q->irq_line_enable = true; enable_irq(reply_q->os_irq); + /* + * Go for one more round of processing the + * reply descriptor post queue in case the HBA + * Firmware has posted some reply descriptors + * while reenabling the IRQ. + */ + _base_process_reply_queue(reply_q); } return num_entries; diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index b87116e9b413..7104cf17b848 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1193,7 +1193,6 @@ static int bcm2835_spi_setup(struct spi_device *spi) struct spi_controller *ctlr = spi->controller; struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); struct gpio_chip *chip; - enum gpio_lookup_flags lflags; u32 cs; /* @@ -1259,21 +1258,9 @@ static int bcm2835_spi_setup(struct spi_device *spi) if (!chip) return 0; - /* - * Retrieve the corresponding GPIO line used for CS. - * The inversion semantics will be handled by the GPIO core - * code, so we pass GPIOD_OUT_LOW for "unasserted" and - * the correct flag for inversion semantics. The SPI_CS_HIGH - * on spi->mode cannot be checked for polarity in this case - * as the flag use_gpio_descriptors enforces SPI_CS_HIGH. 
- */ - if (of_property_read_bool(spi->dev.of_node, "spi-cs-high")) - lflags = GPIO_ACTIVE_HIGH; - else - lflags = GPIO_ACTIVE_LOW; spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select, DRV_NAME, - lflags, + GPIO_LOOKUP_FLAGS_DEFAULT, GPIOD_OUT_LOW); if (IS_ERR(spi->cs_gpiod)) return PTR_ERR(spi->cs_gpiod); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 3967afa465f0..1a08c1d584ab 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -1080,12 +1080,11 @@ MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); #ifdef CONFIG_PM_SLEEP static int dspi_suspend(struct device *dev) { - struct spi_controller *ctlr = dev_get_drvdata(dev); - struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + struct fsl_dspi *dspi = dev_get_drvdata(dev); if (dspi->irq) disable_irq(dspi->irq); - spi_controller_suspend(ctlr); + spi_controller_suspend(dspi->ctlr); clk_disable_unprepare(dspi->clk); pinctrl_pm_select_sleep_state(dev); @@ -1095,8 +1094,7 @@ static int dspi_suspend(struct device *dev) static int dspi_resume(struct device *dev) { - struct spi_controller *ctlr = dev_get_drvdata(dev); - struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + struct fsl_dspi *dspi = dev_get_drvdata(dev); int ret; pinctrl_pm_select_default_state(dev); @@ -1104,7 +1102,7 @@ static int dspi_resume(struct device *dev) ret = clk_prepare_enable(dspi->clk); if (ret) return ret; - spi_controller_resume(ctlr); + spi_controller_resume(dspi->ctlr); if (dspi->irq) enable_irq(dspi->irq); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 060b1f5c9b04..4b80e27ecdbf 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1676,15 +1676,18 @@ static int spi_imx_probe(struct platform_device *pdev) goto out_master_put; } - pm_runtime_enable(spi_imx->dev); + ret = clk_prepare_enable(spi_imx->clk_per); + if (ret) + goto out_master_put; + + ret = clk_prepare_enable(spi_imx->clk_ipg); + if (ret) + goto out_put_per; + pm_runtime_set_autosuspend_delay(spi_imx->dev, MXC_RPM_TIMEOUT); pm_runtime_use_autosuspend(spi_imx->dev); - - ret = pm_runtime_get_sync(spi_imx->dev); - if (ret < 0) { - dev_err(spi_imx->dev, "failed to enable clock\n"); - goto out_runtime_pm_put; - } + pm_runtime_set_active(spi_imx->dev); + pm_runtime_enable(spi_imx->dev); spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per); /* @@ -1722,8 +1725,12 @@ out_bitbang_start: spi_imx_sdma_exit(spi_imx); out_runtime_pm_put: pm_runtime_dont_use_autosuspend(spi_imx->dev); - pm_runtime_put_sync(spi_imx->dev); + pm_runtime_set_suspended(&pdev->dev); pm_runtime_disable(spi_imx->dev); + + clk_disable_unprepare(spi_imx->clk_ipg); +out_put_per: + clk_disable_unprepare(spi_imx->clk_per); out_master_put: spi_master_put(master); diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml index 43b5630c0407..510edd12ed19 100644 --- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml +++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml @@ -24,7 +24,7 @@ description: In addition, it is recommended to declare a mmc-pwrseq on SDIO host above WFx. Without it, you may encounter issues with warm boot. The mmc-pwrseq should be compatible with mmc-pwrseq-simple. Please consult - Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.txt for more + Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more information. 
For SPI':' diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index 41f4120abdf2..fa876e2c13e5 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c @@ -317,7 +317,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios, */ baud = tty_termios_baud_rate(termios); - serial8250_do_set_termios(port, termios, old); + serial8250_do_set_termios(port, termios, NULL); tty_termios_encode_baud_rate(termios, baud, baud); diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 1044fc387691..28f22e58639c 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -522,6 +522,7 @@ config SERIAL_IMX_EARLYCON depends on OF select SERIAL_EARLYCON select SERIAL_CORE_CONSOLE + default y if SERIAL_IMX_CONSOLE help If you have enabled the earlycon on the Freescale IMX CPU you can make it the earlycon by answering Y to this option. diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c index b4d89e31730e..7a07e7272de1 100644 --- a/drivers/tty/serial/serial_txx9.c +++ b/drivers/tty/serial/serial_txx9.c @@ -1280,6 +1280,9 @@ static int __init serial_txx9_init(void) #ifdef ENABLE_SERIAL_TXX9_PCI ret = pci_register_driver(&serial_txx9_pci_driver); + if (ret) { + platform_driver_unregister(&serial_txx9_plat_driver); + } #endif if (ret == 0) goto out; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 7a4c02548fb3..9f8b9a567b35 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1515,10 +1515,12 @@ static void release_tty(struct tty_struct *tty, int idx) tty->ops->shutdown(tty); tty_save_termios(tty); tty_driver_remove_tty(tty->driver, tty); - tty->port->itty = NULL; + if (tty->port) + tty->port->itty = NULL; if (tty->link) tty->link->port->itty = NULL; - tty_buffer_cancel_work(tty->port); + if (tty->port) + tty_buffer_cancel_work(tty->port); if (tty->link) tty_buffer_cancel_work(tty->link->port); diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 9506a76f3ab6..d04a162939a4 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -4704,27 +4704,6 @@ static int con_font_default(struct vc_data *vc, struct console_font_op *op) return rc; } -static int con_font_copy(struct vc_data *vc, struct console_font_op *op) -{ - int con = op->height; - int rc; - - - console_lock(); - if (vc->vc_mode != KD_TEXT) - rc = -EINVAL; - else if (!vc->vc_sw->con_font_copy) - rc = -ENOSYS; - else if (con < 0 || !vc_cons_allocated(con)) - rc = -ENOTTY; - else if (con == vc->vc_num) /* nothing to do */ - rc = 0; - else - rc = vc->vc_sw->con_font_copy(vc, con); - console_unlock(); - return rc; -} - int con_font_op(struct vc_data *vc, struct console_font_op *op) { switch (op->op) { @@ -4735,7 +4714,8 @@ int con_font_op(struct vc_data *vc, struct console_font_op *op) case KD_FONT_OP_SET_DEFAULT: return con_font_default(vc, op); case KD_FONT_OP_COPY: - return con_font_copy(vc, op); + /* was buggy and never really used */ + return -EINVAL; } return -ENOSYS; } diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c index 2321775ef098..5f61b25a9aaa 100644 --- a/drivers/tty/vt/vt_ioctl.c +++ b/drivers/tty/vt/vt_ioctl.c @@ -484,7 +484,7 @@ static int vt_k_ioctl(struct tty_struct *tty, unsigned int cmd, return 0; } -static inline int do_fontx_ioctl(int cmd, +static inline int do_fontx_ioctl(struct vc_data *vc, int cmd, struct consolefontdesc __user *user_cfd, struct console_font_op *op) { @@ -502,15 +502,16 @@ static inline int do_fontx_ioctl(int cmd, 
op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; - return con_font_op(vc_cons[fg_console].d, op); - case GIO_FONTX: { + return con_font_op(vc, op); + + case GIO_FONTX: op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; op->width = 8; op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = cfdarg.chardata; - i = con_font_op(vc_cons[fg_console].d, op); + i = con_font_op(vc, op); if (i) return i; cfdarg.charheight = op->height; @@ -518,12 +519,11 @@ static inline int do_fontx_ioctl(int cmd, if (copy_to_user(user_cfd, &cfdarg, sizeof(struct consolefontdesc))) return -EFAULT; return 0; - } } return -EINVAL; } -static int vt_io_fontreset(struct console_font_op *op) +static int vt_io_fontreset(struct vc_data *vc, struct console_font_op *op) { int ret; @@ -537,12 +537,12 @@ static int vt_io_fontreset(struct console_font_op *op) op->op = KD_FONT_OP_SET_DEFAULT; op->data = NULL; - ret = con_font_op(vc_cons[fg_console].d, op); + ret = con_font_op(vc, op); if (ret) return ret; console_lock(); - con_set_default_unimap(vc_cons[fg_console].d); + con_set_default_unimap(vc); console_unlock(); return 0; @@ -584,7 +584,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, op.height = 0; op.charcount = 256; op.data = up; - return con_font_op(vc_cons[fg_console].d, &op); + return con_font_op(vc, &op); case GIO_FONT: op.op = KD_FONT_OP_GET; @@ -593,7 +593,7 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, op.height = 32; op.charcount = 256; op.data = up; - return con_font_op(vc_cons[fg_console].d, &op); + return con_font_op(vc, &op); case PIO_CMAP: if (!perm) @@ -609,13 +609,13 @@ static int vt_io_ioctl(struct vc_data *vc, unsigned int cmd, void __user *up, fallthrough; case GIO_FONTX: - return do_fontx_ioctl(cmd, up, &op); + return do_fontx_ioctl(vc, cmd, up, &op); case PIO_FONTRESET: if (!perm) return -EPERM; - return vt_io_fontreset(&op); + return vt_io_fontreset(vc, &op); case PIO_SCRNMAP: if (!perm) @@ -1066,8 +1066,9 @@ struct compat_consolefontdesc { }; static inline int -compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, - int perm, struct console_font_op *op) +compat_fontx_ioctl(struct vc_data *vc, int cmd, + struct compat_consolefontdesc __user *user_cfd, + int perm, struct console_font_op *op) { struct compat_consolefontdesc cfdarg; int i; @@ -1085,7 +1086,8 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = compat_ptr(cfdarg.chardata); - return con_font_op(vc_cons[fg_console].d, op); + return con_font_op(vc, op); + case GIO_FONTX: op->op = KD_FONT_OP_GET; op->flags = KD_FONT_FLAG_OLD; @@ -1093,7 +1095,7 @@ compat_fontx_ioctl(int cmd, struct compat_consolefontdesc __user *user_cfd, op->height = cfdarg.charheight; op->charcount = cfdarg.charcount; op->data = compat_ptr(cfdarg.chardata); - i = con_font_op(vc_cons[fg_console].d, op); + i = con_font_op(vc, op); if (i) return i; cfdarg.charheight = op->height; @@ -1183,7 +1185,7 @@ long vt_compat_ioctl(struct tty_struct *tty, */ case PIO_FONTX: case GIO_FONTX: - return compat_fontx_ioctl(cmd, up, perm, &op); + return compat_fontx_ioctl(vc, cmd, up, perm, &op); case KDFONTOP: return compat_kdfontop_ioctl(up, perm, &op, vc); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 10574fa3f927..a1e3a037a289 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ 
-378,6 +378,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0926, 0x3333), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Kingston DataTraveler 3.0 */ + { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM }, + /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index e2820676beb1..5f18acac7406 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -608,10 +608,13 @@ static int dwc2_driver_probe(struct platform_device *dev) #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) error_debugfs: dwc2_debugfs_exit(hsotg); if (hsotg->hcd_enabled) dwc2_hcd_remove(hsotg); +#endif error_drd: dwc2_drd_exit(hsotg); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 242b6210380a..bae6a70664c8 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -40,6 +40,7 @@ #define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee #define PCI_DEVICE_ID_INTEL_TGPH 0x43ee #define PCI_DEVICE_ID_INTEL_JSP 0x4dee +#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1 #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511" #define PCI_INTEL_BXT_FUNC_PMU_PWR 4 @@ -367,6 +368,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP), (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS), + (kernel_ulong_t) &dwc3_pci_intel_properties, }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB), (kernel_ulong_t) &dwc3_pci_amd_properties, }, { } /* Terminating Entry */ diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 7be3903cb842..8b668ef46f7f 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -1058,10 +1058,11 @@ void dwc3_ep0_send_delayed_status(struct dwc3 *dwc) { unsigned int direction = !dwc->ep0_expect_in; + dwc->delayed_status = false; + if (dwc->ep0state != EP0_STATUS_PHASE) return; - dwc->delayed_status = false; __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); } diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index e01e366d89cd..062dfac30399 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -564,9 +564,12 @@ static int raw_ioctl_event_fetch(struct raw_dev *dev, unsigned long value) return -ENODEV; } length = min(arg.length, event->length); - if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) + if (copy_to_user((void __user *)value, event, sizeof(*event) + length)) { + kfree(event); return -EFAULT; + } + kfree(event); return 0; } diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index de528e3b0662..ad6ff9c4188e 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -1051,7 +1051,7 @@ static int fsl_ep_fifo_status(struct usb_ep *_ep) u32 bitmask; struct ep_queue_head *qh; - if (!_ep || _ep->desc || !(_ep->desc->bEndpointAddress&0xF)) + if (!_ep || !_ep->desc || !(_ep->desc->bEndpointAddress&0xF)) return -ENODEV; ep = container_of(_ep, struct fsl_ep, ep); diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c index 25c1d6ab5adb..3e1267d38774 100644 --- a/drivers/usb/gadget/udc/goku_udc.c +++ 
b/drivers/usb/gadget/udc/goku_udc.c @@ -1760,6 +1760,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err; } + pci_set_drvdata(pdev, dev); spin_lock_init(&dev->lock); dev->pdev = pdev; dev->gadget.ops = &goku_ops; @@ -1793,7 +1794,6 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) } dev->regs = (struct goku_udc_regs __iomem *) base; - pci_set_drvdata(pdev, dev); INFO(dev, "%s\n", driver_desc); INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr()); INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); diff --git a/drivers/usb/misc/apple-mfi-fastcharge.c b/drivers/usb/misc/apple-mfi-fastcharge.c index 579d8c84de42..9de0171b5177 100644 --- a/drivers/usb/misc/apple-mfi-fastcharge.c +++ b/drivers/usb/misc/apple-mfi-fastcharge.c @@ -120,8 +120,10 @@ static int apple_mfi_fc_set_property(struct power_supply *psy, dev_dbg(&mfi->udev->dev, "prop: %d\n", psp); ret = pm_runtime_get_sync(&mfi->udev->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(&mfi->udev->dev); return ret; + } switch (psp) { case POWER_SUPPLY_PROP_CHARGE_TYPE: diff --git a/drivers/usb/mtu3/mtu3_gadget.c b/drivers/usb/mtu3/mtu3_gadget.c index 1de5c9a1d20a..38f17d66d5bc 100644 --- a/drivers/usb/mtu3/mtu3_gadget.c +++ b/drivers/usb/mtu3/mtu3_gadget.c @@ -564,6 +564,7 @@ static int mtu3_gadget_stop(struct usb_gadget *g) spin_unlock_irqrestore(&mtu->lock, flags); + synchronize_irq(mtu->irq); return 0; } diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c index 821970609695..2e40908963da 100644 --- a/drivers/usb/serial/cyberjack.c +++ b/drivers/usb/serial/cyberjack.c @@ -357,11 +357,12 @@ static void cyberjack_write_bulk_callback(struct urb *urb) struct device *dev = &port->dev; int status = urb->status; unsigned long flags; + bool resubmitted = false; - set_bit(0, &port->write_urbs_free); if (status) { dev_dbg(dev, "%s - nonzero write bulk status received: %d\n", __func__, status); + set_bit(0, &port->write_urbs_free); return; } @@ -394,6 +395,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb) goto exit; } + resubmitted = true; + dev_dbg(dev, "%s - priv->wrsent=%d\n", __func__, priv->wrsent); dev_dbg(dev, "%s - priv->wrfilled=%d\n", __func__, priv->wrfilled); @@ -410,6 +413,8 @@ static void cyberjack_write_bulk_callback(struct urb *urb) exit: spin_unlock_irqrestore(&priv->lock, flags); + if (!resubmitted) + set_bit(0, &port->write_urbs_free); usb_serial_port_softint(port); } diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 2a3bfd6f867e..54ca85cc920d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -250,6 +250,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EP06 0x0306 #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 +#define QUECTEL_PRODUCT_EC200T 0x6026 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -1117,6 +1118,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10), .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1189,6 +1191,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) 
}, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1054, 0xff), /* Telit FT980-KS */ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1055, 0xff), /* Telit FN980 (PCIe) */ + .driver_info = NCTRL(0) | RSVD(1) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM), @@ -1201,6 +1205,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1203, 0xff), /* Telit LE910Cx (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), @@ -1215,6 +1221,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) }, { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1230, 0xff), /* Telit LE910Cx (rmnet) */ + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ + .driver_info = NCTRL(2) | RSVD(3) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c index 0113a980f974..f27e25112c40 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c @@ -248,7 +248,9 @@ static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd, info.size = vdev->regions[info.index].size; info.flags = vdev->regions[info.index].flags; - return copy_to_user((void __user *)arg, &info, minsz); + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + return 0; } case VFIO_DEVICE_GET_IRQ_INFO: { @@ -267,7 +269,9 @@ static long vfio_fsl_mc_ioctl(void *device_data, unsigned int cmd, info.flags = VFIO_IRQ_INFO_EVENTFD; info.count = 1; - return copy_to_user((void __user *)arg, &info, minsz); + if (copy_to_user((void __user *)arg, &info, minsz)) + return -EFAULT; + return 0; } case VFIO_DEVICE_SET_IRQS: { @@ -468,7 +472,7 @@ static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma) { struct vfio_fsl_mc_device *vdev = device_data; struct fsl_mc_device *mc_dev = vdev->mc_dev; - int index; + unsigned int index; index = vma->vm_pgoff >> (VFIO_FSL_MC_OFFSET_SHIFT - PAGE_SHIFT); diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c index c80dceb46f79..0d9f3002df7f 100644 --- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c +++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c @@ -13,7 +13,7 @@ #include "linux/fsl/mc.h" #include "vfio_fsl_mc_private.h" -int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev) +static int vfio_fsl_mc_irqs_allocate(struct vfio_fsl_mc_device *vdev) { struct fsl_mc_device *mc_dev = vdev->mc_dev; struct vfio_fsl_mc_irq *mc_irq; diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index fbd2b3404184..e6190173482c 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -385,7 +385,7 @@ static int vfio_pci_enable(struct vfio_pci_device *vdev) pdev->vendor == PCI_VENDOR_ID_INTEL && 
IS_ENABLED(CONFIG_VFIO_PCI_IGD)) { ret = vfio_pci_igd_init(vdev); - if (ret) { + if (ret && ret != -ENODEV) { pci_warn(pdev, "Failed to setup Intel IGD regions\n"); goto disable_exit; } diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c index 9e353c484ace..a0b5fc8e46f4 100644 --- a/drivers/vfio/pci/vfio_pci_rdwr.c +++ b/drivers/vfio/pci/vfio_pci_rdwr.c @@ -356,34 +356,60 @@ ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf, return done; } -static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) +static void vfio_pci_ioeventfd_do_write(struct vfio_pci_ioeventfd *ioeventfd, + bool test_mem) { - struct vfio_pci_ioeventfd *ioeventfd = opaque; - switch (ioeventfd->count) { case 1: - vfio_pci_iowrite8(ioeventfd->vdev, ioeventfd->test_mem, + vfio_pci_iowrite8(ioeventfd->vdev, test_mem, ioeventfd->data, ioeventfd->addr); break; case 2: - vfio_pci_iowrite16(ioeventfd->vdev, ioeventfd->test_mem, + vfio_pci_iowrite16(ioeventfd->vdev, test_mem, ioeventfd->data, ioeventfd->addr); break; case 4: - vfio_pci_iowrite32(ioeventfd->vdev, ioeventfd->test_mem, + vfio_pci_iowrite32(ioeventfd->vdev, test_mem, ioeventfd->data, ioeventfd->addr); break; #ifdef iowrite64 case 8: - vfio_pci_iowrite64(ioeventfd->vdev, ioeventfd->test_mem, + vfio_pci_iowrite64(ioeventfd->vdev, test_mem, ioeventfd->data, ioeventfd->addr); break; #endif } +} + +static int vfio_pci_ioeventfd_handler(void *opaque, void *unused) +{ + struct vfio_pci_ioeventfd *ioeventfd = opaque; + struct vfio_pci_device *vdev = ioeventfd->vdev; + + if (ioeventfd->test_mem) { + if (!down_read_trylock(&vdev->memory_lock)) + return 1; /* Lock contended, use thread */ + if (!__vfio_pci_memory_enabled(vdev)) { + up_read(&vdev->memory_lock); + return 0; + } + } + + vfio_pci_ioeventfd_do_write(ioeventfd, false); + + if (ioeventfd->test_mem) + up_read(&vdev->memory_lock); return 0; } +static void vfio_pci_ioeventfd_thread(void *opaque, void *unused) +{ + struct vfio_pci_ioeventfd *ioeventfd = opaque; + + vfio_pci_ioeventfd_do_write(ioeventfd, ioeventfd->test_mem); +} + long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, uint64_t data, int count, int fd) { @@ -457,7 +483,8 @@ long vfio_pci_ioeventfd(struct vfio_pci_device *vdev, loff_t offset, ioeventfd->test_mem = vdev->pdev->resource[bar].flags & IORESOURCE_MEM; ret = vfio_virqfd_enable(ioeventfd, vfio_pci_ioeventfd_handler, - NULL, NULL, &ioeventfd->virqfd, fd); + vfio_pci_ioeventfd_thread, NULL, + &ioeventfd->virqfd, fd); if (ret) { kfree(ioeventfd); goto out_unlock; diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c index c0771a9567fb..fb4b385191f2 100644 --- a/drivers/vfio/platform/vfio_platform_common.c +++ b/drivers/vfio/platform/vfio_platform_common.c @@ -267,7 +267,7 @@ static int vfio_platform_open(void *device_data) ret = pm_runtime_get_sync(vdev->device); if (ret < 0) - goto err_pm; + goto err_rst; ret = vfio_platform_call_reset(vdev, &extra_dbg); if (ret && vdev->reset_required) { @@ -284,7 +284,6 @@ static int vfio_platform_open(void *device_data) err_rst: pm_runtime_put(vdev->device); -err_pm: vfio_platform_irq_cleanup(vdev); err_irq: vfio_platform_regions_cleanup(vdev); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index bb2684cc245e..67e827638995 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -1993,6 +1993,7 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu, 
list_splice_tail(iova_copy, iova); } + static int vfio_iommu_type1_attach_group(void *iommu_data, struct iommu_group *iommu_group) { @@ -2009,18 +2010,10 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, mutex_lock(&iommu->lock); - list_for_each_entry(d, &iommu->domain_list, next) { - if (find_iommu_group(d, iommu_group)) { - mutex_unlock(&iommu->lock); - return -EINVAL; - } - } - - if (iommu->external_domain) { - if (find_iommu_group(iommu->external_domain, iommu_group)) { - mutex_unlock(&iommu->lock); - return -EINVAL; - } + /* Check for duplicates */ + if (vfio_iommu_find_iommu_group(iommu, iommu_group)) { + mutex_unlock(&iommu->lock); + return -EINVAL; } group = kzalloc(sizeof(*group), GFP_KERNEL); diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c index 38884d6c57cd..95c573dcda11 100644 --- a/fs/afs/xattr.c +++ b/fs/afs/xattr.c @@ -148,11 +148,6 @@ static const struct xattr_handler afs_xattr_afs_acl_handler = { .set = afs_xattr_set_acl, }; -static void yfs_acl_put(struct afs_operation *op) -{ - yfs_free_opaque_acl(op->yacl); -} - static const struct afs_operation_ops yfs_fetch_opaque_acl_operation = { .issue_yfs_rpc = yfs_fs_fetch_opaque_acl, .success = afs_acl_success, @@ -246,7 +241,7 @@ error: static const struct afs_operation_ops yfs_store_opaque_acl2_operation = { .issue_yfs_rpc = yfs_fs_store_opaque_acl2, .success = afs_acl_success, - .put = yfs_acl_put, + .put = afs_acl_put, }; /* diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 3b1239b7e90d..bd787e71a657 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -1990,6 +1990,7 @@ void yfs_fs_store_opaque_acl2(struct afs_operation *op) memcpy(bp, acl->data, acl->size); if (acl->size != size) memset((void *)bp + acl->size, 0, size - acl->size); + bp += size / sizeof(__be32); yfs_check_req(call, bp); trace_afs_make_fs_call(call, &vp->fid); diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 5027bbdca419..ded4229c314a 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -4074,7 +4074,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, vino.snap, inode); mutex_lock(&session->s_mutex); - session->s_seq++; + inc_session_sequence(session); dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, (unsigned)seq); diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 08f1c0c31dc2..8f1d7500a7ec 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4231,7 +4231,7 @@ static void handle_lease(struct ceph_mds_client *mdsc, dname.len, dname.name); mutex_lock(&session->s_mutex); - session->s_seq++; + inc_session_sequence(session); if (!inode) { dout("handle_lease no inode %llx\n", vino.ino); @@ -4385,29 +4385,49 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc) bool check_session_state(struct ceph_mds_session *s) { - if (s->s_state == CEPH_MDS_SESSION_CLOSING) { - dout("resending session close request for mds%d\n", - s->s_mds); - request_close_session(s); - return false; - } - if (s->s_ttl && time_after(jiffies, s->s_ttl)) { - if (s->s_state == CEPH_MDS_SESSION_OPEN) { + switch (s->s_state) { + case CEPH_MDS_SESSION_OPEN: + if (s->s_ttl && time_after(jiffies, s->s_ttl)) { s->s_state = CEPH_MDS_SESSION_HUNG; pr_info("mds%d hung\n", s->s_mds); } - } - if (s->s_state == CEPH_MDS_SESSION_NEW || - s->s_state == CEPH_MDS_SESSION_RESTARTING || - s->s_state == CEPH_MDS_SESSION_CLOSED || - s->s_state == CEPH_MDS_SESSION_REJECTED) - /* this mds is failed or recovering, just wait */ + break; + case CEPH_MDS_SESSION_CLOSING: + /* Should never reach this when we're unmounting */ + 
WARN_ON_ONCE(true); + fallthrough; + case CEPH_MDS_SESSION_NEW: + case CEPH_MDS_SESSION_RESTARTING: + case CEPH_MDS_SESSION_CLOSED: + case CEPH_MDS_SESSION_REJECTED: return false; + } return true; } /* + * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply, + * then we need to retransmit that request. + */ +void inc_session_sequence(struct ceph_mds_session *s) +{ + lockdep_assert_held(&s->s_mutex); + + s->s_seq++; + + if (s->s_state == CEPH_MDS_SESSION_CLOSING) { + int ret; + + dout("resending session close request for mds%d\n", s->s_mds); + ret = request_close_session(s); + if (ret < 0) + pr_err("unable to close session to mds%d: %d\n", + s->s_mds, ret); + } +} + +/* * delayed work -- periodically trim expired leases, renew caps with mds */ static void schedule_delayed(struct ceph_mds_client *mdsc) diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index cbf8af437140..f5adbebcb38e 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -480,6 +480,7 @@ struct ceph_mds_client { extern const char *ceph_mds_op_name(int op); extern bool check_session_state(struct ceph_mds_session *s); +void inc_session_sequence(struct ceph_mds_session *s); extern struct ceph_mds_session * __ceph_lookup_mds_session(struct ceph_mds_client *, int mds); diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 83cb4f26b689..9b785f11e95a 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -53,7 +53,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc, /* increment msg sequence number */ mutex_lock(&session->s_mutex); - session->s_seq++; + inc_session_sequence(session); mutex_unlock(&session->s_mutex); /* lookup inode */ diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 0da39c16dab4..b611f829cb61 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -873,7 +873,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, ceph_snap_op_name(op), split, trace_len); mutex_lock(&session->s_mutex); - session->s_seq++; + inc_session_sequence(session); mutex_unlock(&session->s_mutex); down_write(&mdsc->snap_rwsem); diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 5441c17562c5..d98a2e5dab9f 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -1078,7 +1078,8 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number, out_free: kfree(gl->gl_lksb.sb_lvbptr); kmem_cache_free(cachep, gl); - atomic_dec(&sdp->sd_glock_disposal); + if (atomic_dec_and_test(&sdp->sd_glock_disposal)) + wake_up(&sdp->sd_glock_wait); out: return ret; diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index aa3f5236befb..6c1432d78dce 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -165,6 +165,31 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) } /** + * gfs2_rgrp_metasync - sync out the metadata of a resource group + * @gl: the glock protecting the resource group + * + */ + +static int gfs2_rgrp_metasync(struct gfs2_glock *gl) +{ + struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; + struct address_space *metamapping = &sdp->sd_aspace; + struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); + const unsigned bsize = sdp->sd_sb.sb_bsize; + loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK; + loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1; + int error; + + filemap_fdatawrite_range(metamapping, start, end); + error = filemap_fdatawait_range(metamapping, start, end); + WARN_ON_ONCE(error && !gfs2_withdrawn(sdp)); + mapping_set_error(metamapping, error); + if (error) + gfs2_io_error(sdp); + return error; +} + +/** * rgrp_go_sync - sync out the metadata for this glock * @gl: the glock * @@ -176,11 +201,7 @@ 
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync) static int rgrp_go_sync(struct gfs2_glock *gl) { struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - struct address_space *mapping = &sdp->sd_aspace; struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl); - const unsigned bsize = sdp->sd_sb.sb_bsize; - loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK; - loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1; int error; if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags)) @@ -189,10 +210,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl) gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_RGRP_GO_SYNC); - filemap_fdatawrite_range(mapping, start, end); - error = filemap_fdatawait_range(mapping, start, end); - WARN_ON_ONCE(error && !gfs2_withdrawn(sdp)); - mapping_set_error(mapping, error); + error = gfs2_rgrp_metasync(gl); if (!error) error = gfs2_ail_empty_gl(gl); gfs2_free_clones(rgd); @@ -266,7 +284,24 @@ static void gfs2_clear_glop_pending(struct gfs2_inode *ip) } /** - * inode_go_sync - Sync the dirty data and/or metadata for an inode glock + * gfs2_inode_metasync - sync out the metadata of an inode + * @gl: the glock protecting the inode + * + */ +int gfs2_inode_metasync(struct gfs2_glock *gl) +{ + struct address_space *metamapping = gfs2_glock2aspace(gl); + int error; + + filemap_fdatawrite(metamapping); + error = filemap_fdatawait(metamapping); + if (error) + gfs2_io_error(gl->gl_name.ln_sbd); + return error; +} + +/** + * inode_go_sync - Sync the dirty metadata of an inode * @gl: the glock protecting the inode * */ @@ -297,8 +332,7 @@ static int inode_go_sync(struct gfs2_glock *gl) error = filemap_fdatawait(mapping); mapping_set_error(mapping, error); } - ret = filemap_fdatawait(metamapping); - mapping_set_error(metamapping, ret); + ret = gfs2_inode_metasync(gl); if (!error) error = ret; gfs2_ail_empty_gl(gl); diff --git a/fs/gfs2/glops.h b/fs/gfs2/glops.h index 2dd192e85618..695898afcaf1 100644 --- a/fs/gfs2/glops.h +++ b/fs/gfs2/glops.h @@ -22,6 +22,7 @@ extern const struct gfs2_glock_operations gfs2_quota_glops; extern const struct gfs2_glock_operations gfs2_journal_glops; extern const struct gfs2_glock_operations *gfs2_glops_list[]; +extern int gfs2_inode_metasync(struct gfs2_glock *gl); extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync); #endif /* __GLOPS_DOT_H__ */ diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index 6774865f5b5b..077ccb1b3ccc 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -180,7 +180,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); if (unlikely(error)) goto fail; - gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl); + if (blktype != GFS2_BLKST_UNLINKED) + gfs2_cancel_delete_work(ip->i_iopen_gh.gh_gl); glock_set_object(ip->i_iopen_gh.gh_gl, ip); gfs2_glock_put(io_gl); io_gl = NULL; diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index ed69298dd824..3922b26264f5 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -22,6 +22,7 @@ #include "incore.h" #include "inode.h" #include "glock.h" +#include "glops.h" #include "log.h" #include "lops.h" #include "meta_io.h" @@ -817,41 +818,19 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start, return error; } -/** - * gfs2_meta_sync - Sync all buffers associated with a glock - * @gl: The glock - * - */ - -void gfs2_meta_sync(struct gfs2_glock *gl) -{ - struct address_space *mapping = gfs2_glock2aspace(gl); - struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; - int error; - - if (mapping == 
NULL) - mapping = &sdp->sd_aspace; - - filemap_fdatawrite(mapping); - error = filemap_fdatawait(mapping); - - if (error) - gfs2_io_error(gl->gl_name.ln_sbd); -} - static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { - gfs2_meta_sync(ip->i_gl); + gfs2_inode_metasync(ip->i_gl); return; } if (pass != 1) return; - gfs2_meta_sync(ip->i_gl); + gfs2_inode_metasync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n", jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks); @@ -1060,14 +1039,14 @@ static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { - gfs2_meta_sync(ip->i_gl); + gfs2_inode_metasync(ip->i_gl); return; } if (pass != 1) return; /* data sync? */ - gfs2_meta_sync(ip->i_gl); + gfs2_inode_metasync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n", jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks); diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 4a3d8aecdf82..fbdbb08dcec6 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h @@ -27,8 +27,6 @@ extern void gfs2_log_submit_bio(struct bio **biop, int opf); extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); extern int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, bool keep_cache); -extern void gfs2_meta_sync(struct gfs2_glock *gl); - static inline unsigned int buf_limit(struct gfs2_sbd *sdp) { unsigned int limit; diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 7a7e3c10a9a9..61fce59cb4d3 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -633,8 +633,10 @@ static int init_statfs(struct gfs2_sbd *sdp) if (IS_ERR(sdp->sd_statfs_inode)) { error = PTR_ERR(sdp->sd_statfs_inode); fs_err(sdp, "can't read in statfs inode: %d\n", error); - goto fail; + goto out; } + if (sdp->sd_args.ar_spectator) + goto out; pn = gfs2_lookup_simple(master, "per_node"); if (IS_ERR(pn)) { @@ -682,15 +684,17 @@ free_local: iput(pn); put_statfs: iput(sdp->sd_statfs_inode); -fail: +out: return error; } /* Uninitialize and free up memory used by the list of statfs inodes */ static void uninit_statfs(struct gfs2_sbd *sdp) { - gfs2_glock_dq_uninit(&sdp->sd_sc_gh); - free_local_statfs_inodes(sdp); + if (!sdp->sd_args.ar_spectator) { + gfs2_glock_dq_uninit(&sdp->sd_sc_gh); + free_local_statfs_inodes(sdp); + } iput(sdp->sd_statfs_inode); } @@ -704,7 +708,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) if (undo) { jindex = 0; - goto fail_jinode_gh; + goto fail_statfs; } sdp->sd_jindex = gfs2_lookup_simple(master, "jindex"); diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index b5cbe21efdfb..c26c68ebd29d 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c @@ -349,7 +349,7 @@ static int update_statfs_inode(struct gfs2_jdesc *jd, mark_buffer_dirty(bh); brelse(bh); - gfs2_meta_sync(ip->i_gl); + gfs2_inode_metasync(ip->i_gl); out: return error; diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index ee491bb9c1cc..92d799a193b8 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -719,9 +719,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) } gfs2_free_clones(rgd); + return_all_reservations(rgd); kfree(rgd->rd_bits); rgd->rd_bits = NULL; - return_all_reservations(rgd); kmem_cache_free(gfs2_rgrpd_cachep, rgd); } } @@ -1370,6 +1370,9 @@ int gfs2_fitrim(struct file *filp, void __user *argp) if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!test_bit(SDF_JOURNAL_LIVE, 
&sdp->sd_flags)) + return -EROFS; + if (!blk_queue_discard(q)) return -EOPNOTSUPP; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index b285192bd6b3..b3d951ab8068 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -738,6 +738,7 @@ restart: gfs2_jindex_free(sdp); /* Take apart glock structures and buffer lists */ gfs2_gl_hash_clear(sdp); + truncate_inode_pages_final(&sdp->sd_aspace); gfs2_delete_debugfs_file(sdp); /* Unmount the locking protocol */ gfs2_lm_unmount(sdp); diff --git a/fs/io-wq.c b/fs/io-wq.c index 02894df7656d..b53c055bea6a 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -482,6 +482,10 @@ static void io_impersonate_work(struct io_worker *worker, current->files = work->identity->files; current->nsproxy = work->identity->nsproxy; task_unlock(current); + if (!work->identity->files) { + /* failed grabbing files, ensure work gets cancelled */ + work->flags |= IO_WQ_WORK_CANCEL; + } } if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->identity->fs) current->fs = work->identity->fs; diff --git a/fs/io_uring.c b/fs/io_uring.c index a7429c977eb3..8018c7076b25 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -995,20 +995,33 @@ static void io_sq_thread_drop_mm(void) if (mm) { kthread_unuse_mm(mm); mmput(mm); + current->mm = NULL; } } static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) { - if (!current->mm) { - if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) || - !ctx->sqo_task->mm || - !mmget_not_zero(ctx->sqo_task->mm))) - return -EFAULT; - kthread_use_mm(ctx->sqo_task->mm); + struct mm_struct *mm; + + if (current->mm) + return 0; + + /* Should never happen */ + if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL))) + return -EFAULT; + + task_lock(ctx->sqo_task); + mm = ctx->sqo_task->mm; + if (unlikely(!mm || !mmget_not_zero(mm))) + mm = NULL; + task_unlock(ctx->sqo_task); + + if (mm) { + kthread_use_mm(mm); + return 0; } - return 0; + return -EFAULT; } static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, @@ -1274,9 +1287,12 @@ static bool io_identity_cow(struct io_kiocb *req) /* add one for this request */ refcount_inc(&id->count); - /* drop old identity, assign new one. 
one ref for req, one for tctx */ - if (req->work.identity != tctx->identity && - refcount_sub_and_test(2, &req->work.identity->count)) + /* drop tctx and req identity references, if needed */ + if (tctx->identity != &tctx->__identity && + refcount_dec_and_test(&tctx->identity->count)) + kfree(tctx->identity); + if (req->work.identity != &tctx->__identity && + refcount_dec_and_test(&req->work.identity->count)) kfree(req->work.identity); req->work.identity = id; @@ -1577,14 +1593,29 @@ static void io_cqring_mark_overflow(struct io_ring_ctx *ctx) } } -static inline bool io_match_files(struct io_kiocb *req, - struct files_struct *files) +static inline bool __io_match_files(struct io_kiocb *req, + struct files_struct *files) { + return ((req->flags & REQ_F_WORK_INITIALIZED) && + (req->work.flags & IO_WQ_WORK_FILES)) && + req->work.identity->files == files; +} + +static bool io_match_files(struct io_kiocb *req, + struct files_struct *files) +{ + struct io_kiocb *link; + if (!files) return true; - if ((req->flags & REQ_F_WORK_INITIALIZED) && - (req->work.flags & IO_WQ_WORK_FILES)) - return req->work.identity->files == files; + if (__io_match_files(req, files)) + return true; + if (req->flags & REQ_F_LINK_HEAD) { + list_for_each_entry(link, &req->link_list, link_list) { + if (__io_match_files(link, files)) + return true; + } + } return false; } @@ -1668,7 +1699,8 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) WRITE_ONCE(cqe->user_data, req->user_data); WRITE_ONCE(cqe->res, res); WRITE_ONCE(cqe->flags, cflags); - } else if (ctx->cq_overflow_flushed || req->task->io_uring->in_idle) { + } else if (ctx->cq_overflow_flushed || + atomic_read(&req->task->io_uring->in_idle)) { /* * If we're in ring overflow flush mode, or in task cancel mode, * then we cannot store the request for later flushing, we need @@ -1838,7 +1870,7 @@ static void __io_free_req(struct io_kiocb *req) io_dismantle_req(req); percpu_counter_dec(&tctx->inflight); - if (tctx->in_idle) + if (atomic_read(&tctx->in_idle)) wake_up(&tctx->wait); put_task_struct(req->task); @@ -7695,7 +7727,8 @@ static int io_uring_alloc_task_context(struct task_struct *task) xa_init(&tctx->xa); init_waitqueue_head(&tctx->wait); tctx->last = NULL; - tctx->in_idle = 0; + atomic_set(&tctx->in_idle, 0); + tctx->sqpoll = false; io_init_identity(&tctx->__identity); tctx->identity = &tctx->__identity; task->io_uring = tctx; @@ -8388,22 +8421,6 @@ static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req) return false; } -static bool io_match_link_files(struct io_kiocb *req, - struct files_struct *files) -{ - struct io_kiocb *link; - - if (io_match_files(req, files)) - return true; - if (req->flags & REQ_F_LINK_HEAD) { - list_for_each_entry(link, &req->link_list, link_list) { - if (io_match_files(link, files)) - return true; - } - } - return false; -} - /* * We're looking to cancel 'req' because it's holding on to our files, but * 'req' could be a link to another request. 
See if it is, and cancel that @@ -8453,7 +8470,21 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx, static bool io_cancel_link_cb(struct io_wq_work *work, void *data) { - return io_match_link(container_of(work, struct io_kiocb, work), data); + struct io_kiocb *req = container_of(work, struct io_kiocb, work); + bool ret; + + if (req->flags & REQ_F_LINK_TIMEOUT) { + unsigned long flags; + struct io_ring_ctx *ctx = req->ctx; + + /* protect against races with linked timeouts */ + spin_lock_irqsave(&ctx->completion_lock, flags); + ret = io_match_link(req, data); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else { + ret = io_match_link(req, data); + } + return ret; } static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) @@ -8479,6 +8510,7 @@ static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) } static void io_cancel_defer_files(struct io_ring_ctx *ctx, + struct task_struct *task, struct files_struct *files) { struct io_defer_entry *de = NULL; @@ -8486,7 +8518,8 @@ static void io_cancel_defer_files(struct io_ring_ctx *ctx, spin_lock_irq(&ctx->completion_lock); list_for_each_entry_reverse(de, &ctx->defer_list, list) { - if (io_match_link_files(de->req, files)) { + if (io_task_match(de->req, task) && + io_match_files(de->req, files)) { list_cut_position(&list, &ctx->defer_list, &de->list); break; } @@ -8512,7 +8545,6 @@ static bool io_uring_cancel_files(struct io_ring_ctx *ctx, if (list_empty_careful(&ctx->inflight_list)) return false; - io_cancel_defer_files(ctx, files); /* cancel all at once, should be faster than doing it one by one*/ io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true); @@ -8598,8 +8630,16 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, { struct task_struct *task = current; - if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) + if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { task = ctx->sq_data->thread; + atomic_inc(&task->io_uring->in_idle); + io_sq_thread_park(ctx->sq_data); + } + + if (files) + io_cancel_defer_files(ctx, NULL, files); + else + io_cancel_defer_files(ctx, task, NULL); io_cqring_overflow_flush(ctx, true, task, files); @@ -8607,12 +8647,23 @@ static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx, io_run_task_work(); cond_resched(); } + + if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) { + atomic_dec(&task->io_uring->in_idle); + /* + * If the files that are going away are the ones in the thread + * identity, clear them out. + */ + if (task->io_uring->identity->files == files) + task->io_uring->identity->files = NULL; + io_sq_thread_unpark(ctx->sq_data); + } } /* * Note that this task has used io_uring. We use it for cancelation purposes. */ -static int io_uring_add_task_file(struct file *file) +static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file) { struct io_uring_task *tctx = current->io_uring; @@ -8634,6 +8685,14 @@ static int io_uring_add_task_file(struct file *file) tctx->last = file; } + /* + * This is race safe in that the task itself is doing this, hence it + * cannot be going through the exit/cancel paths at the same time. + * This cannot be modified while exit/cancel is running. 
+ */ + if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL)) + tctx->sqpoll = true; + return 0; } @@ -8675,7 +8734,7 @@ void __io_uring_files_cancel(struct files_struct *files) unsigned long index; /* make sure overflow events are dropped */ - tctx->in_idle = true; + atomic_inc(&tctx->in_idle); xa_for_each(&tctx->xa, index, file) { struct io_ring_ctx *ctx = file->private_data; @@ -8684,6 +8743,35 @@ void __io_uring_files_cancel(struct files_struct *files) if (files) io_uring_del_task_file(file); } + + atomic_dec(&tctx->in_idle); +} + +static s64 tctx_inflight(struct io_uring_task *tctx) +{ + unsigned long index; + struct file *file; + s64 inflight; + + inflight = percpu_counter_sum(&tctx->inflight); + if (!tctx->sqpoll) + return inflight; + + /* + * If we have SQPOLL rings, then we need to iterate and find them, and + * add the pending count for those. + */ + xa_for_each(&tctx->xa, index, file) { + struct io_ring_ctx *ctx = file->private_data; + + if (ctx->flags & IORING_SETUP_SQPOLL) { + struct io_uring_task *__tctx = ctx->sqo_task->io_uring; + + inflight += percpu_counter_sum(&__tctx->inflight); + } + } + + return inflight; } /* @@ -8697,11 +8785,11 @@ void __io_uring_task_cancel(void) s64 inflight; /* make sure overflow events are dropped */ - tctx->in_idle = true; + atomic_inc(&tctx->in_idle); do { /* read completions before cancelations */ - inflight = percpu_counter_sum(&tctx->inflight); + inflight = tctx_inflight(tctx); if (!inflight) break; __io_uring_files_cancel(NULL); @@ -8712,13 +8800,13 @@ void __io_uring_task_cancel(void) * If we've seen completions, retry. This avoids a race where * a completion comes in before we did prepare_to_wait(). */ - if (inflight != percpu_counter_sum(&tctx->inflight)) + if (inflight != tctx_inflight(tctx)) continue; schedule(); } while (1); finish_wait(&tctx->wait, &wait); - tctx->in_idle = false; + atomic_dec(&tctx->in_idle); } static int io_uring_flush(struct file *file, void *data) @@ -8863,7 +8951,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, io_sqpoll_wait_sq(ctx); submitted = to_submit; } else if (to_submit) { - ret = io_uring_add_task_file(f.file); + ret = io_uring_add_task_file(ctx, f.file); if (unlikely(ret)) goto out; mutex_lock(&ctx->uring_lock); @@ -8900,7 +8988,8 @@ out_fput: #ifdef CONFIG_PROC_FS static int io_uring_show_cred(int id, void *p, void *data) { - const struct cred *cred = p; + struct io_identity *iod = p; + const struct cred *cred = iod->creds; struct seq_file *m = data; struct user_namespace *uns = seq_user_ns(m); struct group_info *gi; @@ -9092,7 +9181,7 @@ err_fd: #if defined(CONFIG_UNIX) ctx->ring_sock->file = file; #endif - if (unlikely(io_uring_add_task_file(file))) { + if (unlikely(io_uring_add_task_file(ctx, file))) { file = ERR_PTR(-ENOMEM); goto err_fd; } diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index 8180061b9e16..10cc7979ce38 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -1374,6 +1374,7 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); WARN_ON_ONCE(!PageLocked(page)); WARN_ON_ONCE(PageWriteback(page)); + WARN_ON_ONCE(PageDirty(page)); /* * We cannot cancel the ioend directly here on error. We may have @@ -1382,33 +1383,22 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, * appropriately. */ if (unlikely(error)) { + /* + * Let the filesystem know what portion of the current page + * failed to map. 
If the page wasn't been added to ioend, it + * won't be affected by I/O completion and we must unlock it + * now. + */ + if (wpc->ops->discard_page) + wpc->ops->discard_page(page, file_offset); if (!count) { - /* - * If the current page hasn't been added to ioend, it - * won't be affected by I/O completions and we must - * discard and unlock it right here. - */ - if (wpc->ops->discard_page) - wpc->ops->discard_page(page); ClearPageUptodate(page); unlock_page(page); goto done; } - - /* - * If the page was not fully cleaned, we need to ensure that the - * higher layers come back to it correctly. That means we need - * to keep the page dirty, and for WB_SYNC_ALL writeback we need - * to ensure the PAGECACHE_TAG_TOWRITE index mark is not removed - * so another attempt to write this page in this writeback sweep - * will be made. - */ - set_page_writeback_keepwrite(page); - } else { - clear_page_dirty_for_io(page); - set_page_writeback(page); } + set_page_writeback(page); unlock_page(page); /* diff --git a/fs/proc/base.c b/fs/proc/base.c index 0f707003dda5..b362523a9829 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1049,6 +1049,8 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count, oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) / OOM_SCORE_ADJ_MAX; put_task_struct(task); + if (oom_adj > OOM_ADJUST_MAX) + oom_adj = OOM_ADJUST_MAX; len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj); return simple_read_from_buffer(buf, count, ppos, buffer, len); } diff --git a/fs/proc/cpuinfo.c b/fs/proc/cpuinfo.c index d0989a443c77..419760fd77bd 100644 --- a/fs/proc/cpuinfo.c +++ b/fs/proc/cpuinfo.c @@ -19,7 +19,7 @@ static int cpuinfo_open(struct inode *inode, struct file *file) static const struct proc_ops cpuinfo_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = cpuinfo_open, - .proc_read = seq_read, + .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = seq_release, }; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 2f9fa179194d..b84663252add 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -590,7 +590,7 @@ static int proc_seq_release(struct inode *inode, struct file *file) static const struct proc_ops proc_seq_ops = { /* not permanent -- can call into arbitrary seq_operations */ .proc_open = proc_seq_open, - .proc_read = seq_read, + .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = proc_seq_release, }; @@ -621,7 +621,7 @@ static int proc_single_open(struct inode *inode, struct file *file) static const struct proc_ops proc_single_ops = { /* not permanent -- can call into arbitrary ->single_show */ .proc_open = proc_single_open, - .proc_read = seq_read, + .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = single_release, }; diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 58c075e2a452..bde6b6f69852 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -597,6 +597,7 @@ static const struct file_operations proc_iter_file_ops = { .llseek = proc_reg_llseek, .read_iter = proc_reg_read_iter, .write = proc_reg_write, + .splice_read = generic_file_splice_read, .poll = proc_reg_poll, .unlocked_ioctl = proc_reg_unlocked_ioctl, .mmap = proc_reg_mmap, @@ -622,6 +623,7 @@ static const struct file_operations proc_reg_file_ops_compat = { static const struct file_operations proc_iter_file_ops_compat = { .llseek = proc_reg_llseek, .read_iter = proc_reg_read_iter, + .splice_read = generic_file_splice_read, .write = proc_reg_write, .poll = proc_reg_poll, .unlocked_ioctl = 
proc_reg_unlocked_ioctl, diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 46b3293015fe..4695b6de3151 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -226,7 +226,7 @@ static int stat_open(struct inode *inode, struct file *file) static const struct proc_ops stat_proc_ops = { .proc_flags = PROC_ENTRY_PERMANENT, .proc_open = stat_open, - .proc_read = seq_read, + .proc_read_iter = seq_read_iter, .proc_lseek = seq_lseek, .proc_release = single_release, }; diff --git a/fs/seq_file.c b/fs/seq_file.c index 31219c1db17d..3b20e21604e7 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -18,6 +18,7 @@ #include <linux/mm.h> #include <linux/printk.h> #include <linux/string_helpers.h> +#include <linux/uio.h> #include <linux/uaccess.h> #include <asm/page.h> @@ -146,7 +147,28 @@ Eoverflow: */ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) { - struct seq_file *m = file->private_data; + struct iovec iov = { .iov_base = buf, .iov_len = size}; + struct kiocb kiocb; + struct iov_iter iter; + ssize_t ret; + + init_sync_kiocb(&kiocb, file); + iov_iter_init(&iter, READ, &iov, 1, size); + + kiocb.ki_pos = *ppos; + ret = seq_read_iter(&kiocb, &iter); + *ppos = kiocb.ki_pos; + return ret; +} +EXPORT_SYMBOL(seq_read); + +/* + * Ready-made ->f_op->read_iter() + */ +ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter) +{ + struct seq_file *m = iocb->ki_filp->private_data; + size_t size = iov_iter_count(iter); size_t copied = 0; size_t n; void *p; @@ -158,14 +180,14 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) * if request is to read from zero offset, reset iterator to first * record as it might have been already advanced by previous requests */ - if (*ppos == 0) { + if (iocb->ki_pos == 0) { m->index = 0; m->count = 0; } - /* Don't assume *ppos is where we left it */ - if (unlikely(*ppos != m->read_pos)) { - while ((err = traverse(m, *ppos)) == -EAGAIN) + /* Don't assume ki_pos is where we left it */ + if (unlikely(iocb->ki_pos != m->read_pos)) { + while ((err = traverse(m, iocb->ki_pos)) == -EAGAIN) ; if (err) { /* With prejudice... */ @@ -174,7 +196,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) m->count = 0; goto Done; } else { - m->read_pos = *ppos; + m->read_pos = iocb->ki_pos; } } @@ -187,13 +209,11 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos) /* if not empty - flush it first */ if (m->count) { n = min(m->count, size); - err = copy_to_user(buf, m->buf + m->from, n); - if (err) + if (copy_to_iter(m->buf + m->from, n, iter) != n) goto Efault; m->count -= n; m->from += n; size -= n; - buf += n; copied += n; if (!size) goto Done; @@ -254,8 +274,7 @@ Fill: } m->op->stop(m, p); n = min(m->count, size); - err = copy_to_user(buf, m->buf, n); - if (err) + if (copy_to_iter(m->buf, n, iter) != n) goto Efault; copied += n; m->count -= n; @@ -264,7 +283,7 @@ Done: if (!copied) copied = err; else { - *ppos += copied; + iocb->ki_pos += copied; m->read_pos += copied; } mutex_unlock(&m->lock); @@ -276,7 +295,7 @@ Efault: err = -EFAULT; goto Done; } -EXPORT_SYMBOL(seq_read); +EXPORT_SYMBOL(seq_read_iter); /** * seq_lseek - ->llseek() method for sequential files. 
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 852b536551b5..15640015be9d 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -2467,6 +2467,7 @@ xfs_defer_agfl_block( new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); new->xefi_blockcount = 1; new->xefi_oinfo = *oinfo; + new->xefi_skip_discard = false; trace_xfs_agfl_free_defer(mp, agno, 0, agbno, 1); diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index e1bd484e5548..6747e97a7949 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -52,9 +52,9 @@ struct xfs_extent_free_item { xfs_fsblock_t xefi_startblock;/* starting fs block number */ xfs_extlen_t xefi_blockcount;/* number of blocks in extent */ + bool xefi_skip_discard; struct list_head xefi_list; struct xfs_owner_info xefi_oinfo; /* extent owner */ - bool xefi_skip_discard; }; #define XFS_BMAP_MAX_NMAP 4 diff --git a/fs/xfs/scrub/inode.c b/fs/xfs/scrub/inode.c index 3aa85b64de36..bb25ff1b770d 100644 --- a/fs/xfs/scrub/inode.c +++ b/fs/xfs/scrub/inode.c @@ -121,8 +121,7 @@ xchk_inode_flags( goto bad; /* rt flags require rt device */ - if ((flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_RTINHERIT)) && - !mp->m_rtdev_targp) + if ((flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp) goto bad; /* new rt bitmap flag only valid for rbmino */ diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 55d126d4e096..4304c6416fbb 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -346,8 +346,8 @@ xfs_map_blocks( ssize_t count = i_blocksize(inode); xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); - xfs_fileoff_t cow_fsb = NULLFILEOFF; - int whichfork = XFS_DATA_FORK; + xfs_fileoff_t cow_fsb; + int whichfork; struct xfs_bmbt_irec imap; struct xfs_iext_cursor icur; int retries = 0; @@ -381,6 +381,8 @@ xfs_map_blocks( * landed in a hole and we skip the block. 
*/ retry: + cow_fsb = NULLFILEOFF; + whichfork = XFS_DATA_FORK; xfs_ilock(ip, XFS_ILOCK_SHARED); ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || (ip->i_df.if_flags & XFS_IFEXTENTS)); @@ -527,13 +529,15 @@ xfs_prepare_ioend( */ static void xfs_discard_page( - struct page *page) + struct page *page, + loff_t fileoff) { struct inode *inode = page->mapping->host; struct xfs_inode *ip = XFS_I(inode); struct xfs_mount *mp = ip->i_mount; - loff_t offset = page_offset(page); - xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, offset); + unsigned int pageoff = offset_in_page(fileoff); + xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, fileoff); + xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff); int error; if (XFS_FORCED_SHUTDOWN(mp)) @@ -541,14 +545,14 @@ xfs_discard_page( xfs_alert_ratelimited(mp, "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.", - page, ip->i_ino, offset); + page, ip->i_ino, fileoff); error = xfs_bmap_punch_delalloc_range(ip, start_fsb, - i_blocks_per_page(inode, page)); + i_blocks_per_page(inode, page) - pageoff_fsb); if (error && !XFS_FORCED_SHUTDOWN(mp)) xfs_alert(mp, "page discard unable to remove delalloc mapping."); out_invalidate: - iomap_invalidatepage(page, 0, PAGE_SIZE); + iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff); } static const struct iomap_writeback_ops xfs_writeback_ops = { diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index 5e165456da68..1414ab79eacf 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -911,6 +911,16 @@ xfs_setattr_size( error = iomap_zero_range(inode, oldsize, newsize - oldsize, &did_zeroing, &xfs_buffered_write_iomap_ops); } else { + /* + * iomap won't detect a dirty page over an unwritten block (or a + * cow block over a hole) and subsequently skips zeroing the + * newly post-EOF portion of the page. Flush the new EOF to + * convert the block before the pagecache truncate. + */ + error = filemap_write_and_wait_range(inode->i_mapping, newsize, + newsize); + if (error) + return error; error = iomap_truncate_page(inode, newsize, &did_zeroing, &xfs_buffered_write_iomap_ops); } diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 16098dc42add..6fa05fb78189 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -1502,7 +1502,8 @@ xfs_reflink_unshare( &xfs_buffered_write_iomap_ops); if (error) goto out; - error = filemap_write_and_wait(inode->i_mapping); + + error = filemap_write_and_wait_range(inode->i_mapping, offset, len); if (error) goto out; diff --git a/include/kunit/test.h b/include/kunit/test.h index 9197da792336..db1b0ae666c4 100644 --- a/include/kunit/test.h +++ b/include/kunit/test.h @@ -252,13 +252,14 @@ static inline int kunit_run_all_tests(void) } #endif /* IS_BUILTIN(CONFIG_KUNIT) */ +#ifdef MODULE /** - * kunit_test_suites() - used to register one or more &struct kunit_suite - * with KUnit. + * kunit_test_suites_for_module() - used to register one or more + * &struct kunit_suite with KUnit. * - * @suites_list...: a statically allocated list of &struct kunit_suite. + * @__suites: a statically allocated list of &struct kunit_suite. * - * Registers @suites_list with the test framework. See &struct kunit_suite for + * Registers @__suites with the test framework. See &struct kunit_suite for * more information. * * If a test suite is built-in, module_init() gets translated into @@ -267,7 +268,6 @@ static inline int kunit_run_all_tests(void) * module_{init|exit} functions for the builtin case when registering * suites via kunit_test_suites() below. 
*/ -#ifdef MODULE #define kunit_test_suites_for_module(__suites) \ static int __init kunit_test_suites_init(void) \ { \ @@ -294,7 +294,7 @@ static inline int kunit_run_all_tests(void) * kunit_test_suites() - used to register one or more &struct kunit_suite * with KUnit. * - * @suites: a statically allocated list of &struct kunit_suite. + * @__suites: a statically allocated list of &struct kunit_suite. * * Registers @suites with the test framework. See &struct kunit_suite for * more information. @@ -308,10 +308,10 @@ static inline int kunit_run_all_tests(void) * module. * */ -#define kunit_test_suites(...) \ +#define kunit_test_suites(__suites...) \ __kunit_test_suites(__UNIQUE_ID(array), \ __UNIQUE_ID(suites), \ - __VA_ARGS__) + ##__suites) #define kunit_test_suite(suite) kunit_test_suites(&suite) diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index b23eeca4d677..794b2a33a2c3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -235,6 +235,8 @@ enum hctx_type { * @flags: Zero or more BLK_MQ_F_* flags. * @driver_data: Pointer to data owned by the block driver that created this * tag set. + * @active_queues_shared_sbitmap: + * number of active request queues per tag set. * @__bitmap_tags: A shared tags sbitmap, used over all hctx's * @__breserved_tags: * A shared reserved tags sbitmap, used over all hctx's diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h index 900b9f4e0605..fc61cf4eff1c 100644 --- a/include/linux/can/skb.h +++ b/include/linux/can/skb.h @@ -61,21 +61,17 @@ static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk) */ static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb) { - if (skb_shared(skb)) { - struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); + struct sk_buff *nskb; - if (likely(nskb)) { - can_skb_set_owner(nskb, skb->sk); - consume_skb(skb); - return nskb; - } else { - kfree_skb(skb); - return NULL; - } + nskb = skb_clone(skb, GFP_ATOMIC); + if (unlikely(!nskb)) { + kfree_skb(skb); + return NULL; } - /* we can assume to have an unshared skb with proper owner */ - return skb; + can_skb_set_owner(nskb, skb->sk); + consume_skb(skb); + return nskb; } #endif /* !_CAN_SKB_H */ diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 868364cea3b7..35b2d845704d 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -30,7 +30,8 @@ struct io_uring_task { struct percpu_counter inflight; struct io_identity __identity; struct io_identity *identity; - bool in_idle; + atomic_t in_idle; + bool sqpoll; }; #if defined(CONFIG_IO_URING) diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 172b3397a1a3..5bd3cac4df9c 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -221,7 +221,7 @@ struct iomap_writeback_ops { * Optional, allows the file system to discard state on a page where * we failed to submit any I/O. 
*/ - void (*discard_page)(struct page *page); + void (*discard_page)(struct page *page, loff_t fileoff); }; struct iomap_writepage_ctx { diff --git a/include/linux/mm.h b/include/linux/mm.h index ef360fe70aaf..db6ae4d3fb4e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2759,6 +2759,15 @@ static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, return VM_FAULT_NOPAGE; } +#ifndef io_remap_pfn_range +static inline int io_remap_pfn_range(struct vm_area_struct *vma, + unsigned long addr, unsigned long pfn, + unsigned long size, pgprot_t prot) +{ + return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot)); +} +#endif + static inline vm_fault_t vmf_error(int err) { if (err == -ENOMEM) diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 89016d08f6a2..f6267e2883f2 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -24,6 +24,12 @@ struct nfnl_callback { const u_int16_t attr_count; /* number of nlattr's */ }; +enum nfnl_abort_action { + NFNL_ABORT_NONE = 0, + NFNL_ABORT_AUTOLOAD, + NFNL_ABORT_VALIDATE, +}; + struct nfnetlink_subsystem { const char *name; __u8 subsys_id; /* nfnetlink subsystem ID */ @@ -31,7 +37,8 @@ struct nfnetlink_subsystem { const struct nfnl_callback *cb; /* callback for individual types */ struct module *owner; int (*commit)(struct net *net, struct sk_buff *skb); - int (*abort)(struct net *net, struct sk_buff *skb, bool autoload); + int (*abort)(struct net *net, struct sk_buff *skb, + enum nfnl_abort_action action); void (*cleanup)(struct net *net); bool (*valid_genid)(struct net *net, u32 genid); }; diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 082e2c41b7ff..5b70ca868bb1 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -16,7 +16,7 @@ struct ip_rt_info { u_int32_t mark; }; -int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned addr_type); struct nf_queue_entry; diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 9b67394471e1..48314ade1506 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -42,7 +42,7 @@ struct nf_ipv6_ops { #if IS_MODULE(CONFIG_IPV6) int (*chk_addr)(struct net *net, const struct in6_addr *addr, const struct net_device *dev, int strict); - int (*route_me_harder)(struct net *net, struct sk_buff *skb); + int (*route_me_harder)(struct net *net, struct sock *sk, struct sk_buff *skb); int (*dev_get_saddr)(struct net *net, const struct net_device *dev, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); @@ -143,9 +143,9 @@ static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk, #endif } -int ip6_route_me_harder(struct net *net, struct sk_buff *skb); +int ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb); -static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) +static inline int nf_ip6_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb) { #if IS_MODULE(CONFIG_IPV6) const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); @@ -153,9 +153,9 @@ static inline int nf_ip6_route_me_harder(struct net *net, struct sk_buff *skb) if (!v6_ops) return -EHOSTUNREACH; - return v6_ops->route_me_harder(net, skb); + return v6_ops->route_me_harder(net, sk, skb); #elif IS_BUILTIN(CONFIG_IPV6) - return 
ip6_route_me_harder(net, skb); + return ip6_route_me_harder(net, sk, skb); #else return -EHOSTUNREACH; #endif diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c77b7c31b2e4..e1e19c1f9ec9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -344,9 +344,9 @@ static inline struct page *find_get_page_flags(struct address_space *mapping, /** * find_lock_page - locate, pin and lock a pagecache page * @mapping: the address_space to search - * @offset: the page index + * @index: the page index * - * Looks up the page cache entry at @mapping & @offset. If there is a + * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, it is returned locked and with an increased * refcount. * @@ -363,9 +363,9 @@ static inline struct page *find_lock_page(struct address_space *mapping, /** * find_lock_head - Locate, pin and lock a pagecache page. * @mapping: The address_space to search. - * @offset: The page index. + * @index: The page index. * - * Looks up the page cache entry at @mapping & @offset. If there is a + * Looks up the page cache entry at @mapping & @index. If there is a * page cache page, its head page is returned locked and with an increased * refcount. * diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 38c33eabea89..71125a4676c4 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1427,10 +1427,6 @@ typedef unsigned int pgtbl_mod_mask; #endif /* !__ASSEMBLY__ */ -#ifndef io_remap_pfn_range -#define io_remap_pfn_range remap_pfn_range -#endif - #ifndef has_transparent_hugepage #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define has_transparent_hugepage() 1 diff --git a/include/linux/phy.h b/include/linux/phy.h index eb3cb1a98b45..56563e5e0dc7 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -147,16 +147,8 @@ typedef enum { PHY_INTERFACE_MODE_MAX, } phy_interface_t; -/** +/* * phy_supported_speeds - return all speeds currently supported by a PHY device - * @phy: The PHY device to return supported speeds of. - * @speeds: buffer to store supported speeds in. - * @size: size of speeds buffer. - * - * Description: Returns the number of supported speeds, and fills - * the speeds buffer with the supported speeds. If speeds buffer is - * too small to contain all currently supported speeds, will return as - * many speeds as can fit. */ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, @@ -1022,14 +1014,9 @@ static inline int __phy_modify_changed(struct phy_device *phydev, u32 regnum, regnum, mask, set); } -/** +/* * phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Same rules as for phy_read(); */ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); @@ -1064,38 +1051,21 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); __ret; \ }) -/** +/* * __phy_read_mmd - Convenience function for reading a register * from an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to read from - * @regnum: The register on the MMD to read - * - * Same rules as for __phy_read(); */ int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); -/** +/* * phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. 
- * @phydev: The phy_device struct - * @devad: The MMD to write to - * @regnum: The register on the MMD to read - * @val: value to write to @regnum - * - * Same rules as for phy_write(); */ int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); -/** +/* * __phy_write_mmd - Convenience function for writing a register * on an MMD on a given PHY. - * @phydev: The phy_device struct - * @devad: The MMD to write to - * @regnum: The register on the MMD to read - * @val: value to write to @regnum - * - * Same rules as for __phy_write(); */ int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 18b02dcc168e..4b708f4e8eed 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -54,11 +54,10 @@ extern u64 pm_runtime_autosuspend_expiration(struct device *dev); extern void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns); extern void pm_runtime_set_memalloc_noio(struct device *dev, bool enable); -extern void pm_runtime_clean_up_links(struct device *dev); extern void pm_runtime_get_suppliers(struct device *dev); extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); -extern void pm_runtime_drop_link(struct device *dev); +extern void pm_runtime_drop_link(struct device_link *link); /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. @@ -276,11 +275,10 @@ static inline u64 pm_runtime_autosuspend_expiration( struct device *dev) { return 0; } static inline void pm_runtime_set_memalloc_noio(struct device *dev, bool enable){} -static inline void pm_runtime_clean_up_links(struct device *dev) {} static inline void pm_runtime_get_suppliers(struct device *dev) {} static inline void pm_runtime_put_suppliers(struct device *dev) {} static inline void pm_runtime_new_link(struct device *dev) {} -static inline void pm_runtime_drop_link(struct device *dev) {} +static inline void pm_runtime_drop_link(struct device_link *link) {} #endif /* !CONFIG_PM */ diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 7fabb1af18e0..497990c69b0b 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -147,24 +147,6 @@ static inline unsigned int refcount_read(const refcount_t *r) return atomic_read(&r->refs); } -/** - * refcount_add_not_zero - add a value to a refcount unless it is 0 - * @i: the value to add to the refcount - * @r: the refcount - * - * Will saturate at REFCOUNT_SATURATED and WARN. - * - * Provides no memory ordering, it is assumed the caller has guaranteed the - * object memory to be stable (RCU, etc.). It does provide a control dependency - * and thereby orders future stores. See the comment on top. - * - * Use of this function is not recommended for the normal reference counting - * use case in which references are taken and released one at a time. In these - * cases, refcount_inc(), or one of its variants, should instead be used to - * increment a reference count. 
- * - * Return: false if the passed refcount is 0, true otherwise - */ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) { int old = refcount_read(r); @@ -183,17 +165,12 @@ static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, in return old; } -static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) -{ - return __refcount_add_not_zero(i, r, NULL); -} - /** - * refcount_add - add a value to a refcount + * refcount_add_not_zero - add a value to a refcount unless it is 0 * @i: the value to add to the refcount * @r: the refcount * - * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN. + * Will saturate at REFCOUNT_SATURATED and WARN. * * Provides no memory ordering, it is assumed the caller has guaranteed the * object memory to be stable (RCU, etc.). It does provide a control dependency @@ -203,7 +180,14 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) * use case in which references are taken and released one at a time. In these * cases, refcount_inc(), or one of its variants, should instead be used to * increment a reference count. + * + * Return: false if the passed refcount is 0, true otherwise */ +static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r) +{ + return __refcount_add_not_zero(i, r, NULL); +} + static inline void __refcount_add(int i, refcount_t *r, int *oldp) { int old = atomic_fetch_add_relaxed(i, &r->refs); @@ -217,11 +201,32 @@ static inline void __refcount_add(int i, refcount_t *r, int *oldp) refcount_warn_saturate(r, REFCOUNT_ADD_OVF); } +/** + * refcount_add - add a value to a refcount + * @i: the value to add to the refcount + * @r: the refcount + * + * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN. + * + * Provides no memory ordering, it is assumed the caller has guaranteed the + * object memory to be stable (RCU, etc.). It does provide a control dependency + * and thereby orders future stores. See the comment on top. + * + * Use of this function is not recommended for the normal reference counting + * use case in which references are taken and released one at a time. In these + * cases, refcount_inc(), or one of its variants, should instead be used to + * increment a reference count. + */ static inline void refcount_add(int i, refcount_t *r) { __refcount_add(i, r, NULL); } +static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero(1, r, oldp); +} + /** * refcount_inc_not_zero - increment a refcount unless it is 0 * @r: the refcount to increment @@ -235,14 +240,14 @@ static inline void refcount_add(int i, refcount_t *r) * * Return: true if the increment was successful, false otherwise */ -static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp) +static inline __must_check bool refcount_inc_not_zero(refcount_t *r) { - return __refcount_add_not_zero(1, r, oldp); + return __refcount_inc_not_zero(r, NULL); } -static inline __must_check bool refcount_inc_not_zero(refcount_t *r) +static inline void __refcount_inc(refcount_t *r, int *oldp) { - return __refcount_inc_not_zero(r, NULL); + __refcount_add(1, r, oldp); } /** @@ -257,14 +262,27 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r) * Will WARN if the refcount is 0, as this represents a possible use-after-free * condition. 
*/ -static inline void __refcount_inc(refcount_t *r, int *oldp) +static inline void refcount_inc(refcount_t *r) { - __refcount_add(1, r, oldp); + __refcount_inc(r, NULL); } -static inline void refcount_inc(refcount_t *r) +static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp) { - __refcount_inc(r, NULL); + int old = atomic_fetch_sub_release(i, &r->refs); + + if (oldp) + *oldp = old; + + if (old == i) { + smp_acquire__after_ctrl_dep(); + return true; + } + + if (unlikely(old < 0 || old - i < 0)) + refcount_warn_saturate(r, REFCOUNT_SUB_UAF); + + return false; } /** @@ -287,27 +305,14 @@ static inline void refcount_inc(refcount_t *r) * * Return: true if the resulting refcount is 0, false otherwise */ -static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp) +static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) { - int old = atomic_fetch_sub_release(i, &r->refs); - - if (oldp) - *oldp = old; - - if (old == i) { - smp_acquire__after_ctrl_dep(); - return true; - } - - if (unlikely(old < 0 || old - i < 0)) - refcount_warn_saturate(r, REFCOUNT_SUB_UAF); - - return false; + return __refcount_sub_and_test(i, r, NULL); } -static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) +static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp) { - return __refcount_sub_and_test(i, r, NULL); + return __refcount_sub_and_test(1, r, oldp); } /** @@ -323,26 +328,11 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r) * * Return: true if the resulting refcount is 0, false otherwise */ -static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp) -{ - return __refcount_sub_and_test(1, r, oldp); -} - static inline __must_check bool refcount_dec_and_test(refcount_t *r) { return __refcount_dec_and_test(r, NULL); } -/** - * refcount_dec - decrement a refcount - * @r: the refcount - * - * Similar to atomic_dec(), it will WARN on underflow and fail to decrement - * when saturated at REFCOUNT_SATURATED. - * - * Provides release memory ordering, such that prior loads and stores are done - * before. - */ static inline void __refcount_dec(refcount_t *r, int *oldp) { int old = atomic_fetch_sub_release(1, &r->refs); @@ -354,6 +344,16 @@ static inline void __refcount_dec(refcount_t *r, int *oldp) refcount_warn_saturate(r, REFCOUNT_DEC_LEAK); } +/** + * refcount_dec - decrement a refcount + * @r: the refcount + * + * Similar to atomic_dec(), it will WARN on underflow and fail to decrement + * when saturated at REFCOUNT_SATURATED. + * + * Provides release memory ordering, such that prior loads and stores are done + * before. 
+ */ static inline void refcount_dec(refcount_t *r) { __refcount_dec(r, NULL); diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 813614d4b71f..b83b3ae3c877 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h @@ -107,6 +107,7 @@ void seq_pad(struct seq_file *m, char c); char *mangle_path(char *s, const char *p, const char *esc); int seq_open(struct file *, const struct seq_operations *); ssize_t seq_read(struct file *, char __user *, size_t, loff_t *); +ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter); loff_t seq_lseek(struct file *, loff_t, int); int seq_release(struct inode *, struct file *); int seq_write(struct seq_file *seq, const void *data, size_t len); diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 661edfc8722e..d5ab8d99739f 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1444,7 +1444,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy, enum cfg80211_station_type statype); /** - * enum station_info_rate_flags - bitrate info flags + * enum rate_info_flags - bitrate info flags * * Used by the driver to indicate the specific rate transmission * type for 802.11n transmissions. @@ -1517,7 +1517,7 @@ struct rate_info { }; /** - * enum station_info_rate_flags - bitrate info flags + * enum bss_param_flags - bitrate info flags * * Used by the driver to indicate the specific rate transmission * type for 802.11n transmissions. @@ -6467,7 +6467,8 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, struct ieee80211_channel *channel, gfp_t gfp); /** - * cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate + * cfg80211_notify_new_peer_candidate - notify cfg80211 of a new mesh peer + * candidate * * @dev: network device * @macaddr: the MAC address of the new candidate @@ -7606,7 +7607,7 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate); void cfg80211_unregister_wdev(struct wireless_dev *wdev); /** - * struct cfg80211_ft_event - FT Information Elements + * struct cfg80211_ft_event_params - FT Information Elements * @ies: FT IEs * @ies_len: length of the FT IE in bytes * @target_ap: target AP's MAC address diff --git a/include/net/mac80211.h b/include/net/mac80211.h index e8e295dae744..dcdba96814a2 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -3311,7 +3311,7 @@ enum ieee80211_roc_type { }; /** - * enum ieee80211_reconfig_complete_type - reconfig type + * enum ieee80211_reconfig_type - reconfig type * * This enum is used by the reconfig_complete() callback to indicate what * reconfiguration type was completed. 
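The refcount.h churn above is purely organizational: the kernel-doc moves next to the non-underscore wrappers and the new __refcount_*() variants only add an optional *oldp out-parameter, while the documented semantics are unchanged: refcount_add_not_zero() and refcount_inc_not_zero() return false instead of resurrecting an object whose count has already hit zero, and refcount_inc() WARNs in that case. A minimal sketch of the lookup pattern that documentation recommends, using a hypothetical RCU-protected demo_obj (illustration only, not part of the patch):

/*
 * Illustration only (not from the patch): a typical RCU lookup that takes
 * a reference with refcount_inc_not_zero() and drops it with
 * refcount_dec_and_test().  The demo_obj type and demo_current pointer
 * are hypothetical.
 */
#include <linux/refcount.h>
#include <linux/rcupdate.h>

struct demo_obj {
        refcount_t refs;
        struct rcu_head rcu;
};

static struct demo_obj __rcu *demo_current;

static struct demo_obj *demo_get(void)
{
        struct demo_obj *obj;

        rcu_read_lock();
        obj = rcu_dereference(demo_current);
        /* A count of 0 means teardown has begun: do not take a new reference. */
        if (obj && !refcount_inc_not_zero(&obj->refs))
                obj = NULL;
        rcu_read_unlock();

        return obj;
}

static void demo_put(struct demo_obj *obj)
{
        if (refcount_dec_and_test(&obj->refs))
                kfree_rcu(obj, rcu);
}

Calling refcount_inc() in demo_get() instead would trigger the use-after-free WARN described above once the count had dropped to zero; the _not_zero() form turns that window into a clean lookup failure.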
@@ -6334,7 +6334,8 @@ bool ieee80211_tx_prepare_skb(struct ieee80211_hw *hw, int band, struct ieee80211_sta **sta); /** - * Sanity-check and parse the radiotap header of injected frames + * ieee80211_parse_tx_radiotap - Sanity-check and parse the radiotap header + * of injected frames * @skb: packet injected by userspace * @dev: the &struct device of this 802.11 device */ @@ -6389,7 +6390,7 @@ int ieee80211_parse_p2p_noa(const struct ieee80211_p2p_noa_attr *attr, void ieee80211_update_p2p_noa(struct ieee80211_noa_data *data, u32 tsf); /** - * ieee80211_tdls_oper - request userspace to perform a TDLS operation + * ieee80211_tdls_oper_request - request userspace to perform a TDLS operation * @vif: virtual interface * @peer: the peer's destination address * @oper: the requested TDLS operation diff --git a/include/sound/control.h b/include/sound/control.h index e128cff10dfa..77d9fa10812d 100644 --- a/include/sound/control.h +++ b/include/sound/control.h @@ -42,7 +42,7 @@ struct snd_kcontrol_new { snd_ctl_elem_iface_t iface; /* interface identifier */ unsigned int device; /* device/client number */ unsigned int subdevice; /* subdevice (substream) number */ - const unsigned char *name; /* ASCII name of item */ + const char *name; /* ASCII name of item */ unsigned int index; /* index of item */ unsigned int access; /* access rights */ unsigned int count; /* count of same elements */ diff --git a/include/sound/core.h b/include/sound/core.h index 381a010a1bd4..0462c577d7a3 100644 --- a/include/sound/core.h +++ b/include/sound/core.h @@ -332,7 +332,8 @@ void __snd_printk(unsigned int level, const char *file, int line, #define snd_BUG() WARN(1, "BUG?\n") /** - * Suppress high rates of output when CONFIG_SND_DEBUG is enabled. + * snd_printd_ratelimit - Suppress high rates of output when + * CONFIG_SND_DEBUG is enabled. */ #define snd_printd_ratelimit() printk_ratelimit() diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 2ba5df2c9e23..2336bf9243e1 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -1284,8 +1284,8 @@ snd_pcm_sgbuf_get_ptr(struct snd_pcm_substream *substream, unsigned int ofs) } /** - * snd_pcm_sgbuf_chunk_size - Compute the max size that fits within the contig. - * page from the given size + * snd_pcm_sgbuf_get_chunk_size - Compute the max size that fits within the + * contig. 
page from the given size * @substream: PCM substream * @ofs: byte offset * @size: byte size to examine diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h index c1661febc2dc..0564fd7ccde4 100644 --- a/include/uapi/linux/icmpv6.h +++ b/include/uapi/linux/icmpv6.h @@ -138,6 +138,7 @@ struct icmp6hdr { #define ICMPV6_HDR_FIELD 0 #define ICMPV6_UNK_NEXTHDR 1 #define ICMPV6_UNK_OPTION 2 +#define ICMPV6_HDR_INCOMP 3 /* * constants for (set|get)sockopt diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h index 7184265c0b0d..9555f31c8425 100644 --- a/include/uapi/sound/compress_offload.h +++ b/include/uapi/sound/compress_offload.h @@ -144,7 +144,7 @@ struct snd_compr_metadata { __u32 value[8]; } __attribute__((packed, aligned(4))); -/** +/* * compress path ioctl definitions * SNDRV_COMPRESS_GET_CAPS: Query capability of DSP * SNDRV_COMPRESS_GET_CODEC_CAPS: Query capability of a codec diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h index 06b0b57e996c..d1b3889f74d8 100644 --- a/include/video/imx-ipu-v3.h +++ b/include/video/imx-ipu-v3.h @@ -484,9 +484,6 @@ int ipu_smfc_set_watermark(struct ipu_smfc *smfc, u32 set_level, u32 clr_level); enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc); enum ipu_color_space ipu_pixelformat_to_colorspace(u32 pixelformat); -enum ipu_color_space ipu_mbus_code_to_colorspace(u32 mbus_code); -int ipu_stride_to_bytes(u32 pixel_stride, u32 pixelformat); -bool ipu_pixelformat_is_planar(u32 pixelformat); int ipu_degrees_to_rot_mode(enum ipu_rotate_mode *mode, int degrees, bool hflip, bool vflip); int ipu_rot_mode_to_degrees(int *degrees, enum ipu_rotate_mode mode, diff --git a/kernel/entry/common.c b/kernel/entry/common.c index 2b8366693d5c..e9e2df3f3f9e 100644 --- a/kernel/entry/common.c +++ b/kernel/entry/common.c @@ -337,10 +337,10 @@ noinstr irqentry_state_t irqentry_enter(struct pt_regs *regs) * already contains a warning when RCU is not watching, so no point * in having another one here. */ + lockdep_hardirqs_off(CALLER_ADDR0); instrumentation_begin(); rcu_irq_enter_check_tick(); - /* Use the combo lockdep/tracing function */ - trace_hardirqs_off(); + trace_hardirqs_off_finish(); instrumentation_end(); return ret; diff --git a/kernel/events/core.c b/kernel/events/core.c index da467e1dd49a..5a29ab09e72d 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10085,6 +10085,7 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) { int fpos = token == IF_SRC_FILE ? 
2 : 1; + kfree(filename); filename = match_strdup(&args[fpos]); if (!filename) { ret = -ENOMEM; @@ -10131,16 +10132,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, */ ret = -EOPNOTSUPP; if (!event->ctx->task) - goto fail_free_name; + goto fail; /* look up the path and grab its inode */ ret = kern_path(filename, LOOKUP_FOLLOW, &filter->path); if (ret) - goto fail_free_name; - - kfree(filename); - filename = NULL; + goto fail; ret = -EINVAL; if (!filter->path.dentry || @@ -10160,13 +10158,13 @@ perf_event_parse_addr_filter(struct perf_event *event, char *fstr, if (state != IF_STATE_ACTION) goto fail; + kfree(filename); kfree(orig); return 0; -fail_free_name: - kfree(filename); fail: + kfree(filename); free_filters_list(filters); kfree(orig); diff --git a/kernel/fork.c b/kernel/fork.c index 32083db7a2a2..6d266388d380 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2167,14 +2167,9 @@ static __latent_entropy struct task_struct *copy_process( /* ok, now we should be set up.. */ p->pid = pid_nr(pid); if (clone_flags & CLONE_THREAD) { - p->exit_signal = -1; p->group_leader = current->group_leader; p->tgid = current->tgid; } else { - if (clone_flags & CLONE_PARENT) - p->exit_signal = current->group_leader->exit_signal; - else - p->exit_signal = args->exit_signal; p->group_leader = p; p->tgid = p->pid; } @@ -2218,9 +2213,14 @@ static __latent_entropy struct task_struct *copy_process( if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { p->real_parent = current->real_parent; p->parent_exec_id = current->parent_exec_id; + if (clone_flags & CLONE_THREAD) + p->exit_signal = -1; + else + p->exit_signal = current->group_leader->exit_signal; } else { p->real_parent = current; p->parent_exec_id = current->self_exec_id; + p->exit_signal = args->exit_signal; } klp_copy_process(p); diff --git a/kernel/futex.c b/kernel/futex.c index f8614ef4ff31..ac328874f6e5 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -2380,10 +2380,22 @@ retry: } /* - * Since we just failed the trylock; there must be an owner. + * The trylock just failed, so either there is an owner or + * there is a higher priority waiter than this one. */ newowner = rt_mutex_owner(&pi_state->pi_mutex); - BUG_ON(!newowner); + /* + * If the higher priority waiter has not yet taken over the + * rtmutex then newowner is NULL. We can't return here with + * that state because it's inconsistent vs. the user space + * state. So drop the locks and try again. It's a valid + * situation and not any different from the other retry + * conditions. 
+ */ + if (unlikely(!newowner)) { + err = -EAGAIN; + goto handle_err; + } } else { WARN_ON_ONCE(argowner != current); if (oldowner == current) { diff --git a/kernel/hung_task.c b/kernel/hung_task.c index ce76f490126c..396ebaebea3f 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -225,8 +225,7 @@ static long hung_timeout_jiffies(unsigned long last_checked, * Process updating of timeout sysctl */ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) + void *buffer, size_t *lenp, loff_t *ppos) { int ret; diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 10a5aff4eecc..164a031cfdb6 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -82,6 +82,7 @@ config IRQ_FASTEOI_HIERARCHY_HANDLERS # Generic IRQ IPI support config GENERIC_IRQ_IPI bool + select IRQ_DOMAIN_HIERARCHY # Generic MSI interrupt support config GENERIC_MSI_IRQ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 8a12a25fa40d..41fdbb7953c6 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -1249,7 +1249,13 @@ __acquires(hlist_lock) *head = &kretprobe_inst_table[hash]; hlist_lock = kretprobe_table_lock_ptr(hash); - raw_spin_lock_irqsave(hlist_lock, *flags); + /* + * Nested is a workaround that will soon not be needed. + * There's other protections that make sure the same lock + * is not taken on the same CPU that lockdep is unaware of. + * Differentiate when it is taken in NMI context. + */ + raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi()); } NOKPROBE_SYMBOL(kretprobe_hash_lock); @@ -1258,7 +1264,13 @@ static void kretprobe_table_lock(unsigned long hash, __acquires(hlist_lock) { raw_spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); - raw_spin_lock_irqsave(hlist_lock, *flags); + /* + * Nested is a workaround that will soon not be needed. + * There's other protections that make sure the same lock + * is not taken on the same CPU that lockdep is unaware of. + * Differentiate when it is taken in NMI context. + */ + raw_spin_lock_irqsave_nested(hlist_lock, *flags, !!in_nmi()); } NOKPROBE_SYMBOL(kretprobe_table_lock); @@ -2028,7 +2040,12 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) /* TODO: consider to only swap the RA after the last pre_handler fired */ hash = hash_ptr(current, KPROBE_HASH_BITS); - raw_spin_lock_irqsave(&rp->lock, flags); + /* + * Nested is a workaround that will soon not be needed. + * There's other protections that make sure the same lock + * is not taken on the same CPU that lockdep is unaware of. + */ + raw_spin_lock_irqsave_nested(&rp->lock, flags, 1); if (!hlist_empty(&rp->free_instances)) { ri = hlist_entry(rp->free_instances.first, struct kretprobe_instance, hlist); @@ -2039,7 +2056,7 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs) ri->task = current; if (rp->entry_handler && rp->entry_handler(ri, regs)) { - raw_spin_lock_irqsave(&rp->lock, flags); + raw_spin_lock_irqsave_nested(&rp->lock, flags, 1); hlist_add_head(&ri->hlist, &rp->free_instances); raw_spin_unlock_irqrestore(&rp->lock, flags); return 0; diff --git a/kernel/kthread.c b/kernel/kthread.c index e29773c82b70..933a625621b8 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -897,7 +897,8 @@ void kthread_delayed_work_timer_fn(struct timer_list *t) /* Move the work from worker->delayed_work_list. 
*/ WARN_ON_ONCE(list_empty(&work->node)); list_del_init(&work->node); - kthread_insert_work(worker, work, &worker->work_list); + if (!work->canceling) + kthread_insert_work(worker, work, &worker->work_list); raw_spin_unlock_irqrestore(&worker->lock, flags); } diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index c03a5775d019..d73bccde2720 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -102,9 +102,12 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time) static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time, unsigned int next_freq) { - if (sg_policy->next_freq == next_freq && - !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS)) - return false; + if (!sg_policy->need_freq_update) { + if (sg_policy->next_freq == next_freq) + return false; + } else { + sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); + } sg_policy->next_freq = next_freq; sg_policy->last_freq_update_time = time; @@ -162,11 +165,9 @@ static unsigned int get_next_freq(struct sugov_policy *sg_policy, freq = map_util_freq(util, freq, max); - if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update && - !cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS)) + if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update) return sg_policy->next_freq; - sg_policy->need_freq_update = false; sg_policy->cached_raw_freq = freq; return cpufreq_driver_resolve_freq(policy, freq); } @@ -442,7 +443,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, struct sugov_policy *sg_policy = sg_cpu->sg_policy; unsigned long util, max; unsigned int next_f; - bool busy; unsigned int cached_freq = sg_policy->cached_raw_freq; sugov_iowait_boost(sg_cpu, time, flags); @@ -453,9 +453,6 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, if (!sugov_should_update_freq(sg_policy, time)) return; - /* Limits may have changed, don't skip frequency update */ - busy = !sg_policy->need_freq_update && sugov_cpu_is_busy(sg_cpu); - util = sugov_get_util(sg_cpu); max = sg_cpu->max; util = sugov_iowait_apply(sg_cpu, time, util, max); @@ -464,7 +461,7 @@ static void sugov_update_single(struct update_util_data *hook, u64 time, * Do not reduce the frequency if the CPU has not been idle * recently, as the reduction is likely to be premature then. 
*/ - if (busy && next_f < sg_policy->next_freq) { + if (sugov_cpu_is_busy(sg_cpu) && next_f < sg_policy->next_freq) { next_f = sg_policy->next_freq; /* Restore cached freq as next_freq has changed */ @@ -829,9 +826,10 @@ static int sugov_start(struct cpufreq_policy *policy) sg_policy->next_freq = 0; sg_policy->work_in_progress = false; sg_policy->limits_changed = false; - sg_policy->need_freq_update = false; sg_policy->cached_raw_freq = 0; + sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS); + for_each_cpu(cpu, policy->cpus) { struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); diff --git a/kernel/signal.c b/kernel/signal.c index a38b3edc6851..ef8f2a28d37c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -391,16 +391,17 @@ static bool task_participate_group_stop(struct task_struct *task) void task_join_group_stop(struct task_struct *task) { + unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK; + struct signal_struct *sig = current->signal; + + if (sig->group_stop_count) { + sig->group_stop_count++; + mask |= JOBCTL_STOP_CONSUME; + } else if (!(sig->flags & SIGNAL_STOP_STOPPED)) + return; + /* Have the new thread join an on-going signal group stop */ - unsigned long jobctl = current->jobctl; - if (jobctl & JOBCTL_STOP_PENDING) { - struct signal_struct *sig = current->signal; - unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK; - unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; - if (task_set_jobctl_pending(task, signr | gstop)) { - sig->group_stop_count++; - } - } + task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); } /* diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 7f45fd9d5a45..dc83b3fa9fe7 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -438,14 +438,16 @@ enum { }; /* * Used for which event context the event is in. - * NMI = 0 - * IRQ = 1 - * SOFTIRQ = 2 - * NORMAL = 3 + * TRANSITION = 0 + * NMI = 1 + * IRQ = 2 + * SOFTIRQ = 3 + * NORMAL = 4 * * See trace_recursive_lock() comment below for more details. */ enum { + RB_CTX_TRANSITION, RB_CTX_NMI, RB_CTX_IRQ, RB_CTX_SOFTIRQ, @@ -3014,10 +3016,10 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) * a bit of overhead in something as critical as function tracing, * we use a bitmask trick. * - * bit 0 = NMI context - * bit 1 = IRQ context - * bit 2 = SoftIRQ context - * bit 3 = normal context. + * bit 1 = NMI context + * bit 2 = IRQ context + * bit 3 = SoftIRQ context + * bit 4 = normal context. * * This works because this is the order of contexts that can * preempt other contexts. A SoftIRQ never preempts an IRQ @@ -3040,6 +3042,30 @@ rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) * The least significant bit can be cleared this way, and it * just so happens that it is the same bit corresponding to * the current context. + * + * Now the TRANSITION bit breaks the above slightly. The TRANSITION bit + * is set when a recursion is detected at the current context, and if + * the TRANSITION bit is already set, it will fail the recursion. + * This is needed because there's a lag between the changing of + * interrupt context and updating the preempt count. In this case, + * a false positive will be found. To handle this, one extra recursion + * is allowed, and this is done by the TRANSITION bit. If the TRANSITION + * bit is already set, then it is considered a recursion and the function + * ends. Otherwise, the TRANSITION bit is set, and that bit is returned. 
+ * + * On the trace_recursive_unlock(), the TRANSITION bit will be the first + * to be cleared. Even if it wasn't the context that set it. That is, + * if an interrupt comes in while NORMAL bit is set and the ring buffer + * is called before preempt_count() is updated, since the check will + * be on the NORMAL bit, the TRANSITION bit will then be set. If an + * NMI then comes in, it will set the NMI bit, but when the NMI code + * does the trace_recursive_unlock() it will clear the TRANSITION bit + * and leave the NMI bit set. But this is fine, because the interrupt + * code that set the TRANSITION bit will then clear the NMI bit when it + * calls trace_recursive_unlock(). If another NMI comes in, it will + * set the TRANSITION bit and continue. + * + * Note: The TRANSITION bit only handles a single transition between context. */ static __always_inline int @@ -3055,8 +3081,16 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) bit = pc & NMI_MASK ? RB_CTX_NMI : pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ; - if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) - return 1; + if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { + /* + * It is possible that this was called by transitioning + * between interrupt context, and preempt_count() has not + * been updated yet. In this case, use the TRANSITION bit. + */ + bit = RB_CTX_TRANSITION; + if (val & (1 << (bit + cpu_buffer->nest))) + return 1; + } val |= (1 << (bit + cpu_buffer->nest)); cpu_buffer->current_context = val; @@ -3071,8 +3105,8 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) cpu_buffer->current_context - (1 << cpu_buffer->nest); } -/* The recursive locking above uses 4 bits */ -#define NESTED_BITS 4 +/* The recursive locking above uses 5 bits */ +#define NESTED_BITS 5 /** * ring_buffer_nest_start - Allow to trace while nested diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 528971714fc6..410cfeb16db5 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -2750,7 +2750,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb, /* * If tracing is off, but we have triggers enabled * we still need to look at the event data. Use the temp_buffer - * to store the trace event for the tigger to use. It's recusive + * to store the trace event for the trigger to use. It's recursive * safe and will not be recorded anywhere. */ if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) { @@ -2952,7 +2952,7 @@ static void __ftrace_trace_stack(struct trace_buffer *buffer, stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1; /* This should never happen. If it does, yell once and skip */ - if (WARN_ON_ONCE(stackidx > FTRACE_KSTACK_NESTING)) + if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING)) goto out; /* @@ -3132,7 +3132,7 @@ static char *get_trace_buf(void) /* Interrupts must see nesting incremented before we use the buffer */ barrier(); - return &buffer->buffer[buffer->nesting][0]; + return &buffer->buffer[buffer->nesting - 1][0]; } static void put_trace_buf(void) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index f3f5e77123ad..1dadef445cd1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -637,6 +637,12 @@ enum { * function is called to clear it. */ TRACE_GRAPH_NOTRACE_BIT, + + /* + * When transitioning between context, the preempt_count() may + * not be correct. Allow for a single recursion to cover this case.
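The transition-bit scheme the ring-buffer comment above spells out can be modelled in a few lines of ordinary C. The sketch below is a simplified userspace illustration, not the kernel code: one bit per context, unlock clears the lowest set bit (the x &= x - 1 trick the comment refers to), and exactly one entry whose context flags are lagging is absorbed by the transition bit while a second one is rejected as real recursion. The names and the reported_context knob are made up for the demo.

#include <stdbool.h>
#include <stdio.h>

/* One bit per context; contexts that can preempt use lower bit numbers. */
enum { CTX_TRANSITION, CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL };

static unsigned int current_context;
static int reported_context = CTX_NORMAL;   /* stand-in for preempt_count() */

static bool recursive_lock(void)
{
        unsigned int val = current_context;
        int bit = reported_context;

        if (val & (1U << bit)) {
                /*
                 * Bit already set: either real recursion, or a new context
                 * entered before its preempt flags became visible.  Allow
                 * exactly one such entry via the transition bit.
                 */
                bit = CTX_TRANSITION;
                if (val & (1U << bit))
                        return false;
        }
        current_context = val | (1U << bit);
        return true;
}

static void recursive_unlock(void)
{
        /* Clear the lowest set bit; the transition bit goes first. */
        current_context &= current_context - 1;
}

int main(void)
{
        printf("normal context:       %d\n", recursive_lock()); /* 1 */
        /* An interrupt fires, but the accounting still reports NORMAL. */
        printf("lagging interrupt:    %d\n", recursive_lock()); /* 1 */
        /* A second lagging entry is treated as genuine recursion. */
        printf("second lagging entry: %d\n", recursive_lock()); /* 0 */

        recursive_unlock();     /* clears CTX_TRANSITION */
        recursive_unlock();     /* clears CTX_NORMAL */
        return 0;
}

That extra bit is also why NESTED_BITS goes from 4 to 5 in the hunk above: each nesting window reserved by ring_buffer_nest_start() now has to make room for RB_CTX_TRANSITION.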
+ */ + TRACE_TRANSITION_BIT, }; #define trace_recursion_set(bit) do { (current)->trace_recursion |= (1<<(bit)); } while (0) @@ -691,14 +697,27 @@ static __always_inline int trace_test_and_set_recursion(int start, int max) return 0; bit = trace_get_context_bit() + start; - if (unlikely(val & (1 << bit))) - return -1; + if (unlikely(val & (1 << bit))) { + /* + * It could be that preempt_count has not been updated during + * a switch between contexts. Allow for a single recursion. + */ + bit = TRACE_TRANSITION_BIT; + if (trace_recursion_test(bit)) + return -1; + trace_recursion_set(bit); + barrier(); + return bit + 1; + } + + /* Normal check passed, clear the transition to allow it again */ + trace_recursion_clear(TRACE_TRANSITION_BIT); val |= 1 << bit; current->trace_recursion = val; barrier(); - return bit; + return bit + 1; } static __always_inline void trace_clear_recursion(int bit) @@ -708,6 +727,7 @@ static __always_inline void trace_clear_recursion(int bit) if (!bit) return; + bit--; bit = 1 << bit; val &= ~bit; diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 84b7cab55291..881df991742a 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -584,7 +584,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, { struct synth_field *field; const char *prefix = NULL, *field_type = argv[0], *field_name, *array; - int len, ret = 0; + int len, ret = -ENOMEM; struct seq_buf s; ssize_t size; @@ -617,10 +617,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, len--; field->name = kmemdup_nul(field_name, len, GFP_KERNEL); - if (!field->name) { - ret = -ENOMEM; + if (!field->name) goto free; - } + if (!is_good_name(field->name)) { synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name)); ret = -EINVAL; @@ -638,10 +637,9 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, len += strlen(prefix); field->type = kzalloc(len, GFP_KERNEL); - if (!field->type) { - ret = -ENOMEM; + if (!field->type) goto free; - } + seq_buf_init(&s, field->type, len); if (prefix) seq_buf_puts(&s, prefix); @@ -653,6 +651,7 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, } if (WARN_ON_ONCE(!seq_buf_buffer_left(&s))) goto free; + s.buffer[s.len] = '\0'; size = synth_field_size(field->type); @@ -666,10 +665,8 @@ static struct synth_field *parse_synth_field(int argc, const char **argv, len = sizeof("__data_loc ") + strlen(field->type) + 1; type = kzalloc(len, GFP_KERNEL); - if (!type) { - ret = -ENOMEM; + if (!type) goto free; - } seq_buf_init(&s, type, len); seq_buf_puts(&s, "__data_loc "); diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index b5e3496cf803..4738ad48a667 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -492,8 +492,13 @@ trace_selftest_function_recursion(void) unregister_ftrace_function(&test_rec_probe); ret = -1; - if (trace_selftest_recursion_cnt != 1) { - pr_cont("*callback not called once (%d)* ", + /* + * Recursion allows for transitions between context, + * and may call the callback twice. 
+ */ + if (trace_selftest_recursion_cnt != 1 && + trace_selftest_recursion_cnt != 2) { + pr_cont("*callback not called once (or twice) (%d)* ", trace_selftest_recursion_cnt); goto out; } diff --git a/lib/crc32test.c b/lib/crc32test.c index 97d6a57cefcc..61ddce2cff77 100644 --- a/lib/crc32test.c +++ b/lib/crc32test.c @@ -683,7 +683,6 @@ static int __init crc32c_test(void) /* reduce OS noise */ local_irq_save(flags); - local_irq_disable(); nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { @@ -694,7 +693,6 @@ static int __init crc32c_test(void) nsec = ktime_get_ns() - nsec; local_irq_restore(flags); - local_irq_enable(); pr_info("crc32c: CRC_LE_BITS = %d\n", CRC_LE_BITS); @@ -768,7 +766,6 @@ static int __init crc32_test(void) /* reduce OS noise */ local_irq_save(flags); - local_irq_disable(); nsec = ktime_get_ns(); for (i = 0; i < 100; i++) { @@ -783,7 +780,6 @@ static int __init crc32_test(void) nsec = ktime_get_ns() - nsec; local_irq_restore(flags); - local_irq_enable(); pr_info("crc32: CRC_LE_BITS = %d, CRC_BE BITS = %d\n", CRC_LE_BITS, CRC_BE_BITS); diff --git a/lib/fonts/font_10x18.c b/lib/fonts/font_10x18.c index 0e2deac97da0..e02f9df24d1e 100644 --- a/lib/fonts/font_10x18.c +++ b/lib/fonts/font_10x18.c @@ -8,7 +8,7 @@ #define FONTDATAMAX 9216 -static struct font_data fontdata_10x18 = { +static const struct font_data fontdata_10x18 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 0000000000 */ diff --git a/lib/fonts/font_6x10.c b/lib/fonts/font_6x10.c index 87da8acd07db..6e3c4b7691c8 100644 --- a/lib/fonts/font_6x10.c +++ b/lib/fonts/font_6x10.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 2560 -static struct font_data fontdata_6x10 = { +static const struct font_data fontdata_6x10 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_6x11.c b/lib/fonts/font_6x11.c index 5e975dfa10a5..2d22a24e816f 100644 --- a/lib/fonts/font_6x11.c +++ b/lib/fonts/font_6x11.c @@ -9,7 +9,7 @@ #define FONTDATAMAX (11*256) -static struct font_data fontdata_6x11 = { +static const struct font_data fontdata_6x11 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_6x8.c b/lib/fonts/font_6x8.c index 700039a9ceae..e7442a0d183d 100644 --- a/lib/fonts/font_6x8.c +++ b/lib/fonts/font_6x8.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 2048 -static struct font_data fontdata_6x8 = { +static const struct font_data fontdata_6x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 000000 */ diff --git a/lib/fonts/font_7x14.c b/lib/fonts/font_7x14.c index 86d298f38505..9cc7ae2e03f7 100644 --- a/lib/fonts/font_7x14.c +++ b/lib/fonts/font_7x14.c @@ -8,7 +8,7 @@ #define FONTDATAMAX 3584 -static struct font_data fontdata_7x14 = { +static const struct font_data fontdata_7x14 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ diff --git a/lib/fonts/font_8x16.c b/lib/fonts/font_8x16.c index 37cedd36ca5e..bab25dc59e8d 100644 --- a/lib/fonts/font_8x16.c +++ b/lib/fonts/font_8x16.c @@ -10,7 +10,7 @@ #define FONTDATAMAX 4096 -static struct font_data fontdata_8x16 = { +static const struct font_data fontdata_8x16 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_8x8.c b/lib/fonts/font_8x8.c index 8ab695538395..109d0572368f 100644 --- a/lib/fonts/font_8x8.c +++ b/lib/fonts/font_8x8.c @@ -9,7 +9,7 @@ #define FONTDATAMAX 2048 -static struct font_data fontdata_8x8 = { +static const struct font_data fontdata_8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 
*/ diff --git a/lib/fonts/font_acorn_8x8.c b/lib/fonts/font_acorn_8x8.c index 069b3e80c434..fb395f0d4031 100644 --- a/lib/fonts/font_acorn_8x8.c +++ b/lib/fonts/font_acorn_8x8.c @@ -5,7 +5,7 @@ #define FONTDATAMAX 2048 -static struct font_data acorndata_8x8 = { +static const struct font_data acorndata_8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 00 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* ^@ */ /* 01 */ 0x7e, 0x81, 0xa5, 0x81, 0xbd, 0x99, 0x81, 0x7e, /* ^A */ diff --git a/lib/fonts/font_mini_4x6.c b/lib/fonts/font_mini_4x6.c index 1449876c6a27..592774a90917 100644 --- a/lib/fonts/font_mini_4x6.c +++ b/lib/fonts/font_mini_4x6.c @@ -43,7 +43,7 @@ __END__; #define FONTDATAMAX 1536 -static struct font_data fontdata_mini_4x6 = { +static const struct font_data fontdata_mini_4x6 = { { 0, 0, FONTDATAMAX, 0 }, { /*{*/ /* Char 0: ' ' */ diff --git a/lib/fonts/font_pearl_8x8.c b/lib/fonts/font_pearl_8x8.c index 32d65551e7ed..a6f95ebce950 100644 --- a/lib/fonts/font_pearl_8x8.c +++ b/lib/fonts/font_pearl_8x8.c @@ -14,7 +14,7 @@ #define FONTDATAMAX 2048 -static struct font_data fontdata_pearl8x8 = { +static const struct font_data fontdata_pearl8x8 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 00000000 */ diff --git a/lib/fonts/font_sun12x22.c b/lib/fonts/font_sun12x22.c index 641a6b4dca42..a5b65bd49604 100644 --- a/lib/fonts/font_sun12x22.c +++ b/lib/fonts/font_sun12x22.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 11264 -static struct font_data fontdata_sun12x22 = { +static const struct font_data fontdata_sun12x22 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, 0x00, /* 000000000000 */ diff --git a/lib/fonts/font_sun8x16.c b/lib/fonts/font_sun8x16.c index 193fe6d988e0..e577e76a6a7c 100644 --- a/lib/fonts/font_sun8x16.c +++ b/lib/fonts/font_sun8x16.c @@ -3,7 +3,7 @@ #define FONTDATAMAX 4096 -static struct font_data fontdata_sun8x16 = { +static const struct font_data fontdata_sun8x16 = { { 0, 0, FONTDATAMAX, 0 }, { /* */ 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, /* */ 0x00,0x00,0x7e,0x81,0xa5,0x81,0x81,0xbd,0x99,0x81,0x81,0x7e,0x00,0x00,0x00,0x00, diff --git a/lib/fonts/font_ter16x32.c b/lib/fonts/font_ter16x32.c index 91b9c283bd9c..f7c3abb6b99e 100644 --- a/lib/fonts/font_ter16x32.c +++ b/lib/fonts/font_ter16x32.c @@ -4,7 +4,7 @@ #define FONTDATAMAX 16384 -static struct font_data fontdata_ter16x32 = { +static const struct font_data fontdata_ter16x32 = { { 0, 0, FONTDATAMAX, 0 }, { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xfc, 0x7f, 0xfc, diff --git a/lib/test_kasan.c b/lib/test_kasan.c index 63c26171a791..662f862702fc 100644 --- a/lib/test_kasan.c +++ b/lib/test_kasan.c @@ -216,6 +216,12 @@ static void kmalloc_oob_16(struct kunit *test) u64 words[2]; } *ptr1, *ptr2; + /* This test is specifically crafted for the generic mode. 
*/ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required\n"); + return; + } + ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); @@ -227,6 +233,23 @@ static void kmalloc_oob_16(struct kunit *test) kfree(ptr2); } +static void kmalloc_uaf_16(struct kunit *test) +{ + struct { + u64 words[2]; + } *ptr1, *ptr2; + + ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1); + + ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2); + kfree(ptr2); + + KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2); + kfree(ptr1); +} + static void kmalloc_oob_memset_2(struct kunit *test) { char *ptr; @@ -429,6 +452,12 @@ static void kasan_global_oob(struct kunit *test) volatile int i = 3; char *p = &global_array[ARRAY_SIZE(global_array) + i]; + /* Only generic mode instruments globals. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required"); + return; + } + KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p); } @@ -467,6 +496,12 @@ static void kasan_alloca_oob_left(struct kunit *test) char alloca_array[i]; char *p = alloca_array - 1; + /* Only generic mode instruments dynamic allocas. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required"); + return; + } + if (!IS_ENABLED(CONFIG_KASAN_STACK)) { kunit_info(test, "CONFIG_KASAN_STACK is not enabled"); return; @@ -481,6 +516,12 @@ static void kasan_alloca_oob_right(struct kunit *test) char alloca_array[i]; char *p = alloca_array + i; + /* Only generic mode instruments dynamic allocas. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required"); + return; + } + if (!IS_ENABLED(CONFIG_KASAN_STACK)) { kunit_info(test, "CONFIG_KASAN_STACK is not enabled"); return; @@ -551,6 +592,9 @@ static void kasan_memchr(struct kunit *test) return; } + if (OOB_TAG_OFF) + size = round_up(size, OOB_TAG_OFF); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); @@ -573,6 +617,9 @@ static void kasan_memcmp(struct kunit *test) return; } + if (OOB_TAG_OFF) + size = round_up(size, OOB_TAG_OFF); + ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr); memset(arr, 0, sizeof(arr)); @@ -619,13 +666,50 @@ static void kasan_strings(struct kunit *test) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1)); } -static void kasan_bitops(struct kunit *test) +static void kasan_bitops_modify(struct kunit *test, int nr, void *addr) +{ + KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr)); +} + +static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr) +{ + KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, 
__test_and_change_bit(nr, addr)); + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr)); + +#if defined(clear_bit_unlock_is_negative_byte) + KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = + clear_bit_unlock_is_negative_byte(nr, addr)); +#endif +} + +static void kasan_bitops_generic(struct kunit *test) { + long *bits; + + /* This test is specifically crafted for the generic mode. */ + if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_GENERIC required\n"); + return; + } + /* * Allocate 1 more byte, which causes kzalloc to round up to 16-bytes; * this way we do not actually corrupt other memory. */ - long *bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL); + bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); /* @@ -633,55 +717,34 @@ static void kasan_bitops(struct kunit *test) * below accesses are still out-of-bounds, since bitops are defined to * operate on the whole long the bit is in. */ - KUNIT_EXPECT_KASAN_FAIL(test, set_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, change_bit(BITS_PER_LONG, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(BITS_PER_LONG, bits)); + kasan_bitops_modify(test, BITS_PER_LONG, bits); /* * Below calls try to access bit beyond allocated memory. */ - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, - __test_and_set_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); - - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_set_bit_lock(BITS_PER_LONG + BITS_PER_BYTE, bits)); + kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits); - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + kfree(bits); +} - KUNIT_EXPECT_KASAN_FAIL(test, - __test_and_clear_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); +static void kasan_bitops_tags(struct kunit *test) +{ + long *bits; - KUNIT_EXPECT_KASAN_FAIL(test, - test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + /* This test is specifically crafted for the tag-based mode. */ + if (IS_ENABLED(CONFIG_KASAN_GENERIC)) { + kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n"); + return; + } - KUNIT_EXPECT_KASAN_FAIL(test, - __test_and_change_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + /* Allocation size will be rounded up to granule size, which is 16. */ + bits = kzalloc(sizeof(*bits), GFP_KERNEL); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits); - KUNIT_EXPECT_KASAN_FAIL(test, - kasan_int_result = - test_bit(BITS_PER_LONG + BITS_PER_BYTE, bits)); + /* Do the accesses past the 16 allocated bytes.
*/ + kasan_bitops_modify(test, BITS_PER_LONG, &bits[1]); + kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, &bits[1]); -#if defined(clear_bit_unlock_is_negative_byte) - KUNIT_EXPECT_KASAN_FAIL(test, - kasan_int_result = clear_bit_unlock_is_negative_byte( - BITS_PER_LONG + BITS_PER_BYTE, bits)); -#endif kfree(bits); } @@ -728,6 +791,7 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmalloc_oob_krealloc_more), KUNIT_CASE(kmalloc_oob_krealloc_less), KUNIT_CASE(kmalloc_oob_16), + KUNIT_CASE(kmalloc_uaf_16), KUNIT_CASE(kmalloc_oob_in_memset), KUNIT_CASE(kmalloc_oob_memset_2), KUNIT_CASE(kmalloc_oob_memset_4), @@ -751,7 +815,8 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kasan_memchr), KUNIT_CASE(kasan_memcmp), KUNIT_CASE(kasan_strings), - KUNIT_CASE(kasan_bitops), + KUNIT_CASE(kasan_bitops_generic), + KUNIT_CASE(kasan_bitops_tags), KUNIT_CASE(kmalloc_double_kzfree), KUNIT_CASE(vmalloc_oob), {} diff --git a/mm/hugetlb.c b/mm/hugetlb.c index fe76f8fd5a73..5a620f690911 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -648,6 +648,8 @@ retry: } del += t - f; + hugetlb_cgroup_uncharge_file_region( + resv, rg, t - f); /* New entry for end of split region */ nrg->from = t; @@ -660,9 +662,6 @@ retry: /* Original entry is trimmed */ rg->to = f; - hugetlb_cgroup_uncharge_file_region( - resv, rg, nrg->to - nrg->from); - list_add(&nrg->link, &rg->link); nrg = NULL; break; @@ -678,17 +677,17 @@ retry: } if (f <= rg->from) { /* Trim beginning of region */ - del += t - rg->from; - rg->from = t; - hugetlb_cgroup_uncharge_file_region(resv, rg, t - rg->from); - } else { /* Trim end of region */ - del += rg->to - f; - rg->to = f; + del += t - rg->from; + rg->from = t; + } else { /* Trim end of region */ hugetlb_cgroup_uncharge_file_region(resv, rg, rg->to - f); + + del += rg->to - f; + rg->to = f; } } @@ -2443,6 +2442,9 @@ struct page *alloc_huge_page(struct vm_area_struct *vma, rsv_adjust = hugepage_subpool_put_pages(spool, 1); hugetlb_acct_memory(h, -rsv_adjust); + if (deferred_reserve) + hugetlb_cgroup_uncharge_page_rsvd(hstate_index(h), + pages_per_huge_page(h), page); } return page; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3a24e3b619f5..3dcbf24d2227 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4110,11 +4110,17 @@ static int memcg_stat_show(struct seq_file *m, void *v) (u64)memsw * PAGE_SIZE); for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { + unsigned long nr; + if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) continue; + nr = memcg_page_state(memcg, memcg1_stats[i]); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (memcg1_stats[i] == NR_ANON_THPS) + nr *= HPAGE_PMD_NR; +#endif seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], - (u64)memcg_page_state(memcg, memcg1_stats[i]) * - PAGE_SIZE); + (u64)nr * PAGE_SIZE); } for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) @@ -5339,17 +5345,22 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) memcg->swappiness = mem_cgroup_swappiness(parent); memcg->oom_kill_disable = parent->oom_kill_disable; } - if (parent && parent->use_hierarchy) { + if (!parent) { + page_counter_init(&memcg->memory, NULL); + page_counter_init(&memcg->swap, NULL); + page_counter_init(&memcg->kmem, NULL); + page_counter_init(&memcg->tcpmem, NULL); + } else if (parent->use_hierarchy) { memcg->use_hierarchy = true; page_counter_init(&memcg->memory, &parent->memory); page_counter_init(&memcg->swap, &parent->swap); page_counter_init(&memcg->kmem, &parent->kmem); page_counter_init(&memcg->tcpmem, 
&parent->tcpmem); } else { - page_counter_init(&memcg->memory, NULL); - page_counter_init(&memcg->swap, NULL); - page_counter_init(&memcg->kmem, NULL); - page_counter_init(&memcg->tcpmem, NULL); + page_counter_init(&memcg->memory, &root_mem_cgroup->memory); + page_counter_init(&memcg->swap, &root_mem_cgroup->swap); + page_counter_init(&memcg->kmem, &root_mem_cgroup->kmem); + page_counter_init(&memcg->tcpmem, &root_mem_cgroup->tcpmem); /* * Deeper hierachy with use_hierarchy == false doesn't make * much sense so let cgroup subsystem know about this diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 3fde772ef5ef..3ca4898f3f24 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -525,7 +525,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, unsigned long flags = qp->flags; int ret; bool has_unmovable = false; - pte_t *pte; + pte_t *pte, *mapped_pte; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); @@ -539,7 +539,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, if (pmd_trans_unstable(pmd)) return 0; - pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); for (; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; @@ -571,7 +571,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr, } else break; } - pte_unmap_unlock(pte - 1, ptl); + pte_unmap_unlock(mapped_pte, ptl); cond_resched(); if (has_unmovable) diff --git a/mm/memremap.c b/mm/memremap.c index 73a206d0f645..16b2fb482da1 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -41,28 +41,24 @@ EXPORT_SYMBOL_GPL(memremap_compat_align); DEFINE_STATIC_KEY_FALSE(devmap_managed_key); EXPORT_SYMBOL(devmap_managed_key); -static void devmap_managed_enable_put(void) +static void devmap_managed_enable_put(struct dev_pagemap *pgmap) { - static_branch_dec(&devmap_managed_key); + if (pgmap->type == MEMORY_DEVICE_PRIVATE || + pgmap->type == MEMORY_DEVICE_FS_DAX) + static_branch_dec(&devmap_managed_key); } -static int devmap_managed_enable_get(struct dev_pagemap *pgmap) +static void devmap_managed_enable_get(struct dev_pagemap *pgmap) { - if (pgmap->type == MEMORY_DEVICE_PRIVATE && - (!pgmap->ops || !pgmap->ops->page_free)) { - WARN(1, "Missing page_free method\n"); - return -EINVAL; - } - - static_branch_inc(&devmap_managed_key); - return 0; + if (pgmap->type == MEMORY_DEVICE_PRIVATE || + pgmap->type == MEMORY_DEVICE_FS_DAX) + static_branch_inc(&devmap_managed_key); } #else -static int devmap_managed_enable_get(struct dev_pagemap *pgmap) +static void devmap_managed_enable_get(struct dev_pagemap *pgmap) { - return -EINVAL; } -static void devmap_managed_enable_put(void) +static void devmap_managed_enable_put(struct dev_pagemap *pgmap) { } #endif /* CONFIG_DEV_PAGEMAP_OPS */ @@ -169,7 +165,7 @@ void memunmap_pages(struct dev_pagemap *pgmap) pageunmap_range(pgmap, i); WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n"); - devmap_managed_enable_put(); + devmap_managed_enable_put(pgmap); } EXPORT_SYMBOL_GPL(memunmap_pages); @@ -307,7 +303,6 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) .pgprot = PAGE_KERNEL, }; const int nr_range = pgmap->nr_range; - bool need_devmap_managed = true; int error, i; if (WARN_ONCE(!nr_range, "nr_range must be specified\n")) @@ -323,6 +318,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) WARN(1, "Missing migrate_to_ram method\n"); return ERR_PTR(-EINVAL); } + if (!pgmap->ops->page_free) { + WARN(1, "Missing page_free method\n"); + return 
ERR_PTR(-EINVAL); + } if (!pgmap->owner) { WARN(1, "Missing owner\n"); return ERR_PTR(-EINVAL); @@ -336,11 +335,9 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) } break; case MEMORY_DEVICE_GENERIC: - need_devmap_managed = false; break; case MEMORY_DEVICE_PCI_P2PDMA: params.pgprot = pgprot_noncached(params.pgprot); - need_devmap_managed = false; break; default: WARN(1, "Invalid pgmap type %d\n", pgmap->type); @@ -364,11 +361,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid) } } - if (need_devmap_managed) { - error = devmap_managed_enable_get(pgmap); - if (error) - return ERR_PTR(error); - } + devmap_managed_enable_get(pgmap); /* * Clear the pgmap nr_range as it will be incremented for each diff --git a/mm/truncate.c b/mm/truncate.c index 18cec39a9f53..960edf5803ca 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -528,7 +528,7 @@ void truncate_inode_pages_final(struct address_space *mapping) } EXPORT_SYMBOL(truncate_inode_pages_final); -unsigned long __invalidate_mapping_pages(struct address_space *mapping, +static unsigned long __invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) { pgoff_t indices[PAGEVEC_SIZE]; diff --git a/net/atm/lec.c b/net/atm/lec.c index dbabb65d8b67..7226c784dbe0 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c @@ -954,9 +954,8 @@ static void *lec_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct lec_state *state = seq->private; - v = lec_get_idx(state, 1); - *pos += !!PTR_ERR(v); - return v; + ++*pos; + return lec_get_idx(state, 1); } static int lec_seq_show(struct seq_file *seq, void *v) diff --git a/net/can/Kconfig b/net/can/Kconfig index 224e5e0283a9..7c9958df91d3 100644 --- a/net/can/Kconfig +++ b/net/can/Kconfig @@ -62,8 +62,9 @@ config CAN_ISOTP communication between CAN nodes via two defined CAN Identifiers. As CAN frames can only transport a small amount of data bytes (max. 8 bytes for 'classic' CAN and max. 64 bytes for CAN FD) this - segmentation is needed to transport longer PDUs as needed e.g. for - vehicle diagnosis (UDS, ISO 14229) or IP-over-CAN traffic. + segmentation is needed to transport longer Protocol Data Units (PDU) + as needed e.g. for vehicle diagnosis (UDS, ISO 14229) or IP-over-CAN + traffic. This protocol driver implements data transfers according to ISO 15765-2:2016 for 'classic' CAN and CAN FD frame types. 
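As the help text above notes, ISO-TP exists to segment PDUs that do not fit into a single frame, and on CAN FD the individual frames can only take a handful of discrete lengths; the padlen() table made static const in the net/can/isotp.c hunk below maps a payload length to the next such length. A small userspace sketch of that mapping, for illustration only (the function name is made up):

/*
 * Userspace illustration (not kernel code) of the payload-to-frame-length
 * mapping encoded by the padlen() table below.
 */
#include <stdio.h>

static unsigned int canfd_frame_len(unsigned int datalen)
{
        static const unsigned int steps[] = { 8, 12, 16, 20, 24, 32, 48, 64 };
        unsigned int i;

        for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
                if (datalen <= steps[i])
                        return steps[i];
        }

        return 64;      /* datalen > 64 cannot happen for CAN FD; safe fallback */
}

int main(void)
{
        unsigned int len;

        for (len = 0; len <= 64; len += 13)
                printf("payload %2u bytes -> frame %2u bytes\n",
                       len, canfd_frame_len(len));
        return 0;
}

A 26-byte segment therefore goes out in a 32-byte frame, and anything between 33 and 48 bytes in a 48-byte frame, exactly as the table below spells out.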
If you want to perform automotive vehicle diagnostic services (UDS), diff --git a/net/can/isotp.c b/net/can/isotp.c index 4c2062875893..d78ab13bd8be 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -252,14 +252,16 @@ static void isotp_rcv_skb(struct sk_buff *skb, struct sock *sk) static u8 padlen(u8 datalen) { - const u8 plen[] = {8, 8, 8, 8, 8, 8, 8, 8, 8, /* 0 - 8 */ - 12, 12, 12, 12, /* 9 - 12 */ - 16, 16, 16, 16, /* 13 - 16 */ - 20, 20, 20, 20, /* 17 - 20 */ - 24, 24, 24, 24, /* 21 - 24 */ - 32, 32, 32, 32, 32, 32, 32, 32, /* 25 - 32 */ - 48, 48, 48, 48, 48, 48, 48, 48, /* 33 - 40 */ - 48, 48, 48, 48, 48, 48, 48, 48}; /* 41 - 48 */ + static const u8 plen[] = { + 8, 8, 8, 8, 8, 8, 8, 8, 8, /* 0 - 8 */ + 12, 12, 12, 12, /* 9 - 12 */ + 16, 16, 16, 16, /* 13 - 16 */ + 20, 20, 20, 20, /* 17 - 20 */ + 24, 24, 24, 24, /* 21 - 24 */ + 32, 32, 32, 32, 32, 32, 32, 32, /* 25 - 32 */ + 48, 48, 48, 48, 48, 48, 48, 48, /* 33 - 40 */ + 48, 48, 48, 48, 48, 48, 48, 48 /* 41 - 48 */ + }; if (datalen > 48) return 64; @@ -569,10 +571,6 @@ static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae, return 0; } - /* no creation of flow control frames */ - if (so->opt.flags & CAN_ISOTP_LISTEN_MODE) - return 0; - /* perform blocksize handling, if enabled */ if (!so->rxfc.bs || ++so->rx.bs < so->rxfc.bs) { /* start rx timeout watchdog */ @@ -581,6 +579,10 @@ static int isotp_rcv_cf(struct sock *sk, struct canfd_frame *cf, int ae, return 0; } + /* no creation of flow control frames */ + if (so->opt.flags & CAN_ISOTP_LISTEN_MODE) + return 0; + /* we reached the specified blocksize so->rxfc.bs */ isotp_send_fc(sk, ae, ISOTP_FC_CTS); return 0; diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c index 1be4c898b2fa..f23966526a88 100644 --- a/net/can/j1939/socket.c +++ b/net/can/j1939/socket.c @@ -475,6 +475,12 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out_release_sock; } + if (!(ndev->flags & IFF_UP)) { + dev_put(ndev); + ret = -ENETDOWN; + goto out_release_sock; + } + priv = j1939_netdev_start(ndev); dev_put(ndev); if (IS_ERR(priv)) { diff --git a/net/can/proc.c b/net/can/proc.c index 550928b8b8a2..5ea8695f507e 100644 --- a/net/can/proc.c +++ b/net/can/proc.c @@ -462,6 +462,9 @@ void can_init_proc(struct net *net) */ void can_remove_proc(struct net *net) { + if (!net->can.proc_dir) + return; + if (net->can.pde_stats) remove_proc_entry(CAN_PROC_STATS, net->can.proc_dir); @@ -486,6 +489,5 @@ void can_remove_proc(struct net *net) if (net->can.pde_rcvlist_sff) remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir); - if (net->can.proc_dir) - remove_proc_entry("can", net->proc_net); + remove_proc_entry("can", net->proc_net); } diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index 8b04d1dcfec4..ee65c9225178 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -608,9 +608,6 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, ttl = ip4_dst_hoplimit(&rt->dst); } - if (!df && skb->protocol == htons(ETH_P_IP)) - df = inner_iph->frag_off & htons(IP_DF); - headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; if (headroom > dev->needed_headroom) dev->needed_headroom = headroom; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index a058213b77a7..7c841037c533 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -17,17 +17,19 @@ #include <net/netfilter/nf_queue.h> /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ -int ip_route_me_harder(struct net *net, 
struct sk_buff *skb, unsigned int addr_type) +int ip_route_me_harder(struct net *net, struct sock *sk, struct sk_buff *skb, unsigned int addr_type) { const struct iphdr *iph = ip_hdr(skb); struct rtable *rt; struct flowi4 fl4 = {}; __be32 saddr = iph->saddr; - const struct sock *sk = skb_to_full_sk(skb); - __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0; + __u8 flags; struct net_device *dev = skb_dst(skb)->dev; unsigned int hh_len; + sk = sk_to_full_sk(sk); + flags = sk ? inet_sk_flowi_flags(sk) : 0; + if (addr_type == RTN_UNSPEC) addr_type = inet_addr_type_dev_table(net, dev, saddr); if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST) diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c index f703a717ab1d..833079589273 100644 --- a/net/ipv4/netfilter/iptable_mangle.c +++ b/net/ipv4/netfilter/iptable_mangle.c @@ -62,7 +62,7 @@ ipt_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) iph->daddr != daddr || skb->mark != mark || iph->tos != tos) { - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c index 9dcfa4e461b6..93b07739807b 100644 --- a/net/ipv4/netfilter/nf_reject_ipv4.c +++ b/net/ipv4/netfilter/nf_reject_ipv4.c @@ -145,7 +145,7 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook) ip4_dst_hoplimit(skb_dst(nskb))); nf_reject_ip_tcphdr_put(nskb, oldskb, oth); - if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) goto free_nskb; niph = ip_hdr(nskb); diff --git a/net/ipv4/xfrm4_tunnel.c b/net/ipv4/xfrm4_tunnel.c index dc19aff7c2e0..fb0648e7fb32 100644 --- a/net/ipv4/xfrm4_tunnel.c +++ b/net/ipv4/xfrm4_tunnel.c @@ -64,14 +64,14 @@ static int xfrm_tunnel_err(struct sk_buff *skb, u32 info) static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, - .priority = 3, + .priority = 4, }; #if IS_ENABLED(CONFIG_IPV6) static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, - .priority = 2, + .priority = 3, }; #endif diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index ec448b71bf9a..8956144ea65e 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -158,7 +158,13 @@ static bool is_ineligible(const struct sk_buff *skb) tp = skb_header_pointer(skb, ptr+offsetof(struct icmp6hdr, icmp6_type), sizeof(_type), &_type); - if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) + + /* Based on RFC 8200, Section 4.5 Fragment Header, return + * false if this is a fragment packet with no icmp header info. 
+ */ + if (!tp && frag_off != 0) + return false; + else if (!tp || !(*tp & ICMPV6_INFOMSG_MASK)) return true; } return false; diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a0217e5bf3bc..648db3fe508f 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1271,6 +1271,8 @@ route_lookup: if (max_headroom > dev->needed_headroom) dev->needed_headroom = max_headroom; + skb_set_inner_ipproto(skb, proto); + err = ip6_tnl_encap(skb, t, &proto, fl6); if (err) return err; @@ -1280,8 +1282,6 @@ route_lookup: ipv6_push_frag_opts(skb, &opt.ops, &proto); } - skb_set_inner_ipproto(skb, proto); - skb_push(skb, sizeof(struct ipv6hdr)); skb_reset_network_header(skb); ipv6h = ipv6_hdr(skb); diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 6d0e942d082d..ab9a279dd6d4 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -20,10 +20,10 @@ #include <net/netfilter/ipv6/nf_defrag_ipv6.h> #include "../bridge/br_private.h" -int ip6_route_me_harder(struct net *net, struct sk_buff *skb) +int ip6_route_me_harder(struct net *net, struct sock *sk_partial, struct sk_buff *skb) { const struct ipv6hdr *iph = ipv6_hdr(skb); - struct sock *sk = sk_to_full_sk(skb->sk); + struct sock *sk = sk_to_full_sk(sk_partial); unsigned int hh_len; struct dst_entry *dst; int strict = (ipv6_addr_type(&iph->daddr) & @@ -84,7 +84,7 @@ static int nf_ip6_reroute(struct sk_buff *skb, if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) || skb->mark != rt_info->mark) - return ip6_route_me_harder(entry->state.net, skb); + return ip6_route_me_harder(entry->state.net, entry->state.sk, skb); } return 0; } diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c index 1a2748611e00..cee74803d7a1 100644 --- a/net/ipv6/netfilter/ip6table_mangle.c +++ b/net/ipv6/netfilter/ip6table_mangle.c @@ -57,7 +57,7 @@ ip6t_mangle_out(struct sk_buff *skb, const struct nf_hook_state *state) skb->mark != mark || ipv6_hdr(skb)->hop_limit != hop_limit || flowlabel != *((u_int32_t *)ipv6_hdr(skb)))) { - err = ip6_route_me_harder(state->net, skb); + err = ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 1f5d4d196dcc..c8cf1bbad74a 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -42,6 +42,8 @@ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/tcp.h> +#include <linux/udp.h> #include <net/sock.h> #include <net/snmp.h> @@ -322,7 +324,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) struct frag_queue *fq; const struct ipv6hdr *hdr = ipv6_hdr(skb); struct net *net = dev_net(skb_dst(skb)->dev); - int iif; + __be16 frag_off; + int iif, offset; + u8 nexthdr; if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) goto fail_hdr; @@ -351,6 +355,33 @@ static int ipv6_frag_rcv(struct sk_buff *skb) return 1; } + /* RFC 8200, Section 4.5 Fragment Header: + * If the first fragment does not include all headers through an + * Upper-Layer header, then that fragment should be discarded and + * an ICMP Parameter Problem, Code 3, message should be sent to + * the source of the fragment, with the Pointer field set to zero. 
+ */ + nexthdr = hdr->nexthdr; + offset = ipv6_skip_exthdr(skb, skb_transport_offset(skb), &nexthdr, &frag_off); + if (offset >= 0) { + /* Check some common protocols' header */ + if (nexthdr == IPPROTO_TCP) + offset += sizeof(struct tcphdr); + else if (nexthdr == IPPROTO_UDP) + offset += sizeof(struct udphdr); + else if (nexthdr == IPPROTO_ICMPV6) + offset += sizeof(struct icmp6hdr); + else + offset += 1; + + if (!(frag_off & htons(IP6_OFFSET)) && offset > skb->len) { + __IP6_INC_STATS(net, __in6_dev_get_safely(skb->dev), + IPSTATS_MIB_INHDRERRORS); + icmpv6_param_prob(skb, ICMPV6_HDR_INCOMP, 0); + return -1; + } + } + iif = skb->dev ? skb->dev->ifindex : 0; fq = fq_find(net, fhdr->identification, hdr, iif); if (fq) { diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index 25b7ebda2fab..f696d46e6910 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c @@ -303,13 +303,13 @@ static const struct xfrm_type xfrm6_tunnel_type = { static struct xfrm6_tunnel xfrm6_tunnel_handler __read_mostly = { .handler = xfrm6_tunnel_rcv, .err_handler = xfrm6_tunnel_err, - .priority = 2, + .priority = 3, }; static struct xfrm6_tunnel xfrm46_tunnel_handler __read_mostly = { .handler = xfrm6_tunnel_rcv, .err_handler = xfrm6_tunnel_err, - .priority = 2, + .priority = 3, }; static int __net_init xfrm6_tunnel_net_init(struct net *net) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f400240a556f..6adfcb9c06dc 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -5464,6 +5464,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, struct cfg80211_assoc_request *req) { bool is_6ghz = req->bss->channel->band == NL80211_BAND_6GHZ; + bool is_5ghz = req->bss->channel->band == NL80211_BAND_5GHZ; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; struct ieee80211_bss *bss = (void *)req->bss->priv; @@ -5616,7 +5617,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, if (vht_ie && vht_ie[1] >= sizeof(struct ieee80211_vht_cap)) memcpy(&assoc_data->ap_vht_cap, vht_ie + 2, sizeof(struct ieee80211_vht_cap)); - else if (!is_6ghz) + else if (is_5ghz) ifmgd->flags |= IEEE80211_STA_DISABLE_VHT | IEEE80211_STA_DISABLE_HE; rcu_read_unlock(); diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index fb4f2b9b294f..4fe284ff1ea3 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -258,6 +258,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata, */ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta) { + /* + * If we had used sta_info_pre_move_state() then we might not + * have gone through the state transitions down again, so do + * it here now (and warn if it's inserted). + * + * This will clear state such as fast TX/RX that may have been + * allocated during state transitions. 
+ */ + while (sta->sta_state > IEEE80211_STA_NONE) { + int ret; + + WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED)); + + ret = sta_info_move_state(sta, sta->sta_state - 1); + if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret)) + break; + } + if (sta->rate_ctrl) rate_control_free_sta(sta); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 00ae81e9e1a1..7afd07636b81 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -785,7 +785,7 @@ int sta_info_init(struct ieee80211_local *local); void sta_info_stop(struct ieee80211_local *local); /** - * sta_info_flush - flush matching STA entries from the STA table + * __sta_info_flush - flush matching STA entries from the STA table * * Returns the number of removed STA entries. * @@ -794,6 +794,13 @@ void sta_info_stop(struct ieee80211_local *local); */ int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans); +/** + * sta_info_flush - flush matching STA entries from the STA table + * + * Returns the number of removed STA entries. + * + * @sdata: sdata to remove all stations from + */ static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata) { return __sta_info_flush(sdata, false); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8ba10a48ded4..56a4d0d20a26 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1942,19 +1942,24 @@ static bool ieee80211_tx(struct ieee80211_sub_if_data *sdata, /* device xmit handlers */ +enum ieee80211_encrypt { + ENCRYPT_NO, + ENCRYPT_MGMT, + ENCRYPT_DATA, +}; + static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb, - int head_need, bool may_encrypt) + int head_need, + enum ieee80211_encrypt encrypt) { struct ieee80211_local *local = sdata->local; - struct ieee80211_hdr *hdr; bool enc_tailroom; int tail_need = 0; - hdr = (struct ieee80211_hdr *) skb->data; - enc_tailroom = may_encrypt && - (sdata->crypto_tx_tailroom_needed_cnt || - ieee80211_is_mgmt(hdr->frame_control)); + enc_tailroom = encrypt == ENCRYPT_MGMT || + (encrypt == ENCRYPT_DATA && + sdata->crypto_tx_tailroom_needed_cnt); if (enc_tailroom) { tail_need = IEEE80211_ENCRYPT_TAILROOM; @@ -1985,23 +1990,29 @@ void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, { struct ieee80211_local *local = sdata->local; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_hdr *hdr; + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; int headroom; - bool may_encrypt; + enum ieee80211_encrypt encrypt; - may_encrypt = !(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT); + if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT) + encrypt = ENCRYPT_NO; + else if (ieee80211_is_mgmt(hdr->frame_control)) + encrypt = ENCRYPT_MGMT; + else + encrypt = ENCRYPT_DATA; headroom = local->tx_headroom; - if (may_encrypt) + if (encrypt != ENCRYPT_NO) headroom += sdata->encrypt_headroom; headroom -= skb_headroom(skb); headroom = max_t(int, 0, headroom); - if (ieee80211_skb_resize(sdata, skb, headroom, may_encrypt)) { + if (ieee80211_skb_resize(sdata, skb, headroom, encrypt)) { ieee80211_free_txskb(&local->hw, skb); return; } + /* reload after potential resize */ hdr = (struct ieee80211_hdr *) skb->data; info->control.vif = &sdata->vif; @@ -2828,7 +2839,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, head_need += sdata->encrypt_headroom; head_need += local->tx_headroom; head_need = max_t(int, 0, head_need); - if (ieee80211_skb_resize(sdata, skb, head_need, true)) { + if (ieee80211_skb_resize(sdata, skb, 
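For the mac80211 TX hunks above: the old boolean may_encrypt becomes a three-way classification, so the needed head/tailroom can be sized before the skb is resized, and the header pointer is reloaded afterwards because the resize may move skb data. Restating the decision from the hunk with comments, nothing new added:

enum ieee80211_encrypt {
	ENCRYPT_NO,	/* TX info flags say: do not encrypt */
	ENCRYPT_MGMT,	/* management frame: crypto tailroom always reserved */
	ENCRYPT_DATA,	/* data frame: tailroom only while keys require it */
};

if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)
	encrypt = ENCRYPT_NO;
else if (ieee80211_is_mgmt(hdr->frame_control))
	encrypt = ENCRYPT_MGMT;
else
	encrypt = ENCRYPT_DATA;

/* tailroom is then needed when encrypt == ENCRYPT_MGMT, or when
 * encrypt == ENCRYPT_DATA and sdata->crypto_tx_tailroom_needed_cnt
 * is non-zero */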
head_need, ENCRYPT_DATA)) { ieee80211_free_txskb(&local->hw, skb); skb = NULL; return ERR_PTR(-ENOMEM); @@ -3502,7 +3513,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, if (unlikely(ieee80211_skb_resize(sdata, skb, max_t(int, extra_head + hw_headroom - skb_headroom(skb), 0), - false))) { + ENCRYPT_NO))) { kfree_skb(skb); return true; } @@ -3619,13 +3630,14 @@ begin: tx.skb = skb; tx.sdata = vif_to_sdata(info->control.vif); - if (txq->sta && !(info->flags & IEEE80211_TX_CTL_INJECTED)) { + if (txq->sta) { tx.sta = container_of(txq->sta, struct sta_info, sta); /* * Drop unicast frames to unauthorised stations unless they are - * EAPOL frames from the local station. + * injected frames or EAPOL frames from the local station. */ - if (unlikely(ieee80211_is_data(hdr->frame_control) && + if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) && + ieee80211_is_data(hdr->frame_control) && !ieee80211_vif_is_mesh(&tx.sdata->vif) && tx.sdata->vif.type != NL80211_IFTYPE_OCB && !is_multicast_ether_addr(hdr->addr1) && diff --git a/net/mptcp/token.c b/net/mptcp/token.c index 8b47c4bb1c6b..feb4b9ffd462 100644 --- a/net/mptcp/token.c +++ b/net/mptcp/token.c @@ -291,7 +291,7 @@ struct mptcp_sock *mptcp_token_iter_next(const struct net *net, long *s_slot, { struct mptcp_sock *ret = NULL; struct hlist_nulls_node *pos; - int slot, num; + int slot, num = 0; for (slot = *s_slot; slot <= token_mask; *s_num = 0, slot++) { struct token_bucket *bucket = &token_hash[slot]; diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 6f35832f0de3..7cff6e5e7445 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c @@ -637,13 +637,14 @@ ip_set_match_extensions(struct ip_set *set, const struct ip_set_ext *ext, if (SET_WITH_COUNTER(set)) { struct ip_set_counter *counter = ext_counter(data, set); + ip_set_update_counter(counter, ext, flags); + if (flags & IPSET_FLAG_MATCH_COUNTERS && !(ip_set_match_counter(ip_set_get_packets(counter), mext->packets, mext->packets_op) && ip_set_match_counter(ip_set_get_bytes(counter), mext->bytes, mext->bytes_op))) return false; - ip_set_update_counter(counter, ext, flags); } if (SET_WITH_SKBINFO(set)) ip_set_get_skbinfo(ext_skbinfo(data, set), diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index cc3c275934f4..c0b8215ab3d4 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -742,12 +742,12 @@ static int ip_vs_route_me_harder(struct netns_ipvs *ipvs, int af, struct dst_entry *dst = skb_dst(skb); if (dst->dev && !(dst->dev->flags & IFF_LOOPBACK) && - ip6_route_me_harder(ipvs->net, skb) != 0) + ip6_route_me_harder(ipvs->net, skb->sk, skb) != 0) return 1; } else #endif if (!(skb_rtable(skb)->rt_flags & RTCF_LOCAL) && - ip_route_me_harder(ipvs->net, skb, RTN_LOCAL) != 0) + ip_route_me_harder(ipvs->net, skb->sk, skb, RTN_LOCAL) != 0) return 1; return 0; diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c index 59151dc07fdc..e87b6bd6b3cd 100644 --- a/net/netfilter/nf_nat_proto.c +++ b/net/netfilter/nf_nat_proto.c @@ -715,7 +715,7 @@ nf_nat_ipv4_local_fn(void *priv, struct sk_buff *skb, if (ct->tuplehash[dir].tuple.dst.u3.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } @@ -953,7 +953,7 @@ nf_nat_ipv6_local_fn(void *priv, struct sk_buff *skb, if 
(!nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, &ct->tuplehash[!dir].tuple.src.u3)) { - err = nf_ip6_route_me_harder(state->net, skb); + err = nf_ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c index 9cca35d22927..d7d34a62d3bf 100644 --- a/net/netfilter/nf_synproxy_core.c +++ b/net/netfilter/nf_synproxy_core.c @@ -446,7 +446,7 @@ synproxy_send_tcp(struct net *net, skb_dst_set_noref(nskb, skb_dst(skb)); nskb->protocol = htons(ETH_P_IP); - if (ip_route_me_harder(net, nskb, RTN_UNSPEC)) + if (ip_route_me_harder(net, nskb->sk, nskb, RTN_UNSPEC)) goto free_nskb; if (nfct) { diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 65cb8e3c13d9..0f58e98542be 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -7137,7 +7137,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, GFP_KERNEL); kfree(buf); - if (ctx->report && + if (!ctx->report && !nfnetlink_has_listeners(ctx->net, NFNLGRP_NFTABLES)) return; @@ -7259,7 +7259,7 @@ static void nf_tables_gen_notify(struct net *net, struct sk_buff *skb, audit_log_nfcfg("?:0;?:0", 0, net->nft.base_seq, AUDIT_NFT_OP_GEN_REGISTER, GFP_KERNEL); - if (nlmsg_report(nlh) && + if (!nlmsg_report(nlh) && !nfnetlink_has_listeners(net, NFNLGRP_NFTABLES)) return; @@ -8053,12 +8053,16 @@ static void nf_tables_abort_release(struct nft_trans *trans) kfree(trans); } -static int __nf_tables_abort(struct net *net, bool autoload) +static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) { struct nft_trans *trans, *next; struct nft_trans_elem *te; struct nft_hook *hook; + if (action == NFNL_ABORT_VALIDATE && + nf_tables_validate(net) < 0) + return -EAGAIN; + list_for_each_entry_safe_reverse(trans, next, &net->nft.commit_list, list) { switch (trans->msg_type) { @@ -8190,7 +8194,7 @@ static int __nf_tables_abort(struct net *net, bool autoload) nf_tables_abort_release(trans); } - if (autoload) + if (action == NFNL_ABORT_AUTOLOAD) nf_tables_module_autoload(net); else nf_tables_module_autoload_cleanup(net); @@ -8203,9 +8207,10 @@ static void nf_tables_cleanup(struct net *net) nft_validate_state_update(net, NFT_VALIDATE_SKIP); } -static int nf_tables_abort(struct net *net, struct sk_buff *skb, bool autoload) +static int nf_tables_abort(struct net *net, struct sk_buff *skb, + enum nfnl_abort_action action) { - int ret = __nf_tables_abort(net, autoload); + int ret = __nf_tables_abort(net, action); mutex_unlock(&net->nft.commit_mutex); @@ -8836,7 +8841,7 @@ static void __net_exit nf_tables_exit_net(struct net *net) { mutex_lock(&net->nft.commit_mutex); if (!list_empty(&net->nft.commit_list)) - __nf_tables_abort(net, false); + __nf_tables_abort(net, NFNL_ABORT_NONE); __nft_release_tables(net); mutex_unlock(&net->nft.commit_mutex); WARN_ON_ONCE(!list_empty(&net->nft.tables)); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 2daa1f6ae344..d3df66a39b5e 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -333,7 +333,7 @@ static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh, return netlink_ack(skb, nlh, -EINVAL, NULL); replay: status = 0; - +replay_abort: skb = netlink_skb_clone(oskb, GFP_KERNEL); if (!skb) return netlink_ack(oskb, nlh, -ENOMEM, NULL); @@ -499,7 +499,7 @@ ack: } done: if (status & NFNL_BATCH_REPLAY) { - ss->abort(net, oskb, true); + ss->abort(net, oskb, NFNL_ABORT_AUTOLOAD); nfnl_err_reset(&err_list); 
kfree_skb(skb); module_put(ss->owner); @@ -510,11 +510,25 @@ done: status |= NFNL_BATCH_REPLAY; goto done; } else if (err) { - ss->abort(net, oskb, false); + ss->abort(net, oskb, NFNL_ABORT_NONE); netlink_ack(oskb, nlmsg_hdr(oskb), err, NULL); } } else { - ss->abort(net, oskb, false); + enum nfnl_abort_action abort_action; + + if (status & NFNL_BATCH_FAILURE) + abort_action = NFNL_ABORT_NONE; + else + abort_action = NFNL_ABORT_VALIDATE; + + err = ss->abort(net, oskb, abort_action); + if (err == -EAGAIN) { + nfnl_err_reset(&err_list); + kfree_skb(skb); + module_put(ss->owner); + status |= NFNL_BATCH_FAILURE; + goto replay_abort; + } } if (ss->cleanup) ss->cleanup(net); diff --git a/net/netfilter/nft_chain_route.c b/net/netfilter/nft_chain_route.c index 8826bbe71136..edd02cda57fc 100644 --- a/net/netfilter/nft_chain_route.c +++ b/net/netfilter/nft_chain_route.c @@ -42,7 +42,7 @@ static unsigned int nf_route_table_hook4(void *priv, iph->daddr != daddr || skb->mark != mark || iph->tos != tos) { - err = ip_route_me_harder(state->net, skb, RTN_UNSPEC); + err = ip_route_me_harder(state->net, state->sk, skb, RTN_UNSPEC); if (err < 0) ret = NF_DROP_ERR(err); } @@ -92,7 +92,7 @@ static unsigned int nf_route_table_hook6(void *priv, skb->mark != mark || ipv6_hdr(skb)->hop_limit != hop_limit || flowlabel != *((u32 *)ipv6_hdr(skb)))) { - err = nf_ip6_route_me_harder(state->net, skb); + err = nf_ip6_route_me_harder(state->net, state->sk, skb); if (err < 0) ret = NF_DROP_ERR(err); } diff --git a/net/netfilter/utils.c b/net/netfilter/utils.c index cedf47ab3c6f..2182d361e273 100644 --- a/net/netfilter/utils.c +++ b/net/netfilter/utils.c @@ -191,8 +191,8 @@ static int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry skb->mark == rt_info->mark && iph->daddr == rt_info->daddr && iph->saddr == rt_info->saddr)) - return ip_route_me_harder(entry->state.net, skb, - RTN_UNSPEC); + return ip_route_me_harder(entry->state.net, entry->state.sk, + skb, RTN_UNSPEC); } #endif return 0; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 832f898edb6a..9d6ef6cb9b26 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -1703,13 +1703,13 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) parms.port_no = OVSP_LOCAL; parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID]; - err = ovs_dp_change(dp, a); - if (err) - goto err_destroy_meters; - /* So far only local changes have been made, now need the lock. 
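To make the nfnetlink batch hunks above easier to follow: the abort callback now takes an action instead of a bare autoload flag, so the final abort can also re-run validation and request a replay on -EAGAIN. The enum's definition is not part of the hunks shown here, so treat the exact shape below as an assumption; only the three names are taken from the diff:

enum nfnl_abort_action {
	NFNL_ABORT_NONE,	/* plain abort of the batch */
	NFNL_ABORT_AUTOLOAD,	/* abort, then autoload requested modules
				 * before the batch is replayed */
	NFNL_ABORT_VALIDATE,	/* re-run validation first; a failure there
				 * returns -EAGAIN, which the batch code
				 * answers with the replay_abort retry */
};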
*/ ovs_lock(); + err = ovs_dp_change(dp, a); + if (err) + goto err_unlock_and_destroy_meters; + vport = new_vport(&parms); if (IS_ERR(vport)) { err = PTR_ERR(vport); @@ -1725,8 +1725,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_dp_reset_user_features(skb, info); } - ovs_unlock(); - goto err_destroy_meters; + goto err_unlock_and_destroy_meters; } err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, @@ -1741,7 +1740,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info) ovs_notify(&dp_datapath_genl_family, reply, info); return 0; -err_destroy_meters: +err_unlock_and_destroy_meters: + ovs_unlock(); ovs_meters_exit(dp); err_destroy_ports: kfree(dp->ports); diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index f3486a37361a..c89c8da99f1a 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c @@ -390,7 +390,7 @@ static struct mask_cache *tbl_mask_cache_alloc(u32 size) } int ovs_flow_tbl_masks_cache_resize(struct flow_table *table, u32 size) { - struct mask_cache *mc = rcu_dereference(table->mask_cache); + struct mask_cache *mc = rcu_dereference_ovsl(table->mask_cache); struct mask_cache *new; if (size == mc->cache_size) diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index aa821e71f05e..813d30767204 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1601,12 +1601,12 @@ static int sctp_cmd_interpreter(enum sctp_event_type event_type, break; case SCTP_CMD_INIT_FAILED: - sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); + sctp_cmd_init_failed(commands, asoc, cmd->obj.u16); break; case SCTP_CMD_ASSOC_FAILED: sctp_cmd_assoc_failed(commands, asoc, event_type, - subtype, chunk, cmd->obj.u32); + subtype, chunk, cmd->obj.u16); break; case SCTP_CMD_INIT_COUNTER_INC: diff --git a/net/wireless/core.c b/net/wireless/core.c index 9f23923e8d29..240282c083aa 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1250,8 +1250,7 @@ void cfg80211_stop_iface(struct wiphy *wiphy, struct wireless_dev *wdev, } EXPORT_SYMBOL(cfg80211_stop_iface); -void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev) +void cfg80211_init_wdev(struct wireless_dev *wdev) { mutex_init(&wdev->mtx); INIT_LIST_HEAD(&wdev->event_list); @@ -1262,6 +1261,30 @@ void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, spin_lock_init(&wdev->pmsr_lock); INIT_WORK(&wdev->pmsr_free_wk, cfg80211_pmsr_free_wk); +#ifdef CONFIG_CFG80211_WEXT + wdev->wext.default_key = -1; + wdev->wext.default_mgmt_key = -1; + wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; +#endif + + if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) + wdev->ps = true; + else + wdev->ps = false; + /* allow mac80211 to determine the timeout */ + wdev->ps_timeout = -1; + + if ((wdev->iftype == NL80211_IFTYPE_STATION || + wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || + wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) + wdev->netdev->priv_flags |= IFF_DONT_BRIDGE; + + INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); +} + +void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev) +{ /* * We get here also when the interface changes network namespaces, * as it's registered into the new one, but we don't want it to @@ -1295,6 +1318,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, switch (state) { case NETDEV_POST_INIT: SET_NETDEV_DEVTYPE(dev, &wiphy_type); + wdev->netdev = dev; + /* can only change 
netns with wiphy */ + dev->features |= NETIF_F_NETNS_LOCAL; + + cfg80211_init_wdev(wdev); break; case NETDEV_REGISTER: /* @@ -1302,35 +1330,12 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, * called within code protected by it when interfaces * are added with nl80211. */ - /* can only change netns with wiphy */ - dev->features |= NETIF_F_NETNS_LOCAL; - if (sysfs_create_link(&dev->dev.kobj, &rdev->wiphy.dev.kobj, "phy80211")) { pr_err("failed to add phy80211 symlink to netdev!\n"); } - wdev->netdev = dev; -#ifdef CONFIG_CFG80211_WEXT - wdev->wext.default_key = -1; - wdev->wext.default_mgmt_key = -1; - wdev->wext.connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC; -#endif - - if (wdev->wiphy->flags & WIPHY_FLAG_PS_ON_BY_DEFAULT) - wdev->ps = true; - else - wdev->ps = false; - /* allow mac80211 to determine the timeout */ - wdev->ps_timeout = -1; - - if ((wdev->iftype == NL80211_IFTYPE_STATION || - wdev->iftype == NL80211_IFTYPE_P2P_CLIENT || - wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr) - dev->priv_flags |= IFF_DONT_BRIDGE; - - INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk); - cfg80211_init_wdev(rdev, wdev); + cfg80211_register_wdev(rdev, wdev); break; case NETDEV_GOING_DOWN: cfg80211_leave(rdev, wdev); diff --git a/net/wireless/core.h b/net/wireless/core.h index e1ec9ac8e608..e3e9686859d4 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -209,8 +209,9 @@ struct wiphy *wiphy_idx_to_wiphy(int wiphy_idx); int cfg80211_switch_netns(struct cfg80211_registered_device *rdev, struct net *net); -void cfg80211_init_wdev(struct cfg80211_registered_device *rdev, - struct wireless_dev *wdev); +void cfg80211_init_wdev(struct wireless_dev *wdev); +void cfg80211_register_wdev(struct cfg80211_registered_device *rdev, + struct wireless_dev *wdev); static inline void wdev_lock(struct wireless_dev *wdev) __acquires(wdev) diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 554796a6c6fe..a77174b99b07 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -3885,7 +3885,8 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) * P2P Device and NAN do not have a netdev, so don't go * through the netdev notifier and must be added here */ - cfg80211_init_wdev(rdev, wdev); + cfg80211_init_wdev(wdev); + cfg80211_register_wdev(rdev, wdev); break; default: break; diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 3dab859641e1..a04fdfb35f07 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -3616,7 +3616,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd) power_rule = ®_rule->power_rule; if (reg_rule->flags & NL80211_RRF_AUTO_BW) - snprintf(bw, sizeof(bw), "%d KHz, %d KHz AUTO", + snprintf(bw, sizeof(bw), "%d KHz, %u KHz AUTO", freq_range->max_bandwidth_khz, reg_get_max_bandwidth(rd, reg_rule)); else diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index aa4cdcf69d47..9b8e292a7c6a 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c @@ -803,14 +803,14 @@ static struct xfrm6_tunnel xfrmi_ipv6_handler __read_mostly = { .handler = xfrmi6_rcv_tunnel, .cb_handler = xfrmi_rcv_cb, .err_handler = xfrmi6_err, - .priority = -1, + .priority = 2, }; static struct xfrm6_tunnel xfrmi_ip6ip_handler __read_mostly = { .handler = xfrmi6_rcv_tunnel, .cb_handler = xfrmi_rcv_cb, .err_handler = xfrmi6_err, - .priority = -1, + .priority = 2, }; #endif @@ -848,14 +848,14 @@ static struct xfrm_tunnel xfrmi_ipip_handler __read_mostly = { .handler = xfrmi4_rcv_tunnel, 
.cb_handler = xfrmi_rcv_cb, .err_handler = xfrmi4_err, - .priority = -1, + .priority = 3, }; static struct xfrm_tunnel xfrmi_ipip6_handler __read_mostly = { .handler = xfrmi4_rcv_tunnel, .cb_handler = xfrmi_rcv_cb, .err_handler = xfrmi4_err, - .priority = -1, + .priority = 2, }; #endif diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index bbd4643d7e82..a77da7aae6fe 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2004,6 +2004,7 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) int err = -ENOENT; __be32 minspi = htonl(low); __be32 maxspi = htonl(high); + __be32 newspi = 0; u32 mark = x->mark.v & x->mark.m; spin_lock_bh(&x->lock); @@ -2022,21 +2023,22 @@ int xfrm_alloc_spi(struct xfrm_state *x, u32 low, u32 high) xfrm_state_put(x0); goto unlock; } - x->id.spi = minspi; + newspi = minspi; } else { u32 spi = 0; for (h = 0; h < high-low+1; h++) { spi = low + prandom_u32()%(high-low+1); x0 = xfrm_state_lookup(net, mark, &x->id.daddr, htonl(spi), x->id.proto, x->props.family); if (x0 == NULL) { - x->id.spi = htonl(spi); + newspi = htonl(spi); break; } xfrm_state_put(x0); } } - if (x->id.spi) { + if (newspi) { spin_lock_bh(&net->xfrm.xfrm_state_lock); + x->id.spi = newspi; h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, x->props.family); hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); spin_unlock_bh(&net->xfrm.xfrm_state_lock); diff --git a/scripts/get_abi.pl b/scripts/get_abi.pl index 2cb592f8eba4..68dab828a722 100755 --- a/scripts/get_abi.pl +++ b/scripts/get_abi.pl @@ -287,6 +287,8 @@ my $bondary = qr { (?<![\w\/\`\{])(?=[\w\/\`\{])|(?<=[\w\/\`\{])(?![\w\/\`\{]) } sub output_rest { create_labels(); + my $part = ""; + foreach my $what (sort { ($data{$a}->{type} eq "File") cmp ($data{$b}->{type} eq "File") || $a cmp $b @@ -306,6 +308,21 @@ sub output_rest { $w =~ s/([\(\)\_\-\*\=\^\~\\])/\\$1/g; if ($type ne "File") { + my $cur_part = $what; + if ($what =~ '/') { + if ($what =~ m#^(\/?(?:[\w\-]+\/?){1,2})#) { + $cur_part = "Symbols under $1"; + $cur_part =~ s,/$,,; + } + } + + if ($cur_part ne "" && $part ne $cur_part) { + $part = $cur_part; + my $bar = $part; + $bar =~ s/./-/g; + print "$part\n$bar\n\n"; + } + printf ".. _%s:\n\n", $data{$what}->{label}; my @names = split /, /,$w; @@ -352,6 +369,12 @@ sub output_rest { if (!($desc =~ /^\s*$/)) { if ($description_is_rst) { + # Remove title markups from the description + # Having titles inside ABI files will only work if extra + # care would be taken in order to strictly follow the same + # level order for each markup. + $desc =~ s/\n[\-\*\=\^\~]+\n/\n\n/g; + # Enrich text by creating cross-references $desc =~ s,Documentation/(?!devicetree)(\S+)\.rst,:doc:`/$1`,g; diff --git a/scripts/kernel-doc b/scripts/kernel-doc index c8f6b11d5da1..f699cf05d409 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -1092,7 +1092,11 @@ sub output_struct_rst(%) { print "\n\n.. c:type:: " . $name . "\n\n"; } else { my $name = $args{'struct'}; - print "\n\n.. c:struct:: " . $name . "\n\n"; + if ($args{'type'} eq 'union') { + print "\n\n.. c:union:: " . $name . "\n\n"; + } else { + print "\n\n.. c:struct:: " . $name . 
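The xfrm_alloc_spi() hunk above stops writing candidate SPIs straight into x->id.spi while searching and instead publishes the chosen value only under xfrm_state_lock, together with the hash insertion, so concurrent lookups do not see a state whose SPI is set but not yet hashed. The shape of that pattern, condensed from the hunk with added comments:

__be32 newspi = 0;

/* ... search for a free SPI, storing the candidate in newspi
 * instead of in x->id.spi ... */

if (newspi) {
	spin_lock_bh(&net->xfrm.xfrm_state_lock);
	x->id.spi = newspi;			/* publish ... */
	h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi,
			  x->id.proto, x->props.family);
	hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
	/* ... and hash in the same critical section */
	spin_unlock_bh(&net->xfrm.xfrm_state_lock);
	err = 0;
}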
"\n\n"; + } } print_lineno($declaration_start_line); $lineprefix = " "; @@ -1427,20 +1431,25 @@ sub dump_enum($$) { } } +my $typedef_type = qr { ((?:\s+[\w\*]+){1,8})\s* }x; +my $typedef_ident = qr { \*?\s*(\w\S+)\s* }x; +my $typedef_args = qr { \s*\((.*)\); }x; + +my $typedef1 = qr { typedef$typedef_type\($typedef_ident\)$typedef_args }x; +my $typedef2 = qr { typedef$typedef_type$typedef_ident$typedef_args }x; + sub dump_typedef($$) { my $x = shift; my $file = shift; $x =~ s@/\*.*?\*/@@gos; # strip comments. - # Parse function prototypes - if ($x =~ /typedef\s+(\w+)\s*\(\*\s*(\w\S+)\s*\)\s*\((.*)\);/ || - $x =~ /typedef\s+(\w+)\s*(\w\S+)\s*\s*\((.*)\);/) { - - # Function typedefs + # Parse function typedef prototypes + if ($x =~ $typedef1 || $x =~ $typedef2) { $return_type = $1; $declaration_name = $2; my $args = $3; + $return_type =~ s/^\s+//; create_parameterlist($args, ',', $file, $declaration_name); diff --git a/sound/core/control.c b/sound/core/control.c index 421ddc76f264..4373de42a5a0 100644 --- a/sound/core/control.c +++ b/sound/core/control.c @@ -1925,8 +1925,8 @@ EXPORT_SYMBOL(snd_ctl_unregister_ioctl); #ifdef CONFIG_COMPAT /** - * snd_ctl_unregister_ioctl - de-register the device-specific compat 32bit - * control-ioctls + * snd_ctl_unregister_ioctl_compat - de-register the device-specific compat + * 32bit control-ioctls * @fcn: ioctl callback function to unregister */ int snd_ctl_unregister_ioctl_compat(snd_kctl_ioctl_func_t fcn) diff --git a/sound/core/pcm_dmaengine.c b/sound/core/pcm_dmaengine.c index 4d059ff2b2e4..4d0e8fe535a1 100644 --- a/sound/core/pcm_dmaengine.c +++ b/sound/core/pcm_dmaengine.c @@ -356,7 +356,8 @@ int snd_dmaengine_pcm_close(struct snd_pcm_substream *substream) EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_close); /** - * snd_dmaengine_pcm_release_chan_close - Close a dmaengine based PCM substream and release channel + * snd_dmaengine_pcm_close_release_chan - Close a dmaengine based PCM + * substream and release channel * @substream: PCM substream * * Releases the DMA channel associated with the PCM substream. diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index d531e1bc2b81..bda3514c7b2d 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c @@ -490,7 +490,7 @@ void snd_pcm_set_ops(struct snd_pcm *pcm, int direction, EXPORT_SYMBOL(snd_pcm_set_ops); /** - * snd_pcm_sync - set the PCM sync id + * snd_pcm_set_sync - set the PCM sync id * @substream: the pcm substream * * Sets the PCM sync identifier for the card. diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 9e0b2d73faf6..47b155a49226 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -112,7 +112,7 @@ void snd_pcm_stream_lock(struct snd_pcm_substream *substream) EXPORT_SYMBOL_GPL(snd_pcm_stream_lock); /** - * snd_pcm_stream_lock - Unlock the PCM stream + * snd_pcm_stream_unlock - Unlock the PCM stream * @substream: PCM substream * * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock(). 
@@ -595,7 +595,7 @@ static void snd_pcm_sync_stop(struct snd_pcm_substream *substream) } /** - * snd_pcm_hw_param_choose - choose a configuration defined by @params + * snd_pcm_hw_params_choose - choose a configuration defined by @params * @pcm: PCM instance * @params: the hw_params instance * diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c index 4d060d5b1db6..b0c0ef824d7d 100644 --- a/sound/hda/ext/hdac_ext_controller.c +++ b/sound/hda/ext/hdac_ext_controller.c @@ -148,6 +148,8 @@ struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus, return NULL; if (bus->idx != bus_idx) return NULL; + if (addr < 0 || addr > 31) + return NULL; list_for_each_entry(hlink, &bus->hlink_list, list) { for (i = 0; i < HDA_MAX_CODECS; i++) { diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index a356c21edb90..4bb58e8b08a8 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -2934,7 +2934,7 @@ static void hda_call_codec_resume(struct hda_codec *codec) snd_hdac_leave_pm(&codec->core); } -static int hda_codec_runtime_suspend(struct device *dev) +static int hda_codec_suspend(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); unsigned int state; @@ -2953,7 +2953,7 @@ static int hda_codec_runtime_suspend(struct device *dev) return 0; } -static int hda_codec_runtime_resume(struct device *dev) +static int hda_codec_resume(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); @@ -2967,57 +2967,70 @@ static int hda_codec_runtime_resume(struct device *dev) pm_runtime_mark_last_busy(dev); return 0; } + +static int hda_codec_runtime_suspend(struct device *dev) +{ + return hda_codec_suspend(dev); +} + +static int hda_codec_runtime_resume(struct device *dev) +{ + return hda_codec_resume(dev); +} + #endif /* CONFIG_PM */ #ifdef CONFIG_PM_SLEEP -static int hda_codec_force_resume(struct device *dev) +static int hda_codec_pm_prepare(struct device *dev) +{ + return pm_runtime_suspended(dev); +} + +static void hda_codec_pm_complete(struct device *dev) { struct hda_codec *codec = dev_to_hda_codec(dev); - int ret; - ret = pm_runtime_force_resume(dev); - /* schedule jackpoll work for jack detection update */ - if (codec->jackpoll_interval || - (pm_runtime_suspended(dev) && hda_codec_need_resume(codec))) - schedule_delayed_work(&codec->jackpoll_work, - codec->jackpoll_interval); - return ret; + if (pm_runtime_suspended(dev) && (codec->jackpoll_interval || + hda_codec_need_resume(codec) || codec->forced_resume)) + pm_request_resume(dev); } static int hda_codec_pm_suspend(struct device *dev) { dev->power.power_state = PMSG_SUSPEND; - return pm_runtime_force_suspend(dev); + return hda_codec_suspend(dev); } static int hda_codec_pm_resume(struct device *dev) { dev->power.power_state = PMSG_RESUME; - return hda_codec_force_resume(dev); + return hda_codec_resume(dev); } static int hda_codec_pm_freeze(struct device *dev) { dev->power.power_state = PMSG_FREEZE; - return pm_runtime_force_suspend(dev); + return hda_codec_suspend(dev); } static int hda_codec_pm_thaw(struct device *dev) { dev->power.power_state = PMSG_THAW; - return hda_codec_force_resume(dev); + return hda_codec_resume(dev); } static int hda_codec_pm_restore(struct device *dev) { dev->power.power_state = PMSG_RESTORE; - return hda_codec_force_resume(dev); + return hda_codec_resume(dev); } #endif /* CONFIG_PM_SLEEP */ /* referred in hda_bind.c */ const struct dev_pm_ops hda_codec_driver_pm = { #ifdef CONFIG_PM_SLEEP + .prepare = hda_codec_pm_prepare, + 
.complete = hda_codec_pm_complete, .suspend = hda_codec_pm_suspend, .resume = hda_codec_pm_resume, .freeze = hda_codec_pm_freeze, diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h index be63ead8161f..68f9668788ea 100644 --- a/sound/pci/hda/hda_controller.h +++ b/sound/pci/hda/hda_controller.h @@ -41,7 +41,7 @@ /* 24 unused */ #define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ #define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ -#define AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP (1 << 27) /* Workaround for spurious wakeups after suspend */ +/* 27 unused */ #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ #define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */ #define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */ @@ -143,6 +143,7 @@ struct azx { unsigned int align_buffer_size:1; unsigned int region_requested:1; unsigned int disabled:1; /* disabled by vga_switcheroo */ + unsigned int pm_prepared:1; /* GTS present */ unsigned int gts_present:1; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 749b88090970..d539f52009a1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -297,8 +297,7 @@ enum { /* PCH for HSW/BDW; with runtime PM */ /* no i915 binding for this as HSW/BDW has another controller for HDMI */ #define AZX_DCAPS_INTEL_PCH \ - (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME |\ - AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) + (AZX_DCAPS_INTEL_PCH_BASE | AZX_DCAPS_PM_RUNTIME) /* HSW HDMI */ #define AZX_DCAPS_INTEL_HASWELL \ @@ -985,7 +984,7 @@ static void __azx_runtime_suspend(struct azx *chip) display_power(chip, false); } -static void __azx_runtime_resume(struct azx *chip, bool from_rt) +static void __azx_runtime_resume(struct azx *chip) { struct hda_intel *hda = container_of(chip, struct hda_intel, chip); struct hdac_bus *bus = azx_bus(chip); @@ -1002,7 +1001,8 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt) azx_init_pci(chip); hda_intel_init_chip(chip, true); - if (from_rt) { + /* Avoid codec resume if runtime resume is for system suspend */ + if (!chip->pm_prepared) { list_for_each_codec(codec, &chip->bus) { if (codec->relaxed_resume) continue; @@ -1018,6 +1018,29 @@ static void __azx_runtime_resume(struct azx *chip, bool from_rt) } #ifdef CONFIG_PM_SLEEP +static int azx_prepare(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + + chip = card->private_data; + chip->pm_prepared = 1; + + /* HDA controller always requires different WAKEEN for runtime suspend + * and system suspend, so don't use direct-complete here. 
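Context for the ->prepare()/->complete() additions above: a positive return from a driver's ->prepare() callback tells the PM core it may leave an already runtime-suspended device alone across system sleep (direct-complete), and ->complete() is where to request a runtime resume afterwards if work is still pending. The codec opts in that way, while the controller comment above deliberately returns 0 to decline. Condensed from the codec hunks with comments added, nothing new:

static int example_pm_prepare(struct device *dev)
{
	/* > 0 means: fine to skip suspend/resume if runtime-suspended */
	return pm_runtime_suspended(dev);
}

static void example_pm_complete(struct device *dev)
{
	struct hda_codec *codec = dev_to_hda_codec(dev);

	/* if direct-complete kept the codec suspended but jack polling or
	 * a forced/needed resume is outstanding, kick runtime resume now */
	if (pm_runtime_suspended(dev) && (codec->jackpoll_interval ||
	    hda_codec_need_resume(codec) || codec->forced_resume))
		pm_request_resume(dev);
}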
+ */ + return 0; +} + +static void azx_complete(struct device *dev) +{ + struct snd_card *card = dev_get_drvdata(dev); + struct azx *chip; + + chip = card->private_data; + chip->pm_prepared = 0; +} + static int azx_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); @@ -1029,15 +1052,7 @@ static int azx_suspend(struct device *dev) chip = card->private_data; bus = azx_bus(chip); - snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); - /* An ugly workaround: direct call of __azx_runtime_suspend() and - * __azx_runtime_resume() for old Intel platforms that suffer from - * spurious wakeups after S3 suspend - */ - if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) - __azx_runtime_suspend(chip); - else - pm_runtime_force_suspend(dev); + __azx_runtime_suspend(chip); if (bus->irq >= 0) { free_irq(bus->irq, chip); bus->irq = -1; @@ -1066,11 +1081,7 @@ static int azx_resume(struct device *dev) if (azx_acquire_irq(chip, 1) < 0) return -EIO; - if (chip->driver_caps & AZX_DCAPS_SUSPEND_SPURIOUS_WAKEUP) - __azx_runtime_resume(chip, false); - else - pm_runtime_force_resume(dev); - snd_power_change_state(card, SNDRV_CTL_POWER_D0); + __azx_runtime_resume(chip); trace_azx_resume(chip); return 0; @@ -1118,10 +1129,7 @@ static int azx_runtime_suspend(struct device *dev) chip = card->private_data; /* enable controller wake up event */ - if (snd_power_get_state(card) == SNDRV_CTL_POWER_D0) { - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | - STATESTS_INT_MASK); - } + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) | STATESTS_INT_MASK); __azx_runtime_suspend(chip); trace_azx_runtime_suspend(chip); @@ -1132,18 +1140,14 @@ static int azx_runtime_resume(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); struct azx *chip; - bool from_rt = snd_power_get_state(card) == SNDRV_CTL_POWER_D0; if (!azx_is_pm_ready(card)) return 0; chip = card->private_data; - __azx_runtime_resume(chip, from_rt); + __azx_runtime_resume(chip); /* disable controller Wake Up event*/ - if (from_rt) { - azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & - ~STATESTS_INT_MASK); - } + azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) & ~STATESTS_INT_MASK); trace_azx_runtime_resume(chip); return 0; @@ -1177,6 +1181,8 @@ static int azx_runtime_idle(struct device *dev) static const struct dev_pm_ops azx_pm = { SET_SYSTEM_SLEEP_PM_OPS(azx_suspend, azx_resume) #ifdef CONFIG_PM_SLEEP + .prepare = azx_prepare, + .complete = azx_complete, .freeze_noirq = azx_freeze_noirq, .thaw_noirq = azx_thaw_noirq, #endif @@ -2356,6 +2362,7 @@ static int azx_probe_continue(struct azx *chip) if (azx_has_pm_runtime(chip)) { pm_runtime_use_autosuspend(&pci->dev); + pm_runtime_allow(&pci->dev); pm_runtime_put_autosuspend(&pci->dev); } diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index f2398721ac1e..6899089d132e 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6008,6 +6008,27 @@ static void alc285_fixup_invalidate_dacs(struct hda_codec *codec, snd_hda_override_wcaps(codec, 0x03, 0); } +static void alc_combo_jack_hp_jd_restart(struct hda_codec *codec) +{ + switch (codec->core.vendor_id) { + case 0x10ec0274: + case 0x10ec0294: + case 0x10ec0225: + case 0x10ec0295: + case 0x10ec0299: + alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ + alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); + break; + case 0x10ec0235: + case 0x10ec0236: + case 0x10ec0255: + case 0x10ec0256: + alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ + 
alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); + break; + } +} + static void alc295_fixup_chromebook(struct hda_codec *codec, const struct hda_fixup *fix, int action) { @@ -6018,16 +6039,7 @@ static void alc295_fixup_chromebook(struct hda_codec *codec, spec->ultra_low_power = true; break; case HDA_FIXUP_ACT_INIT: - switch (codec->core.vendor_id) { - case 0x10ec0295: - alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */ - alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15); - break; - case 0x10ec0236: - alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */ - alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15); - break; - } + alc_combo_jack_hp_jd_restart(codec); break; } } @@ -6083,6 +6095,16 @@ static void alc285_fixup_hp_gpio_amp_init(struct hda_codec *codec, alc_write_coef_idx(codec, 0x65, 0x0); } +static void alc274_fixup_hp_headset_mic(struct hda_codec *codec, + const struct hda_fixup *fix, int action) +{ + switch (action) { + case HDA_FIXUP_ACT_INIT: + alc_combo_jack_hp_jd_restart(codec); + break; + } +} + /* for hda_fixup_thinkpad_acpi() */ #include "thinkpad_helper.c" @@ -6277,6 +6299,8 @@ enum { ALC256_FIXUP_INTEL_NUC8_RUGGED, ALC255_FIXUP_XIAOMI_HEADSET_MIC, ALC274_FIXUP_HP_MIC, + ALC274_FIXUP_HP_HEADSET_MIC, + ALC256_FIXUP_ASUS_HPE, }; static const struct hda_fixup alc269_fixups[] = { @@ -7664,6 +7688,23 @@ static const struct hda_fixup alc269_fixups[] = { { } }, }, + [ALC274_FIXUP_HP_HEADSET_MIC] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc274_fixup_hp_headset_mic, + .chained = true, + .chain_id = ALC274_FIXUP_HP_MIC + }, + [ALC256_FIXUP_ASUS_HPE] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + /* Set EAPD high */ + { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f }, + { 0x20, AC_VERB_SET_PROC_COEF, 0x7778 }, + { } + }, + .chained = true, + .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC + }, }; static const struct snd_pci_quirk alc269_fixup_tbl[] = { @@ -7815,7 +7856,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x8729, "HP", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8736, "HP", ALC285_FIXUP_HP_GPIO_AMP_INIT), - SND_PCI_QUIRK(0x103c, 0x874e, "HP", ALC274_FIXUP_HP_MIC), SND_PCI_QUIRK(0x103c, 0x8760, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877a, "HP", ALC285_FIXUP_HP_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x877d, "HP", ALC236_FIXUP_HP_MUTE_LED), @@ -7848,6 +7888,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1bbd, "ASUS Z550MA", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC), + SND_PCI_QUIRK(0x1043, 0x1d4e, "ASUS TM420", ALC256_FIXUP_ASUS_HPE), SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS), @@ -8339,6 +8380,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x1a, 0x90a70130}, {0x1b, 0x90170110}, {0x21, 0x03211020}), + SND_HDA_PIN_QUIRK(0x10ec0274, 0x103c, "HP", ALC274_FIXUP_HP_HEADSET_MIC, + {0x17, 0x90170110}, + {0x19, 0x03a11030}, + {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, {0x12, 0x90a60130}, {0x14, 0x90170110}, diff --git a/sound/soc/atmel/mchp-spdiftx.c b/sound/soc/atmel/mchp-spdiftx.c index 
82c1eecd2528..3bd350afb743 100644 --- a/sound/soc/atmel/mchp-spdiftx.c +++ b/sound/soc/atmel/mchp-spdiftx.c @@ -487,7 +487,6 @@ static int mchp_spdiftx_hw_params(struct snd_pcm_substream *substream, } mchp_spdiftx_channel_status_write(dev); spin_unlock_irqrestore(&ctrl->lock, flags); - mr |= SPDIFTX_MR_VALID1 | SPDIFTX_MR_VALID2; if (dev->gclk_enabled) { clk_disable_unprepare(dev->gclk); diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index 097c4e8d9950..c61b17dc2af8 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -254,8 +254,28 @@ static const struct snd_soc_dapm_widget cs42l51_dapm_widgets[] = { &cs42l51_adcr_mux_controls), }; +static int mclk_event(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_component *comp = snd_soc_dapm_to_component(w->dapm); + struct cs42l51_private *cs42l51 = snd_soc_component_get_drvdata(comp); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + return clk_prepare_enable(cs42l51->mclk_handle); + case SND_SOC_DAPM_POST_PMD: + /* Delay mclk shutdown to fulfill power-down sequence requirements */ + msleep(20); + clk_disable_unprepare(cs42l51->mclk_handle); + break; + } + + return 0; +} + static const struct snd_soc_dapm_widget cs42l51_dapm_mclk_widgets[] = { - SND_SOC_DAPM_CLOCK_SUPPLY("MCLK") + SND_SOC_DAPM_SUPPLY("MCLK", SND_SOC_NOPM, 0, 0, mclk_event, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route cs42l51_routes[] = { diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c index f2d9d52ee171..4d2b1ec7c03b 100644 --- a/sound/soc/codecs/wcd9335.c +++ b/sound/soc/codecs/wcd9335.c @@ -618,7 +618,7 @@ static const char * const sb_tx8_mux_text[] = { "ZERO", "RX_MIX_TX8", "DEC8", "DEC8_192" }; -static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); +static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400); static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0); diff --git a/sound/soc/codecs/wcd934x.c b/sound/soc/codecs/wcd934x.c index 35697b072367..40f682f5dab8 100644 --- a/sound/soc/codecs/wcd934x.c +++ b/sound/soc/codecs/wcd934x.c @@ -551,7 +551,7 @@ struct wcd_iir_filter_ctl { struct soc_bytes_ext bytes_ext; }; -static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0); +static const DECLARE_TLV_DB_SCALE(digital_gain, -8400, 100, -8400); static const DECLARE_TLV_DB_SCALE(line_gain, 0, 7, 1); static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1); static const DECLARE_TLV_DB_SCALE(ear_pa_gain, 0, 150, 0); diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c index 68e774e69c85..4530b74f5921 100644 --- a/sound/soc/codecs/wsa881x.c +++ b/sound/soc/codecs/wsa881x.c @@ -1026,6 +1026,8 @@ static struct snd_soc_dai_driver wsa881x_dais[] = { .id = 0, .playback = { .stream_name = "SPKR Playback", + .rates = SNDRV_PCM_RATE_48000, + .formats = SNDRV_PCM_FMTBIT_S16_LE, .rate_max = 48000, .rate_min = 48000, .channels_min = 1, diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig index d5bae5d1ab6f..a5b446d5af19 100644 --- a/sound/soc/intel/Kconfig +++ b/sound/soc/intel/Kconfig @@ -15,22 +15,6 @@ config SND_SOC_INTEL_SST_TOPLEVEL if SND_SOC_INTEL_SST_TOPLEVEL -config SND_SST_IPC - tristate - # This option controls the IPC core for HiFi2 platforms - -config SND_SST_IPC_PCI - tristate - select SND_SST_IPC - # This option controls the PCI-based IPC for HiFi2 platforms - # 
(Medfield, Merrifield). - -config SND_SST_IPC_ACPI - tristate - select SND_SST_IPC - # This option controls the ACPI-based IPC for HiFi2 platforms - # (Baytrail, Cherrytrail) - config SND_SOC_INTEL_SST tristate @@ -57,7 +41,6 @@ config SND_SST_ATOM_HIFI2_PLATFORM config SND_SST_ATOM_HIFI2_PLATFORM_PCI tristate "PCI HiFi2 (Merrifield) Platforms" depends on X86 && PCI - select SND_SST_IPC_PCI select SND_SST_ATOM_HIFI2_PLATFORM help If you have a Intel Merrifield/Edison platform, then @@ -70,7 +53,6 @@ config SND_SST_ATOM_HIFI2_PLATFORM_ACPI tristate "ACPI HiFi2 (Baytrail, Cherrytrail) Platforms" default ACPI depends on X86 && ACPI && PCI - select SND_SST_IPC_ACPI select SND_SST_ATOM_HIFI2_PLATFORM select SND_SOC_ACPI_INTEL_MATCH select IOSF_MBI diff --git a/sound/soc/intel/atom/Makefile b/sound/soc/intel/atom/Makefile index a9326d5ec44c..c66f03f5d8d6 100644 --- a/sound/soc/intel/atom/Makefile +++ b/sound/soc/intel/atom/Makefile @@ -6,4 +6,4 @@ snd-soc-sst-atom-hifi2-platform-objs := sst-mfld-platform-pcm.o \ obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += snd-soc-sst-atom-hifi2-platform.o # DSP driver -obj-$(CONFIG_SND_SST_IPC) += sst/ +obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += sst/ diff --git a/sound/soc/intel/atom/sst/Makefile b/sound/soc/intel/atom/sst/Makefile index f17c905df3e2..5761d30a5f9d 100644 --- a/sound/soc/intel/atom/sst/Makefile +++ b/sound/soc/intel/atom/sst/Makefile @@ -3,6 +3,6 @@ snd-intel-sst-core-objs := sst.o sst_ipc.o sst_stream.o sst_drv_interface.o sst_ snd-intel-sst-pci-objs += sst_pci.o snd-intel-sst-acpi-objs += sst_acpi.o -obj-$(CONFIG_SND_SST_IPC) += snd-intel-sst-core.o -obj-$(CONFIG_SND_SST_IPC_PCI) += snd-intel-sst-pci.o -obj-$(CONFIG_SND_SST_IPC_ACPI) += snd-intel-sst-acpi.o +obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM) += snd-intel-sst-core.o +obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_PCI) += snd-intel-sst-pci.o +obj-$(CONFIG_SND_SST_ATOM_HIFI2_PLATFORM_ACPI) += snd-intel-sst-acpi.o diff --git a/sound/soc/intel/boards/kbl_rt5663_max98927.c b/sound/soc/intel/boards/kbl_rt5663_max98927.c index 3ea4602dfb3e..9a4b3d0973f6 100644 --- a/sound/soc/intel/boards/kbl_rt5663_max98927.c +++ b/sound/soc/intel/boards/kbl_rt5663_max98927.c @@ -401,17 +401,40 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, struct snd_interval *chan = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); - struct snd_soc_dpcm *dpcm = container_of( - params, struct snd_soc_dpcm, hw_params); - struct snd_soc_dai_link *fe_dai_link = dpcm->fe->dai_link; - struct snd_soc_dai_link *be_dai_link = dpcm->be->dai_link; + struct snd_soc_dpcm *dpcm, *rtd_dpcm = NULL; + + /* + * The following loop will be called only for playback stream + * In this platform, there is only one playback device on every SSP + */ + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_PLAYBACK, dpcm) { + rtd_dpcm = dpcm; + break; + } + + /* + * This following loop will be called only for capture stream + * In this platform, there is only one capture device on every SSP + */ + for_each_dpcm_fe(rtd, SNDRV_PCM_STREAM_CAPTURE, dpcm) { + rtd_dpcm = dpcm; + break; + } + + if (!rtd_dpcm) + return -EINVAL; + + /* + * The above 2 loops are mutually exclusive based on the stream direction, + * thus rtd_dpcm variable will never be overwritten + */ /* * The ADSP will convert the FE rate to 48k, stereo, 24 bit */ - if (!strcmp(fe_dai_link->name, "Kbl Audio Port") || - !strcmp(fe_dai_link->name, "Kbl Audio Headset Playback") || - !strcmp(fe_dai_link->name, "Kbl 
Audio Capture Port")) { + if (!strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Port") || + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Headset Playback") || + !strcmp(rtd_dpcm->fe->dai_link->name, "Kbl Audio Capture Port")) { rate->min = rate->max = 48000; chan->min = chan->max = 2; snd_mask_none(fmt); @@ -421,7 +444,7 @@ static int kabylake_ssp_fixup(struct snd_soc_pcm_runtime *rtd, * The speaker on the SSP0 supports S16_LE and not S24_LE. * thus changing the mask here */ - if (!strcmp(be_dai_link->name, "SSP0-Codec")) + if (!strcmp(rtd_dpcm->be->dai_link->name, "SSP0-Codec")) snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); return 0; diff --git a/sound/soc/intel/catpt/dsp.c b/sound/soc/intel/catpt/dsp.c index 7d2968571951..9e807b941732 100644 --- a/sound/soc/intel/catpt/dsp.c +++ b/sound/soc/intel/catpt/dsp.c @@ -267,9 +267,12 @@ static int catpt_dsp_select_lpclock(struct catpt_dev *cdev, bool lp, bool waiti) reg, (reg & CATPT_ISD_DCPWM), 500, 10000); if (ret) { - dev_err(cdev->dev, "await WAITI timeout\n"); - mutex_unlock(&cdev->clk_mutex); - return ret; + dev_warn(cdev->dev, "await WAITI timeout\n"); + /* no signal - only high clock selection allowed */ + if (lp) { + mutex_unlock(&cdev->clk_mutex); + return 0; + } } } diff --git a/sound/soc/intel/catpt/pcm.c b/sound/soc/intel/catpt/pcm.c index f78018c857b8..ba653ebea7d1 100644 --- a/sound/soc/intel/catpt/pcm.c +++ b/sound/soc/intel/catpt/pcm.c @@ -667,7 +667,17 @@ static int catpt_dai_pcm_new(struct snd_soc_pcm_runtime *rtm, break; } + /* see if this is a new configuration */ + if (!memcmp(&cdev->devfmt[devfmt.iface], &devfmt, sizeof(devfmt))) + return 0; + + pm_runtime_get_sync(cdev->dev); + ret = catpt_ipc_set_device_format(cdev, &devfmt); + + pm_runtime_mark_last_busy(cdev->dev); + pm_runtime_put_autosuspend(cdev->dev); + if (ret) return CATPT_IPC_ERROR(ret); diff --git a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c index c2c1eb16fcc0..26e7d9a7198f 100644 --- a/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c +++ b/sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c @@ -630,15 +630,34 @@ static struct snd_soc_codec_conf mt8183_da7219_rt1015_codec_conf[] = { }, }; +static const struct snd_kcontrol_new mt8183_da7219_rt1015_snd_controls[] = { + SOC_DAPM_PIN_SWITCH("Left Spk"), + SOC_DAPM_PIN_SWITCH("Right Spk"), +}; + +static const +struct snd_soc_dapm_widget mt8183_da7219_rt1015_dapm_widgets[] = { + SND_SOC_DAPM_SPK("Left Spk", NULL), + SND_SOC_DAPM_SPK("Right Spk", NULL), + SND_SOC_DAPM_PINCTRL("TDM_OUT_PINCTRL", + "aud_tdm_out_on", "aud_tdm_out_off"), +}; + +static const struct snd_soc_dapm_route mt8183_da7219_rt1015_dapm_routes[] = { + {"Left Spk", NULL, "Left SPO"}, + {"Right Spk", NULL, "Right SPO"}, + {"I2S Playback", NULL, "TDM_OUT_PINCTRL"}, +}; + static struct snd_soc_card mt8183_da7219_rt1015_card = { .name = "mt8183_da7219_rt1015", .owner = THIS_MODULE, - .controls = mt8183_da7219_max98357_snd_controls, - .num_controls = ARRAY_SIZE(mt8183_da7219_max98357_snd_controls), - .dapm_widgets = mt8183_da7219_max98357_dapm_widgets, - .num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_max98357_dapm_widgets), - .dapm_routes = mt8183_da7219_max98357_dapm_routes, - .num_dapm_routes = ARRAY_SIZE(mt8183_da7219_max98357_dapm_routes), + .controls = mt8183_da7219_rt1015_snd_controls, + .num_controls = ARRAY_SIZE(mt8183_da7219_rt1015_snd_controls), + .dapm_widgets = mt8183_da7219_rt1015_dapm_widgets, + .num_dapm_widgets = ARRAY_SIZE(mt8183_da7219_rt1015_dapm_widgets), + .dapm_routes 
= mt8183_da7219_rt1015_dapm_routes, + .num_dapm_routes = ARRAY_SIZE(mt8183_da7219_rt1015_dapm_routes), .dai_link = mt8183_da7219_dai_links, .num_links = ARRAY_SIZE(mt8183_da7219_dai_links), .aux_dev = &mt8183_da7219_max98357_headset_dev, diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c index ba2aca301a9b..9d17c87445a9 100644 --- a/sound/soc/qcom/lpass-cpu.c +++ b/sound/soc/qcom/lpass-cpu.c @@ -80,6 +80,12 @@ static int lpass_cpu_daiops_startup(struct snd_pcm_substream *substream, dev_err(dai->dev, "error in enabling mi2s osr clk: %d\n", ret); return ret; } + ret = clk_prepare(drvdata->mi2s_bit_clk[dai->driver->id]); + if (ret) { + dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret); + clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]); + return ret; + } return 0; } @@ -88,9 +94,8 @@ static void lpass_cpu_daiops_shutdown(struct snd_pcm_substream *substream, { struct lpass_data *drvdata = snd_soc_dai_get_drvdata(dai); - clk_disable_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]); - clk_disable_unprepare(drvdata->mi2s_osr_clk[dai->driver->id]); + clk_unprepare(drvdata->mi2s_bit_clk[dai->driver->id]); } static int lpass_cpu_daiops_hw_params(struct snd_pcm_substream *substream, @@ -303,10 +308,10 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream, dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret); - ret = clk_prepare_enable(drvdata->mi2s_bit_clk[id]); + ret = clk_enable(drvdata->mi2s_bit_clk[id]); if (ret) { dev_err(dai->dev, "error in enabling mi2s bit clk: %d\n", ret); - clk_disable_unprepare(drvdata->mi2s_osr_clk[id]); + clk_disable(drvdata->mi2s_osr_clk[id]); return ret; } @@ -324,6 +329,7 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream, if (ret) dev_err(dai->dev, "error writing to i2sctl reg: %d\n", ret); + clk_disable(drvdata->mi2s_bit_clk[dai->driver->id]); break; } diff --git a/sound/soc/qcom/lpass-sc7180.c b/sound/soc/qcom/lpass-sc7180.c index c6292f9e613f..bc998d501600 100644 --- a/sound/soc/qcom/lpass-sc7180.c +++ b/sound/soc/qcom/lpass-sc7180.c @@ -188,7 +188,7 @@ static struct lpass_variant sc7180_data = { .micmode = REG_FIELD_ID(0x1000, 4, 8, 3, 0x1000), .micmono = REG_FIELD_ID(0x1000, 3, 3, 3, 0x1000), .wssrc = REG_FIELD_ID(0x1000, 2, 2, 3, 0x1000), - .bitwidth = REG_FIELD_ID(0x1000, 0, 0, 3, 0x1000), + .bitwidth = REG_FIELD_ID(0x1000, 0, 1, 3, 0x1000), .rdma_dyncclk = REG_FIELD_ID(0xC000, 21, 21, 5, 0x1000), .rdma_bursten = REG_FIELD_ID(0xC000, 20, 20, 5, 0x1000), diff --git a/sound/soc/qcom/sdm845.c b/sound/soc/qcom/sdm845.c index ab1bf23c21a6..6c2760e27ea6 100644 --- a/sound/soc/qcom/sdm845.c +++ b/sound/soc/qcom/sdm845.c @@ -17,6 +17,7 @@ #include "qdsp6/q6afe.h" #include "../codecs/rt5663.h" +#define DRIVER_NAME "sdm845" #define DEFAULT_SAMPLE_RATE_48K 48000 #define DEFAULT_MCLK_RATE 24576000 #define TDM_BCLK_RATE 6144000 @@ -552,6 +553,7 @@ static int sdm845_snd_platform_probe(struct platform_device *pdev) if (!data) return -ENOMEM; + card->driver_name = DRIVER_NAME; card->dapm_widgets = sdm845_snd_widgets; card->num_dapm_widgets = ARRAY_SIZE(sdm845_snd_widgets); card->dev = dev; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index ea3986a46c12..05a085f6dc7c 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -2341,7 +2341,7 @@ struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component, } /** - * snd_soc_unregister_dai - Unregister DAIs from the ASoC core + * snd_soc_unregister_dais - Unregister DAIs from the ASoC core * * 
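On the lpass-cpu change above: clk_prepare()/clk_unprepare() may sleep while clk_enable()/clk_disable() are atomic-safe, so the driver keeps the sleeping half in startup()/shutdown() and leaves only enable/disable to the trigger() path, which can run in atomic context. A generic sketch of that split (example_bclk is a placeholder clock, error handling trimmed):

static int example_startup(struct snd_pcm_substream *ss,
			   struct snd_soc_dai *dai)
{
	return clk_prepare(example_bclk);	/* may sleep */
}

static void example_shutdown(struct snd_pcm_substream *ss,
			     struct snd_soc_dai *dai)
{
	clk_unprepare(example_bclk);		/* may sleep */
}

static int example_trigger(struct snd_pcm_substream *ss, int cmd,
			   struct snd_soc_dai *dai)
{
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		return clk_enable(example_bclk);	/* atomic-safe */
	case SNDRV_PCM_TRIGGER_STOP:
		clk_disable(example_bclk);		/* atomic-safe */
		break;
	}
	return 0;
}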
@component: The component for which the DAIs should be unregistered */ diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 980f2c330b87..7f87b449f950 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -1276,7 +1276,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget, } /** - * snd_soc_dapm_get_connected_widgets - query audio path and it's widgets. + * snd_soc_dapm_dai_get_connected_widgets - query audio path and it's widgets. * @dai: the soc DAI. * @stream: stream direction. * @list: list of active widgets for this stream. diff --git a/sound/soc/sof/loader.c b/sound/soc/sof/loader.c index 68ed454f7ddf..ba9ed66f98bc 100644 --- a/sound/soc/sof/loader.c +++ b/sound/soc/sof/loader.c @@ -118,6 +118,11 @@ int snd_sof_fw_parse_ext_data(struct snd_sof_dev *sdev, u32 bar, u32 offset) case SOF_IPC_EXT_CC_INFO: ret = get_cc_info(sdev, ext_hdr); break; + case SOF_IPC_EXT_UNUSED: + case SOF_IPC_EXT_PROBE_INFO: + case SOF_IPC_EXT_USER_ABI_INFO: + /* They are supported but we don't do anything here */ + break; default: dev_warn(sdev->dev, "warning: unknown ext header type %d size 0x%x\n", ext_hdr->type, ext_hdr->hdr.size); diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index b401ee894e1b..a860303cc522 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c @@ -336,6 +336,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, switch (subs->stream->chip->usb_id) { case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */ + case USB_ID(0x22f0, 0x0006): /* Allen&Heath Qu-16 */ ep = 0x81; ifnum = 3; goto add_sync_ep_from_ifnum; @@ -345,6 +346,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ifnum = 2; goto add_sync_ep_from_ifnum; case USB_ID(0x2466, 0x8003): /* Fractal Audio Axe-Fx II */ + case USB_ID(0x0499, 0x172a): /* Yamaha MODX */ ep = 0x86; ifnum = 2; goto add_sync_ep_from_ifnum; @@ -352,6 +354,10 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, ep = 0x81; ifnum = 2; goto add_sync_ep_from_ifnum; + case USB_ID(0x1686, 0xf029): /* Zoom UAC-2 */ + ep = 0x82; + ifnum = 2; + goto add_sync_ep_from_ifnum; case USB_ID(0x1397, 0x0001): /* Behringer UFX1604 */ case USB_ID(0x1397, 0x0002): /* Behringer UFX1204 */ ep = 0x81; diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index b4fa80ef730d..c989ad8052ae 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1800,6 +1800,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, case 0x278b: /* Rotel? */ case 0x292b: /* Gustard/Ess based devices */ case 0x2ab6: /* T+A devices */ + case 0x3353: /* Khadas devices */ case 0x3842: /* EVGA */ case 0xc502: /* HiBy devices */ if (fp->dsd_raw) diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index ba85bb23f060..1c17c3a24411 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h @@ -159,6 +159,21 @@ struct kvm_sync_regs { struct kvm_arch_memory_slot { }; +/* + * PMU filter structure. Describe a range of events with a particular + * action. To be used with KVM_ARM_VCPU_PMU_V3_FILTER. 
+ */ +struct kvm_pmu_event_filter { + __u16 base_event; + __u16 nevents; + +#define KVM_PMU_EVENT_ALLOW 0 +#define KVM_PMU_EVENT_DENY 1 + + __u8 action; + __u8 pad[3]; +}; + /* for KVM_GET/SET_VCPU_EVENTS */ struct kvm_vcpu_events { struct { @@ -242,6 +257,15 @@ struct kvm_vcpu_events { #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 + +/* + * Only two states can be presented by the host kernel: + * - NOT_REQUIRED: the guest doesn't need to do anything + * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available) + * + * All the other values are deprecated. The host still accepts all + * values (they are ABI), but will narrow them to the above two. + */ #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 @@ -329,6 +353,7 @@ struct kvm_vcpu_events { #define KVM_ARM_VCPU_PMU_V3_CTRL 0 #define KVM_ARM_VCPU_PMU_V3_IRQ 0 #define KVM_ARM_VCPU_PMU_V3_INIT 1 +#define KVM_ARM_VCPU_PMU_V3_FILTER 2 #define KVM_ARM_VCPU_TIMER_CTRL 1 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 diff --git a/tools/arch/s390/include/uapi/asm/sie.h b/tools/arch/s390/include/uapi/asm/sie.h index 6ca1e68d7103..ede318653c87 100644 --- a/tools/arch/s390/include/uapi/asm/sie.h +++ b/tools/arch/s390/include/uapi/asm/sie.h @@ -29,7 +29,7 @@ { 0x13, "SIGP conditional emergency signal" }, \ { 0x15, "SIGP sense running" }, \ { 0x16, "SIGP set multithreading"}, \ - { 0x17, "SIGP store additional status ait address"} + { 0x17, "SIGP store additional status at address"} #define icpt_prog_codes \ { 0x0001, "Prog Operation" }, \ diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 2901d5df4366..dad350d42ecf 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -96,7 +96,7 @@ #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */ #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */ #define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */ -/* free ( 3*32+17) */ +#define X86_FEATURE_SME_COHERENT ( 3*32+17) /* "" AMD hardware-enforced cache coherency */ #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" LFENCE synchronizes RDTSC */ #define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */ #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ @@ -236,6 +236,7 @@ #define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */ #define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */ #define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */ +#define X86_FEATURE_SEV_ES ( 8*32+20) /* AMD Secure Encrypted Virtualization - Encrypted State */ /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */ #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/ @@ -288,6 +289,7 @@ #define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */ #define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */ #define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */ +#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth 
Allocation */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -353,6 +355,7 @@ #define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */ #define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */ #define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */ +#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */ /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */ @@ -368,6 +371,7 @@ #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ #define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */ +#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */ #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ #define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */ #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 4ea8584682f9..5861d34f9771 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h @@ -56,6 +56,12 @@ # define DISABLE_PTI (1 << (X86_FEATURE_PTI & 31)) #endif +#ifdef CONFIG_IOMMU_SUPPORT +# define DISABLE_ENQCMD 0 +#else +# define DISABLE_ENQCMD (1 << (X86_FEATURE_ENQCMD & 31)) +#endif + /* * Make sure to add features to the correct mask */ @@ -75,7 +81,8 @@ #define DISABLED_MASK13 0 #define DISABLED_MASK14 0 #define DISABLED_MASK15 0 -#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP) +#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57|DISABLE_UMIP| \ + DISABLE_ENQCMD) #define DISABLED_MASK17 0 #define DISABLED_MASK18 0 #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h index 2859ee4f39a8..972a34d93505 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -257,6 +257,9 @@ #define MSR_IA32_LASTINTFROMIP 0x000001dd #define MSR_IA32_LASTINTTOIP 0x000001de +#define MSR_IA32_PASID 0x00000d93 +#define MSR_IA32_PASID_VALID BIT_ULL(31) + /* DEBUGCTLMSR bits (others vary by model): */ #define DEBUGCTLMSR_LBR (1UL << 0) /* last branch recording */ #define DEBUGCTLMSR_BTF_SHIFT 1 @@ -464,11 +467,15 @@ #define MSR_AMD64_IBSOP_REG_MASK ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1) #define MSR_AMD64_IBSCTL 0xc001103a #define MSR_AMD64_IBSBRTARGET 0xc001103b +#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c #define MSR_AMD64_IBSOPDATA4 0xc001103d #define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */ +#define MSR_AMD64_SEV_ES_GHCB 0xc0010130 #define MSR_AMD64_SEV 0xc0010131 #define MSR_AMD64_SEV_ENABLED_BIT 0 +#define MSR_AMD64_SEV_ES_ENABLED_BIT 1 #define MSR_AMD64_SEV_ENABLED BIT_ULL(MSR_AMD64_SEV_ENABLED_BIT) +#define MSR_AMD64_SEV_ES_ENABLED BIT_ULL(MSR_AMD64_SEV_ES_ENABLED_BIT) #define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f @@ -857,11 +864,14 @@ #define MSR_CORE_PERF_FIXED_CTR0 0x00000309 #define MSR_CORE_PERF_FIXED_CTR1 0x0000030a #define MSR_CORE_PERF_FIXED_CTR2 0x0000030b +#define MSR_CORE_PERF_FIXED_CTR3 0x0000030c #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x0000038d #define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e #define 
MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390 +#define MSR_PERF_METRICS 0x00000329 + /* PERF_GLOBAL_OVF_CTL bits */ #define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT 55 #define MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI (1ULL << MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT) diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 6847d85400a8..3ff0d48469f2 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -54,7 +54,7 @@ #endif #ifdef CONFIG_X86_64 -#ifdef CONFIG_PARAVIRT +#ifdef CONFIG_PARAVIRT_XXL /* Paravirtualized systems may not have PSE or PGE available */ #define NEED_PSE 0 #define NEED_PGE 0 diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 0780f97c1850..89e5f3d1bba8 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -192,6 +192,26 @@ struct kvm_msr_list { __u32 indices[0]; }; +/* Maximum size of any access bitmap in bytes */ +#define KVM_MSR_FILTER_MAX_BITMAP_SIZE 0x600 + +/* for KVM_X86_SET_MSR_FILTER */ +struct kvm_msr_filter_range { +#define KVM_MSR_FILTER_READ (1 << 0) +#define KVM_MSR_FILTER_WRITE (1 << 1) + __u32 flags; + __u32 nmsrs; /* number of msrs in bitmap */ + __u32 base; /* MSR index the bitmap starts at */ + __u8 *bitmap; /* a 1 bit allows the operations in flags, 0 denies */ +}; + +#define KVM_MSR_FILTER_MAX_RANGES 16 +struct kvm_msr_filter { +#define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0) +#define KVM_MSR_FILTER_DEFAULT_DENY (1 << 0) + __u32 flags; + struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES]; +}; struct kvm_cpuid_entry { __u32 function; diff --git a/tools/arch/x86/include/uapi/asm/svm.h b/tools/arch/x86/include/uapi/asm/svm.h index 2e8a30f06c74..f1d8307454e0 100644 --- a/tools/arch/x86/include/uapi/asm/svm.h +++ b/tools/arch/x86/include/uapi/asm/svm.h @@ -29,6 +29,7 @@ #define SVM_EXIT_WRITE_DR6 0x036 #define SVM_EXIT_WRITE_DR7 0x037 #define SVM_EXIT_EXCP_BASE 0x040 +#define SVM_EXIT_LAST_EXCP 0x05f #define SVM_EXIT_INTR 0x060 #define SVM_EXIT_NMI 0x061 #define SVM_EXIT_SMI 0x062 @@ -76,10 +77,21 @@ #define SVM_EXIT_MWAIT_COND 0x08c #define SVM_EXIT_XSETBV 0x08d #define SVM_EXIT_RDPRU 0x08e +#define SVM_EXIT_INVPCID 0x0a2 #define SVM_EXIT_NPF 0x400 #define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402 +/* SEV-ES software-defined VMGEXIT events */ +#define SVM_VMGEXIT_MMIO_READ 0x80000001 +#define SVM_VMGEXIT_MMIO_WRITE 0x80000002 +#define SVM_VMGEXIT_NMI_COMPLETE 0x80000003 +#define SVM_VMGEXIT_AP_HLT_LOOP 0x80000004 +#define SVM_VMGEXIT_AP_JUMP_TABLE 0x80000005 +#define SVM_VMGEXIT_SET_AP_JUMP_TABLE 0 +#define SVM_VMGEXIT_GET_AP_JUMP_TABLE 1 +#define SVM_VMGEXIT_UNSUPPORTED_EVENT 0x8000ffff + #define SVM_EXIT_ERR -1 #define SVM_EXIT_REASONS \ @@ -171,6 +183,7 @@ { SVM_EXIT_MONITOR, "monitor" }, \ { SVM_EXIT_MWAIT, "mwait" }, \ { SVM_EXIT_XSETBV, "xsetbv" }, \ + { SVM_EXIT_INVPCID, "invpcid" }, \ { SVM_EXIT_NPF, "npf" }, \ { SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \ { SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \ diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c index a04e81321c66..464873883396 100644 --- a/tools/build/feature/test-all.c +++ b/tools/build/feature/test-all.c @@ -185,7 +185,6 @@ int main(int argc, char *argv[]) main_test_libperl(); main_test_hello(); 
main_test_libelf(); - main_test_libelf_mmap(); main_test_get_current_dir_name(); main_test_gettid(); main_test_glibc(); diff --git a/tools/include/linux/compiler-gcc.h b/tools/include/linux/compiler-gcc.h index b9d4322e1e65..95c072b70d0e 100644 --- a/tools/include/linux/compiler-gcc.h +++ b/tools/include/linux/compiler-gcc.h @@ -27,18 +27,6 @@ #define __pure __attribute__((pure)) #endif #define noinline __attribute__((noinline)) -#ifdef __has_attribute -#if __has_attribute(disable_tail_calls) -#define __no_tail_call __attribute__((disable_tail_calls)) -#endif -#endif -#ifndef __no_tail_call -#if GCC_VERSION > 40201 -#define __no_tail_call __attribute__((optimize("no-optimize-sibling-calls"))) -#else -#define __no_tail_call -#endif -#endif #ifndef __packed #define __packed __attribute__((packed)) #endif diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 2b3f7353e891..d22a974372c0 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -47,9 +47,6 @@ #ifndef noinline #define noinline #endif -#ifndef __no_tail_call -#define __no_tail_call -#endif /* Are two types/vars the same type (ignoring qualifiers)? */ #ifndef __same_type diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h index f2b5d72a46c2..2056318988f7 100644 --- a/tools/include/uapi/asm-generic/unistd.h +++ b/tools/include/uapi/asm-generic/unistd.h @@ -857,9 +857,11 @@ __SYSCALL(__NR_openat2, sys_openat2) __SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd) #define __NR_faccessat2 439 __SYSCALL(__NR_faccessat2, sys_faccessat2) +#define __NR_process_madvise 440 +__SYSCALL(__NR_process_madvise, sys_process_madvise) #undef __NR_syscalls -#define __NR_syscalls 440 +#define __NR_syscalls 441 /* * 32 bit systems traditionally used different diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h index 00546062e023..fa1f3d62f9a6 100644 --- a/tools/include/uapi/drm/i915_drm.h +++ b/tools/include/uapi/drm/i915_drm.h @@ -619,6 +619,12 @@ typedef struct drm_i915_irq_wait { */ #define I915_PARAM_PERF_REVISION 54 +/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of + * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See + * I915_EXEC_USE_EXTENSIONS. + */ +#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55 + /* Must be kept compact -- no holes and well documented */ typedef struct drm_i915_getparam { @@ -1046,6 +1052,38 @@ struct drm_i915_gem_exec_fence { __u32 flags; }; +/** + * See drm_i915_gem_execbuffer_ext_timeline_fences. + */ +#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0 + +/** + * This structure describes an array of drm_syncobj and associated points for + * timeline variants of drm_syncobj. It is invalid to append this structure to + * the execbuf if I915_EXEC_FENCE_ARRAY is set. + */ +struct drm_i915_gem_execbuffer_ext_timeline_fences { + struct i915_user_extension base; + + /** + * Number of element in the handles_ptr & value_ptr arrays. + */ + __u64 fence_count; + + /** + * Pointer to an array of struct drm_i915_gem_exec_fence of length + * fence_count. + */ + __u64 handles_ptr; + + /** + * Pointer to an array of u64 values of length fence_count. Values + * must be 0 for a binary drm_syncobj. A Value of 0 for a timeline + * drm_syncobj is invalid as it turns a drm_syncobj into a binary one. 
+ */ + __u64 values_ptr; +}; + struct drm_i915_gem_execbuffer2 { /** * List of gem_exec_object2 structs @@ -1062,8 +1100,14 @@ struct drm_i915_gem_execbuffer2 { __u32 num_cliprects; /** * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY - * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a - * struct drm_i915_gem_exec_fence *fences. + * & I915_EXEC_USE_EXTENSIONS are not set. + * + * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array + * of struct drm_i915_gem_exec_fence and num_cliprects is the length + * of the array. + * + * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a + * single struct i915_user_extension and num_cliprects is 0. */ __u64 cliprects_ptr; #define I915_EXEC_RING_MASK (0x3f) @@ -1181,7 +1225,16 @@ struct drm_i915_gem_execbuffer2 { */ #define I915_EXEC_FENCE_SUBMIT (1 << 20) -#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SUBMIT << 1)) +/* + * Setting I915_EXEC_USE_EXTENSIONS implies that + * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked + * list of i915_user_extension. Each i915_user_extension node is the base of a + * larger structure. The list of supported structures are listed in the + * drm_i915_gem_execbuffer_ext enum. + */ +#define I915_EXEC_USE_EXTENSIONS (1 << 21) + +#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1)) #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff) #define i915_execbuffer2_set_context_id(eb2, context) \ diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h index 7875709ccfeb..e5de60336938 100644 --- a/tools/include/uapi/linux/fscrypt.h +++ b/tools/include/uapi/linux/fscrypt.h @@ -45,7 +45,6 @@ struct fscrypt_policy_v1 { __u8 flags; __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; }; -#define fscrypt_policy fscrypt_policy_v1 /* * Process-subscribed "logon" key description prefix and payload format. @@ -156,9 +155,9 @@ struct fscrypt_get_key_status_arg { __u32 __out_reserved[13]; }; -#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy) +#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy_v1) #define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16]) -#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy) +#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy_v1) #define FS_IOC_GET_ENCRYPTION_POLICY_EX _IOWR('f', 22, __u8[9]) /* size + version */ #define FS_IOC_ADD_ENCRYPTION_KEY _IOWR('f', 23, struct fscrypt_add_key_arg) #define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg) @@ -170,6 +169,7 @@ struct fscrypt_get_key_status_arg { /* old names; don't add anything new here! */ #ifndef __KERNEL__ +#define fscrypt_policy fscrypt_policy_v1 #define FS_KEY_DESCRIPTOR_SIZE FSCRYPT_KEY_DESCRIPTOR_SIZE #define FS_POLICY_FLAGS_PAD_4 FSCRYPT_POLICY_FLAGS_PAD_4 #define FS_POLICY_FLAGS_PAD_8 FSCRYPT_POLICY_FLAGS_PAD_8 diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 7d8eced6f459..ca41220b40b8 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -248,6 +248,8 @@ struct kvm_hyperv_exit { #define KVM_EXIT_IOAPIC_EOI 26 #define KVM_EXIT_HYPERV 27 #define KVM_EXIT_ARM_NISV 28 +#define KVM_EXIT_X86_RDMSR 29 +#define KVM_EXIT_X86_WRMSR 30 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. 
*/ @@ -413,6 +415,17 @@ struct kvm_run { __u64 esr_iss; __u64 fault_ipa; } arm_nisv; + /* KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR */ + struct { + __u8 error; /* user -> kernel */ + __u8 pad[7]; +#define KVM_MSR_EXIT_REASON_INVAL (1 << 0) +#define KVM_MSR_EXIT_REASON_UNKNOWN (1 << 1) +#define KVM_MSR_EXIT_REASON_FILTER (1 << 2) + __u32 reason; /* kernel -> user */ + __u32 index; /* kernel -> user */ + __u64 data; /* kernel <-> user */ + } msr; /* Fix the size of the union. */ char padding[256]; }; @@ -1037,6 +1050,9 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_SMALLER_MAXPHYADDR 185 #define KVM_CAP_S390_DIAG318 186 #define KVM_CAP_STEAL_TIME 187 +#define KVM_CAP_X86_USER_SPACE_MSR 188 +#define KVM_CAP_X86_MSR_FILTER 189 +#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190 #ifdef KVM_CAP_IRQ_ROUTING @@ -1538,6 +1554,9 @@ struct kvm_pv_cmd { /* Available with KVM_CAP_S390_PROTECTED */ #define KVM_S390_PV_COMMAND _IOWR(KVMIO, 0xc5, struct kvm_pv_cmd) +/* Available with KVM_CAP_X86_MSR_FILTER */ +#define KVM_X86_SET_MSR_FILTER _IOW(KVMIO, 0xc6, struct kvm_msr_filter) + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ diff --git a/tools/include/uapi/linux/mman.h b/tools/include/uapi/linux/mman.h index 923cc162609c..f55bc680b5b0 100644 --- a/tools/include/uapi/linux/mman.h +++ b/tools/include/uapi/linux/mman.h @@ -27,6 +27,7 @@ #define MAP_HUGE_SHIFT HUGETLB_FLAG_ENCODE_SHIFT #define MAP_HUGE_MASK HUGETLB_FLAG_ENCODE_MASK +#define MAP_HUGE_16KB HUGETLB_FLAG_ENCODE_16KB #define MAP_HUGE_64KB HUGETLB_FLAG_ENCODE_64KB #define MAP_HUGE_512KB HUGETLB_FLAG_ENCODE_512KB #define MAP_HUGE_1MB HUGETLB_FLAG_ENCODE_1MB diff --git a/tools/include/uapi/linux/mount.h b/tools/include/uapi/linux/mount.h index 96a0240f23fe..dd8306ea336c 100644 --- a/tools/include/uapi/linux/mount.h +++ b/tools/include/uapi/linux/mount.h @@ -16,6 +16,7 @@ #define MS_REMOUNT 32 /* Alter flags of a mounted FS */ #define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */ #define MS_DIRSYNC 128 /* Directory modifications are synchronous */ +#define MS_NOSYMFOLLOW 256 /* Do not follow symlinks */ #define MS_NOATIME 1024 /* Do not update access times. 
*/ #define MS_NODIRATIME 2048 /* Do not update directory access times */ #define MS_BIND 4096 diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 3e5dcdd48a49..b95d3c485d27 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1196,7 +1196,7 @@ union perf_mem_data_src { #define PERF_MEM_SNOOPX_FWD 0x01 /* forward */ /* 1 free */ -#define PERF_MEM_SNOOPX_SHIFT 38 +#define PERF_MEM_SNOOPX_SHIFT 38 /* locked instruction */ #define PERF_MEM_LOCK_NA 0x01 /* not available */ diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index 07b4f8131e36..7f0827705c9a 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -233,6 +233,15 @@ struct prctl_mm_map { #define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) +/* MTE tag check fault modes */ +# define PR_MTE_TCF_SHIFT 1 +# define PR_MTE_TCF_NONE (0UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_SYNC (1UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_ASYNC (2UL << PR_MTE_TCF_SHIFT) +# define PR_MTE_TCF_MASK (3UL << PR_MTE_TCF_SHIFT) +/* MTE tag inclusion mask */ +# define PR_MTE_TAG_SHIFT 3 +# define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 diff --git a/tools/include/uapi/linux/vhost.h b/tools/include/uapi/linux/vhost.h index 75232185324a..c998860d7bbc 100644 --- a/tools/include/uapi/linux/vhost.h +++ b/tools/include/uapi/linux/vhost.h @@ -146,4 +146,8 @@ /* Set event fd for config interrupt*/ #define VHOST_VDPA_SET_CONFIG_CALL _IOW(VHOST_VIRTIO, 0x77, int) + +/* Get the valid iova range */ +#define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ + struct vhost_vdpa_iova_range) #endif diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 6890fc4b063a..ce8516e4de34 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -749,6 +749,7 @@ else PERL_EMBED_LIBADD = $(call grep-libs,$(PERL_EMBED_LDOPTS)) PERL_EMBED_CCOPTS = $(shell perl -MExtUtils::Embed -e ccopts 2>/dev/null) PERL_EMBED_CCOPTS := $(filter-out -specs=%,$(PERL_EMBED_CCOPTS)) + PERL_EMBED_CCOPTS := $(filter-out -flto=auto -ffat-lto-objects, $(PERL_EMBED_CCOPTS)) PERL_EMBED_LDOPTS := $(filter-out -specs=%,$(PERL_EMBED_LDOPTS)) FLAGS_PERL_EMBED=$(PERL_EMBED_CCOPTS) $(PERL_EMBED_LDOPTS) diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 347809649ba2..379819244b91 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -361,12 +361,13 @@ 437 common openat2 sys_openat2 438 common pidfd_getfd sys_pidfd_getfd 439 common faccessat2 sys_faccessat2 +440 common process_madvise sys_process_madvise # -# x32-specific system call numbers start at 512 to avoid cache impact -# for native 64-bit operation. The __x32_compat_sys stubs are created -# on-the-fly for compat_sys_*() compatibility system calls if X86_X32 -# is defined. +# Due to a historical design error, certain syscalls are numbered differently +# in x32 as compared to native x86_64. These syscalls have numbers 512-547. +# Do not add new syscalls to this range. Numbers 548 and above are available +# for non-x32 use. 
# 512 x32 rt_sigaction compat_sys_rt_sigaction 513 x32 rt_sigreturn compat_sys_x32_rt_sigreturn @@ -404,3 +405,5 @@ 545 x32 execveat compat_sys_execveat 546 x32 preadv2 compat_sys_preadv64v2 547 x32 pwritev2 compat_sys_pwritev64v2 +# This is the end of the legacy x32 range. Numbers 548 and above are +# not special and are not to be used for x32-specific syscalls. diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index 44a75f234db1..de80534473af 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c @@ -4639,9 +4639,9 @@ do_concat: err = 0; if (lists[0]) { - struct option o = OPT_CALLBACK('e', "event", &trace->evlist, "event", - "event selector. use 'perf list' to list available events", - parse_events_option); + struct option o = { + .value = &trace->evlist, + }; err = parse_events_option(&o, lists[0], 0); } out: @@ -4655,9 +4655,12 @@ static int trace__parse_cgroups(const struct option *opt, const char *str, int u { struct trace *trace = opt->value; - if (!list_empty(&trace->evlist->core.entries)) - return parse_cgroups(opt, str, unset); - + if (!list_empty(&trace->evlist->core.entries)) { + struct option o = { + .value = &trace->evlist, + }; + return parse_cgroups(&o, str, unset); + } trace->cgroup = evlist__findnew_cgroup(trace->evlist, str); return 0; diff --git a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json index de3193552277..00f4fcffa815 100644 --- a/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/cascadelakex/clx-metrics.json @@ -329,7 +329,7 @@ }, { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", - "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "MetricExpr": "( ( ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) * 1048576 ) / 1000000000 ) / duration_time", "MetricGroup": "Memory_BW;SoC", "MetricName": "DRAM_BW_Use" }, diff --git a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json index f31794d3b926..0dd8b13b5cfb 100644 --- a/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json +++ b/tools/perf/pmu-events/arch/x86/skylakex/skx-metrics.json @@ -323,7 +323,7 @@ }, { "BriefDescription": "Average external Memory Bandwidth Use for reads and writes [GB / sec]", - "MetricExpr": "( 64 * ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) / 1000000000 ) / duration_time", + "MetricExpr": "( ( ( uncore_imc@cas_count_read@ + uncore_imc@cas_count_write@ ) * 1048576 ) / 1000000000 ) / duration_time", "MetricGroup": "Memory_BW;SoC", "MetricName": "DRAM_BW_Use" }, diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c index 2491d167bf76..83638097c3bc 100644 --- a/tools/perf/tests/dwarf-unwind.c +++ b/tools/perf/tests/dwarf-unwind.c @@ -95,7 +95,7 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) return strcmp((const char *) symbol, funcs[idx]); } -__no_tail_call noinline int test_dwarf_unwind__thread(struct thread *thread) +noinline int test_dwarf_unwind__thread(struct thread *thread) { struct perf_sample sample; unsigned long cnt = 0; @@ -126,7 +126,7 @@ __no_tail_call noinline int test_dwarf_unwind__thread(struct thread *thread) static int global_unwind_retval = -INT_MAX; -__no_tail_call noinline int test_dwarf_unwind__compare(void *p1, void *p2) +noinline int test_dwarf_unwind__compare(void 
*p1, void *p2) { /* Any possible value should be 'thread' */ struct thread *thread = *(struct thread **)p1; @@ -145,7 +145,7 @@ __no_tail_call noinline int test_dwarf_unwind__compare(void *p1, void *p2) return p1 - p2; } -__no_tail_call noinline int test_dwarf_unwind__krava_3(struct thread *thread) +noinline int test_dwarf_unwind__krava_3(struct thread *thread) { struct thread *array[2] = {thread, thread}; void *fp = &bsearch; @@ -164,12 +164,12 @@ __no_tail_call noinline int test_dwarf_unwind__krava_3(struct thread *thread) return global_unwind_retval; } -__no_tail_call noinline int test_dwarf_unwind__krava_2(struct thread *thread) +noinline int test_dwarf_unwind__krava_2(struct thread *thread) { return test_dwarf_unwind__krava_3(thread); } -__no_tail_call noinline int test_dwarf_unwind__krava_1(struct thread *thread) +noinline int test_dwarf_unwind__krava_1(struct thread *thread) { return test_dwarf_unwind__krava_2(thread); } diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index a07626f07208..b0e1880cf992 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2963,7 +2963,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events, struct popup_action actions[MAX_OPTIONS]; int nr_options = 0; int key = -1; - char buf[64]; + char buf[128]; int delay_secs = hbt ? hbt->refresh : 0; #define HIST_BROWSER_HELP_COMMON \ diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 8763772f1095..6b410c3d52dc 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -102,6 +102,8 @@ int build_id__sprintf(const struct build_id *build_id, char *bf) const u8 *raw = build_id->data; size_t i; + bf[0] = 0x0; + for (i = 0; i < build_id->size; ++i) { sprintf(bid, "%02x", *raw); ++raw; diff --git a/tools/perf/util/hashmap.c b/tools/perf/util/hashmap.c index a405dad068f5..3c20b126d60d 100644 --- a/tools/perf/util/hashmap.c +++ b/tools/perf/util/hashmap.c @@ -15,6 +15,9 @@ /* make sure libbpf doesn't use kernel-only integer typedefs */ #pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64 +/* prevent accidental re-addition of reallocarray() */ +#pragma GCC poison reallocarray + /* start with 4 buckets */ #define HASHMAP_MIN_CAP_BITS 2 diff --git a/tools/perf/util/hashmap.h b/tools/perf/util/hashmap.h index e0af36b0e5d8..d9b385fe808c 100644 --- a/tools/perf/util/hashmap.h +++ b/tools/perf/util/hashmap.h @@ -25,6 +25,18 @@ static inline size_t hash_bits(size_t h, int bits) #endif } +/* generic C-string hashing function */ +static inline size_t str_hash(const char *s) +{ + size_t h = 0; + + while (*s) { + h = h * 31 + *s; + s++; + } + return h; +} + typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx); diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 7d4194ffc5b0..15385ea00190 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -786,11 +786,20 @@ static int machine__process_ksymbol_unregister(struct machine *machine, union perf_event *event, struct perf_sample *sample __maybe_unused) { + struct symbol *sym; struct map *map; map = maps__find(&machine->kmaps, event->ksymbol.addr); - if (map) + if (!map) + return 0; + + if (map != machine->vmlinux_map) maps__remove(&machine->kmaps, map); + else { + sym = dso__find_symbol(map->dso, map->map_ip(map, map->start)); + if (sym) + dso__delete_symbol(map->dso, sym); + } return 0; } diff --git 
a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 7cbd024e3e63..c83c2c6564e0 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -1592,7 +1592,6 @@ static void _free_command_line(wchar_t **command_line, int num) static int python_start_script(const char *script, int argc, const char **argv) { struct tables *tables = &tables_global; - PyMODINIT_FUNC (*initfunc)(void); #if PY_MAJOR_VERSION < 3 const char **command_line; #else @@ -1607,20 +1606,18 @@ static int python_start_script(const char *script, int argc, const char **argv) FILE *fp; #if PY_MAJOR_VERSION < 3 - initfunc = initperf_trace_context; command_line = malloc((argc + 1) * sizeof(const char *)); command_line[0] = script; for (i = 1; i < argc + 1; i++) command_line[i] = argv[i - 1]; + PyImport_AppendInittab(name, initperf_trace_context); #else - initfunc = PyInit_perf_trace_context; command_line = malloc((argc + 1) * sizeof(wchar_t *)); command_line[0] = Py_DecodeLocale(script, NULL); for (i = 1; i < argc + 1; i++) command_line[i] = Py_DecodeLocale(argv[i - 1], NULL); + PyImport_AppendInittab(name, PyInit_perf_trace_context); #endif - - PyImport_AppendInittab(name, initfunc); Py_Initialize(); #if PY_MAJOR_VERSION < 3 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 7a5f03764702..098080287c68 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -595,6 +595,7 @@ static void perf_event__mmap2_swap(union perf_event *event, event->mmap2.maj = bswap_32(event->mmap2.maj); event->mmap2.min = bswap_32(event->mmap2.min); event->mmap2.ino = bswap_64(event->mmap2.ino); + event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation); if (sample_id_all) { void *data = &event->mmap2.filename; @@ -710,6 +711,18 @@ static void perf_event__namespaces_swap(union perf_event *event, swap_sample_id_all(event, &event->namespaces.link_info[i]); } +static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all) +{ + event->cgroup.id = bswap_64(event->cgroup.id); + + if (sample_id_all) { + void *data = &event->cgroup.path; + + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); + swap_sample_id_all(event, data); + } +} + static u8 revbyte(u8 b) { int rev = (b >> 4) | ((b & 0xf) << 4); @@ -952,6 +965,7 @@ static perf_event__swap_op perf_event__swap_ops[] = { [PERF_RECORD_SWITCH] = perf_event__switch_swap, [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, + [PERF_RECORD_CGROUP] = perf_event__cgroup_swap, [PERF_RECORD_TEXT_POKE] = perf_event__text_poke_swap, [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 6138866665df..0d14abdf3d72 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -515,6 +515,13 @@ void dso__insert_symbol(struct dso *dso, struct symbol *sym) } } +void dso__delete_symbol(struct dso *dso, struct symbol *sym) +{ + rb_erase_cached(&sym->rb_node, &dso->symbols); + symbol__delete(sym); + dso__reset_find_symbol_cache(dso); +} + struct symbol *dso__find_symbol(struct dso *dso, u64 addr) { if (dso->last_find_result.addr != addr || dso->last_find_result.symbol == NULL) { diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index f4801c488def..954d6a049ee2 100644 --- a/tools/perf/util/symbol.h +++ 
b/tools/perf/util/symbol.h @@ -131,6 +131,8 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, struct map *map); void dso__insert_symbol(struct dso *dso, struct symbol *sym); +void dso__delete_symbol(struct dso *dso, + struct symbol *sym); struct symbol *dso__find_symbol(struct dso *dso, u64 addr); struct symbol *dso__find_symbol_by_name(struct dso *dso, const char *name); diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py index 8019e3dd4c32..84a1af2581f5 100644 --- a/tools/testing/kunit/kunit_parser.py +++ b/tools/testing/kunit/kunit_parser.py @@ -66,7 +66,6 @@ def isolate_kunit_output(kernel_output): def raw_output(kernel_output): for line in kernel_output: print(line) - yield line DIVIDER = '=' * 60 @@ -242,7 +241,7 @@ def parse_test_suite(lines: List[str], expected_suite_index: int) -> TestSuite: return None test_suite.name = name expected_test_case_num = parse_subtest_plan(lines) - if not expected_test_case_num: + if expected_test_case_num is None: return None while expected_test_case_num > 0: test_case = parse_test_case(lines) diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py index 99c3c5671ea4..0b60855fb819 100755 --- a/tools/testing/kunit/kunit_tool_test.py +++ b/tools/testing/kunit/kunit_tool_test.py @@ -179,7 +179,7 @@ class KUnitParserTest(unittest.TestCase): print_mock = mock.patch('builtins.print').start() result = kunit_parser.parse_run_tests( kunit_parser.isolate_kunit_output(file.readlines())) - print_mock.assert_any_call(StrContains("no kunit output detected")) + print_mock.assert_any_call(StrContains('no tests run!')) print_mock.stop() file.close() @@ -198,39 +198,57 @@ class KUnitParserTest(unittest.TestCase): 'test_data/test_config_printk_time.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual( + kunit_parser.TestStatus.SUCCESS, + result.status) + self.assertEqual('kunit-resource-test', result.suites[0].name) def test_ignores_multiple_prefixes(self): prefix_log = get_absolute_path( 'test_data/test_multiple_prefixes.log') with open(prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual( + kunit_parser.TestStatus.SUCCESS, + result.status) + self.assertEqual('kunit-resource-test', result.suites[0].name) def test_prefix_mixed_kernel_output(self): mixed_prefix_log = get_absolute_path( 'test_data/test_interrupted_tap_output.log') with open(mixed_prefix_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual( + kunit_parser.TestStatus.SUCCESS, + result.status) + self.assertEqual('kunit-resource-test', result.suites[0].name) def test_prefix_poundsign(self): pound_log = get_absolute_path('test_data/test_pound_sign.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual( + kunit_parser.TestStatus.SUCCESS, + result.status) + self.assertEqual('kunit-resource-test', result.suites[0].name) def test_kernel_panic_end(self): panic_log = get_absolute_path('test_data/test_kernel_panic_interrupt.log') with open(panic_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + 
self.assertEqual( + kunit_parser.TestStatus.TEST_CRASHED, + result.status) + self.assertEqual('kunit-resource-test', result.suites[0].name) def test_pound_no_prefix(self): pound_log = get_absolute_path('test_data/test_pound_no_prefix.log') with open(pound_log) as file: result = kunit_parser.parse_run_tests(file.readlines()) - self.assertEqual('kunit-resource-test', result.suites[0].name) + self.assertEqual( + kunit_parser.TestStatus.SUCCESS, + result.status) + self.assertEqual('kunit-resource-test', result.suites[0].name) class KUnitJsonTest(unittest.TestCase): diff --git a/tools/testing/kunit/test_data/test_config_printk_time.log b/tools/testing/kunit/test_data/test_config_printk_time.log index c02ca773946d..6bdb57f76eac 100644 --- a/tools/testing/kunit/test_data/test_config_printk_time.log +++ b/tools/testing/kunit/test_data/test_config_printk_time.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources @@ -28,4 +29,4 @@ [ 0.060000] Stack: [ 0.060000] 602086f8 601bc260 705c0000 705c0000 [ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_interrupted_tap_output.log b/tools/testing/kunit/test_data/test_interrupted_tap_output.log index 5c73fb3a1c6f..1fb677728abe 100644 --- a/tools/testing/kunit/test_data/test_interrupted_tap_output.log +++ b/tools/testing/kunit/test_data/test_interrupted_tap_output.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources @@ -34,4 +35,4 @@ [ 0.060000] Stack: [ 0.060000] 602086f8 601bc260 705c0000 705c0000 [ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log index c045eee75f27..a014ffe9725e 100644 --- a/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log +++ b/tools/testing/kunit/test_data/test_kernel_panic_interrupt.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources @@ -22,4 +23,4 @@ [ 0.060000] Stack: [ 0.060000] 602086f8 601bc260 705c0000 705c0000 [ 0.060000] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_multiple_prefixes.log b/tools/testing/kunit/test_data/test_multiple_prefixes.log index bc48407dcc36..0ad78481a0b4 100644 --- a/tools/testing/kunit/test_data/test_multiple_prefixes.log +++ b/tools/testing/kunit/test_data/test_multiple_prefixes.log @@ -1,6 +1,7 @@ [ 0.060000][ T1] printk: console [mc-1] enabled [ 0.060000][ T1] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000][ T1] TAP version 14 +[ 0.060000][ T1] 1..3 [ 0.060000][ T1] # Subtest: kunit-resource-test [ 0.060000][ T1] 1..5 [ 0.060000][ T1] ok 1 - kunit_resource_test_init_resources @@ -28,4 +29,4 @@ [ 0.060000][ T1] Stack: [ 0.060000][ T1] 602086f8 601bc260 705c0000 705c0000 [ 0.060000][ T1] 602086f8 6005fcec 705c0000 6002c6ab -[ 0.060000][ T1] 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file +[ 0.060000][ T1] 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_pound_no_prefix.log b/tools/testing/kunit/test_data/test_pound_no_prefix.log index 2ceb360be7d5..dc4cf09a96d0 100644 --- a/tools/testing/kunit/test_data/test_pound_no_prefix.log +++ b/tools/testing/kunit/test_data/test_pound_no_prefix.log @@ -1,6 +1,7 @@ printk: console [mc-1] enabled random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 TAP version 14 + 1..3 # Subtest: kunit-resource-test 1..5 ok 1 - kunit_resource_test_init_resources @@ -30,4 +31,4 @@ Stack: 602086f8 601bc260 705c0000 705c0000 602086f8 6005fcec 705c0000 6002c6ab - 6005fcec 601bc260 705c0000 3000000010
\ No newline at end of file + 6005fcec 601bc260 705c0000 3000000010 diff --git a/tools/testing/kunit/test_data/test_pound_sign.log b/tools/testing/kunit/test_data/test_pound_sign.log index 28ffa5ba03bf..3f358e3a7ba0 100644 --- a/tools/testing/kunit/test_data/test_pound_sign.log +++ b/tools/testing/kunit/test_data/test_pound_sign.log @@ -1,6 +1,7 @@ [ 0.060000] printk: console [mc-1] enabled [ 0.060000] random: get_random_bytes called from init_oops_id+0x35/0x40 with crng_init=0 [ 0.060000] TAP version 14 +[ 0.060000] 1..3 [ 0.060000] # Subtest: kunit-resource-test [ 0.060000] 1..5 [ 0.060000] ok 1 - kunit_resource_test_init_resources diff --git a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c index 55bd387ce7ec..52d3f0364bda 100644 --- a/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c +++ b/tools/testing/selftests/clone3/clone3_cap_checkpoint_restore.c @@ -145,7 +145,7 @@ TEST(clone3_cap_checkpoint_restore) test_clone3_supported(); EXPECT_EQ(getuid(), 0) - XFAIL(return, "Skipping all tests as non-root\n"); + SKIP(return, "Skipping all tests as non-root"); memset(&set_tid, 0, sizeof(set_tid)); diff --git a/tools/testing/selftests/core/close_range_test.c b/tools/testing/selftests/core/close_range_test.c index c99b98b0d461..575b391ddc78 100644 --- a/tools/testing/selftests/core/close_range_test.c +++ b/tools/testing/selftests/core/close_range_test.c @@ -44,7 +44,7 @@ TEST(close_range) fd = open("/dev/null", O_RDONLY | O_CLOEXEC); ASSERT_GE(fd, 0) { if (errno == ENOENT) - XFAIL(return, "Skipping test since /dev/null does not exist"); + SKIP(return, "Skipping test since /dev/null does not exist"); } open_fds[i] = fd; @@ -52,7 +52,7 @@ TEST(close_range) EXPECT_EQ(-1, sys_close_range(open_fds[0], open_fds[100], -1)) { if (errno == ENOSYS) - XFAIL(return, "close_range() syscall not supported"); + SKIP(return, "close_range() syscall not supported"); } EXPECT_EQ(0, sys_close_range(open_fds[0], open_fds[50], 0)); @@ -108,7 +108,7 @@ TEST(close_range_unshare) fd = open("/dev/null", O_RDONLY | O_CLOEXEC); ASSERT_GE(fd, 0) { if (errno == ENOENT) - XFAIL(return, "Skipping test since /dev/null does not exist"); + SKIP(return, "Skipping test since /dev/null does not exist"); } open_fds[i] = fd; @@ -197,7 +197,7 @@ TEST(close_range_unshare_capped) fd = open("/dev/null", O_RDONLY | O_CLOEXEC); ASSERT_GE(fd, 0) { if (errno == ENOENT) - XFAIL(return, "Skipping test since /dev/null does not exist"); + SKIP(return, "Skipping test since /dev/null does not exist"); } open_fds[i] = fd; diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c index 1d27f52c61e6..477cbb042f5b 100644 --- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c +++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c @@ -74,7 +74,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata) ret = mount(NULL, binderfs_mntpt, "binder", 0, 0); EXPECT_EQ(ret, 0) { if (errno == ENODEV) - XFAIL(goto out, "binderfs missing"); + SKIP(goto out, "binderfs missing"); TH_LOG("%s - Failed to mount binderfs", strerror(errno)); goto rmdir; } @@ -475,10 +475,10 @@ TEST(binderfs_stress) TEST(binderfs_test_privileged) { if (geteuid() != 0) - XFAIL(return, "Tests are not run as root. Skipping privileged tests"); + SKIP(return, "Tests are not run as root. 
Skipping privileged tests"); if (__do_binderfs_test(_metadata)) - XFAIL(return, "The Android binderfs filesystem is not available"); + SKIP(return, "The Android binderfs filesystem is not available"); } TEST(binderfs_test_unprivileged) @@ -511,7 +511,7 @@ TEST(binderfs_test_unprivileged) ret = wait_for_pid(pid); if (ret) { if (ret == 2) - XFAIL(return, "The Android binderfs filesystem is not available"); + SKIP(return, "The Android binderfs filesystem is not available"); ASSERT_EQ(ret, 0) { TH_LOG("wait_for_pid() failed"); } diff --git a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c index d979ff14775a..8f82f99f7748 100644 --- a/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c +++ b/tools/testing/selftests/filesystems/epoll/epoll_wakeup_test.c @@ -3282,4 +3282,99 @@ TEST(epoll60) close(ctx.epfd); } +struct epoll61_ctx { + int epfd; + int evfd; +}; + +static void *epoll61_write_eventfd(void *ctx_) +{ + struct epoll61_ctx *ctx = ctx_; + int64_t l = 1; + + usleep(10950); + write(ctx->evfd, &l, sizeof(l)); + return NULL; +} + +static void *epoll61_epoll_with_timeout(void *ctx_) +{ + struct epoll61_ctx *ctx = ctx_; + struct epoll_event events[1]; + int n; + + n = epoll_wait(ctx->epfd, events, 1, 11); + /* + * If epoll returned the eventfd, write on the eventfd to wake up the + * blocking poller. + */ + if (n == 1) { + int64_t l = 1; + + write(ctx->evfd, &l, sizeof(l)); + } + return NULL; +} + +static void *epoll61_blocking_epoll(void *ctx_) +{ + struct epoll61_ctx *ctx = ctx_; + struct epoll_event events[1]; + + epoll_wait(ctx->epfd, events, 1, -1); + return NULL; +} + +TEST(epoll61) +{ + struct epoll61_ctx ctx; + struct epoll_event ev; + int i, r; + + ctx.epfd = epoll_create1(0); + ASSERT_GE(ctx.epfd, 0); + ctx.evfd = eventfd(0, EFD_NONBLOCK); + ASSERT_GE(ctx.evfd, 0); + + ev.events = EPOLLIN | EPOLLET | EPOLLERR | EPOLLHUP; + ev.data.ptr = NULL; + r = epoll_ctl(ctx.epfd, EPOLL_CTL_ADD, ctx.evfd, &ev); + ASSERT_EQ(r, 0); + + /* + * We are testing a race. Repeat the test case 1000 times to make it + * more likely to fail in case of a bug. + */ + for (i = 0; i < 1000; i++) { + pthread_t threads[3]; + int n; + + /* + * Start 3 threads: + * Thread 1 sleeps for 10.9ms and writes to the evenfd. + * Thread 2 calls epoll with a timeout of 11ms. + * Thread 3 calls epoll with a timeout of -1. + * + * The eventfd write by Thread 1 should either wakeup Thread 2 + * or Thread 3. If it wakes up Thread 2, Thread 2 writes on the + * eventfd to wake up Thread 3. + * + * If no events are missed, all three threads should eventually + * be joinable. 
+ */ + ASSERT_EQ(pthread_create(&threads[0], NULL, + epoll61_write_eventfd, &ctx), 0); + ASSERT_EQ(pthread_create(&threads[1], NULL, + epoll61_epoll_with_timeout, &ctx), 0); + ASSERT_EQ(pthread_create(&threads[2], NULL, + epoll61_blocking_epoll, &ctx), 0); + + for (n = 0; n < ARRAY_SIZE(threads); ++n) + ASSERT_EQ(pthread_join(threads[n], NULL), 0); + } + + close(ctx.epfd); + close(ctx.evfd); +} + TEST_HARNESS_MAIN diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc index 3bcd4c3624ee..b4da41d126d5 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/add_remove_kprobe.tc @@ -6,7 +6,7 @@ echo 0 > events/enable echo > dynamic_events -PLACE=kernel_clone +PLACE=$FUNCTION_FORK echo "p:myevent1 $PLACE" >> dynamic_events echo "r:myevent2 $PLACE" >> dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc b/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc index 438961971b7e..3a0e2885fff5 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/clear_select_events.tc @@ -6,7 +6,7 @@ echo 0 > events/enable echo > dynamic_events -PLACE=kernel_clone +PLACE=$FUNCTION_FORK setup_events() { echo "p:myevent1 $PLACE" >> dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc b/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc index a8603bd23e0d..d3e138e8377f 100644 --- a/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc +++ b/tools/testing/selftests/ftrace/test.d/dynevent/generic_clear_event.tc @@ -6,7 +6,7 @@ echo 0 > events/enable echo > dynamic_events -PLACE=kernel_clone +PLACE=$FUNCTION_FORK setup_events() { echo "p:myevent1 $PLACE" >> dynamic_events diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc index acb17ce543d2..80541964b927 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-notrace-pid.tc @@ -39,7 +39,7 @@ do_test() { disable_tracing echo do_execve* > set_ftrace_filter - echo *do_fork >> set_ftrace_filter + echo $FUNCTION_FORK >> set_ftrace_filter echo $PID > set_ftrace_notrace_pid echo function > current_tracer diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc index 9f0a9687c773..2f7211254529 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc @@ -39,7 +39,7 @@ do_test() { disable_tracing echo do_execve* > set_ftrace_filter - echo *do_fork >> set_ftrace_filter + echo $FUNCTION_FORK >> set_ftrace_filter echo $PID > set_ftrace_pid echo function > current_tracer diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc index 98305d76bd04..191d116b7883 100644 --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-stacktrace.tc @@ -4,9 +4,9 @@ # requires: set_ftrace_filter # flags: instance -echo kernel_clone:stacktrace >> 
set_ftrace_filter +echo $FUNCTION_FORK:stacktrace >> set_ftrace_filter -grep -q "kernel_clone:stacktrace:unlimited" set_ftrace_filter +grep -q "$FUNCTION_FORK:stacktrace:unlimited" set_ftrace_filter (echo "forked"; sleep 1) diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions index c5dec55b7d95..a6fac927ee82 100644 --- a/tools/testing/selftests/ftrace/test.d/functions +++ b/tools/testing/selftests/ftrace/test.d/functions @@ -133,6 +133,13 @@ yield() { ping $LOCALHOST -c 1 || sleep .001 || usleep 1 || sleep 1 } +# The fork function in the kernel was renamed from "_do_fork" to +# "kernel_clone". As older tests should still work with older kernels +# as well as newer kernels, check which version of fork is used on this +# kernel so that the tests can use the fork function for the running kernel. +FUNCTION_FORK=`(if grep '\bkernel_clone\b' /proc/kallsyms > /dev/null; then + echo kernel_clone; else echo '_do_fork'; fi)` + # Since probe event command may include backslash, explicitly use printf "%s" # to NOT interpret it. ftrace_errlog_check() { # err-prefix command-with-error-pos-by-^ command-file diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc b/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc index 9737cd0578a7..2428a3ed78c9 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/add_and_remove.tc @@ -3,7 +3,7 @@ # description: Kprobe dynamic event - adding and removing # requires: kprobe_events -echo p:myevent kernel_clone > kprobe_events +echo p:myevent $FUNCTION_FORK > kprobe_events grep myevent kprobe_events test -d events/kprobes/myevent echo > kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc index f9a40af76888..010a8b1d6c1d 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/busy_check.tc @@ -3,7 +3,7 @@ # description: Kprobe dynamic event - busy event check # requires: kprobe_events -echo p:myevent kernel_clone > kprobe_events +echo p:myevent $FUNCTION_FORK > kprobe_events test -d events/kprobes/myevent echo 1 > events/kprobes/myevent/enable echo > kprobe_events && exit_fail # this must fail diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc index eb543d3cfe5f..a96a1dc7014f 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args.tc @@ -3,13 +3,13 @@ # description: Kprobe dynamic event with arguments # requires: kprobe_events -echo 'p:testprobe kernel_clone $stack $stack0 +0($stack)' > kprobe_events +echo "p:testprobe $FUNCTION_FORK \$stack \$stack0 +0(\$stack)" > kprobe_events grep testprobe kprobe_events | grep -q 'arg1=\$stack arg2=\$stack0 arg3=+0(\$stack)' test -d events/kprobes/testprobe echo 1 > events/kprobes/testprobe/enable ( echo "forked") -grep testprobe trace | grep 'kernel_clone' | \ +grep testprobe trace | grep "$FUNCTION_FORK" | \ grep -q 'arg1=0x[[:xdigit:]]* arg2=0x[[:xdigit:]]* arg3=0x[[:xdigit:]]*$' echo 0 > events/kprobes/testprobe/enable diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc index 4e5b63be51c9..a053ee2e7d77 100644 ---
a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_comm.tc @@ -5,7 +5,7 @@ grep -A1 "fetcharg:" README | grep -q "\$comm" || exit_unsupported # this is too old -echo 'p:testprobe kernel_clone comm=$comm ' > kprobe_events +echo "p:testprobe $FUNCTION_FORK comm=\$comm " > kprobe_events grep testprobe kprobe_events | grep -q 'comm=$comm' test -d events/kprobes/testprobe diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc index a1d70588ab21..84285a6f60b0 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc @@ -30,13 +30,13 @@ esac : "Test get argument (1)" echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):string" > kprobe_events echo 1 > events/kprobes/testprobe/enable -echo "p:test kernel_clone" >> kprobe_events +echo "p:test $FUNCTION_FORK" >> kprobe_events grep -qe "testprobe.* arg1=\"test\"" trace echo 0 > events/kprobes/testprobe/enable : "Test get argument (2)" echo "p:testprobe tracefs_create_dir arg1=+0(${ARG1}):string arg2=+0(${ARG1}):string" > kprobe_events echo 1 > events/kprobes/testprobe/enable -echo "p:test kernel_clone" >> kprobe_events +echo "p:test $FUNCTION_FORK" >> kprobe_events grep -qe "testprobe.* arg1=\"test\" arg2=\"test\"" trace diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc index bd25dd0ba0d0..717130ed4feb 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_symbol.tc @@ -14,12 +14,12 @@ elif ! 
grep "$SYMBOL\$" /proc/kallsyms; then fi : "Test get basic types symbol argument" -echo "p:testprobe_u kernel_clone arg1=@linux_proc_banner:u64 arg2=@linux_proc_banner:u32 arg3=@linux_proc_banner:u16 arg4=@linux_proc_banner:u8" > kprobe_events -echo "p:testprobe_s kernel_clone arg1=@linux_proc_banner:s64 arg2=@linux_proc_banner:s32 arg3=@linux_proc_banner:s16 arg4=@linux_proc_banner:s8" >> kprobe_events +echo "p:testprobe_u $FUNCTION_FORK arg1=@linux_proc_banner:u64 arg2=@linux_proc_banner:u32 arg3=@linux_proc_banner:u16 arg4=@linux_proc_banner:u8" > kprobe_events +echo "p:testprobe_s $FUNCTION_FORK arg1=@linux_proc_banner:s64 arg2=@linux_proc_banner:s32 arg3=@linux_proc_banner:s16 arg4=@linux_proc_banner:s8" >> kprobe_events if grep -q "x8/16/32/64" README; then - echo "p:testprobe_x kernel_clone arg1=@linux_proc_banner:x64 arg2=@linux_proc_banner:x32 arg3=@linux_proc_banner:x16 arg4=@linux_proc_banner:x8" >> kprobe_events + echo "p:testprobe_x $FUNCTION_FORK arg1=@linux_proc_banner:x64 arg2=@linux_proc_banner:x32 arg3=@linux_proc_banner:x16 arg4=@linux_proc_banner:x8" >> kprobe_events fi -echo "p:testprobe_bf kernel_clone arg1=@linux_proc_banner:b8@4/32" >> kprobe_events +echo "p:testprobe_bf $FUNCTION_FORK arg1=@linux_proc_banner:b8@4/32" >> kprobe_events echo 1 > events/kprobes/enable (echo "forked") echo 0 > events/kprobes/enable @@ -27,7 +27,7 @@ grep "testprobe_[usx]:.* arg1=.* arg2=.* arg3=.* arg4=.*" trace grep "testprobe_bf:.* arg1=.*" trace : "Test get string symbol argument" -echo "p:testprobe_str kernel_clone arg1=@linux_proc_banner:string" > kprobe_events +echo "p:testprobe_str $FUNCTION_FORK arg1=@linux_proc_banner:string" > kprobe_events echo 1 > events/kprobes/enable (echo "forked") echo 0 > events/kprobes/enable diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc index 91fcce1c241c..25b7708eb559 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc @@ -4,7 +4,7 @@ # requires: kprobe_events "x8/16/32/64":README gen_event() { # Bitsize - echo "p:testprobe kernel_clone \$stack0:s$1 \$stack0:u$1 \$stack0:x$1 \$stack0:b4@4/$1" + echo "p:testprobe $FUNCTION_FORK \$stack0:s$1 \$stack0:u$1 \$stack0:x$1 \$stack0:b4@4/$1" } check_types() { # s-type u-type x-type bf-type width diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc index a30a9c07290d..d25d01a19778 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_user.tc @@ -9,12 +9,16 @@ grep -A10 "fetcharg:" README | grep -q '\[u\]<offset>' || exit_unsupported :;: "user-memory access syntax and ustring working on user memory";: echo 'p:myevent do_sys_open path=+0($arg2):ustring path2=+u0($arg2):string' \ > kprobe_events +echo 'p:myevent2 do_sys_openat2 path=+0($arg2):ustring path2=+u0($arg2):string' \ + >> kprobe_events grep myevent kprobe_events | \ grep -q 'path=+0($arg2):ustring path2=+u0($arg2):string' echo 1 > events/kprobes/myevent/enable +echo 1 > events/kprobes/myevent2/enable echo > /dev/null echo 0 > events/kprobes/myevent/enable +echo 0 > events/kprobes/myevent2/enable grep myevent trace | grep -q 'path="/dev/null" path2="/dev/null"' diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc 
b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc index 0d179094191f..5556292601a4 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_ftrace.tc @@ -5,29 +5,29 @@ # prepare echo nop > current_tracer -echo kernel_clone > set_ftrace_filter -echo 'p:testprobe kernel_clone' > kprobe_events +echo $FUNCTION_FORK > set_ftrace_filter +echo "p:testprobe $FUNCTION_FORK" > kprobe_events # kprobe on / ftrace off echo 1 > events/kprobes/testprobe/enable echo > trace ( echo "forked") grep testprobe trace -! grep 'kernel_clone <-' trace +! grep "$FUNCTION_FORK <-" trace # kprobe on / ftrace on echo function > current_tracer echo > trace ( echo "forked") grep testprobe trace -grep 'kernel_clone <-' trace +grep "$FUNCTION_FORK <-" trace # kprobe off / ftrace on echo 0 > events/kprobes/testprobe/enable echo > trace ( echo "forked") ! grep testprobe trace -grep 'kernel_clone <-' trace +grep "$FUNCTION_FORK <-" trace # kprobe on / ftrace on echo 1 > events/kprobes/testprobe/enable @@ -35,11 +35,11 @@ echo function > current_tracer echo > trace ( echo "forked") grep testprobe trace -grep 'kernel_clone <-' trace +grep "$FUNCTION_FORK <-" trace # kprobe on / ftrace off echo nop > current_tracer echo > trace ( echo "forked") grep testprobe trace -! grep 'kernel_clone <-' trace +! grep "$FUNCTION_FORK <-" trace diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc index 45d90b6c763d..f0d5b7777ed7 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_multiprobe.tc @@ -4,7 +4,7 @@ # requires: kprobe_events "Create/append/":README # Choose 2 symbols for target -SYM1=kernel_clone +SYM1=$FUNCTION_FORK SYM2=do_exit EVENT_NAME=kprobes/testevent diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc index 1b5550ef8a9b..fa928b431555 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc @@ -86,15 +86,15 @@ esac # multiprobe errors if grep -q "Create/append/" README && grep -q "imm-value" README; then -echo 'p:kprobes/testevent kernel_clone' > kprobe_events +echo "p:kprobes/testevent $FUNCTION_FORK" > kprobe_events check_error '^r:kprobes/testevent do_exit' # DIFF_PROBE_TYPE # Explicitly use printf "%s" to not interpret \1 -printf "%s" 'p:kprobes/testevent kernel_clone abcd=\1' > kprobe_events -check_error 'p:kprobes/testevent kernel_clone ^bcd=\1' # DIFF_ARG_TYPE -check_error 'p:kprobes/testevent kernel_clone ^abcd=\1:u8' # DIFF_ARG_TYPE -check_error 'p:kprobes/testevent kernel_clone ^abcd=\"foo"' # DIFF_ARG_TYPE -check_error '^p:kprobes/testevent kernel_clone abcd=\1' # SAME_PROBE +printf "%s" "p:kprobes/testevent $FUNCTION_FORK abcd=\\1" > kprobe_events +check_error "p:kprobes/testevent $FUNCTION_FORK ^bcd=\\1" # DIFF_ARG_TYPE +check_error "p:kprobes/testevent $FUNCTION_FORK ^abcd=\\1:u8" # DIFF_ARG_TYPE +check_error "p:kprobes/testevent $FUNCTION_FORK ^abcd=\\\"foo\"" # DIFF_ARG_TYPE +check_error "^p:kprobes/testevent $FUNCTION_FORK abcd=\\1" # SAME_PROBE fi # %return suffix errors diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc index 7ae492c204a4..197cc2afd404 
100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/kretprobe_args.tc @@ -4,14 +4,14 @@ # requires: kprobe_events # Add new kretprobe event -echo 'r:testprobe2 kernel_clone $retval' > kprobe_events +echo "r:testprobe2 $FUNCTION_FORK \$retval" > kprobe_events grep testprobe2 kprobe_events | grep -q 'arg1=\$retval' test -d events/kprobes/testprobe2 echo 1 > events/kprobes/testprobe2/enable ( echo "forked") -cat trace | grep testprobe2 | grep -q '<- kernel_clone' +cat trace | grep testprobe2 | grep -q "<- $FUNCTION_FORK" echo 0 > events/kprobes/testprobe2/enable echo '-:testprobe2' >> kprobe_events diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc index c4093fc1a773..98166fa3eb91 100644 --- a/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc +++ b/tools/testing/selftests/ftrace/test.d/kprobe/profile.tc @@ -4,7 +4,7 @@ # requires: kprobe_events ! grep -q 'myevent' kprobe_profile -echo p:myevent kernel_clone > kprobe_events +echo "p:myevent $FUNCTION_FORK" > kprobe_events grep -q 'myevent[[:space:]]*0[[:space:]]*0$' kprobe_profile echo 1 > events/kprobes/myevent/enable ( echo "forked" ) diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index f19804df244c..edce85420d19 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -126,7 +126,7 @@ snprintf(_metadata->results->reason, \ sizeof(_metadata->results->reason), fmt, ##__VA_ARGS__); \ if (TH_LOG_ENABLED) { \ - fprintf(TH_LOG_STREAM, "# SKIP %s\n", \ + fprintf(TH_LOG_STREAM, "# SKIP %s\n", \ _metadata->results->reason); \ } \ _metadata->passed = 1; \ @@ -432,7 +432,7 @@ */ /** - * ASSERT_EQ(expected, seen) + * ASSERT_EQ() * * @expected: expected value * @seen: measured value @@ -443,7 +443,7 @@ __EXPECT(expected, #expected, seen, #seen, ==, 1) /** - * ASSERT_NE(expected, seen) + * ASSERT_NE() * * @expected: expected value * @seen: measured value @@ -454,7 +454,7 @@ __EXPECT(expected, #expected, seen, #seen, !=, 1) /** - * ASSERT_LT(expected, seen) + * ASSERT_LT() * * @expected: expected value * @seen: measured value @@ -465,7 +465,7 @@ __EXPECT(expected, #expected, seen, #seen, <, 1) /** - * ASSERT_LE(expected, seen) + * ASSERT_LE() * * @expected: expected value * @seen: measured value @@ -476,7 +476,7 @@ __EXPECT(expected, #expected, seen, #seen, <=, 1) /** - * ASSERT_GT(expected, seen) + * ASSERT_GT() * * @expected: expected value * @seen: measured value @@ -487,7 +487,7 @@ __EXPECT(expected, #expected, seen, #seen, >, 1) /** - * ASSERT_GE(expected, seen) + * ASSERT_GE() * * @expected: expected value * @seen: measured value @@ -498,7 +498,7 @@ __EXPECT(expected, #expected, seen, #seen, >=, 1) /** - * ASSERT_NULL(seen) + * ASSERT_NULL() * * @seen: measured value * @@ -508,7 +508,7 @@ __EXPECT(NULL, "NULL", seen, #seen, ==, 1) /** - * ASSERT_TRUE(seen) + * ASSERT_TRUE() * * @seen: measured value * @@ -518,7 +518,7 @@ __EXPECT(0, "0", seen, #seen, !=, 1) /** - * ASSERT_FALSE(seen) + * ASSERT_FALSE() * * @seen: measured value * @@ -528,7 +528,7 @@ __EXPECT(0, "0", seen, #seen, ==, 1) /** - * ASSERT_STREQ(expected, seen) + * ASSERT_STREQ() * * @expected: expected value * @seen: measured value @@ -539,7 +539,7 @@ __EXPECT_STR(expected, seen, ==, 1) /** - * ASSERT_STRNE(expected, seen) + * ASSERT_STRNE() * * @expected: expected value * @seen: measured value @@ -550,7 +550,7 @@ 
__EXPECT_STR(expected, seen, !=, 1) /** - * EXPECT_EQ(expected, seen) + * EXPECT_EQ() * * @expected: expected value * @seen: measured value @@ -561,7 +561,7 @@ __EXPECT(expected, #expected, seen, #seen, ==, 0) /** - * EXPECT_NE(expected, seen) + * EXPECT_NE() * * @expected: expected value * @seen: measured value @@ -572,7 +572,7 @@ __EXPECT(expected, #expected, seen, #seen, !=, 0) /** - * EXPECT_LT(expected, seen) + * EXPECT_LT() * * @expected: expected value * @seen: measured value @@ -583,7 +583,7 @@ __EXPECT(expected, #expected, seen, #seen, <, 0) /** - * EXPECT_LE(expected, seen) + * EXPECT_LE() * * @expected: expected value * @seen: measured value @@ -594,7 +594,7 @@ __EXPECT(expected, #expected, seen, #seen, <=, 0) /** - * EXPECT_GT(expected, seen) + * EXPECT_GT() * * @expected: expected value * @seen: measured value @@ -605,7 +605,7 @@ __EXPECT(expected, #expected, seen, #seen, >, 0) /** - * EXPECT_GE(expected, seen) + * EXPECT_GE() * * @expected: expected value * @seen: measured value @@ -616,7 +616,7 @@ __EXPECT(expected, #expected, seen, #seen, >=, 0) /** - * EXPECT_NULL(seen) + * EXPECT_NULL() * * @seen: measured value * @@ -626,7 +626,7 @@ __EXPECT(NULL, "NULL", seen, #seen, ==, 0) /** - * EXPECT_TRUE(seen) + * EXPECT_TRUE() * * @seen: measured value * @@ -636,7 +636,7 @@ __EXPECT(0, "0", seen, #seen, !=, 0) /** - * EXPECT_FALSE(seen) + * EXPECT_FALSE() * * @seen: measured value * @@ -646,7 +646,7 @@ __EXPECT(0, "0", seen, #seen, ==, 0) /** - * EXPECT_STREQ(expected, seen) + * EXPECT_STREQ() * * @expected: expected value * @seen: measured value @@ -657,7 +657,7 @@ __EXPECT_STR(expected, seen, ==, 0) /** - * EXPECT_STRNE(expected, seen) + * EXPECT_STRNE() * * @expected: expected value * @seen: measured value diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk index 30848ca36555..a5ce26d548e4 100644 --- a/tools/testing/selftests/lib.mk +++ b/tools/testing/selftests/lib.mk @@ -136,7 +136,7 @@ endif ifeq ($(OVERRIDE_TARGETS),) LOCAL_HDRS := $(selfdir)/kselftest_harness.h $(selfdir)/kselftest.h $(OUTPUT)/%:%.c $(LOCAL_HDRS) - $(LINK.c) $^ $(LDLIBS) -o $@ + $(LINK.c) $(filter-out $(LOCAL_HDRS),$^) $(LDLIBS) -o $@ $(OUTPUT)/%.o:%.S $(COMPILE.S) $^ -o $@ diff --git a/tools/testing/selftests/pidfd/config b/tools/testing/selftests/pidfd/config index bb11de90c0c9..f6f2965e17af 100644 --- a/tools/testing/selftests/pidfd/config +++ b/tools/testing/selftests/pidfd/config @@ -4,3 +4,4 @@ CONFIG_USER_NS=y CONFIG_PID_NS=y CONFIG_NET_NS=y CONFIG_CGROUPS=y +CONFIG_CHECKPOINT_RESTORE=y diff --git a/tools/testing/selftests/pidfd/pidfd_getfd_test.c b/tools/testing/selftests/pidfd/pidfd_getfd_test.c index 7758c98be015..0930e2411dfb 100644 --- a/tools/testing/selftests/pidfd/pidfd_getfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_getfd_test.c @@ -204,7 +204,10 @@ TEST_F(child, fetch_fd) fd = sys_pidfd_getfd(self->pidfd, self->remote_fd, 0); ASSERT_GE(fd, 0); - EXPECT_EQ(0, sys_kcmp(getpid(), self->pid, KCMP_FILE, fd, self->remote_fd)); + ret = sys_kcmp(getpid(), self->pid, KCMP_FILE, fd, self->remote_fd); + if (ret < 0 && errno == ENOSYS) + SKIP(return, "kcmp() syscall not supported"); + EXPECT_EQ(ret, 0); ret = fcntl(fd, F_GETFD); ASSERT_GE(ret, 0); diff --git a/tools/testing/selftests/pidfd/pidfd_open_test.c b/tools/testing/selftests/pidfd/pidfd_open_test.c index b9fe75fc3e51..8a59438ccc78 100644 --- a/tools/testing/selftests/pidfd/pidfd_open_test.c +++ b/tools/testing/selftests/pidfd/pidfd_open_test.c @@ -6,7 +6,6 @@ #include <inttypes.h> #include <limits.h> #include 
<linux/types.h> -#include <linux/wait.h> #include <sched.h> #include <signal.h> #include <stdbool.h> diff --git a/tools/testing/selftests/pidfd/pidfd_poll_test.c b/tools/testing/selftests/pidfd/pidfd_poll_test.c index 4b115444dfe9..610811275357 100644 --- a/tools/testing/selftests/pidfd/pidfd_poll_test.c +++ b/tools/testing/selftests/pidfd/pidfd_poll_test.c @@ -3,7 +3,6 @@ #define _GNU_SOURCE #include <errno.h> #include <linux/types.h> -#include <linux/wait.h> #include <poll.h> #include <signal.h> #include <stdbool.h> diff --git a/tools/testing/selftests/pidfd/pidfd_setns_test.c b/tools/testing/selftests/pidfd/pidfd_setns_test.c index 1f085b922c6e..6e2f2cd400ca 100644 --- a/tools/testing/selftests/pidfd/pidfd_setns_test.c +++ b/tools/testing/selftests/pidfd/pidfd_setns_test.c @@ -16,7 +16,6 @@ #include <unistd.h> #include <sys/socket.h> #include <sys/stat.h> -#include <linux/kcmp.h> #include "pidfd.h" #include "../clone3/clone3_selftests.h" diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index c585aaa2acd8..529eb700ac26 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -330,7 +330,7 @@ static int test_pidfd_send_signal_recycled_pid_fail(void) ksft_exit_fail_msg("%s test: Failed to recycle pid %d\n", test_name, PID_RECYCLE); case PIDFD_SKIP: - ksft_print_msg("%s test: Skipping test\n", test_name); + ksft_test_result_skip("%s test: Skipping test\n", test_name); ret = 0; break; case PIDFD_XFAIL: diff --git a/tools/testing/selftests/proc/proc-loadavg-001.c b/tools/testing/selftests/proc/proc-loadavg-001.c index 471e2aa28077..fb4fe9188806 100644 --- a/tools/testing/selftests/proc/proc-loadavg-001.c +++ b/tools/testing/selftests/proc/proc-loadavg-001.c @@ -14,7 +14,6 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Test that /proc/loadavg correctly reports last pid in pid namespace. */ -#define _GNU_SOURCE #include <errno.h> #include <sched.h> #include <sys/types.h> diff --git a/tools/testing/selftests/proc/proc-self-syscall.c b/tools/testing/selftests/proc/proc-self-syscall.c index 9f6d000c0245..8511dcfe67c7 100644 --- a/tools/testing/selftests/proc/proc-self-syscall.c +++ b/tools/testing/selftests/proc/proc-self-syscall.c @@ -13,7 +13,6 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ -#define _GNU_SOURCE #include <unistd.h> #include <sys/syscall.h> #include <sys/types.h> diff --git a/tools/testing/selftests/proc/proc-uptime-002.c b/tools/testing/selftests/proc/proc-uptime-002.c index 30e2b7849089..e7ceabed7f51 100644 --- a/tools/testing/selftests/proc/proc-uptime-002.c +++ b/tools/testing/selftests/proc/proc-uptime-002.c @@ -15,7 +15,6 @@ */ // Test that values in /proc/uptime increment monotonically // while shifting across CPUs. -#define _GNU_SOURCE #undef NDEBUG #include <assert.h> #include <unistd.h> diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh index d77f4829f1e0..74c69b75f6f5 100755 --- a/tools/testing/selftests/wireguard/netns.sh +++ b/tools/testing/selftests/wireguard/netns.sh @@ -316,6 +316,14 @@ pp sleep 3 n2 ping -W 1 -c 1 192.168.241.1 n1 wg set wg0 peer "$pub2" persistent-keepalive 0 +# Test that sk_bound_dev_if works +n1 ping -I wg0 -c 1 -W 1 192.168.241.2 +# What about when the mark changes and the packet must be rerouted? 
+n1 iptables -t mangle -I OUTPUT -j MARK --set-xmark 1 +n1 ping -c 1 -W 1 192.168.241.2 # First the boring case +n1 ping -I wg0 -c 1 -W 1 192.168.241.2 # Then the sk_bound_dev_if case +n1 iptables -t mangle -D OUTPUT -j MARK --set-xmark 1 + # Test that onion routing works, even when it loops n1 wg set wg0 peer "$pub3" allowed-ips 192.168.242.2/32 endpoint 192.168.241.2:5 ip1 addr add 192.168.242.1/24 dev wg0 diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config index d531de13c95b..4eecb432a66c 100644 --- a/tools/testing/selftests/wireguard/qemu/kernel.config +++ b/tools/testing/selftests/wireguard/qemu/kernel.config @@ -18,10 +18,12 @@ CONFIG_NF_NAT=y CONFIG_NETFILTER_XTABLES=y CONFIG_NETFILTER_XT_NAT=y CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MARK=y CONFIG_NF_CONNTRACK_IPV4=y CONFIG_NF_NAT_IPV4=y CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_NAT=y CONFIG_IP_ADVANCED_ROUTER=y CONFIG_IP_MULTIPLE_TABLES=y
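
For context, the epoll61 hunk near the top of this section shows only the pthread_create()/pthread_join() scaffolding: one thread signals an eventfd while two other threads wait on the same epoll instance, one with a timeout and one fully blocking. The standalone C sketch below reproduces only that shape; the helper names, the 1 ms delay and the 100 ms timeout are illustrative assumptions, not the actual epoll61 test code.

#define _GNU_SOURCE
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <unistd.h>

/* Hedged sketch of the scenario the epoll61 threads exercise: a writer
 * signals an eventfd while two waiters sit in epoll_wait() on the same
 * epoll instance. Build with "gcc -pthread". */
struct ctx { int epfd; int evfd; };

static void *write_eventfd(void *arg)
{
	struct ctx *c = arg;
	uint64_t one = 1;

	usleep(1000); /* give the waiters a chance to block first */
	if (write(c->evfd, &one, sizeof(one)) != sizeof(one))
		perror("write");
	return NULL;
}

static void *wait_with_timeout(void *arg)
{
	struct ctx *c = arg;
	struct epoll_event ev;

	epoll_wait(c->epfd, &ev, 1, 100); /* 100 ms timeout */
	return NULL;
}

static void *wait_blocking(void *arg)
{
	struct ctx *c = arg;
	struct epoll_event ev;

	epoll_wait(c->epfd, &ev, 1, -1); /* blocks until the eventfd fires */
	return NULL;
}

int main(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	pthread_t threads[3];
	struct ctx c;
	int n;

	c.evfd = eventfd(0, EFD_NONBLOCK);
	c.epfd = epoll_create1(0);
	epoll_ctl(c.epfd, EPOLL_CTL_ADD, c.evfd, &ev);

	pthread_create(&threads[0], NULL, write_eventfd, &c);
	pthread_create(&threads[1], NULL, wait_with_timeout, &c);
	pthread_create(&threads[2], NULL, wait_blocking, &c);
	for (n = 0; n < 3; n++)
		pthread_join(threads[n], NULL);

	close(c.epfd);
	close(c.evfd);
	return 0;
}

Because the eventfd is registered level-triggered and never drained, both waiters see EPOLLIN once the counter becomes non-zero, so neither epoll_wait() call can hang.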
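The kselftest_harness.h changes above touch only the kernel-doc headers of the ASSERT_xx()/EXPECT_xx() macros, and the pidfd_getfd change shows the SKIP() idiom for kernels that lack a given syscall. A rough usage sketch of those two pieces together, assuming the usual relative include path and a kcmp(2) probe that is not taken from the patch:

#define _GNU_SOURCE
#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "../kselftest_harness.h" /* assumed path, relative to the test dir */

TEST(kcmp_or_skip)
{
	/* KCMP_FILE (type 0): compare fd 0 of this process against itself;
	 * assumes stdin is open. */
	long ret = syscall(__NR_kcmp, getpid(), getpid(), 0, 0, 0);

	if (ret < 0 && errno == ENOSYS)
		SKIP(return, "kcmp() syscall not supported");

	/* EXPECT_xx() records a failure and keeps going; ASSERT_xx() aborts
	 * the test on mismatch. */
	EXPECT_EQ(0, ret);
	ASSERT_GE(getpid(), 1);
}

TEST_HARNESS_MAIN

Built like the other pidfd tests, this reports SKIP instead of FAIL on kernels configured without kcmp() support, which is also why CONFIG_CHECKPOINT_RESTORE=y is added to the pidfd config above.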