path: root/lib/lru_cache.c
Age         Commit message  (Author, files changed, lines -/+)
2024-09-01  lib/lru_cache: fix spelling mistake "colision"->"collision"  (Deshan Zhang, 1 file, -5/+5)
2022-11-22  lru_cache: remove unused lc_private, lc_set, lc_index_of  (Joel Colledge, 1 file, -44/+0)
2022-11-22  lru_cache: remove compiled out code  (Christoph Böhmwalder, 1 file, -11/+0)
2022-11-22  lru_cache: use atomic operations when accessing lc->flags, always  (Lars Ellenberg, 1 file, -2/+2)
2022-07-17  lib/lru_cache: fix error free handing in lc_create  (wuchi, 1 file, -2/+2)
2021-04-16  lib: remove "expecting prototype" kernel-doc warnings  (Randy Dunlap, 1 file, -1/+2)
2019-05-24  treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 91  (Thomas Gleixner, 1 file, -13/+1)
2018-06-12  treewide: kzalloc() -> kcalloc()  (Kees Cook, 1 file, -1/+1)
2015-11-25  lru_cache: Converted lc_seq_printf_status to return void  (Roland Kammerer, 1 file, -3/+1)
2015-04-15  lru_cache: remove use of seq_printf return value  (Joe Perches, 1 file, -4/+5)
2014-07-10  drbd: debugfs: add per volume oldest_requests  (Lars Ellenberg, 1 file, -9/+12)
2014-07-10  drbd: silence -Wmissing-prototypes warnings  (Lars Ellenberg, 1 file, -1/+1)
2013-03-22  lru_cache: introduce lc_get_cumulative()  (Lars Ellenberg, 1 file, -10/+46)
2013-02-27  hlist: drop the node parameter from iterators  (Sasha Levin, 1 file, -2/+1)
2011-10-14  lru_cache: allow multiple changes per transaction  (Lars Ellenberg, 1 file, -76/+167)
2011-10-14  lru_cache: consolidate lc_get and lc_try_get  (Lars Ellenberg, 1 file, -59/+61)
2011-10-14  lru_cache.h: fix comments referring to ts_ instead of lc_  (Lars Ellenberg, 1 file, -1/+1)
2011-10-14  drbd: use clear_bit_unlock() where appropriate  (Lars Ellenberg, 1 file, -6/+4)
2011-05-25  lru_cache: use correct type in sizeof for allocation  (Ilia Mirkin, 1 file, -1/+1)
2009-10-01  The DRBD driver  (Philipp Reisner, 1 file, -0/+560)
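
The commits above all touch the small tracking-cache API exported by lib/lru_cache.c (lc_create(), lc_get()/lc_try_get(), lc_get_cumulative(), lc_committed(), lc_put(), lc_destroy()), which DRBD uses for its activity log. For orientation only, below is a minimal usage sketch assuming the interface declared in include/linux/lru_cache.h in its post-2011 form (with the max_pending_changes argument added by "allow multiple changes per transaction"); struct my_extent, the my_*/MY_* names, and the element counts are hypothetical, and exact signatures may differ between the kernel versions listed above.

    #include <linux/lru_cache.h>
    #include <linux/slab.h>
    #include <linux/stddef.h>
    #include <linux/types.h>

    /* Hypothetical object tracked by the cache; a struct lc_element must be embedded. */
    struct my_extent {
    	struct lc_element lce;          /* managed by lib/lru_cache */
    	u64 payload;                    /* caller-private data */
    };

    static struct kmem_cache *my_ext_slab;  /* backing slab for cached objects */
    static struct lru_cache *my_lru;

    static int my_lru_init(void)
    {
    	my_ext_slab = kmem_cache_create("my_extent", sizeof(struct my_extent),
    					0, 0, NULL);
    	if (!my_ext_slab)
    		return -ENOMEM;

    	/* name, slab, max pending changes, nr elements, object size, offset of lc_element */
    	my_lru = lc_create("my-cache", my_ext_slab, 1, 61,
    			   sizeof(struct my_extent),
    			   offsetof(struct my_extent, lce));
    	if (!my_lru) {
    		kmem_cache_destroy(my_ext_slab);   /* caller-side cleanup on failure */
    		return -ENOMEM;
    	}
    	return 0;
    }

    static void my_lru_touch(unsigned int enr)
    {
    	struct lc_element *e = lc_get(my_lru, enr);   /* pins the element for @enr */
    	struct my_extent *ext;

    	if (!e)
    		return;                 /* cache starved: all elements currently pinned */

    	ext = container_of(e, struct my_extent, lce);
    	if (e->lc_number != enr) {
    		/* element was newly (re)assigned; a real user would persist the
    		 * pending change(s) here before committing them */
    		lc_committed(my_lru);
    	}
    	ext->payload++;                 /* ... use the object ... */
    	lc_put(my_lru, e);              /* drop the reference taken by lc_get() */
    }

    static void my_lru_exit(void)
    {
    	lc_destroy(my_lru);
    	kmem_cache_destroy(my_ext_slab);
    }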
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c28
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.h3
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c174
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h81
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c375
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.h905
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c5
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c6
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c5
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c10
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c3
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c3
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c3
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c3
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c1
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c8
-rw-r--r--drivers/gpu/drm/stm/Kconfig9
-rw-r--r--drivers/gpu/drm/stm/Makefile2
-rw-r--r--drivers/gpu/drm/stm/drv.c23
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c352
-rw-r--r--drivers/gpu/drm/stm/ltdc.c470
-rw-r--r--drivers/gpu/drm/stm/ltdc.h4
-rw-r--r--drivers/gpu/drm/sun4i/Kconfig16
-rw-r--r--drivers/gpu/drm/sun4i/Makefile1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c19
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h32
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c159
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_i2c.c220
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_layer.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c11
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_layer.c2
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c5
-rw-r--r--drivers/gpu/drm/tegra/Kconfig1
-rw-r--r--drivers/gpu/drm/tegra/Makefile2
-rw-r--r--drivers/gpu/drm/tegra/dc.c22
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c12
-rw-r--r--drivers/gpu/drm/tegra/drm.c116
-rw-r--r--drivers/gpu/drm/tegra/drm.h12
-rw-r--r--drivers/gpu/drm/tegra/dsi.c15
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/gem.c78
-rw-r--r--drivers/gpu/drm/tegra/gem.h2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c15
-rw-r--r--drivers/gpu/drm/tegra/rgb.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c15
-rw-r--r--drivers/gpu/drm/tegra/trace.c2
-rw-r--r--drivers/gpu/drm/tegra/trace.h68
-rw-r--r--drivers/gpu/drm/tegra/vic.c15
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c20
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_panel.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_plane.c1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c1
-rw-r--r--drivers/gpu/drm/tinydrm/Kconfig23
-rw-r--r--drivers/gpu/drm/tinydrm/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c60
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c5
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c8
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c17
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c1117
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c428
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c68
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c86
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c2
-rw-r--r--drivers/gpu/drm/udl/udl_dmabuf.c2
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c11
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c13
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c4
-rw-r--r--drivers/gpu/drm/udl/udl_main.c2
-rw-r--r--drivers/gpu/drm/vc4/Kconfig8
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c291
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c50
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h40
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c24
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c44
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c291
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c85
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c19
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h113
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c63
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c78
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c72
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c86
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h4
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.c5
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c11
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fb.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c242
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h39
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c148
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c104
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.h4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c111
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c41
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c35
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c41
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c6
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c3
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c2
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c1
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c1
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c10
-rw-r--r--drivers/gpu/host1x/bus.c19
-rw-r--r--drivers/gpu/host1x/dev.c4
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c24
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c2
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/gpu/ipu-v3/Kconfig1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c4
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-asus.c218
-rw-r--r--drivers/hid/hid-core.c16
-rw-r--r--drivers/hid/hid-ids.h5
-rw-r--r--drivers/hid/hid-input.c196
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-multitouch.c50
-rw-r--r--drivers/hid/hid-ntrig.c2
-rw-r--r--drivers/hid/hid-picolcd_cir.c4
-rw-r--r--drivers/hid/hid-prodikeys.c2
-rw-r--r--drivers/hid/hid-sensor-custom.c4
-rw-r--r--drivers/hid/hid-sensor-hub.c94
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c3
-rw-r--r--drivers/hid/uhid.c3
-rw-r--r--drivers/hid/usbhid/hid-core.c3
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/usbkbd.c2
-rw-r--r--drivers/hid/usbhid/usbmouse.c2
-rw-r--r--drivers/hid/wacom_sys.c63
-rw-r--r--drivers/hid/wacom_wac.c8
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/channel.c143
-rw-r--r--drivers/hv/channel_mgmt.c49
-rw-r--r--drivers/hv/connection.c7
-rw-r--r--drivers/hv/hv.c9
-rw-r--r--drivers/hv/hv_balloon.c12
-rw-r--r--drivers/hv/hv_kvp.c2
-rw-r--r--drivers/hv/hyperv_vmbus.h11
-rw-r--r--drivers/hv/ring_buffer.c169
-rw-r--r--drivers/hv/vmbus_drv.c20
-rw-r--r--drivers/hwmon/Kconfig8
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c2
-rw-r--r--drivers/hwmon/adc128d818.c2
-rw-r--r--drivers/hwmon/ads1015.c14
-rw-r--r--drivers/hwmon/adt7475.c16
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asc7621.c4
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c116
-rw-r--r--drivers/hwmon/da9052-hwmon.c285
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c4
-rw-r--r--drivers/hwmon/ftsteutates.c4
-rw-r--r--drivers/hwmon/hwmon.c4
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/it87.c214
-rw-r--r--drivers/hwmon/jc42.c19
-rw-r--r--drivers/hwmon/ltq-cputemp.c163
-rw-r--r--drivers/hwmon/nct7802.c10
-rw-r--r--drivers/hwmon/pmbus/Kconfig18
-rw-r--r--drivers/hwmon/pmbus/Makefile2
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c151
-rw-r--r--drivers/hwmon/pmbus/lm25066.c47
-rw-r--r--drivers/hwmon/pmbus/pmbus.h2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c292
-rw-r--r--drivers/hwmon/pmbus/tps53679.c113
-rw-r--r--drivers/hwmon/scpi-hwmon.c2
-rw-r--r--drivers/hwmon/stts751.c4
-rw-r--r--drivers/hwtracing/coresight/Kconfig10
-rw-r--r--drivers/hwtracing/coresight/Makefile2
-rw-r--r--drivers/hwtracing/coresight/coresight-cpu-debug.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-dynamic-replicator.c (renamed from drivers/hwtracing/coresight/coresight-replicator-qcom.c)34
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c68
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c26
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c22
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c24
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c7
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h39
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c42
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c49
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c108
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h85
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c7
-rw-r--r--drivers/hwtracing/coresight/coresight.c8
-rw-r--r--drivers/hwtracing/intel_th/core.c359
-rw-r--r--drivers/hwtracing/intel_th/gth.c40
-rw-r--r--drivers/hwtracing/intel_th/gth.h5
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h104
-rw-r--r--drivers/hwtracing/intel_th/msu.c12
-rw-r--r--drivers/hwtracing/intel_th/pci.c67
-rw-r--r--drivers/hwtracing/intel_th/pti.c115
-rw-r--r--drivers/hwtracing/intel_th/pti.h8
-rw-r--r--drivers/hwtracing/stm/core.c2
-rw-r--r--drivers/i2c/busses/Kconfig35
-rw-r--r--drivers/i2c/busses/Makefile4
-rw-r--r--drivers/i2c/busses/i2c-altera.c511
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c86
-rw-r--r--drivers/i2c/busses/i2c-at91.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm-iproc.c6
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c1
-rw-r--r--drivers/i2c/busses/i2c-cadence.c6
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c363
-rw-r--r--drivers/i2c/busses/i2c-cpm.c2
-rw-r--r--drivers/i2c/busses/i2c-davinci.c10
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c39
-rw-r--r--drivers/i2c/busses/i2c-designware-slave.c8
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c6
-rw-r--r--drivers/i2c/busses/i2c-gpio.c4
-rw-r--r--drivers/i2c/busses/i2c-hix5hd2.c6
-rw-r--r--drivers/i2c/busses/i2c-i801.c12
-rw-r--r--drivers/i2c/busses/i2c-ismt.c6
-rw-r--r--drivers/i2c/busses/i2c-kempld.c2
-rw-r--r--drivers/i2c/busses/i2c-lpc2k.c6
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c79
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c5
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-octeon-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-opal.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c4
-rw-r--r--drivers/i2c/busses/i2c-pnx.c2
-rw-r--r--drivers/i2c/busses/i2c-powermac.c12
-rw-r--r--drivers/i2c/busses/i2c-puv3.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c6
-rw-r--r--drivers/i2c/busses/i2c-qup.c2
-rw-r--r--drivers/i2c/busses/i2c-rcar.c5
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c9
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c6
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-simtec.c6
-rw-r--r--drivers/i2c/busses/i2c-sirf.c6
-rw-r--r--drivers/i2c/busses/i2c-sprd.c646
-rw-r--r--drivers/i2c/busses/i2c-st.c3
-rw-r--r--drivers/i2c/busses/i2c-stm32.h20
-rw-r--r--drivers/i2c/busses/i2c-stm32f4.c22
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c972
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c6
-rw-r--r--drivers/i2c/busses/i2c-taos-evm.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c4
-rw-r--r--drivers/i2c/busses/i2c-thunderx-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c46
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c40
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-xiic.c8
-rw-r--r--drivers/i2c/i2c-core-base.c4
-rw-r--r--drivers/i2c/i2c-core-of.c24
-rw-r--r--drivers/i2c/muxes/Kconfig3
-rw-r--r--drivers/i2c/muxes/i2c-demux-pinctrl.c4
-rw-r--r--drivers/i2c/muxes/i2c-mux-mlxcpld.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c9
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c225
-rw-r--r--drivers/ide/ide-floppy.c2
-rw-r--r--drivers/ide/pmac.c18
-rw-r--r--drivers/idle/intel_idle.c190
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c1
-rw-r--r--drivers/iio/accel/da311.c2
-rw-r--r--drivers/iio/accel/sca3000.c6
-rw-r--r--drivers/iio/accel/st_accel.h5
-rw-r--r--drivers/iio/accel/st_accel_core.c6
-rw-r--r--drivers/iio/accel/st_accel_i2c.c8
-rw-r--r--drivers/iio/accel/st_accel_spi.c86
-rw-r--r--drivers/iio/adc/Kconfig33
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ad7766.c6
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c329
-rw-r--r--drivers/iio/adc/at91_adc.c2
-rw-r--r--drivers/iio/adc/dln2-adc.c722
-rw-r--r--drivers/iio/adc/ep93xx_adc.c255
-rw-r--r--drivers/iio/adc/ina2xx-adc.c38
-rw-r--r--drivers/iio/adc/ltc2471.c160
-rw-r--r--drivers/iio/adc/ltc2497.c54
-rw-r--r--drivers/iio/adc/max9611.c4
-rw-r--r--drivers/iio/adc/mcp3422.c6
-rw-r--r--drivers/iio/adc/meson_saradc.c13
-rw-r--r--drivers/iio/adc/mt6577_auxadc.c37
-rw-r--r--drivers/iio/adc/rockchip_saradc.c8
-rw-r--r--drivers/iio/adc/stm32-adc-core.c12
-rw-r--r--drivers/iio/adc/stm32-adc.c154
-rw-r--r--drivers/iio/adc/ti-ads1015.c611
-rw-r--r--drivers/iio/adc/ti-ads7950.c42
-rw-r--r--drivers/iio/adc/twl4030-madc.c2
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c2
-rw-r--r--drivers/iio/adc/xilinx-xadc-events.c38
-rw-r--r--drivers/iio/adc/xilinx-xadc.h10
-rw-r--r--drivers/iio/chemical/Kconfig9
-rw-r--r--drivers/iio/chemical/Makefile1
-rw-r--r--drivers/iio/chemical/ccs811.c405
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-attributes.c3
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c31
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_i2c.c29
-rw-r--r--drivers/iio/counter/Kconfig9
-rw-r--r--drivers/iio/counter/Makefile1
-rw-r--r--drivers/iio/counter/stm32-lptimer-cnt.c383
-rw-r--r--drivers/iio/dac/stm32-dac-core.c40
-rw-r--r--drivers/iio/dac/stm32-dac.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c10
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c13
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c8
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c54
-rw-r--r--drivers/iio/humidity/Kconfig3
-rw-r--r--drivers/iio/humidity/hdc100x.c22
-rw-r--r--drivers/iio/humidity/hts221.h11
-rw-r--r--drivers/iio/humidity/hts221_buffer.c43
-rw-r--r--drivers/iio/humidity/hts221_core.c144
-rw-r--r--drivers/iio/humidity/htu21.c8
-rw-r--r--drivers/iio/imu/adis16400_core.c4
-rw-r--r--drivers/iio/imu/adis16480.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c17
-rw-r--r--drivers/iio/inkern.c6
-rw-r--r--drivers/iio/light/apds9300.c2
-rw-r--r--drivers/iio/light/rpr0521.c336
-rw-r--r--drivers/iio/light/tcs3472.c4
-rw-r--r--drivers/iio/light/tsl2583.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig4
-rw-r--r--drivers/iio/magnetometer/ak8974.c133
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/st_magn.h1
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c7
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c8
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c30
-rw-r--r--drivers/iio/orientation/hid-sensor-rotation.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c27
-rw-r--r--drivers/iio/pressure/bmp280.h5
-rw-r--r--drivers/iio/pressure/ms5637.c12
-rw-r--r--drivers/iio/pressure/st_pressure_core.c4
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c3
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c33
-rw-r--r--drivers/iio/pressure/zpa2326.c12
-rw-r--r--drivers/iio/proximity/Kconfig8
-rw-r--r--drivers/iio/proximity/srf08.c227
-rw-r--r--drivers/iio/temperature/tsys01.c7
-rw-r--r--drivers/iio/trigger/Kconfig11
-rw-r--r--drivers/iio/trigger/Makefile1
-rw-r--r--drivers/iio/trigger/stm32-lptimer-trigger.c118
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c160
-rw-r--r--drivers/infiniband/Kconfig9
-rw-r--r--drivers/infiniband/core/Makefile6
-rw-r--r--drivers/infiniband/core/addr.c12
-rw-r--r--drivers/infiniband/core/cache.c23
-rw-r--r--drivers/infiniband/core/cm.c224
-rw-r--r--drivers/infiniband/core/cma.c35
-rw-r--r--drivers/infiniband/core/core_priv.h27
-rw-r--r--drivers/infiniband/core/device.c149
-rw-r--r--drivers/infiniband/core/iwcm.c16
-rw-r--r--drivers/infiniband/core/iwpm_msg.c20
-rw-r--r--drivers/infiniband/core/iwpm_util.c15
-rw-r--r--drivers/infiniband/core/mad_rmpp.c2
-rw-r--r--drivers/infiniband/core/netlink.c321
-rw-r--r--drivers/infiniband/core/nldev.c325
-rw-r--r--drivers/infiniband/core/rdma_core.c179
-rw-r--r--drivers/infiniband/core/rdma_core.h42
-rw-r--r--drivers/infiniband/core/roce_gid_mgmt.c2
-rw-r--r--drivers/infiniband/core/rw.c24
-rw-r--r--drivers/infiniband/core/sa_query.c42
-rw-r--r--drivers/infiniband/core/sysfs.c4
-rw-r--r--drivers/infiniband/core/ucm.c2
-rw-r--r--drivers/infiniband/core/ucma.c10
-rw-r--r--drivers/infiniband/core/umem_odp.c19
-rw-r--r--drivers/infiniband/core/umem_rbtree.c4
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs.h3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c283
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c364
-rw-r--r--drivers/infiniband/core/uverbs_ioctl_merge.c665
-rw-r--r--drivers/infiniband/core/uverbs_main.c39
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c48
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c281
-rw-r--r--drivers/infiniband/core/verbs.c176
-rw-r--r--drivers/infiniband/hw/bnxt_re/Kconfig1
-rw-r--r--drivers/infiniband/hw/bnxt_re/Makefile2
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h5
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c114
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h62
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c107
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h3
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c168
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c486
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.h29
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c26
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c77
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c1
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c5
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig7
-rw-r--r--drivers/infiniband/hw/hfi1/Makefile2
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c18
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.h14
-rw-r--r--drivers/infiniband/hw/hfi1/aspm.h41
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c808
-rw-r--r--drivers/infiniband/hw/hfi1/chip.h29
-rw-r--r--drivers/infiniband/hw/hfi1/common.h11
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c94
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c513
-rw-r--r--drivers/infiniband/hw/hfi1/eprom.c11
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.c114
-rw-r--r--drivers/infiniband/hw/hfi1/exp_rcv.h190
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c437
-rw-r--r--drivers/infiniband/hw/hfi1/firmware.c73
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h554
-rw-r--r--drivers/infiniband/hw/hfi1/init.c393
-rw-r--r--drivers/infiniband/hw/hfi1/intr.c3
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h70
-rw-r--r--drivers/infiniband/hw/hfi1/mad.c805
-rw-r--r--drivers/infiniband/hw/hfi1/mad.h5
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.c46
-rw-r--r--drivers/infiniband/hw/hfi1/mmu_rb.h5
-rw-r--r--drivers/infiniband/hw/hfi1/opa_compat.h21
-rw-r--r--drivers/infiniband/hw/hfi1/pcie.c384
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c15
-rw-r--r--drivers/infiniband/hw/hfi1/platform.c102
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c222
-rw-r--r--drivers/infiniband/hw/hfi1/qp.h23
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c426
-rw-r--r--drivers/infiniband/hw/hfi1/ruc.c321
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.c42
-rw-r--r--drivers/infiniband/hw/hfi1/sdma.h3
-rw-r--r--drivers/infiniband/hw/hfi1/sysfs.c4
-rw-r--r--drivers/infiniband/hw/hfi1/trace.c191
-rw-r--r--drivers/infiniband/hw/hfi1/trace.h3
-rw-r--r--drivers/infiniband/hw/hfi1/trace_ibhdrs.h456
-rw-r--r--drivers/infiniband/hw/hfi1/trace_misc.h20
-rw-r--r--drivers/infiniband/hw/hfi1/trace_mmu.h95
-rw-r--r--drivers/infiniband/hw/hfi1/trace_rx.h102
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h136
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c60
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c487
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c371
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.h58
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c626
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.h169
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c384
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.h59
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c11
-rw-r--r--drivers/infiniband/hw/hfi1/vnic.h16
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_main.c39
-rw-r--r--drivers/infiniband/hw/hfi1/vnic_sdma.c27
-rw-r--r--drivers/infiniband/hw/hns/Kconfig2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_eq.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_cm.c27
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_ctrl.c134
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_d.h4
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_main.c1
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_p.h14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_pble.c9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_puda.c8
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_status.h2
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_type.h5
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_uk.c14
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c22
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_verbs.c7
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c4
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c8
-rw-r--r--drivers/infiniband/hw/mlx4/main.c54
-rw-r--r--drivers/infiniband/hw/mlx4/mcg.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h42
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c1080
-rw-r--r--drivers/infiniband/hw/mlx4/srq.c17
-rw-r--r--drivers/infiniband/hw/mlx4/sysfs.c2
-rw-r--r--drivers/infiniband/hw/mlx5/Makefile2
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c20
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h4
-rw-r--r--drivers/infiniband/hw/mlx5/cong.c421
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c8
-rw-r--r--drivers/infiniband/hw/mlx5/ib_virt.c9
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c4
-rw-r--r--drivers/infiniband/hw/mlx5/main.c464
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h86
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c121
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c172
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c33
-rw-r--r--drivers/infiniband/hw/mthca/mthca_av.c10
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c14
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mad.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c7
-rw-r--r--drivers/infiniband/hw/nes/nes.c70
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c10
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c6
-rw-r--r--drivers/infiniband/hw/qedr/main.c7
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h3
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c5
-rw-r--r--drivers/infiniband/hw/qib/qib.h8
-rw-r--r--drivers/infiniband/hw/qib/qib_debugfs.c18
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c1
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c29
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c92
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.c23
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c149
-rw-r--r--drivers/infiniband/hw/qib/qib_qp.c51
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ruc.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_ud.c43
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h14
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c12
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_main.c18
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c43
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.c6
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom.h2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.c15
-rw-r--r--drivers/infiniband/hw/usnic/usnic_uiom_interval_tree.h12
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma.h2
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c20
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h37
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c44
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_misc.c7
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h17
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c9
-rw-r--r--drivers/infiniband/sw/rdmavt/ah.c10
-rw-r--r--drivers/infiniband/sw/rdmavt/cq.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/mr.c170
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c348
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_mr.h62
-rw-r--r--drivers/infiniband/sw/rdmavt/trace_tx.h11
-rw-r--r--drivers/infiniband/sw/rdmavt/vt.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c7
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c19
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mmap.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c26
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c67
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c8
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c22
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c1
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c6
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c1
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c35
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c1
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c5
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h4
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/joystick/adi.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-serio.c2
-rw-r--r--drivers/input/joystick/iforce/iforce-usb.c2
-rw-r--r--drivers/input/joystick/magellan.c2
-rw-r--r--drivers/input/joystick/spaceball.c2
-rw-r--r--drivers/input/joystick/spaceorb.c2
-rw-r--r--drivers/input/joystick/stinger.c2
-rw-r--r--drivers/input/joystick/twidjoy.c2
-rw-r--r--drivers/input/joystick/warrior.c2
-rw-r--r--drivers/input/joystick/xpad.c36
-rw-r--r--drivers/input/joystick/zhenhua.c2
-rw-r--r--drivers/input/keyboard/atkbd.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c18
-rw-r--r--drivers/input/keyboard/hil_kbd.c2
-rw-r--r--drivers/input/keyboard/hilkbd.c10
-rw-r--r--drivers/input/keyboard/lkkbd.c2
-rw-r--r--drivers/input/keyboard/newtonkbd.c2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c15
-rw-r--r--drivers/input/keyboard/stowaway.c2
-rw-r--r--drivers/input/keyboard/sunkbd.c2
-rw-r--r--drivers/input/keyboard/tegra-kbc.c5
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c2
-rw-r--r--drivers/input/keyboard/xtkbd.c2
-rw-r--r--drivers/input/misc/Kconfig23
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ati_remote2.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c167
-rw-r--r--drivers/input/misc/dm355evm_keys.c2
-rw-r--r--drivers/input/misc/ims-pcu.c4
-rw-r--r--drivers/input/misc/keyspan_remote.c2
-rw-r--r--drivers/input/misc/pcspkr.c17
-rw-r--r--drivers/input/misc/powermate.c2
-rw-r--r--drivers/input/misc/pwm-vibra.c267
-rw-r--r--drivers/input/misc/rk805-pwrkey.c111
-rw-r--r--drivers/input/misc/soc_button_array.c2
-rw-r--r--drivers/input/misc/twl4030-pwrbutton.c2
-rw-r--r--drivers/input/misc/twl4030-vibra.c2
-rw-r--r--drivers/input/misc/xen-kbdfront.c5
-rw-r--r--drivers/input/misc/yealink.c2
-rw-r--r--drivers/input/mouse/alps.c41
-rw-r--r--drivers/input/mouse/alps.h8
-rw-r--r--drivers/input/mouse/appletouch.c2
-rw-r--r--drivers/input/mouse/byd.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c14
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c13
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c4
-rw-r--r--drivers/input/mouse/elantech.c10
-rw-r--r--drivers/input/mouse/psmouse-base.c2
-rw-r--r--drivers/input/mouse/synaptics.c35
-rw-r--r--drivers/input/mouse/synaptics_usb.c2
-rw-r--r--drivers/input/mouse/trackpoint.c7
-rw-r--r--drivers/input/mouse/trackpoint.h3
-rw-r--r--drivers/input/mousedev.c62
-rw-r--r--drivers/input/rmi4/rmi_f01.c13
-rw-r--r--drivers/input/rmi4/rmi_f34.c2
-rw-r--r--drivers/input/serio/Kconfig11
-rw-r--r--drivers/input/serio/Makefile1
-rw-r--r--drivers/input/serio/ambakmi.c2
-rw-r--r--drivers/input/serio/gscps2.c10
-rw-r--r--drivers/input/serio/hp_sdc.c4
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h11
-rw-r--r--drivers/input/serio/ps2-gpio.c453
-rw-r--r--drivers/input/serio/serio.c4
-rw-r--r--drivers/input/serio/serio_raw.c2
-rw-r--r--drivers/input/serio/xilinx_ps2.c12
-rw-r--r--drivers/input/tablet/acecad.c2
-rw-r--r--drivers/input/tablet/aiptek.c2
-rw-r--r--drivers/input/tablet/kbtab.c2
-rw-r--r--drivers/input/tablet/wacom_serial4.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c4
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c46
-rw-r--r--drivers/input/touchscreen/dynapro.c2
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c3
-rw-r--r--drivers/input/touchscreen/elants_i2c.c2
-rw-r--r--drivers/input/touchscreen/elo.c2
-rw-r--r--drivers/input/touchscreen/fujitsu_ts.c2
-rw-r--r--drivers/input/touchscreen/goodix.c9
-rw-r--r--drivers/input/touchscreen/gunze.c2
-rw-r--r--drivers/input/touchscreen/hampshire.c2
-rw-r--r--drivers/input/touchscreen/htcpen.c2
-rw-r--r--drivers/input/touchscreen/inexio.c2
-rw-r--r--drivers/input/touchscreen/mtouch.c2
-rw-r--r--drivers/input/touchscreen/mxs-lradc-ts.c8
-rw-r--r--drivers/input/touchscreen/penmount.c2
-rw-r--r--drivers/input/touchscreen/raydium_i2c_ts.c2
-rw-r--r--drivers/input/touchscreen/sun4i-ts.c2
-rw-r--r--drivers/input/touchscreen/sur40.c46
-rw-r--r--drivers/input/touchscreen/surface3_spi.c2
-rw-r--r--drivers/input/touchscreen/touchit213.c2
-rw-r--r--drivers/input/touchscreen/touchright.c2
-rw-r--r--drivers/input/touchscreen/touchwin.c2
-rw-r--r--drivers/input/touchscreen/tsc40.c2
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c4
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c2
-rw-r--r--drivers/iommu/Kconfig13
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c341
-rw-r--r--drivers/iommu/amd_iommu_init.c257
-rw-r--r--drivers/iommu/amd_iommu_proto.h12
-rw-r--r--drivers/iommu/amd_iommu_types.h61
-rw-r--r--drivers/iommu/amd_iommu_v2.c26
-rw-r--r--drivers/iommu/arm-smmu-regs.h220
-rw-r--r--drivers/iommu/arm-smmu-v3.c7
-rw-r--r--drivers/iommu/arm-smmu.c384
-rw-r--r--drivers/iommu/dmar.c2
-rw-r--r--drivers/iommu/exynos-iommu.c44
-rw-r--r--drivers/iommu/fsl_pamu.c27
-rw-r--r--drivers/iommu/fsl_pamu_domain.c28
-rw-r--r--drivers/iommu/intel-iommu.c291
-rw-r--r--drivers/iommu/intel-svm.c23
-rw-r--r--drivers/iommu/iommu-sysfs.c32
-rw-r--r--drivers/iommu/iommu.c59
-rw-r--r--drivers/iommu/iova.c183
-rw-r--r--drivers/iommu/ipmmu-vmsa.c242
-rw-r--r--drivers/iommu/msm_iommu.c15
-rw-r--r--drivers/iommu/mtk_iommu.c214
-rw-r--r--drivers/iommu/mtk_iommu.h9
-rw-r--r--drivers/iommu/of_iommu.c144
-rw-r--r--drivers/iommu/omap-iommu.c125
-rw-r--r--drivers/iommu/omap-iommu.h1
-rw-r--r--drivers/iommu/qcom_iommu.c930
-rw-r--r--drivers/iommu/rockchip-iommu.c52
-rw-r--r--drivers/iommu/s390-iommu.c37
-rw-r--r--drivers/iommu/tegra-gart.c45
-rw-r--r--drivers/iommu/tegra-smmu.c39
-rw-r--r--drivers/irqchip/Kconfig15
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c5
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c13
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.h4
-rw-r--r--drivers/irqchip/irq-atmel-aic.c14
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c4
-rw-r--r--drivers/irqchip/irq-bcm2835.c9
-rw-r--r--drivers/irqchip/irq-bcm2836.c5
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c3
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c10
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c1
-rw-r--r--drivers/irqchip/irq-crossbar.c6
-rw-r--r--drivers/irqchip/irq-digicolor.c8
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c12
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its-platform-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c1536
-rw-r--r--drivers/irqchip/irq-gic-v3.c125
-rw-r--r--drivers/irqchip/irq-gic-v4.c225
-rw-r--r--drivers/irqchip/irq-gic.c19
-rw-r--r--drivers/irqchip/irq-hip04.c3
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c4
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c256
-rw-r--r--drivers/irqchip/irq-metag-ext.c4
-rw-r--r--drivers/irqchip/irq-mips-cpu.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c613
-rw-r--r--drivers/irqchip/irq-mmp.c4
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c3
-rw-r--r--drivers/irqchip/irq-mxs.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c8
-rw-r--r--drivers/irqchip/irq-sun4i.c6
-rw-r--r--drivers/irqchip/irq-tegra.c16
-rw-r--r--drivers/irqchip/irq-uniphier-aidet.c261
-rw-r--r--drivers/irqchip/irq-xilinx-intc.c4
-rw-r--r--drivers/irqchip/irq-xtensa-mx.c6
-rw-r--r--drivers/isdn/capi/kcapi.c2
-rw-r--r--drivers/isdn/hardware/eicon/divacapi.h16
-rw-r--r--drivers/isdn/hardware/eicon/message.c247
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.h2
-rw-r--r--drivers/isdn/hisax/hfc_usb.c2
-rw-r--r--drivers/isdn/isdnloop/isdnloop.c2
-rw-r--r--drivers/isdn/mISDN/fsm.c5
-rw-r--r--drivers/isdn/mISDN/fsm.h2
-rw-r--r--drivers/isdn/mISDN/layer1.c3
-rw-r--r--drivers/isdn/mISDN/layer2.c15
-rw-r--r--drivers/isdn/mISDN/tei.c20
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-aat1290.c10
-rw-r--r--drivers/leds/leds-as3645a.c763
-rw-r--r--drivers/leds/leds-blinkm.c2
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-gpio.c7
-rw-r--r--drivers/leds/leds-is31fl32xx.c4
-rw-r--r--drivers/leds/leds-lm3533.c2
-rw-r--r--drivers/leds/leds-lp5521.c8
-rw-r--r--drivers/leds/leds-lp5562.c10
-rw-r--r--drivers/leds/leds-lp8501.c6
-rw-r--r--drivers/leds/leds-max77693.c4
-rw-r--r--drivers/leds/leds-pca955x.c350
-rw-r--r--drivers/leds/leds-powernv.c6
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-tlc591xx.c11
-rw-r--r--drivers/lguest/Kconfig13
-rw-r--r--drivers/lguest/Makefile26
-rw-r--r--drivers/lguest/README47
-rw-r--r--drivers/lguest/core.c398
-rw-r--r--drivers/lguest/hypercalls.c304
-rw-r--r--drivers/lguest/interrupts_and_traps.c706
-rw-r--r--drivers/lguest/lg.h258
-rw-r--r--drivers/lguest/lguest_user.c446
-rw-r--r--drivers/lguest/page_tables.c1239
-rw-r--r--drivers/lguest/segments.c228
-rw-r--r--drivers/lguest/x86/core.c724
-rw-r--r--drivers/lguest/x86/switcher_32.S388
-rw-r--r--drivers/macintosh/macio_sysfs.c2
-rw-r--r--drivers/macintosh/rack-meter.c14
-rw-r--r--drivers/macintosh/smu.c8
-rw-r--r--drivers/macintosh/via-cuda.c4
-rw-r--r--drivers/macintosh/windfarm_cpufreq_clamp.c2
-rw-r--r--drivers/macintosh/windfarm_fcu_controls.c4
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_lm87_sensor.c6
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c2
-rw-r--r--drivers/macintosh/windfarm_rm31.c4
-rw-r--r--drivers/macintosh/windfarm_smu_controls.c2
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sensors.c10
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c252
-rw-r--r--drivers/mailbox/pcc.c4
-rw-r--r--drivers/mcb/mcb-core.c20
-rw-r--r--drivers/mcb/mcb-lpc.c15
-rw-r--r--drivers/mcb/mcb-parse.c6
-rw-r--r--drivers/md/bcache/alloc.c4
-rw-r--r--drivers/md/bcache/bcache.h1
-rw-r--r--drivers/md/bcache/closure.c15
-rw-r--r--drivers/md/bcache/closure.h4
-rw-r--r--drivers/md/bcache/debug.c2
-rw-r--r--drivers/md/bcache/io.c2
-rw-r--r--drivers/md/bcache/journal.c6
-rw-r--r--drivers/md/bcache/request.c33
-rw-r--r--drivers/md/bcache/super.c16
-rw-r--r--drivers/md/bcache/sysfs.c19
-rw-r--r--drivers/md/bcache/util.c50
-rw-r--r--drivers/md/bcache/writeback.c25
-rw-r--r--drivers/md/bcache/writeback.h21
-rw-r--r--drivers/md/bitmap.c9
-rw-r--r--drivers/md/dm-bio-record.h9
-rw-r--r--drivers/md/dm-bufio.c97
-rw-r--r--drivers/md/dm-bufio.h9
-rw-r--r--drivers/md/dm-cache-target.c8
-rw-r--r--drivers/md/dm-crypt.c20
-rw-r--r--drivers/md/dm-delay.c4
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-flakey.c6
-rw-r--r--drivers/md/dm-integrity.c53
-rw-r--r--drivers/md/dm-io.c2
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-linear.c17
-rw-r--r--drivers/md/dm-log-writes.c52
-rw-r--r--drivers/md/dm-mpath.c19
-rw-r--r--drivers/md/dm-raid1.c12
-rw-r--r--drivers/md/dm-rq.c27
-rw-r--r--drivers/md/dm-rq.h1
-rw-r--r--drivers/md/dm-snap.c16
-rw-r--r--drivers/md/dm-stripe.c30
-rw-r--r--drivers/md/dm-switch.c4
-rw-r--r--drivers/md/dm-table.c7
-rw-r--r--drivers/md/dm-thin.c8
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm-zoned-metadata.c6
-rw-r--r--drivers/md/dm-zoned-target.c4
-rw-r--r--drivers/md/dm.c47
-rw-r--r--drivers/md/faulty.c4
-rw-r--r--drivers/md/linear.c6
-rw-r--r--drivers/md/md.c35
-rw-r--r--drivers/md/md.h10
-rw-r--r--drivers/md/multipath.c8
-rw-r--r--drivers/md/raid0.c11
-rw-r--r--drivers/md/raid1.c56
-rw-r--r--drivers/md/raid10.c85
-rw-r--r--drivers/md/raid5-cache.c79
-rw-r--r--drivers/md/raid5-ppl.c177
-rw-r--r--drivers/md/raid5.c45
-rw-r--r--drivers/media/Kconfig20
-rw-r--r--drivers/media/cec/Makefile4
-rw-r--r--drivers/media/cec/cec-adap.c281
-rw-r--r--drivers/media/cec/cec-api.c92
-rw-r--r--drivers/media/cec/cec-core.c27
-rw-r--r--drivers/media/cec/cec-pin.c802
-rw-r--r--drivers/media/common/saa7146/saa7146_i2c.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_vbi.c2
-rw-r--r--drivers/media/common/saa7146/saa7146_video.c2
-rw-r--r--drivers/media/common/siano/smsir.c6
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-colors.c150
-rw-r--r--drivers/media/common/v4l2-tpg/v4l2-tpg-core.c2
-rw-r--r--drivers/media/dvb-core/demux.h2
-rw-r--r--drivers/media/dvb-core/dmxdev.c24
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h1
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.c945
-rw-r--r--drivers/media/dvb-core/dvb_ca_en50221.h10
-rw-r--r--drivers/media/dvb-core/dvb_frontend.c15
-rw-r--r--drivers/media/dvb-frontends/Kconfig27
-rw-r--r--drivers/media/dvb-frontends/Makefile3
-rw-r--r--drivers/media/dvb-frontends/cx24123.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2841er.c75
-rw-r--r--drivers/media/dvb-frontends/dib0090.c11
-rw-r--r--drivers/media/dvb-frontends/dib7000p.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c20
-rw-r--r--drivers/media/dvb-frontends/dib8000.h1
-rw-r--r--drivers/media/dvb-frontends/dib9000.c22
-rw-r--r--drivers/media/dvb-frontends/dib9000.h7
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drxj.c35
-rw-r--r--drivers/media/dvb-frontends/drxd_hard.c6
-rw-r--r--drivers/media/dvb-frontends/isl6421.c76
-rw-r--r--drivers/media/dvb-frontends/mb86a16.c25
-rw-r--r--drivers/media/dvb-frontends/mn88472.c4
-rw-r--r--drivers/media/dvb-frontends/mn88473.c4
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c1873
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.h41
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h731
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_regs.h367
-rw-r--r--drivers/media/dvb-frontends/s5h1420.c2
-rw-r--r--drivers/media/dvb-frontends/stv0367.c156
-rw-r--r--drivers/media/dvb-frontends/stv0910.c1813
-rw-r--r--drivers/media/dvb-frontends/stv0910.h32
-rw-r--r--drivers/media/dvb-frontends/stv0910_regs.h4760
-rw-r--r--drivers/media/dvb-frontends/stv6111.c681
-rw-r--r--drivers/media/dvb-frontends/stv6111.h21
-rw-r--r--drivers/media/dvb-frontends/zd1301_demod.c2
-rw-r--r--drivers/media/i2c/Kconfig36
-rw-r--r--drivers/media/i2c/Makefile3
-rw-r--r--drivers/media/i2c/ad9389b.c2
-rw-r--r--drivers/media/i2c/adv7180.c2
-rw-r--r--drivers/media/i2c/adv748x/Makefile7
-rw-r--r--drivers/media/i2c/adv748x/adv748x-afe.c552
-rw-r--r--drivers/media/i2c/adv748x/adv748x-core.c833
-rw-r--r--drivers/media/i2c/adv748x/adv748x-csi2.c326
-rw-r--r--drivers/media/i2c/adv748x/adv748x-hdmi.c768
-rw-r--r--drivers/media/i2c/adv748x/adv748x.h425
-rw-r--r--drivers/media/i2c/adv7511.c5
-rw-r--r--drivers/media/i2c/adv7604.c7
-rw-r--r--drivers/media/i2c/adv7842.c5
-rw-r--r--drivers/media/i2c/dw9714.c26
-rw-r--r--drivers/media/i2c/et8ek8/et8ek8_driver.c26
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c59
-rw-r--r--drivers/media/i2c/m5mols/m5mols_core.c2
-rw-r--r--drivers/media/i2c/max2175.c2
-rw-r--r--drivers/media/i2c/mt9m111.c6
-rw-r--r--drivers/media/i2c/mt9t001.c8
-rw-r--r--drivers/media/i2c/ov13858.c101
-rw-r--r--drivers/media/i2c/ov5640.c3
-rw-r--r--drivers/media/i2c/ov5645.c49
-rw-r--r--drivers/media/i2c/ov5670.c2601
-rw-r--r--drivers/media/i2c/ov6650.c (renamed from drivers/media/i2c/soc_camera/ov6650.c)77
-rw-r--r--drivers/media/i2c/ov7670.c6
-rw-r--r--drivers/media/i2c/ov9650.c67
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c3
-rw-r--r--drivers/media/i2c/s5k5baf.c9
-rw-r--r--drivers/media/i2c/saa7127.c2
-rw-r--r--drivers/media/i2c/saa717x.c2
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c16
-rw-r--r--drivers/media/i2c/smiapp/smiapp-quirk.c8
-rw-r--r--drivers/media/i2c/soc_camera/Kconfig6
-rw-r--r--drivers/media/i2c/soc_camera/Makefile1
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c2
-rw-r--r--drivers/media/i2c/tc358743.c2
-rw-r--r--drivers/media/i2c/ths8200.c2
-rw-r--r--drivers/media/i2c/vs6624.c2
-rw-r--r--drivers/media/media-device.c16
-rw-r--r--drivers/media/media-entity.c2
-rw-r--r--drivers/media/pci/b2c2/flexcop-pci.c2
-rw-r--r--drivers/media/pci/bt8xx/bt878.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-i2c.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-input.c18
-rw-r--r--drivers/media/pci/bt8xx/dst_ca.c70
-rw-r--r--drivers/media/pci/cobalt/cobalt-alsa-pcm.c4
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c2
-rw-r--r--drivers/media/pci/cobalt/cobalt-i2c.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-mixer.c2
-rw-r--r--drivers/media/pci/cx18/cx18-alsa-pcm.c2
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-i2c.c8
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c10
-rw-r--r--drivers/media/pci/cx23885/cx23885-i2c.c2
-rw-r--r--drivers/media/pci/cx23885/cx23885-input.c16
-rw-r--r--drivers/media/pci/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821-audio-upstream.c13
-rw-r--r--drivers/media/pci/cx25821/cx25821-core.c5
-rw-r--r--drivers/media/pci/cx25821/cx25821-i2c.c2
-rw-r--r--drivers/media/pci/cx25821/cx25821.h2
-rw-r--r--drivers/media/pci/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/pci/cx88/cx88-blackbird.c2
-rw-r--r--drivers/media/pci/cx88/cx88-input.c30
-rw-r--r--drivers/media/pci/ddbridge/Kconfig21
-rw-r--r--drivers/media/pci/ddbridge/Makefile3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-core.c4070
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.c376
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-hw.h43
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.c230
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-i2c.h112
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-io.h71
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c346
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-maxs8.c444
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-maxs8.h29
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-regs.h159
-rw-r--r--drivers/media/pci/ddbridge/ddbridge.h341
-rw-r--r--drivers/media/pci/dm1105/dm1105.c8
-rw-r--r--drivers/media/pci/dt3155/dt3155.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-mixer.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-alsa-pcm.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-i2c.c18
-rw-r--r--drivers/media/pci/mantis/hopper_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_cards.c2
-rw-r--r--drivers/media/pci/mantis/mantis_common.h2
-rw-r--r--drivers/media/pci/mantis/mantis_i2c.c2
-rw-r--r--drivers/media/pci/mantis/mantis_input.c6
-rw-r--r--drivers/media/pci/meye/meye.c4
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c2
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c2
-rw-r--r--drivers/media/pci/ngene/ngene-i2c.c2
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/pt3/pt3.c11
-rw-r--r--drivers/media/pci/saa7134/saa7134-alsa.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-empress.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-i2c.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-input.c81
-rw-r--r--drivers/media/pci/saa7146/hexium_gemini.c2
-rw-r--r--drivers/media/pci/saa7146/hexium_orion.c2
-rw-r--r--drivers/media/pci/saa7146/mxb.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-i2c.c2
-rw-r--r--drivers/media/pci/smipcie/smipcie-ir.c6
-rw-r--r--drivers/media/pci/smipcie/smipcie.h2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-g723.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-gpio.c97
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-tw28.c3
-rw-r--r--drivers/media/pci/solo6x10/solo6x10-v4l2.c2
-rw-r--r--drivers/media/pci/solo6x10/solo6x10.h5
-rw-r--r--drivers/media/pci/sta2x11/sta2x11_vip.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.c2
-rw-r--r--drivers/media/pci/ttpci/av7110.h2
-rw-r--r--drivers/media/pci/ttpci/av7110_ca.c12
-rw-r--r--drivers/media/pci/ttpci/av7110_v4l.c2
-rw-r--r--drivers/media/pci/ttpci/budget-av.c2
-rw-r--r--drivers/media/pci/ttpci/budget-ci.c9
-rw-r--r--drivers/media/pci/ttpci/budget-patch.c2
-rw-r--r--drivers/media/pci/ttpci/budget.c2
-rw-r--r--drivers/media/pci/tw68/tw68-video.c2
-rw-r--r--drivers/media/pci/zoran/zoran_card.c2
-rw-r--r--drivers/media/platform/Kconfig21
-rw-r--r--drivers/media/platform/Makefile4
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c4
-rw-r--r--drivers/media/platform/atmel/atmel-isc.c6
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c4
-rw-r--r--drivers/media/platform/coda/coda-bit.c29
-rw-r--r--drivers/media/platform/coda/coda-common.c78
-rw-r--r--drivers/media/platform/coda/coda_regs.h1
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c2
-rw-r--r--drivers/media/platform/davinci/vpbe.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c4
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c2
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c2
-rw-r--r--drivers/media/platform/davinci/vpfe_capture.c2
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c28
-rw-r--r--drivers/media/platform/davinci/vpif_display.c2
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c8
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-m2m.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c8
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/fsl-viu.c6
-rw-r--r--drivers/media/platform/m2m-deinterlace.c4
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c4
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c2
-rw-r--r--drivers/media/platform/meson/Makefile1
-rw-r--r--drivers/media/platform/meson/ao-cec.c744
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_comp.c10
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_core.c8
-rw-r--r--drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c37
-rw-r--r--drivers/media/platform/mx2_emmaprp.c6
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c3
-rw-r--r--drivers/media/platform/omap3isp/isp.c161
-rw-r--r--drivers/media/platform/omap3isp/isp.h4
-rw-r--r--drivers/media/platform/omap3isp/ispccdc.c22
-rw-r--r--drivers/media/platform/omap3isp/ispccp2.c18
-rw-r--r--drivers/media/platform/omap3isp/ispcsi2.c6
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.c91
-rw-r--r--drivers/media/platform/omap3isp/ispcsiphy.h7
-rw-r--r--drivers/media/platform/omap3isp/ispreg.h4
-rw-r--r--drivers/media/platform/omap3isp/omap3isp.h6
-rw-r--r--drivers/media/platform/pxa_camera.c9
-rw-r--r--drivers/media/platform/qcom/camss-8x16/Makefile11
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csid.c1092
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csid.h82
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csiphy.c890
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-csiphy.h77
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-ispif.c1175
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-ispif.h85
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.c3088
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-vfe.h123
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.c860
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss-video.h70
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.c746
-rw-r--r--drivers/media/platform/qcom/camss-8x16/camss.h106
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c51
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h1
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c31
-rw-r--r--drivers/media/platform/qcom/venus/venc.c47
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c4
-rw-r--r--drivers/media/platform/rcar_fdp1.c2
-rw-r--r--drivers/media/platform/rcar_jpu.c2
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c1
-rw-r--r--drivers/media/platform/s5p-cec/s5p_cec.c7
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c200
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.h8
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-hw-exynos4.c9
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-regs.h2
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c5
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c8
-rw-r--r--drivers/media/platform/soc_camera/soc_mediabus.c3
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c2
-rw-r--r--drivers/media/platform/sti/cec/stih-cec.c4
-rw-r--r--drivers/media/platform/sti/delta/delta-v4l2.c6
-rw-r--r--drivers/media/platform/stm32/stm32-cec.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c495
-rw-r--r--drivers/media/platform/ti-vpe/cal.c4
-rw-r--r--drivers/media/platform/ti-vpe/vpe.c4
-rw-r--r--drivers/media/platform/via-camera.c2
-rw-r--r--drivers/media/platform/video-mux.c53
-rw-r--r--drivers/media/platform/vim2m.c4
-rw-r--r--drivers/media/platform/vimc/vimc-debayer.c2
-rw-r--r--drivers/media/platform/vimc/vimc-scaler.c2
-rw-r--r--drivers/media/platform/vimc/vimc-sensor.c2
-rw-r--r--drivers/media/platform/vivid/vivid-cec.c66
-rw-r--r--drivers/media/platform/vivid/vivid-core.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1.h7
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.c45
-rw-r--r--drivers/media/platform/vsp1/vsp1_bru.h4
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.c205
-rw-r--r--drivers/media/platform/vsp1/vsp1_dl.h1
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c286
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.h38
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c115
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c40
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h12
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c5
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h2
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.c49
-rw-r--r--drivers/media/platform/vsp1/vsp1_pipe.h48
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h60
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c27
-rw-r--r--drivers/media/platform/vsp1/vsp1_sru.c26
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c57
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c251
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c28
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c52
-rw-r--r--drivers/media/radio/dsbr100.c2
-rw-r--r--drivers/media/radio/radio-cadet.c2
-rw-r--r--drivers/media/radio/radio-gemtek.c2
-rw-r--r--drivers/media/radio/radio-keene.c2
-rw-r--r--drivers/media/radio/radio-ma901.c2
-rw-r--r--drivers/media/radio/radio-maxiradio.c2
-rw-r--r--drivers/media/radio/radio-mr800.c2
-rw-r--r--drivers/media/radio/radio-raremono.c2
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/radio-shark.c2
-rw-r--r--drivers/media/radio/radio-shark2.c2
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/radio/si4713/radio-platform-si4713.c2
-rw-r--r--drivers/media/radio/si4713/radio-usb-si4713.c4
-rw-r--r--drivers/media/radio/wl128x/fmdrv_v4l2.c2
-rw-r--r--drivers/media/rc/Kconfig53
-rw-r--r--drivers/media/rc/Makefile3
-rw-r--r--drivers/media/rc/ati_remote.c7
-rw-r--r--drivers/media/rc/ene_ir.c6
-rw-r--r--drivers/media/rc/fintek-cir.c4
-rw-r--r--drivers/media/rc/gpio-ir-recv.c31
-rw-r--r--drivers/media/rc/gpio-ir-tx.c176
-rw-r--r--drivers/media/rc/igorplugusb.c11
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-hw.h4
-rw-r--r--drivers/media/rc/img-ir/img-ir-jvc.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-nec.c20
-rw-r--r--drivers/media/rc/img-ir/img-ir-raw.c6
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc5.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-rc6.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sanyo.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sharp.c4
-rw-r--r--drivers/media/rc/img-ir/img-ir-sony.c27
-rw-r--r--drivers/media/rc/imon.c55
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/ir-jvc-decoder.c6
-rw-r--r--drivers/media/rc/ir-mce_kbd-decoder.c12
-rw-r--r--drivers/media/rc/ir-nec-decoder.c57
-rw-r--r--drivers/media/rc/ir-rc5-decoder.c25
-rw-r--r--drivers/media/rc/ir-rc6-decoder.c30
-rw-r--r--drivers/media/rc/ir-sanyo-decoder.c16
-rw-r--r--drivers/media/rc/ir-sharp-decoder.c6
-rw-r--r--drivers/media/rc/ir-sony-decoder.c23
-rw-r--r--drivers/media/rc/ir-spi.c1
-rw-r--r--drivers/media/rc/ir-xmp-decoder.c4
-rw-r--r--drivers/media/rc/ite-cir.c4
-rw-r--r--drivers/media/rc/keymaps/Makefile3
-rw-r--r--drivers/media/rc/keymaps/rc-adstech-dvb-t-pci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-alink-dtu-m.c8
-rw-r--r--drivers/media/rc/keymaps/rc-anysee.c8
-rw-r--r--drivers/media/rc/keymaps/rc-apac-viewcomp.c8
-rw-r--r--drivers/media/rc/keymaps/rc-asus-pc39.c8
-rw-r--r--drivers/media/rc/keymaps/rc-asus-ps3-100.c8
-rw-r--r--drivers/media/rc/keymaps/rc-ati-tv-wonder-hd-600.c8
-rw-r--r--drivers/media/rc/keymaps/rc-ati-x10.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-a16d.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-cardbus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-dvbt.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m135a.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-m733a-rm-k6.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia-rm-ks.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avermedia.c8
-rw-r--r--drivers/media/rc/keymaps/rc-avertv-303.c8
-rw-r--r--drivers/media/rc/keymaps/rc-azurewave-ad-tu700.c8
-rw-r--r--drivers/media/rc/keymaps/rc-behold-columbus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-behold.c8
-rw-r--r--drivers/media/rc/keymaps/rc-budget-ci-old.c8
-rw-r--r--drivers/media/rc/keymaps/rc-cec.c2
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy-1400.c8
-rw-r--r--drivers/media/rc/keymaps/rc-cinergy.c8
-rw-r--r--drivers/media/rc/keymaps/rc-d680-dmb.c8
-rw-r--r--drivers/media/rc/keymaps/rc-delock-61959.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dib0700-rc5.c8
-rw-r--r--drivers/media/rc/keymaps/rc-digitalnow-tinytwin.c8
-rw-r--r--drivers/media/rc/keymaps/rc-digittrade.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dm1105-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvb-t.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dntv-live-dvbt-pro.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dtt200u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvbsky.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-dvico-portable.c8
-rw-r--r--drivers/media/rc/keymaps/rc-em-terratec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv-fm53.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-encore-enltv2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-evga-indtube.c8
-rw-r--r--drivers/media/rc/keymaps/rc-eztv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-flydvb.c8
-rw-r--r--drivers/media/rc/keymaps/rc-flyvideo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-fusionhdtv-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-gadmei-rm008z.c8
-rw-r--r--drivers/media/rc/keymaps/rc-geekbox.c8
-rw-r--r--drivers/media/rc/keymaps/rc-genius-tvgo-a11mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-gotview7135.c8
-rw-r--r--drivers/media/rc/keymaps/rc-hauppauge.c8
-rw-r--r--drivers/media/rc/keymaps/rc-imon-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-imon-pad.c8
-rw-r--r--drivers/media/rc/keymaps/rc-iodata-bctv7e.c8
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v1.c8
-rw-r--r--drivers/media/rc/keymaps/rc-it913x-v2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kaiomy.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-315u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-pc150u.c8
-rw-r--r--drivers/media/rc/keymaps/rc-kworld-plus-tv-analog.c8
-rw-r--r--drivers/media/rc/keymaps/rc-leadtek-y04g0051.c8
-rw-r--r--drivers/media/rc/keymaps/rc-lme2510.c8
-rw-r--r--drivers/media/rc/keymaps/rc-manli.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-digitainer.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10-or2x.c8
-rw-r--r--drivers/media/rc/keymaps/rc-medion-x10.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-ii.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-digivox-iii.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere-plus.c8
-rw-r--r--drivers/media/rc/keymaps/rc-msi-tvanywhere.c8
-rw-r--r--drivers/media/rc/keymaps/rc-nebula.c8
-rw-r--r--drivers/media/rc/keymaps/rc-nec-terratec-cinergy-xs.c8
-rw-r--r--drivers/media/rc/keymaps/rc-norwood.c8
-rw-r--r--drivers/media/rc/keymaps/rc-npgtech.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pctv-sedna.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-color.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-grey.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-002t.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-mk12.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview-new.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pixelview.c8
-rw-r--r--drivers/media/rc/keymaps/rc-powercolor-real-angel.c8
-rw-r--r--drivers/media/rc/keymaps/rc-proteus-2309.c8
-rw-r--r--drivers/media/rc/keymaps/rc-purpletv.c8
-rw-r--r--drivers/media/rc/keymaps/rc-pv951.c8
-rw-r--r--drivers/media/rc/keymaps/rc-rc6-mce.c8
-rw-r--r--drivers/media/rc/keymaps/rc-real-audio-220-32-keys.c8
-rw-r--r--drivers/media/rc/keymaps/rc-reddo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-snapstream-firefly.c8
-rw-r--r--drivers/media/rc/keymaps/rc-streamzap.c8
-rw-r--r--drivers/media/rc/keymaps/rc-su3000.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tbs-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-ts35.c8
-rw-r--r--drivers/media/rc/keymaps/rc-technisat-usb2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-c-pci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-s2-hd.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-cinergy-xs.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim-2.c8
-rw-r--r--drivers/media/rc/keymaps/rc-terratec-slim.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tevii-nec.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tivo.c8
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand-02.c8
-rw-r--r--drivers/media/rc/keymaps/rc-total-media-in-hand.c8
-rw-r--r--drivers/media/rc/keymaps/rc-trekstor.c8
-rw-r--r--drivers/media/rc/keymaps/rc-tt-1500.c8
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan-dtv-cab-ci.c8
-rw-r--r--drivers/media/rc/keymaps/rc-twinhan1027.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-m1f.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-s350.c8
-rw-r--r--drivers/media/rc/keymaps/rc-videomate-tv-pvr.c8
-rw-r--r--drivers/media/rc/keymaps/rc-winfast-usbii-deluxe.c8
-rw-r--r--drivers/media/rc/keymaps/rc-winfast.c8
-rw-r--r--drivers/media/rc/keymaps/rc-zx-irdec.c79
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/mceusb.c40
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c246
-rw-r--r--drivers/media/rc/nuvoton-cir.c120
-rw-r--r--drivers/media/rc/nuvoton-cir.h24
-rw-r--r--drivers/media/rc/pwm-ir-tx.c138
-rw-r--r--drivers/media/rc/rc-core-priv.h5
-rw-r--r--drivers/media/rc/rc-ir-raw.c68
-rw-r--r--drivers/media/rc/rc-loopback.c6
-rw-r--r--drivers/media/rc/rc-main.c265
-rw-r--r--drivers/media/rc/redrat3.c4
-rw-r--r--drivers/media/rc/serial_ir.c46
-rw-r--r--drivers/media/rc/sir_ir.c6
-rw-r--r--drivers/media/rc/st_rc.c6
-rw-r--r--drivers/media/rc/streamzap.c4
-rw-r--r--drivers/media/rc/sunxi-cir.c6
-rw-r--r--drivers/media/rc/ttusbir.c4
-rw-r--r--drivers/media/rc/winbond-cir.c37
-rw-r--r--drivers/media/rc/zx-irdec.c184
-rw-r--r--drivers/media/tuners/fc0012.c2
-rw-r--r--drivers/media/tuners/fc0013.c2
-rw-r--r--drivers/media/tuners/tda18271-maps.c4
-rw-r--r--drivers/media/tuners/tuner-simple.c2
-rw-r--r--drivers/media/usb/airspy/airspy.c4
-rw-r--r--drivers/media/usb/as102/as102_usb_drv.c2
-rw-r--r--drivers/media/usb/au0828/Kconfig1
-rw-r--r--drivers/media/usb/au0828/au0828-core.c2
-rw-r--r--drivers/media/usb/au0828/au0828-i2c.c4
-rw-r--r--drivers/media/usb/au0828/au0828-input.c6
-rw-r--r--drivers/media/usb/au0828/au0828-video.c2
-rw-r--r--drivers/media/usb/b2c2/flexcop-usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_usb.c2
-rw-r--r--drivers/media/usb/cpia2/cpia2_v4l.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-417.c4
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-audio.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-core.c3
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-i2c.c10
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-input.c6
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-video.c2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9015.c11
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c16
-rw-r--r--drivers/media/usb/dvb-usb-v2/anysee.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/az6007.c13
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvbsky.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/rtl28xxu.c13
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c30
-rw-r--r--drivers/media/usb/dvb-usb/dib0700.h2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_core.c28
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c152
-rw-r--r--drivers/media/usb/dvb-usb/dtt200u.c12
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb-remote.c2
-rw-r--r--drivers/media/usb/dvb-usb/dvb-usb.h2
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c74
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c4
-rw-r--r--drivers/media/usb/dvb-usb/pctv452e.c6
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/dvb-usb/ttusb2.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-audio.c4
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-input.c126
-rw-r--r--drivers/media/usb/go7007/go7007-v4l2.c4
-rw-r--r--drivers/media/usb/go7007/snd-go7007.c2
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/xirlink_cit.c2
-rw-r--r--drivers/media/usb/hackrf/hackrf.c4
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-i2c.c5
-rw-r--r--drivers/media/usb/msi2500/msi2500.c4
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c7
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-encoder.c6
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c14
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/rainshadow-cec/rainshadow-cec.c7
-rw-r--r--drivers/media/usb/s2255/s2255drv.c4
-rw-r--r--drivers/media/usb/stk1160/stk1160-core.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-i2c.c2
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c6
-rw-r--r--drivers/media/usb/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-cards.c2
-rw-r--r--drivers/media/usb/tm6000/tm6000-input.c40
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c4
-rw-r--r--drivers/media/usb/ttusb-budget/dvb-ttusb-budget.c4
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-audio.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-core.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv-video.c2
-rw-r--r--drivers/media/usb/usbvision/usbvision-i2c.c11
-rw-r--r--drivers/media/usb/usbvision/usbvision-video.c19
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c7
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c28
-rw-r--r--drivers/media/usb/uvc/uvc_entity.c2
-rw-r--r--drivers/media/usb/uvc/uvc_queue.c9
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h4
-rw-r--r--drivers/media/usb/zr364xx/zr364xx.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c7
-rw-r--r--drivers/media/v4l2-core/v4l2-clk.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c13
-rw-r--r--drivers/media/v4l2-core/v4l2-flash-led-class.c139
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c139
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c12
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c27
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c5
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c8
-rw-r--r--drivers/media/v4l2-core/videobuf2-vmalloc.c8
-rw-r--r--drivers/memory/atmel-ebi.c40
-rw-r--r--drivers/memory/jz4780-nemc.c12
-rw-r--r--drivers/memory/mtk-smi.c96
-rw-r--r--drivers/memory/mvebu-devbus.c12
-rw-r--r--drivers/memory/omap-gpmc.c16
-rw-r--r--drivers/message/fusion/mptbase.c8
-rw-r--r--drivers/message/fusion/mptfc.c10
-rw-r--r--drivers/message/fusion/mptsas.c86
-rw-r--r--drivers/mfd/Kconfig57
-rw-r--r--drivers/mfd/Makefile3
-rw-r--r--drivers/mfd/ab8500-core.c6
-rw-r--r--drivers/mfd/atmel-smc.c69
-rw-r--r--drivers/mfd/axp20x-rsb.c1
-rw-r--r--drivers/mfd/axp20x.c32
-rw-r--r--drivers/mfd/bd9571mwv.c230
-rw-r--r--drivers/mfd/da9052-core.c28
-rw-r--r--drivers/mfd/da9052-spi.c2
-rw-r--r--drivers/mfd/da9055-i2c.c2
-rw-r--r--drivers/mfd/da9062-core.c6
-rw-r--r--drivers/mfd/db8500-prcmu.c62
-rw-r--r--drivers/mfd/dm355evm_msp.c2
-rw-r--r--drivers/mfd/hi6421-pmic-core.c89
-rw-r--r--drivers/mfd/intel-lpss-pci.c1
-rw-r--r--drivers/mfd/intel-lpss.c8
-rw-r--r--drivers/mfd/intel_soc_pmic_core.c34
-rw-r--r--drivers/mfd/intel_soc_pmic_core.h3
-rw-r--r--drivers/mfd/intel_soc_pmic_crc.c27
-rw-r--r--drivers/mfd/kempld-core.c2
-rw-r--r--drivers/mfd/lp87565.c7
-rw-r--r--drivers/mfd/lpc_ich.c10
-rw-r--r--drivers/mfd/max8925-i2c.c2
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/omap-usb-tll.c4
-rw-r--r--drivers/mfd/retu-mfd.c12
-rw-r--r--drivers/mfd/rk808.c147
-rw-r--r--drivers/mfd/rtsx_pcr.c4
-rw-r--r--drivers/mfd/stm32-lptimer.c107
-rw-r--r--drivers/mfd/t7l66xb.c17
-rw-r--r--drivers/mfd/tps6105x.c8
-rw-r--r--drivers/mfd/tps65010.c2
-rw-r--r--drivers/mfd/tps68470.c106
-rw-r--r--drivers/mfd/twl-core.c10
-rw-r--r--drivers/mfd/twl4030-audio.c2
-rw-r--r--drivers/mfd/twl4030-irq.c2
-rw-r--r--drivers/mfd/twl4030-power.c2
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/apds9802als.c4
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/aspeed-lpc-snoop.c34
-rw-r--r--drivers/misc/bh1770glc.c2
-rw-r--r--drivers/misc/cxl/api.c4
-rw-r--r--drivers/misc/cxl/fault.c16
-rw-r--r--drivers/misc/cxl/file.c8
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/ds1682.c2
-rw-r--r--drivers/misc/eeprom/eeprom.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c24
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c126
-rw-r--r--drivers/misc/eeprom/max6875.c2
-rw-r--r--drivers/misc/hmc6352.c2
-rw-r--r--drivers/misc/hpilo.c2
-rw-r--r--drivers/misc/ioc4.c2
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/lkdtm.h30
-rw-r--r--drivers/misc/lkdtm_bugs.c134
-rw-r--r--drivers/misc/lkdtm_core.c28
-rw-r--r--drivers/misc/lkdtm_refcount.c400
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/mei/hw-me.c45
-rw-r--r--drivers/misc/mei/hw-me.h39
-rw-r--r--drivers/misc/mei/pci-me.c109
-rw-r--r--drivers/misc/mic/scif/scif_dma.c11
-rw-r--r--drivers/misc/pch_phub.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c132
-rw-r--r--drivers/misc/sgi-gru/grutlbpurge.c12
-rw-r--r--drivers/misc/sram.c12
-rw-r--r--drivers/misc/ti-st/st_kim.c2
-rw-r--r--drivers/misc/tifm_7xx1.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c10
-rw-r--r--drivers/mmc/Kconfig7
-rw-r--r--drivers/mmc/Makefile2
-rw-r--r--drivers/mmc/core/block.c326
-rw-r--r--drivers/mmc/core/core.c68
-rw-r--r--drivers/mmc/core/core.h6
-rw-r--r--drivers/mmc/core/debugfs.c89
-rw-r--r--drivers/mmc/core/host.c6
-rw-r--r--drivers/mmc/core/host.h1
-rw-r--r--drivers/mmc/core/mmc.c31
-rw-r--r--drivers/mmc/core/mmc_ops.c3
-rw-r--r--drivers/mmc/core/mmc_test.c97
-rw-r--r--drivers/mmc/core/queue.c7
-rw-r--r--drivers/mmc/core/queue.h6
-rw-r--r--drivers/mmc/core/sd.c12
-rw-r--r--drivers/mmc/host/Kconfig34
-rw-r--r--drivers/mmc/host/Makefile13
-rw-r--r--drivers/mmc/host/android-goldfish.c8
-rw-r--r--drivers/mmc/host/atmel-mci.c10
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/cavium-octeon.c13
-rw-r--r--drivers/mmc/host/cavium-thunderx.c6
-rw-r--r--drivers/mmc/host/cavium.c6
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/dw_mmc-k3.c298
-rw-r--r--drivers/mmc/host/dw_mmc.c62
-rw-r--r--drivers/mmc/host/dw_mmc.h4
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c714
-rw-r--r--drivers/mmc/host/mmci.c2
-rw-r--r--drivers/mmc/host/moxart-mmc.c2
-rw-r--r--drivers/mmc/host/mtk-sd.c3
-rw-r--r--drivers/mmc/host/mxcmmc.c34
-rw-r--r--drivers/mmc/host/of_mmc_spi.c2
-rw-r--r--drivers/mmc/host/omap_hsmmc.c8
-rw-r--r--drivers/mmc/host/renesas_sdhi.h2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c31
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c287
-rw-r--r--drivers/mmc/host/renesas_sdhi_sys_dmac.c34
-rw-r--r--drivers/mmc/host/rtsx_usb_sdmmc.c2
-rw-r--r--drivers/mmc/host/s3cmci.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c7
-rw-r--r--drivers/mmc/host/sdhci-bcm-kona.c4
-rw-r--r--drivers/mmc/host/sdhci-brcmstb.c39
-rw-r--r--drivers/mmc/host/sdhci-cadence.c88
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h3
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c8
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c177
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c23
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c50
-rw-r--r--drivers/mmc/host/sdhci-pic32.c2
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c28
-rw-r--r--drivers/mmc/host/sdhci-pltfm.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c30
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c2
-rw-r--r--drivers/mmc/host/sdhci-s3c.c34
-rw-r--r--drivers/mmc/host/sdhci-sirf.c43
-rw-r--r--drivers/mmc/host/sdhci-st.c28
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci-xenon-phy.c34
-rw-r--r--drivers/mmc/host/sdhci-xenon.c121
-rw-r--r--drivers/mmc/host/sdhci-xenon.h2
-rw-r--r--drivers/mmc/host/sdhci.c94
-rw-r--r--drivers/mmc/host/sdhci.h5
-rw-r--r--drivers/mmc/host/sdricoh_cs.c2
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mmc/host/sunxi-mmc.c95
-rw-r--r--drivers/mmc/host/tmio_mmc.h9
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c32
-rw-r--r--drivers/mmc/host/toshsd.c2
-rw-r--r--drivers/mmc/host/usdhi6rol0.c2
-rw-r--r--drivers/mmc/host/via-sdmmc.c2
-rw-r--r--drivers/mmc/host/vub300.c4
-rw-r--r--drivers/mmc/host/wbsd.c2
-rw-r--r--drivers/mmc/host/wmt-sdmmc.c8
-rw-r--r--drivers/mtd/devices/docg3.c49
-rw-r--r--drivers/mtd/devices/docg3.h2
-rw-r--r--drivers/mtd/devices/spear_smi.c2
-rw-r--r--drivers/mtd/devices/st_spi_fsm.c20
-rw-r--r--drivers/mtd/inftlcore.c2
-rw-r--r--drivers/mtd/maps/amd76xrom.c4
-rw-r--r--drivers/mtd/maps/ck804xrom.c4
-rw-r--r--drivers/mtd/maps/esb2rom.c4
-rw-r--r--drivers/mtd/maps/ichxrom.c4
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c2
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/pci.c2
-rw-r--r--drivers/mtd/maps/physmap_of_core.c8
-rw-r--r--drivers/mtd/maps/physmap_of_gemini.c16
-rw-r--r--drivers/mtd/maps/physmap_of_versatile.c2
-rw-r--r--drivers/mtd/maps/sun_uflash.c4
-rw-r--r--drivers/mtd/mtdcore.c18
-rw-r--r--drivers/mtd/mtdswap.c21
-rw-r--r--drivers/mtd/nand/Kconfig2
-rw-r--r--drivers/mtd/nand/ams-delta.c2
-rw-r--r--drivers/mtd/nand/atmel/nand-controller.c38
-rw-r--r--drivers/mtd/nand/atmel/pmecc.c2
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h2
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c2
-rw-r--r--drivers/mtd/nand/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/cafe_nand.c2
-rw-r--r--drivers/mtd/nand/cmx270_nand.c2
-rw-r--r--drivers/mtd/nand/cs553x_nand.c2
-rw-r--r--drivers/mtd/nand/davinci_nand.c2
-rw-r--r--drivers/mtd/nand/denali.c3
-rw-r--r--drivers/mtd/nand/denali.h2
-rw-r--r--drivers/mtd/nand/denali_dt.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c2
-rw-r--r--drivers/mtd/nand/docg4.c2
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c2
-rw-r--r--drivers/mtd/nand/fsl_upm.c2
-rw-r--r--drivers/mtd/nand/fsmc_nand.c2
-rw-r--r--drivers/mtd/nand/gpio.c2
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h2
-rw-r--r--drivers/mtd/nand/hisi504_nand.c2
-rw-r--r--drivers/mtd/nand/jz4740_nand.c2
-rw-r--r--drivers/mtd/nand/jz4780_nand.c2
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c12
-rw-r--r--drivers/mtd/nand/lpc32xx_slc.c11
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c2
-rw-r--r--drivers/mtd/nand/mtk_ecc.c4
-rw-r--r--drivers/mtd/nand/mtk_nand.c2
-rw-r--r--drivers/mtd/nand/mxc_nand.c9
-rw-r--r--drivers/mtd/nand/nand_amd.c2
-rw-r--r--drivers/mtd/nand/nand_base.c307
-rw-r--r--drivers/mtd/nand/nand_bbt.c2
-rw-r--r--drivers/mtd/nand/nand_bch.c2
-rw-r--r--drivers/mtd/nand/nand_ecc.c2
-rw-r--r--drivers/mtd/nand/nand_hynix.c6
-rw-r--r--drivers/mtd/nand/nand_ids.c2
-rw-r--r--drivers/mtd/nand/nand_macronix.c2
-rw-r--r--drivers/mtd/nand/nand_micron.c2
-rw-r--r--drivers/mtd/nand/nand_samsung.c2
-rw-r--r--drivers/mtd/nand/nand_timings.c2
-rw-r--r--drivers/mtd/nand/nand_toshiba.c2
-rw-r--r--drivers/mtd/nand/nandsim.c54
-rw-r--r--drivers/mtd/nand/ndfc.c2
-rw-r--r--drivers/mtd/nand/nuc900_nand.c2
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/orion_nand.c9
-rw-r--r--drivers/mtd/nand/oxnas_nand.c27
-rw-r--r--drivers/mtd/nand/pasemi_nand.c2
-rw-r--r--drivers/mtd/nand/plat_nand.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c2
-rw-r--r--drivers/mtd/nand/qcom_nandc.c919
-rw-r--r--drivers/mtd/nand/r852.h2
-rw-r--r--drivers/mtd/nand/s3c2410.c2
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sharpsl.c4
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c2
-rw-r--r--drivers/mtd/nand/sunxi_nand.c4
-rw-r--r--drivers/mtd/nand/tango_nand.c2
-rw-r--r--drivers/mtd/nand/tmio_nand.c6
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c2
-rw-r--r--drivers/mtd/nand/vf610_nfc.c11
-rw-r--r--drivers/mtd/nand/xway_nand.c2
-rw-r--r--drivers/mtd/nftlcore.c2
-rw-r--r--drivers/mtd/nftlmount.c2
-rw-r--r--drivers/mtd/ofpart.c23
-rw-r--r--drivers/mtd/spi-nor/Kconfig16
-rw-r--r--drivers/mtd/spi-nor/Makefile3
-rw-r--r--drivers/mtd/spi-nor/aspeed-smc.c13
-rw-r--r--drivers/mtd/spi-nor/atmel-quadspi.c1
-rw-r--r--drivers/mtd/spi-nor/hisi-sfc.c8
-rw-r--r--drivers/mtd/spi-nor/intel-spi-pci.c82
-rw-r--r--drivers/mtd/spi-nor/mtk-quadspi.c1
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c844
-rw-r--r--drivers/mtd/ssfdc.c2
-rw-r--r--drivers/mtd/tests/nandbiterrs.c2
-rw-r--r--drivers/mtd/ubi/block.c6
-rw-r--r--drivers/mtd/ubi/build.c20
-rw-r--r--drivers/mtd/ubi/fastmap.c2
-rw-r--r--drivers/mtd/ubi/ubi-media.h4
-rw-r--r--drivers/mux/Makefile5
-rw-r--r--drivers/mux/adg792a.c (renamed from drivers/mux/mux-adg792a.c)0
-rw-r--r--drivers/mux/core.c (renamed from drivers/mux/mux-core.c)14
-rw-r--r--drivers/mux/gpio.c (renamed from drivers/mux/mux-gpio.c)0
-rw-r--r--drivers/mux/mmio.c (renamed from drivers/mux/mux-mmio.c)0
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/appletalk/ipddp.c4
-rw-r--r--drivers/net/arcnet/arcdevice.h2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/bonding/bond_main.c30
-rw-r--r--drivers/net/bonding/bond_options.c3
-rw-r--r--drivers/net/bonding/bond_sysfs.c2
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c83
-rw-r--r--drivers/net/dsa/b53/b53_priv.h16
-rw-r--r--drivers/net/dsa/bcm_sf2.c52
-rw-r--r--drivers/net/dsa/bcm_sf2.h13
-rw-r--r--drivers/net/dsa/bcm_sf2_cfp.c8
-rw-r--r--drivers/net/dsa/bcm_sf2_regs.h3
-rw-r--r--drivers/net/dsa/dsa_loop.c42
-rw-r--r--drivers/net/dsa/lan9303-core.c137
-rw-r--r--drivers/net/dsa/lan9303.h11
-rw-r--r--drivers/net/dsa/lan9303_i2c.c2
-rw-r--r--drivers/net/dsa/lan9303_mdio.c23
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c124
-rw-r--r--drivers/net/dsa/mt7530.c43
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c416
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h146
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.c104
-rw-r--r--drivers/net/dsa/mv88e6xxx/global2.h41
-rw-r--r--drivers/net/dsa/mv88e6xxx/phy.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.h3
-rw-r--r--drivers/net/dsa/qca8k.c112
-rw-r--r--drivers/net/dsa/qca8k.h1
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c509.c4
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c2
-rw-r--r--drivers/net/ethernet/amd/a2065.c2
-rw-r--r--drivers/net/ethernet/amd/ariadne.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c18
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h33
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c25
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c207
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c501
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c86
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c97
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-mdio.c81
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v1.c54
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c352
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h92
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c7
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c31
-rw-r--r--drivers/net/ethernet/apple/mace.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c92
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_utils.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_vec.c12
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h3
-rw-r--r--drivers/net/ethernet/arc/emac_main.c13
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig12
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c112
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c478
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h95
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h500
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c834
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h158
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c513
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.h89
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c81
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h14
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c256
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c12
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/cadence/macb_pci.c2
-rwxr-xr-xdrivers/net/ethernet/cadence/macb_ptp.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c82
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/cn23xx_vf_device.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c728
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_ethtool.c352
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c956
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c598
-rw-r--r--drivers/net/ethernet/cavium/liquidio/liquidio_common.h11
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h13
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_console.c153
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.c27
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_device.h20
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c10
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h35
-rw-r--r--drivers/net/ethernet/cavium/liquidio/request_manager.c11
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h59
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c192
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c211
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sched.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c978
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h177
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c68
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h86
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c456
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip.h2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c4
-rw-r--r--drivers/net/ethernet/ec_bhf.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c3
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c95
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.h2
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c118
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c13
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/Makefile2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c114
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.h77
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c118
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.c783
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_keygen.h46
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c63
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.h7
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.c52
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c2
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c20
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c2
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig27
-rw-r--r--drivers/net/ethernet/hisilicon/Makefile1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c135
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/Makefile7
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c300
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h444
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile11
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c356
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h740
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c4265
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h519
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c213
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h17
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c1015
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h106
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c2891
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.h593
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_ethtool.c493
-rw-r--r--drivers/net/ethernet/hp/hp100.c2
-rw-r--r--drivers/net/ethernet/huawei/Kconfig19
-rw-r--r--drivers/net/ethernet/huawei/Makefile5
-rw-r--r--drivers/net/ethernet/huawei/hinic/Kconfig12
-rw-r--r--drivers/net/ethernet/huawei/hinic/Makefile6
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_common.c80
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_common.h38
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_dev.h64
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.c978
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_api_cmd.h208
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c946
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h187
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h149
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c1013
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h239
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.c886
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_eqs.h265
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.c351
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h272
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.c533
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_io.h97
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c597
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h153
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c887
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h201
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp_ctxt.h214
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c878
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h117
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h368
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c1112
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.c379
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_port.h198
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c509
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.h55
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c504
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.h62
-rw-r--r--drivers/net/ethernet/i825xx/lasi_82596.c16
-rw-r--r--drivers/net/ethernet/i825xx/lib82596.c9
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c6
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c5
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c63
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h1
-rw-r--r--drivers/net/ethernet/ibm/emac/debug.h2
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c8
-rw-r--r--drivers/net/ethernet/ibm/emac/rgmii.c18
-rw-r--r--drivers/net/ethernet/ibm/emac/tah.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/zmii.c17
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c214
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h17
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c14
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h44
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c8
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c160
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c318
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_nvm.c134
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_prototype.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c96
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c124
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_common.c6
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_osdep.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c75
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.h7
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h31
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c45
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c116
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c44
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c57
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h14
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c23
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c4
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c47
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c12
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h1
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c30
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c132
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c102
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c5
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c1591
-rw-r--r--drivers/net/ethernet/marvell/skge.c4
-rw-r--r--drivers/net/ethernet/mediatek/Kconfig6
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c183
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.h80
-rw-r--r--drivers/net/ethernet/mellanox/Kconfig5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw_qos.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c271
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c73
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/Makefile1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c261
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h282
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c298
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c376
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c229
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c213
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c476
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c135
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c201
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.h95
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c169
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c57
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/srq.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c62
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Makefile5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h422
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c238
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c80
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c578
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.h17
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c214
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h79
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c3226
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h51
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/switchx2.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/trap.h26
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c15
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h8
-rw-r--r--drivers/net/ethernet/neterion/s2io.c45
-rw-r--r--drivers/net/ethernet/netronome/Kconfig1
-rw-r--r--drivers/net/ethernet/netronome/nfp/Makefile1
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/jit.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c30
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/verifier.c24
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.c75
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h22
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c144
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.h11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c139
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.c22
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app.h45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_app_nic.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c111
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.h4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c35
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h1
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c13
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c593
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c65
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c88
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c243
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h86
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.c39
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_port.h60
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp_eth.c18
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c45
-rw-r--r--drivers/net/ethernet/netronome/nfp/nic/main.c14
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c1
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c5
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c2
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c145
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h49
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c115
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h18
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c58
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c66
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h37
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c157
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c75
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h53
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h19
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ethtool.c205
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c483
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_dbg.c49
-rw-r--r--drivers/net/ethernet/qualcomm/Kconfig2
-rw-r--r--drivers/net/ethernet/qualcomm/Makefile2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Kconfig12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/Makefile10
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c356
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h55
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c271
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h26
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h86
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c106
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c102
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h44
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c174
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h29
-rw-r--r--drivers/net/ethernet/realtek/r8169.c5
-rw-r--r--drivers/net/ethernet/renesas/ravb.h2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c131
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c2
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c5
-rw-r--r--drivers/net/ethernet/rocker/rocker_ofdpa.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c8
-rw-r--r--drivers/net/ethernet/sfc/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.h4
-rw-r--r--drivers/net/ethernet/sfc/falcon/tx.c13
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c232
-rw-r--r--drivers/net/ethernet/sfc/tx.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c15
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c152
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c193
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c2
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c2
-rw-r--r--drivers/net/ethernet/sun/niu.c24
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c2
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c90
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw-common.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c10
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c10
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c14
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c48
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/fddi/defxx.c2
-rw-r--r--drivers/net/geneve.c322
-rw-r--r--drivers/net/hamradio/baycom_par.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/baycom_ser_hdx.c2
-rw-r--r--drivers/net/hamradio/dmascc.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h77
-rw-r--r--drivers/net/hyperv/netvsc.c462
-rw-r--r--drivers/net/hyperv/netvsc_drv.c822
-rw-r--r--drivers/net/hyperv/rndis_filter.c274
-rw-r--r--drivers/net/ieee802154/ca8210.c6
-rw-r--r--drivers/net/ieee802154/mrf24j40.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c1
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/phy/Kconfig74
-rw-r--r--drivers/net/phy/Makefile7
-rw-r--r--drivers/net/phy/bcm7xxx.c2
-rw-r--r--drivers/net/phy/dp83640.c7
-rw-r--r--drivers/net/phy/marvell.c320
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c103
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/phy/mdio-i2c.c109
-rw-r--r--drivers/net/phy/mdio-i2c.h19
-rw-r--r--drivers/net/phy/mdio-mux-bcm-iproc.c2
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c21
-rw-r--r--drivers/net/phy/mdio-mux.c34
-rw-r--r--drivers/net/phy/phy-core.c180
-rw-r--r--drivers/net/phy/phy.c238
-rw-r--r--drivers/net/phy/phy_device.c43
-rw-r--r--drivers/net/phy/phylink.c1462
-rw-r--r--drivers/net/phy/rockchip.c233
-rw-r--r--drivers/net/phy/sfp-bus.c475
-rw-r--r--drivers/net/phy/sfp.c915
-rw-r--r--drivers/net/phy/sfp.h28
-rw-r--r--drivers/net/tap.c11
-rw-r--r--drivers/net/tun.c270
-rw-r--r--drivers/net/usb/catc.c2
-rw-r--r--drivers/net/usb/cdc-phonet.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c9
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/smsc95xx.c11
-rw-r--r--drivers/net/virtio_net.c352
-rw-r--r--drivers/net/vrf.c145
-rw-r--r--drivers/net/vxlan.c162
-rw-r--r--drivers/net/wan/dscc4.c129
-rw-r--r--drivers/net/wan/z85230.c30
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c299
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h30
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c29
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h9
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c31
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c150
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c105
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h14
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c1106
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.h128
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c166
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h271
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/wow.h1
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c5
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c52
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h3
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig12
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile2
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c84
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c42
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c27
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c14
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h720
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c18
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/debug.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c7
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c3
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c11
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h3
-rw-r--r--drivers/net/wireless/cisco/airo.c2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c34
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c17
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/8000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/9000.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/a000.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h206
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h144
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h (renamed from drivers/net/wireless/intel/iwlwifi/fw/api.h)78
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-coex.h)73
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h184
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/context.h94
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h)11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h127
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h345
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h183
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/led.h71
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h152
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-mac.h)33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h101
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/paging.h108
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h164
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h258
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-power.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h)31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-scan.h)11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sf.h138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h)15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/stats.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-stats.h)13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tdls.h208
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h386
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tof.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tof.h)9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h)66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/txq.h163
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/common_rx.c88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c)474
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.h (renamed from drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.h)125
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/notif-wait.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/nvm.c162
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c414
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h158
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/smem.c155
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c41
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c14
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c138
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c310
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c75
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h2846
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c559
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/led.c60
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c413
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c229
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h158
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c200
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c157
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c148
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c565
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tof.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c176
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c18
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c48
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c63
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c22
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/intersil/orinoco/orinoco_usb.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c35
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfp.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/debugfs.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/init.c32
-rw-r--r--drivers/net/wireless/marvell/mwifiex/join.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c173
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h14
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c126
-rw-r--r--drivers/net/wireless/marvell/mwifiex/scan.c15
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c19
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c5
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_ioctl.c121
-rw-r--r--drivers/net/wireless/marvell/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_cmd.c34
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c5
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Makefile4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h1
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c315
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.h4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.c486
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/commands.h5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c5
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.h27
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/event.c67
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c408
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h15
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_ipc.h11
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_regs_pearl.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink.h202
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.c26
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/qlink_util.h10
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2500usb.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800lib.c5
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800mmio.c13
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c17
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt73usb.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbt_precomp.h16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c40
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.c28
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/btcoexist/rtl_btc.h1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.c12
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/trx.h3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/dm.c8
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c7
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c365
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.c192
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/table.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c15
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c43
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c21
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h49
-rw-r--r--drivers/net/wireless/rsi/Makefile1
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c80
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_debugfs.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c368
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c495
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c741
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_ps.c146
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c157
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c84
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c138
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c6
-rw-r--r--drivers/net/wireless/rsi/rsi_common.h1
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h66
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h88
-rw-r--r--drivers/net/wireless/rsi/rsi_mgmt.h258
-rw-r--r--drivers/net/wireless/rsi/rsi_ps.h64
-rw-r--r--drivers/net/wireless/rsi/rsi_sdio.h7
-rw-r--r--drivers/net/wireless/rsi/rsi_usb.h6
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c23
-rw-r--r--drivers/net/wireless/ti/wlcore/sdio.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c1
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h3
-rw-r--r--drivers/net/wireless/wl3501_cs.c2
-rw-r--r--drivers/net/wireless/zydas/zd1201.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_rf_rf2959.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/ntb/ntb_transport.c6
-rw-r--r--drivers/ntb/test/ntb_tool.c2
-rw-r--r--drivers/nvdimm/btt.c201
-rw-r--r--drivers/nvdimm/btt.h11
-rw-r--r--drivers/nvdimm/btt_devs.c4
-rw-r--r--drivers/nvdimm/bus.c27
-rw-r--r--drivers/nvdimm/claim.c9
-rw-r--r--drivers/nvdimm/core.c10
-rw-r--r--drivers/nvdimm/label.c30
-rw-r--r--drivers/nvdimm/namespace_devs.c6
-rw-r--r--drivers/nvdimm/nd.h25
-rw-r--r--drivers/nvdimm/pfn_devs.c53
-rw-r--r--drivers/nvdimm/pmem.c48
-rw-r--r--drivers/nvdimm/pmem.h14
-rw-r--r--drivers/nvdimm/region_devs.c6
-rw-r--r--drivers/nvme/host/core.c359
-rw-r--r--drivers/nvme/host/fabrics.c26
-rw-r--r--drivers/nvme/host/fc.c149
-rw-r--r--drivers/nvme/host/lightnvm.c41
-rw-r--r--drivers/nvme/host/nvme.h30
-rw-r--r--drivers/nvme/host/pci.c110
-rw-r--r--drivers/nvme/host/rdma.c603
-rw-r--r--drivers/nvme/target/admin-cmd.c39
-rw-r--r--drivers/nvme/target/configfs.c2
-rw-r--r--drivers/nvme/target/core.c15
-rw-r--r--drivers/nvme/target/fabrics-cmd.c1
-rw-r--r--drivers/nvme/target/fc.c57
-rw-r--r--drivers/nvme/target/fcloop.c3
-rw-r--r--drivers/nvme/target/io-cmd.c6
-rw-r--r--drivers/nvme/target/loop.c1
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c5
-rw-r--r--drivers/nvmem/core.c43
-rw-r--r--drivers/nvmem/lpc18xx_eeprom.c2
-rw-r--r--drivers/of/address.c24
-rw-r--r--drivers/of/base.c46
-rw-r--r--drivers/of/device.c131
-rw-r--r--drivers/of/dynamic.c33
-rw-r--r--drivers/of/irq.c77
-rw-r--r--drivers/of/of_mdio.c43
-rw-r--r--drivers/of/of_pci.c72
-rw-r--r--drivers/of/overlay.c142
-rw-r--r--drivers/of/platform.c34
-rw-r--r--drivers/of/property.c89
-rw-r--r--drivers/of/unittest-data/Makefile19
-rw-r--r--drivers/of/unittest-data/overlay.dts31
-rw-r--r--drivers/of/unittest-data/overlay_bad_symbol.dts22
-rw-r--r--drivers/of/unittest-data/overlay_base.dts11
-rw-r--r--drivers/of/unittest.c77
-rw-r--r--drivers/parisc/asp.c4
-rw-r--r--drivers/parisc/ccio-dma.c4
-rw-r--r--drivers/parisc/ccio-rm-dma.c6
-rw-r--r--drivers/parisc/dino.c6
-rw-r--r--drivers/parisc/eisa.c4
-rw-r--r--drivers/parisc/hppb.c6
-rw-r--r--drivers/parisc/lasi.c4
-rw-r--r--drivers/parisc/lba_pci.c46
-rw-r--r--drivers/parisc/sba_iommu.c6
-rw-r--r--drivers/parisc/superio.c4
-rw-r--r--drivers/parisc/wax.c4
-rw-r--r--drivers/parport/daisy.c2
-rw-r--r--drivers/parport/parport_atari.c2
-rw-r--r--drivers/parport/parport_ax88796.c6
-rw-r--r--drivers/parport/parport_gsc.c10
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c24
-rw-r--r--drivers/pci/dwc/Kconfig12
-rw-r--r--drivers/pci/dwc/pci-dra7xx.c26
-rw-r--r--drivers/pci/dwc/pci-exynos.c12
-rw-r--r--drivers/pci/dwc/pci-imx6.c11
-rw-r--r--drivers/pci/dwc/pci-keystone-dw.c14
-rw-r--r--drivers/pci/dwc/pci-keystone.c10
-rw-r--r--drivers/pci/dwc/pci-keystone.h4
-rw-r--r--drivers/pci/dwc/pci-layerscape.c102
-rw-r--r--drivers/pci/dwc/pcie-armada8k.c12
-rw-r--r--drivers/pci/dwc/pcie-artpec6.c14
-rw-r--r--drivers/pci/dwc/pcie-designware-ep.c9
-rw-r--r--drivers/pci/dwc/pcie-designware-host.c17
-rw-r--r--drivers/pci/dwc/pcie-designware-plat.c4
-rw-r--r--drivers/pci/dwc/pcie-designware.c14
-rw-r--r--drivers/pci/dwc/pcie-designware.h30
-rw-r--r--drivers/pci/dwc/pcie-hisi.c5
-rw-r--r--drivers/pci/dwc/pcie-kirin.c6
-rw-r--r--drivers/pci/dwc/pcie-qcom.c409
-rw-r--r--drivers/pci/dwc/pcie-spear13xx.c8
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-test.c99
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c11
-rw-r--r--drivers/pci/endpoint/pci-epc-mem.c59
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c25
-rw-r--r--drivers/pci/host/Kconfig7
-rw-r--r--drivers/pci/host/pci-aardvark.c5
-rw-r--r--drivers/pci/host/pci-ftpci100.c6
-rw-r--r--drivers/pci/host/pci-hyperv.c62
-rw-r--r--drivers/pci/host/pci-mvebu.c11
-rw-r--r--drivers/pci/host/pci-tegra.c9
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/host/pci-xgene.c41
-rw-r--r--drivers/pci/host/pcie-altera-msi.c6
-rw-r--r--drivers/pci/host/pcie-altera.c13
-rw-r--r--drivers/pci/host/pcie-iproc-msi.c2
-rw-r--r--drivers/pci/host/pcie-iproc-platform.c8
-rw-r--r--drivers/pci/host/pcie-iproc.c400
-rw-r--r--drivers/pci/host/pcie-iproc.h1
-rw-r--r--drivers/pci/host/pcie-mediatek.c756
-rw-r--r--drivers/pci/host/pcie-rcar.c12
-rw-r--r--drivers/pci/host/pcie-rockchip.c426
-rw-r--r--drivers/pci/host/pcie-xilinx-nwl.c11
-rw-r--r--drivers/pci/host/pcie-xilinx.c62
-rw-r--r--drivers/pci/host/vmd.c19
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c2
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/ibmphp_ebda.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c8
-rw-r--r--drivers/pci/hotplug/pnv_php.c4
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c4
-rw-r--r--drivers/pci/hotplug/rpadlpar_sysfs.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c4
-rw-r--r--drivers/pci/hotplug/rpaphp_slot.c4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp_hpc.c2
-rw-r--r--drivers/pci/iov.c7
-rw-r--r--drivers/pci/msi.c40
-rw-r--r--drivers/pci/pci-acpi.c4
-rw-r--r--drivers/pci/pci-driver.c5
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci-sysfs.c21
-rw-r--r--drivers/pci/pci.c67
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c25
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c4
-rw-r--r--drivers/pci/pcie/pcie-dpc.c187
-rw-r--r--drivers/pci/pcie/portdrv_pci.c107
-rw-r--r--drivers/pci/probe.c164
-rw-r--r--drivers/pci/quirks.c160
-rw-r--r--drivers/pci/setup-irq.c32
-rw-r--r--drivers/pci/setup-res.c13
-rw-r--r--drivers/pcmcia/db1xxx_ss.c33
-rw-r--r--drivers/perf/arm_pmu.c6
-rw-r--r--drivers/perf/xgene_pmu.c74
-rw-r--r--drivers/phy/Kconfig11
-rw-r--r--drivers/phy/Makefile5
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c112
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c8
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c2
-rw-r--r--drivers/phy/lantiq/Kconfig9
-rw-r--r--drivers/phy/lantiq/Makefile1
-rw-r--r--drivers/phy/lantiq/phy-lantiq-rcu-usb2.c254
-rw-r--r--drivers/phy/marvell/Kconfig11
-rw-r--r--drivers/phy/marvell/Makefile1
-rw-r--r--drivers/phy/marvell/phy-mvebu-cp110-comphy.c644
-rw-r--r--drivers/phy/mediatek/Kconfig14
-rw-r--r--drivers/phy/mediatek/Makefile5
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c (renamed from drivers/phy/phy-mt65xx-usb3.c)557
-rw-r--r--drivers/phy/motorola/phy-cpcap-usb.c2
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c162
-rw-r--r--drivers/phy/qualcomm/phy-qcom-usb-hs.c14
-rw-r--r--drivers/phy/ralink/Kconfig11
-rw-r--r--drivers/phy/ralink/Makefile1
-rw-r--r--drivers/phy/ralink/phy-ralink-usb.c249
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c225
-rw-r--r--drivers/phy/rockchip/phy-rockchip-pcie.c131
-rw-r--r--drivers/phy/rockchip/phy-rockchip-typec.c3
-rw-r--r--drivers/phy/samsung/phy-exynos-dp-video.c5
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c7
-rw-r--r--drivers/phy/samsung/phy-samsung-usb2.c9
-rw-r--r--drivers/phy/ti/phy-ti-pipe3.c10
-rw-r--r--drivers/phy/ti/phy-twl4030-usb.c4
-rw-r--r--drivers/pinctrl/Kconfig17
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g4.c70
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c64
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.c21
-rw-r--r--drivers/pinctrl/aspeed/pinctrl-aspeed.h1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm281xx.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c31
-rw-r--r--drivers/pinctrl/berlin/berlin.c4
-rw-r--r--drivers/pinctrl/core.c17
-rw-r--r--drivers/pinctrl/core.h6
-rw-r--r--drivers/pinctrl/devicetree.c9
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c131
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.h20
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx23.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx28.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx7ulp.c364
-rw-r--r--drivers/pinctrl/freescale/pinctrl-vf610.c25
-rw-r--r--drivers/pinctrl/intel/Kconfig19
-rw-r--r--drivers/pinctrl/intel/Makefile2
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c4
-rw-r--r--drivers/pinctrl/intel/pinctrl-cannonlake.c424
-rw-r--r--drivers/pinctrl/intel/pinctrl-denverton.c302
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c32
-rw-r--r--drivers/pinctrl/intel/pinctrl-lewisburg.c343
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-mt2701.h12
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c47
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c2
-rw-r--r--drivers/pinctrl/pinconf-generic.c9
-rw-r--r--drivers/pinctrl/pinconf.c14
-rw-r--r--drivers/pinctrl/pinconf.h24
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c4
-rw-r--r--drivers/pinctrl/pinctrl-amd.c79
-rw-r--r--drivers/pinctrl/pinctrl-amd.h1
-rw-r--r--drivers/pinctrl/pinctrl-artpec6.c2
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c11
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-gemini.c2359
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c6
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c493
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c315
-rw-r--r--drivers/pinctrl/pinctrl-rza1.c10
-rw-r--r--drivers/pinctrl/pinctrl-st.c12
-rw-r--r--drivers/pinctrl/pinctrl-tb10x.c8
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c6
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c6
-rw-r--r--drivers/pinctrl/pinctrl-zynq.c34
-rw-r--r--drivers/pinctrl/pinmux.c16
-rw-r--r--drivers/pinctrl/pinmux.h29
-rw-r--r--drivers/pinctrl/qcom/pinctrl-apq8064.c42
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq4019.c432
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c27
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c323
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c2
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c32
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.h1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c37
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c40
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c18
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h15
-rw-r--r--drivers/pinctrl/sh-pfc/Kconfig5
-rw-r--r--drivers/pinctrl/sh-pfc/Makefile1
-rw-r--r--drivers/pinctrl/sh-pfc/core.c6
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c15
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7795.c1082
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7796.c146
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77995.c1812
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c11
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h23
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c13
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c10
-rw-r--r--drivers/pinctrl/sprd/Kconfig20
-rw-r--r--drivers/pinctrl/sprd/Makefile2
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd-sc9860.c972
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.c1117
-rw-r--r--drivers/pinctrl/sprd/pinctrl-sprd.h67
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c2
-rw-r--r--drivers/pinctrl/sunxi/Kconfig2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c273
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h5.c26
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun6i-a31-r.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-a23-r.c6
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3-r.c4
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-v3s.c1
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c3
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra-xusb.c2
-rw-r--r--drivers/pinctrl/ti/pinctrl-ti-iodelay.c4
-rw-r--r--drivers/pinctrl/uniphier/Kconfig4
-rw-r--r--drivers/pinctrl/uniphier/Makefile1
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-core.c279
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c665
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c714
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld4.c273
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld6b.c386
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro4.c453
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pro5.c458
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs2.c386
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-pxs3.c989
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-sld8.c273
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier.h42
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c8
-rw-r--r--drivers/pinctrl/zte/pinctrl-zx.c7
-rw-r--r--drivers/platform/chrome/chromeos_laptop.c2
-rw-r--r--drivers/platform/chrome/chromeos_pstore.c2
-rw-r--r--drivers/platform/chrome/cros_ec_lpc.c2
-rw-r--r--drivers/platform/x86/alienware-wmi.c40
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/dell-wmi.c69
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c30
-rw-r--r--drivers/platform/x86/ibm_rtl.c4
-rw-r--r--drivers/platform/x86/ideapad-laptop.c69
-rw-r--r--drivers/platform/x86/intel-hid.c21
-rw-r--r--drivers/platform/x86/intel-vbtn.c2
-rw-r--r--drivers/platform/x86/intel_mid_powerbtn.c10
-rw-r--r--drivers/platform/x86/intel_oaktrail.c2
-rw-r--r--drivers/platform/x86/intel_pmc_core.c31
-rw-r--r--drivers/platform/x86/intel_pmc_core.h30
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c6
-rw-r--r--drivers/platform/x86/intel_telemetry_debugfs.c1
-rw-r--r--drivers/platform/x86/intel_telemetry_pltdrv.c36
-rw-r--r--drivers/platform/x86/mlx-platform.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c2
-rw-r--r--drivers/platform/x86/msi-wmi.c2
-rw-r--r--drivers/platform/x86/mxm-wmi.c4
-rw-r--r--drivers/platform/x86/peaq-wmi.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c2
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c38
-rw-r--r--drivers/platform/x86/toshiba-wmi.c2
-rw-r--r--drivers/platform/x86/wmi.c6
-rw-r--r--drivers/pnp/pnpbios/core.c2
-rw-r--r--drivers/power/avs/rockchip-io-domain.c38
-rw-r--r--drivers/power/reset/at91-sama5d2_shdwc.c4
-rw-r--r--drivers/power/supply/Kconfig23
-rw-r--r--drivers/power/supply/Makefile2
-rw-r--r--drivers/power/supply/act8945a_charger.c4
-rw-r--r--drivers/power/supply/bq24190_charger.c346
-rw-r--r--drivers/power/supply/bq27xxx_battery.c575
-rw-r--r--drivers/power/supply/bq27xxx_battery_hdq.c135
-rw-r--r--drivers/power/supply/bq27xxx_battery_i2c.c16
-rw-r--r--drivers/power/supply/charger-manager.c9
-rw-r--r--drivers/power/supply/ds2780_battery.c4
-rw-r--r--drivers/power/supply/ds2781_battery.c4
-rw-r--r--drivers/power/supply/lp8788-charger.c18
-rw-r--r--drivers/power/supply/ltc2941-battery-gauge.c156
-rw-r--r--drivers/power/supply/max17042_battery.c42
-rw-r--r--drivers/power/supply/max1721x_battery.c448
-rw-r--r--drivers/power/supply/olpc_battery.c4
-rw-r--r--drivers/power/supply/pcf50633-charger.c2
-rw-r--r--drivers/power/supply/power_supply_core.c54
-rw-r--r--drivers/power/supply/sbs-battery.c26
-rw-r--r--drivers/power/supply/twl4030_charger.c2
-rw-r--r--drivers/power/supply/wm831x_power.c72
-rw-r--r--drivers/pps/Kconfig7
-rw-r--r--drivers/pps/clients/Kconfig7
-rw-r--r--drivers/pps/generators/Kconfig3
-rw-r--r--drivers/ptp/ptp_dte.c2
-rw-r--r--drivers/ptp/ptp_ixp46x.c2
-rw-r--r--drivers/ptp/ptp_kvm.c2
-rw-r--r--drivers/ptp/ptp_pch.c2
-rw-r--r--drivers/pwm/Kconfig23
-rw-r--r--drivers/pwm/Makefile2
-rw-r--r--drivers/pwm/pwm-bcm2835.c2
-rw-r--r--drivers/pwm/pwm-hibvt.c2
-rw-r--r--drivers/pwm/pwm-mediatek.c78
-rw-r--r--drivers/pwm/pwm-meson.c2
-rw-r--r--drivers/pwm/pwm-pca9685.c14
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c1
-rw-r--r--drivers/pwm/pwm-rockchip.c281
-rw-r--r--drivers/pwm/pwm-samsung.c70
-rw-r--r--drivers/pwm/pwm-stm32-lp.c246
-rw-r--r--drivers/pwm/pwm-tegra.c2
-rw-r--r--drivers/pwm/pwm-tiecap.c90
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c122
-rw-r--r--drivers/pwm/pwm-twl-led.c2
-rw-r--r--drivers/pwm/pwm-twl.c2
-rw-r--r--drivers/pwm/pwm-vt8500.c1
-rw-r--r--drivers/pwm/pwm-zx.c282
-rw-r--r--drivers/regulator/Kconfig29
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/axp20x-regulator.c6
-rw-r--r--drivers/regulator/core.c16
-rw-r--r--drivers/regulator/cpcap-regulator.c21
-rw-r--r--drivers/regulator/da9063-regulator.c2
-rw-r--r--drivers/regulator/fan53555.c15
-rw-r--r--drivers/regulator/ltc3589.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/mt6380-regulator.c352
-rw-r--r--drivers/regulator/of_regulator.c4
-rw-r--r--drivers/regulator/pv88090-regulator.c11
-rw-r--r--drivers/regulator/pv88090-regulator.h8
-rw-r--r--drivers/regulator/pwm-regulator.c6
-rw-r--r--drivers/regulator/qcom_rpm-regulator.c5
-rw-r--r--drivers/regulator/qcom_smd-regulator.c5
-rw-r--r--drivers/regulator/rk808-regulator.c130
-rw-r--r--drivers/regulator/rn5t618-regulator.c35
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/regulator/stm32-vrefbuf.c202
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/regulator/twl6030-regulator.c2
-rw-r--r--drivers/remoteproc/Kconfig10
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c98
-rw-r--r--drivers/remoteproc/imx_rproc.c426
-rw-r--r--drivers/remoteproc/keystone_remoteproc.c3
-rw-r--r--drivers/remoteproc/qcom_adsp_pil.c14
-rw-r--r--drivers/remoteproc/qcom_common.c122
-rw-r--r--drivers/remoteproc/qcom_common.h21
-rw-r--r--drivers/remoteproc/qcom_q6v5_pil.c6
-rw-r--r--drivers/remoteproc/remoteproc_core.c35
-rw-r--r--drivers/remoteproc/remoteproc_internal.h1
-rw-r--r--drivers/remoteproc/st_remoteproc.c6
-rw-r--r--drivers/reset/Kconfig15
-rw-r--r--drivers/reset/Makefile3
-rw-r--r--drivers/reset/core.c238
-rw-r--r--drivers/reset/reset-gemini.c110
-rw-r--r--drivers/reset/reset-hsdk-v1.c137
-rw-r--r--drivers/reset/reset-lantiq.c212
-rw-r--r--drivers/reset/reset-socfpga.c4
-rw-r--r--drivers/reset/reset-sunxi.c4
-rw-r--r--drivers/reset/reset-uniphier.c117
-rw-r--r--drivers/reset/reset-zx2967.c2
-rw-r--r--drivers/rpmsg/Kconfig16
-rw-r--r--drivers/rpmsg/Makefile2
-rw-r--r--drivers/rpmsg/qcom_glink_native.c1612
-rw-r--r--drivers/rpmsg/qcom_glink_native.h45
-rw-r--r--drivers/rpmsg/qcom_glink_rpm.c1026
-rw-r--r--drivers/rpmsg/qcom_glink_smem.c316
-rw-r--r--drivers/rpmsg/qcom_smd.c1
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c47
-rw-r--r--drivers/rtc/Kconfig30
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-dev.c20
-rw-r--r--drivers/rtc/rtc-dm355evm.c2
-rw-r--r--drivers/rtc/rtc-ds1307.c458
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-em3027.c2
-rw-r--r--drivers/rtc/rtc-goldfish.c237
-rw-r--r--drivers/rtc/rtc-m41t80.c67
-rw-r--r--drivers/rtc/rtc-max6900.c2
-rw-r--r--drivers/rtc/rtc-max8925.c2
-rw-r--r--drivers/rtc/rtc-mxc.c21
-rw-r--r--drivers/rtc/rtc-puv3.c72
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rtd119x.c242
-rw-r--r--drivers/rtc/rtc-rv3029c2.c2
-rw-r--r--drivers/rtc/rtc-s35390a.c104
-rw-r--r--drivers/rtc/rtc-sa1100.c65
-rw-r--r--drivers/rtc/rtc-sun6i.c34
-rw-r--r--drivers/rtc/rtc-twl.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c18
-rw-r--r--drivers/s390/block/dasd.c386
-rw-r--r--drivers/s390/block/dasd_3990_erp.c2
-rw-r--r--drivers/s390/block/dasd_devmap.c11
-rw-r--r--drivers/s390/block/dasd_diag.c2
-rw-r--r--drivers/s390/block/dasd_eckd.c8
-rw-r--r--drivers/s390/block/dasd_eckd.h2
-rw-r--r--drivers/s390/block/dasd_erp.c2
-rw-r--r--drivers/s390/block/dasd_fba.c202
-rw-r--r--drivers/s390/block/dasd_int.h38
-rw-r--r--drivers/s390/block/dasd_proc.c2
-rw-r--r--drivers/s390/block/dcssblk.c4
-rw-r--r--drivers/s390/block/scm_blk.c13
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/Kconfig11
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c1
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/char/sclp_early.c6
-rw-r--r--drivers/s390/char/sclp_ocf.c2
-rw-r--r--drivers/s390/char/tape_core.c2
-rw-r--r--drivers/s390/char/vmcp.c112
-rw-r--r--drivers/s390/char/vmcp.h30
-rw-r--r--drivers/s390/cio/chp.c4
-rw-r--r--drivers/s390/cio/device.c4
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c2
-rw-r--r--drivers/s390/crypto/ap_asm.h9
-rw-r--r--drivers/s390/crypto/ap_bus.c49
-rw-r--r--drivers/s390/crypto/ap_bus.h47
-rw-r--r--drivers/s390/crypto/ap_queue.c26
-rw-r--r--drivers/s390/crypto/zcrypt_card.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c2
-rw-r--r--drivers/s390/crypto/zcrypt_queue.c2
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/lcs.c28
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h17
-rw-r--r--drivers/s390/net/qeth_core_main.c205
-rw-r--r--drivers/s390/net/qeth_core_sys.c2
-rw-r--r--drivers/s390/net/qeth_l2_main.c343
-rw-r--r--drivers/s390/net/qeth_l3_main.c67
-rw-r--r--drivers/s390/net/qeth_l3_sys.c25
-rw-r--r--drivers/s390/scsi/zfcp_aux.c1
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c95
-rw-r--r--drivers/s390/scsi/zfcp_dbf.h25
-rw-r--r--drivers/s390/scsi/zfcp_erp.c5
-rw-r--r--drivers/s390/scsi/zfcp_ext.h1
-rw-r--r--drivers/s390/scsi/zfcp_fc.c52
-rw-r--r--drivers/s390/scsi/zfcp_fc.h25
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c35
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h12
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h17
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c18
-rw-r--r--drivers/scsi/53c700.c23
-rw-r--r--drivers/scsi/Kconfig13
-rw-r--r--drivers/scsi/NCR5380.c4
-rw-r--r--drivers/scsi/NCR_Q720.c3
-rw-r--r--drivers/scsi/a2091.c17
-rw-r--r--drivers/scsi/a3000.c17
-rw-r--r--drivers/scsi/aacraid/aachba.c334
-rw-r--r--drivers/scsi/aacraid/aacraid.h3
-rw-r--r--drivers/scsi/aacraid/comminit.c6
-rw-r--r--drivers/scsi/aacraid/commsup.c3
-rw-r--r--drivers/scsi/aacraid/linit.c320
-rw-r--r--drivers/scsi/aha152x.c13
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic7xxx/Makefile6
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg.h_shipped1251
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped34
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_core.c1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c5
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h1
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_reg.h_shipped44
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c6
-rw-r--r--drivers/scsi/arcmsr/arcmsr_attr.c6
-rw-r--r--drivers/scsi/arm/acornscsi.c9
-rw-r--r--drivers/scsi/arm/cumana_1.c2
-rw-r--r--drivers/scsi/arm/oak.c2
-rw-r--r--drivers/scsi/atari_scsi.c6
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.c6
-rw-r--r--drivers/scsi/be2iscsi/be_main.h2
-rw-r--r--drivers/scsi/bfa/bfad_im.c37
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h1
-rw-r--r--drivers/scsi/ch.c22
-rw-r--r--drivers/scsi/csiostor/csio_hw.c4
-rw-r--r--drivers/scsi/csiostor/csio_hw.h2
-rw-r--r--drivers/scsi/csiostor/csio_init.c23
-rw-r--r--drivers/scsi/csiostor/csio_scsi.c6
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c3
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c27
-rw-r--r--drivers/scsi/cxlflash/main.c3
-rw-r--r--drivers/scsi/cxlflash/superpipe.c14
-rw-r--r--drivers/scsi/cxlflash/vlun.c6
-rw-r--r--drivers/scsi/dmx3191d.c2
-rw-r--r--drivers/scsi/dpt_i2o.c5
-rw-r--r--drivers/scsi/eata.c9
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2
-rw-r--r--drivers/scsi/esp_scsi.c53
-rw-r--r--drivers/scsi/esp_scsi.h1
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c4
-rw-r--r--drivers/scsi/fdomain.c6
-rw-r--r--drivers/scsi/fdomain.h2
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c4
-rw-r--r--drivers/scsi/g_NCR5380.c283
-rw-r--r--drivers/scsi/gdth.c2
-rw-r--r--drivers/scsi/gdth_proc.c2
-rw-r--r--drivers/scsi/gvp11.c18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h18
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c198
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c605
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c237
-rw-r--r--drivers/scsi/hosts.c8
-rw-r--r--drivers/scsi/hpsa.c117
-rw-r--r--drivers/scsi/hpsa.h81
-rw-r--r--drivers/scsi/hptiop.c11
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c8
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c2
-rw-r--r--drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c2
-rw-r--r--drivers/scsi/imm.c1
-rw-r--r--drivers/scsi/ipr.c34
-rw-r--r--drivers/scsi/ipr.h2
-rw-r--r--drivers/scsi/isci/init.c2
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/lasi700.c6
-rw-r--r--drivers/scsi/libfc/fc_fcp.c2
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/libsas/Kconfig1
-rw-r--r--drivers/scsi/libsas/sas_ata.c1
-rw-r--r--drivers/scsi/libsas/sas_expander.c70
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c106
-rw-r--r--drivers/scsi/libsas/sas_internal.h12
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c15
-rw-r--r--drivers/scsi/lpfc/lpfc.h19
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c52
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.h10
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c93
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h18
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c73
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c35
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c90
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c31
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c279
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c62
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h23
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mac53c94.c13
-rw-r--r--drivers/scsi/mac_esp.c37
-rw-r--r--drivers/scsi/mac_scsi.c4
-rw-r--r--drivers/scsi/megaraid.c6
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c32
-rw-r--r--drivers/scsi/megaraid/megaraid_mm.c29
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c112
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c40
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c216
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c85
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c230
-rw-r--r--drivers/scsi/mvme147.c16
-rw-r--r--drivers/scsi/mvsas/mv_init.c12
-rw-r--r--drivers/scsi/mvsas/mv_sas.c6
-rw-r--r--drivers/scsi/nsp32.c22
-rw-r--r--drivers/scsi/osst.c8
-rw-r--r--drivers/scsi/pcmcia/fdomain_stub.c2
-rw-r--r--drivers/scsi/pcmcia/qlogic_stub.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c7
-rw-r--r--drivers/scsi/pmcraid.c17
-rw-r--r--drivers/scsi/pmcraid.h2
-rw-r--r--drivers/scsi/ppa.c1
-rw-r--r--drivers/scsi/qedf/qedf.h2
-rw-r--r--drivers/scsi/qedf/qedf_els.c14
-rw-r--r--drivers/scsi/qedf/qedf_fip.c35
-rw-r--r--drivers/scsi/qedf/qedf_main.c113
-rw-r--r--drivers/scsi/qedf/qedf_version.h6
-rw-r--r--drivers/scsi/qedi/qedi.h5
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c2
-rw-r--r--drivers/scsi/qedi/qedi_main.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h118
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h28
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h22
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c183
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c55
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c78
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c263
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c161
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.h17
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c155
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c29
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c31
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qlogicfas.c2
-rw-r--r--drivers/scsi/qlogicfas408.c6
-rw-r--r--drivers/scsi/qlogicfas408.h2
-rw-r--r--drivers/scsi/qlogicpti.c2
-rw-r--r--drivers/scsi/scsi.c144
-rw-r--r--drivers/scsi/scsi_debug.c5
-rw-r--r--drivers/scsi/scsi_debugfs.c4
-rw-r--r--drivers/scsi/scsi_error.c10
-rw-r--r--drivers/scsi/scsi_ioctl.c4
-rw-r--r--drivers/scsi/scsi_lib.c131
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/scsi_sysfs.c33
-rw-r--r--drivers/scsi/scsi_transport_fc.c39
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c6
-rw-r--r--drivers/scsi/scsi_transport_sas.c121
-rw-r--r--drivers/scsi/scsi_transport_srp.c7
-rw-r--r--drivers/scsi/sd.c12
-rw-r--r--drivers/scsi/sd_zbc.c9
-rw-r--r--drivers/scsi/ses.c64
-rw-r--r--drivers/scsi/sg.c86
-rw-r--r--drivers/scsi/sgiwd93.c15
-rw-r--r--drivers/scsi/smartpqi/smartpqi.h44
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c145
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sas_transport.c9
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.c111
-rw-r--r--drivers/scsi/smartpqi/smartpqi_sis.h4
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/storvsc_drv.c2
-rw-r--r--drivers/scsi/sun3_scsi.c4
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_hipd.c13
-rw-r--r--drivers/scsi/ufs/ufshcd.c2
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/scsi/wd33c93.c2
-rw-r--r--drivers/scsi/zalon.c8
-rw-r--r--drivers/sfi/sfi_core.c23
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile2
-rw-r--r--drivers/soc/amlogic/Kconfig12
-rw-r--r--drivers/soc/amlogic/Makefile1
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c177
-rw-r--r--drivers/soc/fsl/qbman/bman_ccsr.c10
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c8
-rw-r--r--drivers/soc/fsl/qbman/qman_ccsr.c12
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c11
-rw-r--r--drivers/soc/fsl/qe/gpio.c4
-rw-r--r--drivers/soc/imx/gpcv2.c15
-rw-r--r--drivers/soc/lantiq/Makefile2
-rw-r--r--drivers/soc/lantiq/fpi-bus.c87
-rw-r--r--drivers/soc/lantiq/gphy.c260
-rw-r--r--drivers/soc/mediatek/mtk-pmic-wrap.c10
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c247
-rw-r--r--drivers/soc/qcom/Kconfig13
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/glink_ssr.c164
-rw-r--r--drivers/soc/qcom/mdt_loader.c5
-rw-r--r--drivers/soc/qcom/smsm.c3
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c1
-rw-r--r--drivers/soc/renesas/Kconfig7
-rw-r--r--drivers/soc/renesas/Makefile1
-rw-r--r--drivers/soc/renesas/r8a77995-sysc.c31
-rw-r--r--drivers/soc/renesas/rcar-rst.c5
-rw-r--r--drivers/soc/renesas/rcar-sysc.c9
-rw-r--r--drivers/soc/renesas/rcar-sysc.h1
-rw-r--r--drivers/soc/renesas/renesas-soc.c8
-rw-r--r--drivers/soc/rockchip/grf.c14
-rw-r--r--drivers/soc/rockchip/pm_domains.c32
-rw-r--r--drivers/soc/samsung/pm_domains.c10
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c57
-rw-r--r--drivers/soc/tegra/Kconfig5
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra.c56
-rw-r--r--drivers/soc/tegra/pmc.c4
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c3
-rw-r--r--drivers/soc/ti/ti_sci_pm_domains.c2
-rw-r--r--drivers/soc/versatile/soc-realview.c2
-rw-r--r--drivers/spi/Kconfig5
-rw-r--r--drivers/spi/spi-altera.c163
-rw-r--r--drivers/spi/spi-ath79.c13
-rw-r--r--drivers/spi/spi-bcm-qspi.c89
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c10
-rw-r--r--drivers/spi/spi-bcm63xx.c4
-rw-r--r--drivers/spi/spi-cadence.c4
-rw-r--r--drivers/spi/spi-ep93xx.c501
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-imx.c218
-rw-r--r--drivers/spi/spi-loopback-test.c34
-rw-r--r--drivers/spi/spi-omap2-mcspi.c4
-rw-r--r--drivers/spi/spi-orion.c4
-rw-r--r--drivers/spi/spi-pic32.c4
-rw-r--r--drivers/spi/spi-pl022.c2
-rw-r--r--drivers/spi/spi-pxa2xx.c35
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/spi/spi-qup.c564
-rw-r--r--drivers/spi/spi-rockchip.c60
-rw-r--r--drivers/spi/spi-sh-msiof.c32
-rw-r--r--drivers/spi/spi-sh.c4
-rw-r--r--drivers/spi/spi-stm32.c2
-rw-r--r--drivers/spi/spi-sun6i.c2
-rw-r--r--drivers/spi/spi-tegra114.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c2
-rw-r--r--drivers/spi/spi-xlp.c4
-rw-r--r--drivers/spi/spi.c142
-rw-r--r--drivers/spmi/spmi-pmic-arb.c837
-rw-r--r--drivers/spmi/spmi.c14
-rw-r--r--drivers/staging/Kconfig6
-rw-r--r--drivers/staging/Makefile4
-rw-r--r--drivers/staging/android/ashmem.c29
-rw-r--r--drivers/staging/android/ion/ion.h12
-rw-r--r--drivers/staging/android/ion/ion_cma_heap.c5
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c2
-rw-r--r--drivers/staging/ccree/Kconfig9
-rw-r--r--drivers/staging/ccree/Makefile2
-rw-r--r--drivers/staging/ccree/cc_hw_queue_defs.h3
-rw-r--r--drivers/staging/ccree/ssi_aead.c246
-rw-r--r--drivers/staging/ccree/ssi_aead.h12
-rw-r--r--drivers/staging/ccree/ssi_buffer_mgr.c473
-rw-r--r--drivers/staging/ccree/ssi_cipher.c185
-rw-r--r--drivers/staging/ccree/ssi_driver.c64
-rw-r--r--drivers/staging/ccree/ssi_driver.h1
-rw-r--r--drivers/staging/ccree/ssi_fips.c119
-rw-r--r--drivers/staging/ccree/ssi_fips.h58
-rw-r--r--drivers/staging/ccree/ssi_fips_data.h306
-rw-r--r--drivers/staging/ccree/ssi_fips_ext.c92
-rw-r--r--drivers/staging/ccree/ssi_fips_ll.c1649
-rw-r--r--drivers/staging/ccree/ssi_fips_local.c357
-rw-r--r--drivers/staging/ccree/ssi_fips_local.h67
-rw-r--r--drivers/staging/ccree/ssi_hash.c276
-rw-r--r--drivers/staging/ccree/ssi_ivgen.c13
-rw-r--r--drivers/staging/ccree/ssi_pm.c4
-rw-r--r--drivers/staging/ccree/ssi_request_mgr.c54
-rw-r--r--drivers/staging/ccree/ssi_sram_mgr.c6
-rw-r--r--drivers/staging/ccree/ssi_sysfs.c80
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c2
-rw-r--r--drivers/staging/comedi/drivers.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c3
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c24
-rw-r--r--drivers/staging/fbtft/fb_st7789v.c2
-rw-r--r--drivers/staging/fbtft/fbtft-core.c4
-rw-r--r--drivers/staging/fsl-dpaa2/Kconfig2
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c6
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h4
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c2
-rw-r--r--drivers/staging/fsl-dpaa2/ethernet/dpni.c2
-rw-r--r--drivers/staging/fsl-mc/bus/Kconfig4
-rw-r--r--drivers/staging/fsl-mc/bus/dpio/qbman-portal.c24
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c5
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-allocator.c6
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-bus.c4
-rw-r--r--drivers/staging/fsl-mc/bus/fsl-mc-msi.c9
-rw-r--r--drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c11
-rw-r--r--drivers/staging/fsl-mc/bus/mc-io.c11
-rw-r--r--drivers/staging/fsl-mc/bus/mc-sys.c36
-rw-r--r--drivers/staging/fsl-mc/include/dpaa2-io.h1
-rw-r--r--drivers/staging/goldfish/goldfish_nand.c24
-rw-r--r--drivers/staging/greybus/arche-platform.c14
-rw-r--r--drivers/staging/greybus/audio_codec.c2
-rw-r--r--drivers/staging/greybus/gbphy.c2
-rw-r--r--drivers/staging/greybus/interface.c40
-rw-r--r--drivers/staging/greybus/light.c46
-rw-r--r--drivers/staging/greybus/spilib.h3
-rw-r--r--drivers/staging/greybus/tools/loopback_test.c48
-rw-r--r--drivers/staging/greybus/usb.c2
-rw-r--r--drivers/staging/greybus/vibrator.c8
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c90
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c4
-rw-r--r--drivers/staging/iio/adc/ad7280a.c21
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c4
-rw-r--r--drivers/staging/iio/light/tsl2x7x.c372
-rw-r--r--drivers/staging/irda/TODO4
-rw-r--r--drivers/staging/irda/drivers/Kconfig (renamed from drivers/net/irda/Kconfig)0
-rw-r--r--drivers/staging/irda/drivers/Makefile (renamed from drivers/net/irda/Makefile)2
-rw-r--r--drivers/staging/irda/drivers/act200l-sir.c (renamed from drivers/net/irda/act200l-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/actisys-sir.c (renamed from drivers/net/irda/actisys-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.c (renamed from drivers/net/irda/ali-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/ali-ircc.h (renamed from drivers/net/irda/ali-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/au1k_ir.c (renamed from drivers/net/irda/au1k_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.c (renamed from drivers/net/irda/bfin_sir.c)0
-rw-r--r--drivers/staging/irda/drivers/bfin_sir.h (renamed from drivers/net/irda/bfin_sir.h)0
-rw-r--r--drivers/staging/irda/drivers/donauboe.c (renamed from drivers/net/irda/donauboe.c)0
-rw-r--r--drivers/staging/irda/drivers/donauboe.h (renamed from drivers/net/irda/donauboe.h)0
-rw-r--r--drivers/staging/irda/drivers/esi-sir.c (renamed from drivers/net/irda/esi-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/girbil-sir.c (renamed from drivers/net/irda/girbil-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/irda-usb.c (renamed from drivers/net/irda/irda-usb.c)2
-rw-r--r--drivers/staging/irda/drivers/irda-usb.h (renamed from drivers/net/irda/irda-usb.h)0
-rw-r--r--drivers/staging/irda/drivers/irtty-sir.c (renamed from drivers/net/irda/irtty-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/irtty-sir.h (renamed from drivers/net/irda/irtty-sir.h)0
-rw-r--r--drivers/staging/irda/drivers/kingsun-sir.c (renamed from drivers/net/irda/kingsun-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/ks959-sir.c (renamed from drivers/net/irda/ks959-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/ksdazzle-sir.c (renamed from drivers/net/irda/ksdazzle-sir.c)2
-rw-r--r--drivers/staging/irda/drivers/litelink-sir.c (renamed from drivers/net/irda/litelink-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/ma600-sir.c (renamed from drivers/net/irda/ma600-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/mcp2120-sir.c (renamed from drivers/net/irda/mcp2120-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/mcs7780.c (renamed from drivers/net/irda/mcs7780.c)2
-rw-r--r--drivers/staging/irda/drivers/mcs7780.h (renamed from drivers/net/irda/mcs7780.h)0
-rw-r--r--drivers/staging/irda/drivers/nsc-ircc.c (renamed from drivers/net/irda/nsc-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/nsc-ircc.h (renamed from drivers/net/irda/nsc-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/old_belkin-sir.c (renamed from drivers/net/irda/old_belkin-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/pxaficp_ir.c (renamed from drivers/net/irda/pxaficp_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/sa1100_ir.c (renamed from drivers/net/irda/sa1100_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/sh_sir.c (renamed from drivers/net/irda/sh_sir.c)0
-rw-r--r--drivers/staging/irda/drivers/sir-dev.h (renamed from drivers/net/irda/sir-dev.h)0
-rw-r--r--drivers/staging/irda/drivers/sir_dev.c (renamed from drivers/net/irda/sir_dev.c)0
-rw-r--r--drivers/staging/irda/drivers/sir_dongle.c (renamed from drivers/net/irda/sir_dongle.c)0
-rw-r--r--drivers/staging/irda/drivers/smsc-ircc2.c (renamed from drivers/net/irda/smsc-ircc2.c)0
-rw-r--r--drivers/staging/irda/drivers/smsc-ircc2.h (renamed from drivers/net/irda/smsc-ircc2.h)0
-rw-r--r--drivers/staging/irda/drivers/smsc-sio.h (renamed from drivers/net/irda/smsc-sio.h)0
-rw-r--r--drivers/staging/irda/drivers/stir4200.c (renamed from drivers/net/irda/stir4200.c)2
-rw-r--r--drivers/staging/irda/drivers/tekram-sir.c (renamed from drivers/net/irda/tekram-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/toim3232-sir.c (renamed from drivers/net/irda/toim3232-sir.c)0
-rw-r--r--drivers/staging/irda/drivers/via-ircc.c (renamed from drivers/net/irda/via-ircc.c)0
-rw-r--r--drivers/staging/irda/drivers/via-ircc.h (renamed from drivers/net/irda/via-ircc.h)0
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.c (renamed from drivers/net/irda/vlsi_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/vlsi_ir.h (renamed from drivers/net/irda/vlsi_ir.h)0
-rw-r--r--drivers/staging/irda/drivers/w83977af.h (renamed from drivers/net/irda/w83977af.h)0
-rw-r--r--drivers/staging/irda/drivers/w83977af_ir.c (renamed from drivers/net/irda/w83977af_ir.c)0
-rw-r--r--drivers/staging/irda/drivers/w83977af_ir.h (renamed from drivers/net/irda/w83977af_ir.h)0
-rw-r--r--drivers/staging/irda/include/net/irda/af_irda.h87
-rw-r--r--drivers/staging/irda/include/net/irda/crc.h29
-rw-r--r--drivers/staging/irda/include/net/irda/discovery.h95
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_core.h106
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_event.h83
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_lmp.h36
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_param.h147
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_ttp.h37
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_tty.h121
-rw-r--r--drivers/staging/irda/include/net/irda/ircomm_tty_attach.h92
-rw-r--r--drivers/staging/irda/include/net/irda/irda.h115
-rw-r--r--drivers/staging/irda/include/net/irda/irda_device.h285
-rw-r--r--drivers/staging/irda/include/net/irda/iriap.h108
-rw-r--r--drivers/staging/irda/include/net/irda/iriap_event.h85
-rw-r--r--drivers/staging/irda/include/net/irda/irias_object.h108
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_client.h42
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_common.h230
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_eth.h32
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_event.h81
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_filter.h35
-rw-r--r--drivers/staging/irda/include/net/irda/irlan_provider.h52
-rw-r--r--drivers/staging/irda/include/net/irda/irlap.h311
-rw-r--r--drivers/staging/irda/include/net/irda/irlap_event.h129
-rw-r--r--drivers/staging/irda/include/net/irda/irlap_frame.h167
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp.h295
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_event.h98
-rw-r--r--drivers/staging/irda/include/net/irda/irlmp_frame.h62
-rw-r--r--drivers/staging/irda/include/net/irda/irmod.h109
-rw-r--r--drivers/staging/irda/include/net/irda/irqueue.h96
-rw-r--r--drivers/staging/irda/include/net/irda/irttp.h210
-rw-r--r--drivers/staging/irda/include/net/irda/parameters.h100
-rw-r--r--drivers/staging/irda/include/net/irda/qos.h101
-rw-r--r--drivers/staging/irda/include/net/irda/timer.h105
-rw-r--r--drivers/staging/irda/include/net/irda/wrapper.h58
-rw-r--r--drivers/staging/irda/net/Kconfig96
-rw-r--r--drivers/staging/irda/net/Makefile17
-rw-r--r--drivers/staging/irda/net/af_irda.c2695
-rw-r--r--drivers/staging/irda/net/discovery.c417
-rw-r--r--drivers/staging/irda/net/ircomm/Kconfig12
-rw-r--r--drivers/staging/irda/net/ircomm/Makefile8
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_core.c563
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_event.c246
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_lmp.c350
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_param.c501
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_ttp.c350
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty.c1329
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_attach.c987
-rw-r--r--drivers/staging/irda/net/ircomm/ircomm_tty_ioctl.c291
-rw-r--r--drivers/staging/irda/net/irda_device.c316
-rw-r--r--drivers/staging/irda/net/iriap.c1085
-rw-r--r--drivers/staging/irda/net/iriap_event.c496
-rw-r--r--drivers/staging/irda/net/irias_object.c555
-rw-r--r--drivers/staging/irda/net/irlan/Kconfig14
-rw-r--r--drivers/staging/irda/net/irlan/Makefile7
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client.c559
-rw-r--r--drivers/staging/irda/net/irlan/irlan_client_event.c511
-rw-r--r--drivers/staging/irda/net/irlan/irlan_common.c1176
-rw-r--r--drivers/staging/irda/net/irlan/irlan_eth.c340
-rw-r--r--drivers/staging/irda/net/irlan/irlan_event.c60
-rw-r--r--drivers/staging/irda/net/irlan/irlan_filter.c240
-rw-r--r--drivers/staging/irda/net/irlan/irlan_provider.c408
-rw-r--r--drivers/staging/irda/net/irlan/irlan_provider_event.c233
-rw-r--r--drivers/staging/irda/net/irlap.c1207
-rw-r--r--drivers/staging/irda/net/irlap_event.c2316
-rw-r--r--drivers/staging/irda/net/irlap_frame.c1407
-rw-r--r--drivers/staging/irda/net/irlmp.c1996
-rw-r--r--drivers/staging/irda/net/irlmp_event.c886
-rw-r--r--drivers/staging/irda/net/irlmp_frame.c476
-rw-r--r--drivers/staging/irda/net/irmod.c199
-rw-r--r--drivers/staging/irda/net/irnet/Kconfig13
-rw-r--r--drivers/staging/irda/net/irnet/Makefile7
-rw-r--r--drivers/staging/irda/net/irnet/irnet.h522
-rw-r--r--drivers/staging/irda/net/irnet/irnet_irda.c1885
-rw-r--r--drivers/staging/irda/net/irnet/irnet_irda.h178
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.c1189
-rw-r--r--drivers/staging/irda/net/irnet/irnet_ppp.h116
-rw-r--r--drivers/staging/irda/net/irnetlink.c162
-rw-r--r--drivers/staging/irda/net/irproc.c96
-rw-r--r--drivers/staging/irda/net/irqueue.c911
-rw-r--r--drivers/staging/irda/net/irsysctl.c258
-rw-r--r--drivers/staging/irda/net/irttp.c1891
-rw-r--r--drivers/staging/irda/net/parameters.c584
-rw-r--r--drivers/staging/irda/net/qos.c771
-rw-r--r--drivers/staging/irda/net/timer.c231
-rw-r--r--drivers/staging/irda/net/wrapper.c492
-rw-r--r--drivers/staging/ks7010/ks7010_sdio.c4
-rw-r--r--drivers/staging/ks7010/ks_hostif.c4
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h28
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h104
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h26
-rw-r--r--drivers/staging/lustre/include/linux/lnet/api.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h15
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h50
-rw-r--r--drivers/staging/lustre/include/linux/lnet/socklnd.h15
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_debug.h149
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/libcfs_ioctl.h (renamed from drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h)0
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-dlc.h (renamed from drivers/staging/lustre/include/linux/lnet/lib-dlc.h)4
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnet-types.h (renamed from drivers/staging/lustre/include/linux/lnet/types.h)0
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetctl.h (renamed from drivers/staging/lustre/include/linux/lnet/lnetctl.h)51
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/lnetst.h (renamed from drivers/staging/lustre/include/linux/lnet/lnetst.h)129
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/nidstr.h (renamed from drivers/staging/lustre/include/linux/lnet/nidstr.h)2
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lnet/socklnd.h (renamed from drivers/staging/lustre/include/linux/lnet/lnet.h)24
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_cfg.h (renamed from drivers/staging/lustre/lustre/include/lustre_cfg.h)188
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fid.h293
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_fiemap.h (renamed from drivers/staging/lustre/lustre/include/lustre/ll_fiemap.h)6
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_idl.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_idl.h)689
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ioctl.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_ioctl.h)203
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_kernelcomm.h (renamed from drivers/staging/lustre/lustre/include/uapi_kernelcomm.h)6
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ostid.h236
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_param.h94
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_user.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_user.h)15
-rw-r--r--drivers/staging/lustre/include/uapi/linux/lustre/lustre_ver.h (renamed from drivers/staging/lustre/lustre/include/lustre_ver.h)0
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h5
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.h18
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c20
-rw-r--r--drivers/staging/lustre/lnet/libcfs/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c49
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_string.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-curproc.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-debug.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-mem.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-tracefile.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/prng.c2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c12
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.h2
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-md.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-me.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c6
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-ptl.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/lo.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c5
-rw-r--r--drivers/staging/lustre/lnet/lnet/net_fault.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/nidstrings.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/peer.c4
-rw-r--r--drivers/staging/lustre/lnet/lnet/router.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c5
-rw-r--r--drivers/staging/lustre/lnet/selftest/Makefile3
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c8
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c9
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h7
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h2
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h9
-rw-r--r--drivers/staging/lustre/lustre/Kconfig10
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c5
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c12
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c19
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c18
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c14
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/interval_tree.h4
-rw-r--r--drivers/staging/lustre/lustre/include/llog_swab.h3
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h10
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_debug.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h23
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h25
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_errno.h (renamed from drivers/staging/lustre/lustre/include/lustre/lustre_errno.h)0
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_export.h6
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h84
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_handles.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_linkea.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lmv.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h15
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mds.h11
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h39
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_nrs.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_obdo.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h109
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_swab.h8
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h20
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h6
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h44
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h6
-rw-r--r--drivers/staging/lustre/lustre/include/seq_range.h2
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c46
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_inodebits.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c12
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c17
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c10
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_plain.c6
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c36
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c22
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c35
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h20
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c31
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c12
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/range_lock.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c4
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c6
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_security.c23
-rw-r--r--drivers/staging/lustre/lustre/lmv/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c15
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c22
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h8
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c184
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c6
-rw-r--r--drivers/staging/lustre/lustre/lov/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h96
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c87
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c29
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h31
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c152
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c43
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c469
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c13
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c4
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c46
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_io.c51
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c2
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c6
-rw-r--r--drivers/staging/lustre/lustre/mdc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c10
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c26
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c25
-rw-r--r--drivers/staging/lustre/lustre/mgc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c4
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_internal.h11
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c39
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c41
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/kernelcomm.c11
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linkea.c75
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c102
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_counters.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c29
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ref.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c14
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c74
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c44
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c6
-rw-r--r--drivers/staging/lustre/lustre/obdclass/statfs_pack.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/uuid.c6
-rw-r--r--drivers/staging/lustre/lustre/obdecho/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c33
-rw-r--r--drivers/staging/lustre/lustre/osc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c8
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c31
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h6
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c9
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c2
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c32
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs_fifo.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pers.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c8
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c18
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c20
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c13
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_lproc.c16
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_null.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c10
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c26
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c24
-rw-r--r--drivers/staging/media/atomisp/i2c/ap1302.c7
-rw-r--r--drivers/staging/media/atomisp/i2c/gc0310.c5
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/gc2235.h6
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/ad5816g.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/drv201.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9714.c14
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9718.c5
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/dw9719.c11
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.c48
-rw-r--r--drivers/staging/media/atomisp/i2c/imx/imx.h29
-rw-r--r--drivers/staging/media/atomisp/i2c/lm3554.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/mt9m114.c12
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2680.c19
-rw-r--r--drivers/staging/media/atomisp/i2c/ov2722.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/ov5693.c10
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.c2
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858.h3
-rw-r--r--drivers/staging/media/atomisp/i2c/ov8858_btns.h3
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp.h6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.c50
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_cmd.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_css20.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_internal.h13
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_ioctl.c3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_subdev.c6
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/atomisp_v4l2.c35
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/isys/src/ibuf_ctrl_rmgr.c27
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css.c1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/css2400/sh_css_firmware.c8
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm.c139
-rw-r--r--drivers/staging/media/bcm2048/radio-bcm2048.c3
-rw-r--r--drivers/staging/media/cxd2099/cxd2099.c21
-rw-r--r--drivers/staging/media/davinci_vpfe/vpfe_video.c2
-rw-r--r--drivers/staging/media/imx/Kconfig3
-rw-r--r--drivers/staging/media/imx/imx-ic-prpencvf.c57
-rw-r--r--drivers/staging/media/imx/imx-media-capture.c4
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c37
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c4
-rw-r--r--drivers/staging/media/imx/imx-media-of.c50
-rw-r--r--drivers/staging/media/imx/imx-media-vdic.c37
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c18
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hdm.c10
-rw-r--r--drivers/staging/most/hdm-usb/hdm_usb.c22
-rw-r--r--drivers/staging/mt29f_spinand/mt29f_spinand.c2
-rw-r--r--drivers/staging/nvec/nvec.c2
-rw-r--r--drivers/staging/octeon/ethernet-rx.c79
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c2
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433-overlay.dts53
-rw-r--r--drivers/staging/pi433/Documentation/devicetree/pi433.txt62
-rw-r--r--drivers/staging/pi433/Documentation/pi433.txt274
-rw-r--r--drivers/staging/pi433/Kconfig16
-rw-r--r--drivers/staging/pi433/Makefile3
-rw-r--r--drivers/staging/pi433/TODO5
-rw-r--r--drivers/staging/pi433/pi433_if.c1338
-rw-r--r--drivers/staging/pi433/pi433_if.h152
-rw-r--r--drivers/staging/pi433/rf69.c1032
-rw-r--r--drivers/staging/pi433/rf69.h82
-rw-r--r--drivers/staging/pi433/rf69_enum.h201
-rw-r--r--drivers/staging/pi433/rf69_registers.h489
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c16
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c6
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c14
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c6
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h3
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c78
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_HTProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c23
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h11
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c2
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c4
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c2
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_btcoex.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c7
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_odm.c38
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_pwrctrl.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c14
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_btcoex.c14
-rw-r--r--drivers/staging/rtl8723bs/hal/hal_com.c4
-rw-r--r--drivers/staging/rtl8723bs/hal/odm.c46
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c20
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_rxdesc.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/sdio_ops.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c1008
-rw-r--r--drivers/staging/rtl8723bs/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_intf.c2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/sdio_ops_linux.c4
-rw-r--r--drivers/staging/rtl8723bs/os_dep/xmit_linux.c10
-rw-r--r--drivers/staging/rtlwifi/Kconfig22
-rw-r--r--drivers/staging/rtlwifi/Makefile70
-rw-r--r--drivers/staging/rtlwifi/TODO11
-rw-r--r--drivers/staging/rtlwifi/base.c2826
-rw-r--r--drivers/staging/rtlwifi/base.h186
-rw-r--r--drivers/staging/rtlwifi/btcoexist/Makefile8
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbt_precomp.h85
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.c5244
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b1ant.h444
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.c5225
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822b2ant.h498
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.c65
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtc8822bwifionly.h35
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.c1881
-rw-r--r--drivers/staging/rtlwifi/btcoexist/halbtcoutsrc.h802
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.c528
-rw-r--r--drivers/staging/rtlwifi/btcoexist/rtl_btc.h75
-rw-r--r--drivers/staging/rtlwifi/cam.c326
-rw-r--r--drivers/staging/rtlwifi/cam.h50
-rw-r--r--drivers/staging/rtlwifi/core.c2046
-rw-r--r--drivers/staging/rtlwifi/core.h86
-rw-r--r--drivers/staging/rtlwifi/debug.c636
-rw-r--r--drivers/staging/rtlwifi/debug.h234
-rw-r--r--drivers/staging/rtlwifi/efuse.c1342
-rw-r--r--drivers/staging/rtlwifi/efuse.h120
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_2_platform.h52
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_cfg.h132
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_phy.c106
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.c563
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_8822b_pwr_seq.h40
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.c343
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b.h44
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.c323
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_pcie.h53
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.c184
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_sdio.h42
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.c185
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_api_8822b_usb.h45
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.c414
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_8822b/halmac_func_8822b.h38
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_88xx_cfg.h171
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.c5979
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx.h396
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.c329
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_pcie.h71
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.c974
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_sdio.h84
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.c554
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_api_88xx_usb.h73
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c4494
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.h321
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.c426
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_api.h82
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit2.h13407
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_bit_8822b.h12103
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_info.h122
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_c2h_nic.h184
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_fw_offload_h2c_nic.h515
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_h2c_extra_info_nic.h115
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_intf_phy_cmd.h54
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_c2h_nic.h403
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_original_h2c_nic.h1011
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pcie_reg.h28
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_pwr_seq_cmd.h116
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg2.h1132
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_reg_8822b.h728
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_chip.h48
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_bd_nic.h48
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_chip.h118
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_rx_desc_nic.h133
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_sdio_reg.h62
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_chip.h118
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_bd_nic.h123
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_chip.h444
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_tx_desc_nic.h506
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_type.h1934
-rw-r--r--drivers/staging/rtlwifi/halmac/halmac_usb_reg.h28
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.c1384
-rw-r--r--drivers/staging/rtlwifi/halmac/rtl_halmac.h94
-rw-r--r--drivers/staging/rtlwifi/pci.c2508
-rw-r--r--drivers/staging/rtlwifi/pci.h329
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.c965
-rw-r--r--drivers/staging/rtlwifi/phydm/halphyrf_ce.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/mp_precomp.h24
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.c1986
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm.h946
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.c200
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_acs.h57
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.c941
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adaptivity.h119
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.c628
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_adc_sampling.h96
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.c83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_antdiv.h301
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_beamforming.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.c457
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_ccx.h83
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.c343
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_cfotracking.h60
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.c2910
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_debug.h175
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dfs.h59
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.c1535
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dig.h241
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamic_rx_path.h37
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.c129
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamicbbpowersaving.h50
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.c102
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_dynamictxpower.h64
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.c139
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_edcaturbocheck.h44
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_features.h33
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.c1928
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_hwconfig.h510
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.c341
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_interface.h205
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_iqk.h76
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.c228
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_kfree.h42
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.c330
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_noisemonitor.h46
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.c644
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_powertracking_ce.h293
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_pre_define.h613
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_precomp.h85
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.c422
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_psd.h67
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.c1208
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_rainfo.h269
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_reg.h151
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11ac.h94
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_regdefine11n.h213
-rw-r--r--drivers/staging/rtlwifi/phydm/phydm_types.h130
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.c1969
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_bb.h54
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.c222
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_mac.h38
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.c4744
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halhwimg8822b_rf.h129
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.c351
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/halphyrf_8822b.h45
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.c1815
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_hal_api8822b.h84
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.c1410
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_iqk_8822b.h48
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.c168
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_regconfig8822b.h54
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.c225
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/phydm_rtl8822b.h30
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl8822b/version_rtl8822b.h34
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c874
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.h45
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/halcomtxbf.h67
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbf8822b.h39
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfinterface.h38
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/haltxbfjaguar.h36
-rw-r--r--drivers/staging/rtlwifi/phydm/txbf/phydm_hal_txbf_api.h41
-rw-r--r--drivers/staging/rtlwifi/ps.c1007
-rw-r--r--drivers/staging/rtlwifi/ps.h50
-rw-r--r--drivers/staging/rtlwifi/pwrseqcmd.h94
-rw-r--r--drivers/staging/rtlwifi/rc.c322
-rw-r--r--drivers/staging/rtlwifi/rc.h49
-rw-r--r--drivers/staging/rtlwifi/regd.c469
-rw-r--r--drivers/staging/rtlwifi/regd.h63
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/Makefile7
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/def.h82
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c968
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.h198
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2441
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.h66
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.c127
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/led.h34
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.c2233
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/phy.h145
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/reg.h1653
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.c481
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/sw.h32
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.c1015
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/trx.h165
-rw-r--r--drivers/staging/rtlwifi/stats.c260
-rw-r--r--drivers/staging/rtlwifi/stats.h42
-rw-r--r--drivers/staging/rtlwifi/wifi.h3375
-rw-r--r--drivers/staging/rts5208/ms.c5
-rw-r--r--drivers/staging/rts5208/rtsx.c15
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c4
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c2
-rw-r--r--drivers/staging/rts5208/sd.c25
-rw-r--r--drivers/staging/rts5208/spi.c8
-rw-r--r--drivers/staging/rts5208/xd.c17
-rw-r--r--drivers/staging/skein/skein_block.c323
-rw-r--r--drivers/staging/skein/skein_block.h323
-rw-r--r--drivers/staging/speakup/spk_ttyio.c77
-rw-r--r--drivers/staging/typec/fusb302/Kconfig2
-rw-r--r--drivers/staging/typec/fusb302/TODO4
-rw-r--r--drivers/staging/typec/fusb302/fusb302.c144
-rw-r--r--drivers/staging/typec/pd.h2
-rw-r--r--drivers/staging/typec/tcpm.c471
-rw-r--r--drivers/staging/typec/tcpm.h12
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt14
-rw-r--r--drivers/staging/unisys/include/channel.h361
-rw-r--r--drivers/staging/unisys/include/iochannel.h554
-rw-r--r--drivers/staging/unisys/include/visorbus.h44
-rw-r--r--drivers/staging/unisys/visorbus/controlvmchannel.h715
-rw-r--r--drivers/staging/unisys/visorbus/vbuschannel.h96
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c469
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_private.h38
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c219
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c607
-rw-r--r--drivers/staging/unisys/visorbus/vmcallinterface.h54
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c556
-rw-r--r--drivers/staging/unisys/visorinput/ultrainputreport.h68
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c138
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c736
-rw-r--r--drivers/staging/vboxvideo/Kconfig11
-rw-r--r--drivers/staging/vboxvideo/TODO4
-rw-r--r--drivers/staging/vboxvideo/vbox_drv.c7
-rw-r--r--drivers/staging/vboxvideo/vbox_fb.c176
-rw-r--r--drivers/staging/vboxvideo/vbox_main.c8
-rw-r--r--drivers/staging/vboxvideo/vbox_mode.c12
-rw-r--r--drivers/staging/vboxvideo/vbox_ttm.c2
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c8
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c2
-rw-r--r--drivers/staging/vt6655/card.c6
-rw-r--r--drivers/staging/vt6655/mac.c2
-rw-r--r--drivers/staging/vt6656/device.h2
-rw-r--r--drivers/staging/vt6656/firmware.c2
-rw-r--r--drivers/staging/vt6656/key.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c5
-rw-r--r--drivers/staging/vt6656/power.c6
-rw-r--r--drivers/staging/vt6656/rf.c6
-rw-r--r--drivers/staging/vt6656/usbpipe.c4
-rw-r--r--drivers/staging/wilc1000/host_interface.c4
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c5
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c65
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h4
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h100
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c16
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c1
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c2
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c27
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c37
-rw-r--r--drivers/target/target_core_alua.c3
-rw-r--r--drivers/target/target_core_file.c2
-rw-r--r--drivers/target/target_core_iblock.c4
-rw-r--r--drivers/target/target_core_pr.c3
-rw-r--r--drivers/tee/optee/core.c19
-rw-r--r--drivers/tee/optee/optee_smc.h12
-rw-r--r--drivers/tee/optee/rpc.c15
-rw-r--r--drivers/tee/tee_core.c5
-rw-r--r--drivers/tee/tee_shm.c2
-rw-r--r--drivers/thermal/Kconfig12
-rw-r--r--drivers/thermal/Makefile1
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c2
-rw-r--r--drivers/thermal/hisi_thermal.c2
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c2
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.h8
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c43
-rw-r--r--drivers/thermal/int340x_thermal/int3406_thermal.c96
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c2
-rw-r--r--drivers/thermal/intel_pch_thermal.c12
-rw-r--r--drivers/thermal/mtk_thermal.c88
-rw-r--r--drivers/thermal/qoriq_thermal.c2
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/rockchip_thermal.c65
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/thermal/thermal_core.c31
-rw-r--r--drivers/thermal/thermal_core.h1
-rw-r--r--drivers/thermal/thermal_sysfs.c29
-rw-r--r--drivers/thermal/uniphier_thermal.c384
-rw-r--r--drivers/thermal/zx2967_thermal.c2
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/icm.c13
-rw-r--r--drivers/thunderbolt/switch.c20
-rw-r--r--drivers/thunderbolt/tb.c4
-rw-r--r--drivers/tty/Kconfig8
-rw-r--r--drivers/tty/Makefile1
-rw-r--r--drivers/tty/ehv_bytechan.c2
-rw-r--r--drivers/tty/goldfish.c234
-rw-r--r--drivers/tty/hvc/Kconfig2
-rw-r--r--drivers/tty/hvc/hvc_opal.c16
-rw-r--r--drivers/tty/hvc/hvc_vio.c20
-rw-r--r--drivers/tty/hvc/hvcs.c4
-rw-r--r--drivers/tty/isicom.c2
-rw-r--r--drivers/tty/mips_ejtag_fdc.c2
-rw-r--r--drivers/tty/moxa.c2
-rw-r--r--drivers/tty/mxser.c2
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/pty.c72
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serial/21285.c2
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c7
-rw-r--r--drivers/tty/serial/8250/8250_core.c16
-rw-r--r--drivers/tty/serial/8250/8250_dw.c2
-rw-r--r--drivers/tty/serial/8250/8250_early.c8
-rw-r--r--drivers/tty/serial/8250/8250_exar.c2
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c8
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c8
-rw-r--r--drivers/tty/serial/8250/8250_men_mcb.c118
-rw-r--r--drivers/tty/serial/8250/8250_mtk.c5
-rw-r--r--drivers/tty/serial/8250/8250_of.c60
-rw-r--r--drivers/tty/serial/8250/8250_pci.c43
-rw-r--r--drivers/tty/serial/8250/8250_port.c81
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c63
-rw-r--r--drivers/tty/serial/8250/Kconfig11
-rw-r--r--drivers/tty/serial/8250/Makefile1
-rw-r--r--drivers/tty/serial/Kconfig4
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c6
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/arc_uart.c4
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/tty/serial/earlycon.c7
-rw-r--r--drivers/tty/serial/fsl_lpuart.c76
-rw-r--r--drivers/tty/serial/imx.c20
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c2
-rw-r--r--drivers/tty/serial/meson_uart.c2
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c14
-rw-r--r--drivers/tty/serial/msm_serial.c19
-rw-r--r--drivers/tty/serial/mux.c16
-rw-r--r--drivers/tty/serial/omap-serial.c13
-rw-r--r--drivers/tty/serial/owl-uart.c635
-rw-r--r--drivers/tty/serial/pch_uart.c40
-rw-r--r--drivers/tty/serial/pmac_zilog.c4
-rw-r--r--drivers/tty/serial/serial-tegra.c2
-rw-r--r--drivers/tty/serial/serial_core.c47
-rw-r--r--drivers/tty/serial/sh-sci.c3
-rw-r--r--drivers/tty/serial/sprd_serial.c8
-rw-r--r--drivers/tty/serial/st-asc.c2
-rw-r--r--drivers/tty/serial/stm32-usart.c125
-rw-r--r--drivers/tty/serial/stm32-usart.h37
-rw-r--r--drivers/tty/serial/sunsab.c2
-rw-r--r--drivers/tty/serial/sunsu.c6
-rw-r--r--drivers/tty/serial/ucc_uart.c2
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/synclink.c2
-rw-r--r--drivers/tty/synclink_gt.c2
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/tty_buffer.c26
-rw-r--r--drivers/tty/tty_io.c115
-rw-r--r--drivers/tty/vcc.c1155
-rw-r--r--drivers/usb/atm/cxacru.c2
-rw-r--r--drivers/usb/atm/speedtch.c6
-rw-r--r--drivers/usb/atm/ueagle-atm.c4
-rw-r--r--drivers/usb/atm/usbatm.c6
-rw-r--r--drivers/usb/atm/xusbatm.c1
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c2
-rw-r--r--drivers/usb/chipidea/Makefile1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c1
-rw-r--r--drivers/usb/chipidea/ci_hdrc_tegra.c155
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c2
-rw-r--r--drivers/usb/chipidea/core.c6
-rw-r--r--drivers/usb/chipidea/otg_fsm.c2
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usbtmc.c4
-rw-r--r--drivers/usb/common/common.c15
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/core/hub.c19
-rw-r--r--drivers/usb/core/ledtrig-usbport.c5
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/core/sysfs.c2
-rw-r--r--drivers/usb/dwc2/gadget.c2
-rw-r--r--drivers/usb/dwc2/hcd.c6
-rw-r--r--drivers/usb/dwc3/dwc3-keystone.c22
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c4
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c4
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/function/f_fs.c7
-rw-r--r--drivers/usb/gadget/function/f_hid.c17
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c21
-rw-r--r--drivers/usb/gadget/function/f_midi.c68
-rw-r--r--drivers/usb/gadget/function/f_ncm.c2
-rw-r--r--drivers/usb/gadget/function/f_rndis.c20
-rw-r--r--drivers/usb/gadget/function/u_audio.c4
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/u_ether.h1
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h35
-rw-r--r--drivers/usb/gadget/function/u_rndis.h4
-rw-r--r--drivers/usb/gadget/function/u_serial.c2
-rw-r--r--drivers/usb/gadget/legacy/webcam.c1
-rw-r--r--drivers/usb/gadget/udc/bdc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc.h24
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c148
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_dbg.c16
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_udc.c7
-rw-r--r--drivers/usb/gadget/udc/core.c20
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c2
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c117
-rw-r--r--drivers/usb/gadget/udc/s3c2410_udc.c4
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c6
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-omap.c4
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c4
-rw-r--r--drivers/usb/host/hwa-hc.c4
-rw-r--r--drivers/usb/host/imx21-hcd.c8
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/max3421-hcd.c2
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-sm501.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c9
-rw-r--r--drivers/usb/host/pci-quirks.c35
-rw-r--r--drivers/usb/host/r8a66597-hcd.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/whci/hcd.c2
-rw-r--r--drivers/usb/host/xhci-hub.c127
-rw-r--r--drivers/usb/host/xhci-mtk.c1
-rw-r--r--drivers/usb/host/xhci-plat.c10
-rw-r--r--drivers/usb/host/xhci-rcar.c40
-rw-r--r--drivers/usb/host/xhci-ring.c22
-rw-r--r--drivers/usb/host/xhci-trace.h23
-rw-r--r--drivers/usb/host/xhci.h90
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/isp1760/isp1760-hcd.c6
-rw-r--r--drivers/usb/misc/adutux.c2
-rw-r--r--drivers/usb/misc/chaoskey.c2
-rw-r--r--drivers/usb/misc/cytherm.c1
-rw-r--r--drivers/usb/misc/ftdi-elan.c33
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c4
-rw-r--r--drivers/usb/misc/ldusb.c1
-rw-r--r--drivers/usb/misc/legousbtower.c2
-rw-r--r--drivers/usb/misc/lvstest.c41
-rw-r--r--drivers/usb/misc/rio500.c4
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c13
-rw-r--r--drivers/usb/misc/trancevibrator.c2
-rw-r--r--drivers/usb/misc/usb251xb.c1
-rw-r--r--drivers/usb/misc/usbsevseg.c2
-rw-r--r--drivers/usb/misc/uss720.c7
-rw-r--r--drivers/usb/mtu3/mtu3.h2
-rw-r--r--drivers/usb/mtu3/mtu3_dr.c58
-rw-r--r--drivers/usb/mtu3/mtu3_gadget.c4
-rw-r--r--drivers/usb/mtu3/mtu3_gadget_ep0.c23
-rw-r--r--drivers/usb/mtu3/mtu3_host.c4
-rw-r--r--drivers/usb/mtu3/mtu3_hw_regs.h4
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c1
-rw-r--r--drivers/usb/musb/musb_core.c22
-rw-r--r--drivers/usb/musb/musb_core.h24
-rw-r--r--drivers/usb/musb/musb_dsps.c13
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c2
-rw-r--r--drivers/usb/phy/phy-mv-usb.c4
-rw-r--r--drivers/usb/phy/phy-qcom-8x16-usb.c9
-rw-r--r--drivers/usb/phy/phy-tahvo.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/phy/phy.c276
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c2
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c2
-rw-r--r--drivers/usb/serial/option.c10
-rw-r--r--drivers/usb/storage/realtek_cr.c1
-rw-r--r--drivers/usb/storage/uas.c4
-rw-r--r--drivers/usb/usbip/stub_main.c2
-rw-r--r--drivers/usb/usbip/usbip_common.c2
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/usb/usbip/vhci_hcd.c4
-rw-r--r--drivers/usb/usbip/vhci_sysfs.c6
-rw-r--r--drivers/usb/wusbcore/cbaf.c2
-rw-r--r--drivers/usb/wusbcore/dev-sysfs.c2
-rw-r--r--drivers/usb/wusbcore/wusbhc.c2
-rw-r--r--drivers/uwb/lc-rc.c2
-rw-r--r--drivers/vfio/platform/vfio_amba.c2
-rw-r--r--drivers/vfio/vfio.c25
-rw-r--r--drivers/vfio/vfio_iommu_type1.c16
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/vhost/vhost.h2
-rw-r--r--drivers/video/backlight/gpio_backlight.c62
-rw-r--r--drivers/video/backlight/kb3886_bl.c2
-rw-r--r--drivers/video/backlight/lm3630a_bl.c5
-rw-r--r--drivers/video/backlight/pandora_bl.c2
-rw-r--r--drivers/video/backlight/pwm_bl.c2
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/Makefile8
-rw-r--r--drivers/video/console/sticore.c11
-rw-r--r--drivers/video/console/vgacon.c5
-rw-r--r--drivers/video/fbdev/68328fb.c2
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/amba-clcd.c2
-rw-r--r--drivers/video/fbdev/arkfb.c2
-rw-r--r--drivers/video/fbdev/asiliantfb.c2
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/aty/aty128fb.c4
-rw-r--r--drivers/video/fbdev/aty/atyfb_base.c6
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c6
-rw-r--r--drivers/video/fbdev/bfin-lq035q1-fb.c2
-rw-r--r--drivers/video/fbdev/bw2.c4
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c4
-rw-r--r--drivers/video/fbdev/cg6.c4
-rw-r--r--drivers/video/fbdev/chipsfb.c4
-rw-r--r--drivers/video/fbdev/cobalt_lcdfb.c2
-rw-r--r--drivers/video/fbdev/core/Makefile14
-rw-r--r--drivers/video/fbdev/core/bitblit.c (renamed from drivers/video/console/bitblit.c)8
-rw-r--r--drivers/video/fbdev/core/fb_defio.c2
-rw-r--r--drivers/video/fbdev/core/fbcon.c (renamed from drivers/video/console/fbcon.c)37
-rw-r--r--drivers/video/fbdev/core/fbcon.h (renamed from drivers/video/console/fbcon.h)9
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c (renamed from drivers/video/console/fbcon_ccw.c)8
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c (renamed from drivers/video/console/fbcon_cw.c)8
-rw-r--r--drivers/video/fbdev/core/fbcon_dmi_quirks.c145
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.c (renamed from drivers/video/console/fbcon_rotate.c)4
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h (renamed from drivers/video/console/fbcon_rotate.h)0
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c (renamed from drivers/video/console/fbcon_ud.c)8
-rw-r--r--drivers/video/fbdev/core/fbmem.c24
-rw-r--r--drivers/video/fbdev/core/fbmon.c4
-rw-r--r--drivers/video/fbdev/core/softcursor.c (renamed from drivers/video/console/softcursor.c)4
-rw-r--r--drivers/video/fbdev/core/tileblit.c (renamed from drivers/video/console/tileblit.c)7
-rw-r--r--drivers/video/fbdev/cyber2000fb.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c2
-rw-r--r--drivers/video/fbdev/dnfb.c2
-rw-r--r--drivers/video/fbdev/efifb.c31
-rw-r--r--drivers/video/fbdev/fb-puv3.c2
-rw-r--r--drivers/video/fbdev/ffb.c4
-rw-r--r--drivers/video/fbdev/fm2fb.c2
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c2
-rw-r--r--drivers/video/fbdev/grvga.c2
-rw-r--r--drivers/video/fbdev/i810/i810_main.c4
-rw-r--r--drivers/video/fbdev/imsttfb.c2
-rw-r--r--drivers/video/fbdev/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/fbdev/kyro/fbdev.c2
-rw-r--r--drivers/video/fbdev/leo.c4
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c14
-rw-r--r--drivers/video/fbdev/maxinefb.c2
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c4
-rw-r--r--drivers/video/fbdev/neofb.c2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c2
-rw-r--r--drivers/video/fbdev/offb.c10
-rw-r--r--drivers/video/fbdev/omap/lcd_h3.c2
-rw-r--r--drivers/video/fbdev/omap/lcd_mipid.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/p9100.c4
-rw-r--r--drivers/video/fbdev/pm2fb.c2
-rw-r--r--drivers/video/fbdev/pm3fb.c2
-rw-r--r--drivers/video/fbdev/pmag-aa-fb.c4
-rw-r--r--drivers/video/fbdev/pmag-ba-fb.c4
-rw-r--r--drivers/video/fbdev/pmagb-b-fb.c4
-rw-r--r--drivers/video/fbdev/ps3fb.c2
-rw-r--r--drivers/video/fbdev/pvr2fb.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/fbdev/q40fb.c2
-rw-r--r--drivers/video/fbdev/riva/fbdev.c2
-rw-r--r--drivers/video/fbdev/s3fb.c2
-rw-r--r--drivers/video/fbdev/savage/savagefb_driver.c2
-rw-r--r--drivers/video/fbdev/sis/init301.c15
-rw-r--r--drivers/video/fbdev/skeletonfb.c4
-rw-r--r--drivers/video/fbdev/sm501fb.c2
-rw-r--r--drivers/video/fbdev/sm712fb.c17
-rw-r--r--drivers/video/fbdev/smscufx.c2
-rw-r--r--drivers/video/fbdev/sunxvr1000.c10
-rw-r--r--drivers/video/fbdev/sunxvr2500.c2
-rw-r--r--drivers/video/fbdev/sunxvr500.c2
-rw-r--r--drivers/video/fbdev/tcx.c4
-rw-r--r--drivers/video/fbdev/tdfxfb.c2
-rw-r--r--drivers/video/fbdev/tridentfb.c2
-rw-r--r--drivers/video/fbdev/udlfb.c5
-rw-r--r--drivers/video/fbdev/uvesafb.c9
-rw-r--r--drivers/video/fbdev/vermilion/vermilion.c4
-rw-r--r--drivers/video/fbdev/via/via-core.c2
-rw-r--r--drivers/video/fbdev/vt8623fb.c4
-rw-r--r--drivers/video/fbdev/xilinxfb.c62
-rw-r--r--drivers/video/of_display_timing.c41
-rw-r--r--drivers/video/of_videomode.c2
-rw-r--r--drivers/virt/fsl_hypervisor.c12
-rw-r--r--drivers/virtio/Kconfig4
-rw-r--r--drivers/virtio/virtio_pci_common.c10
-rw-r--r--drivers/virtio/virtio_ring.c7
-rw-r--r--drivers/w1/masters/ds1wm.c108
-rw-r--r--drivers/w1/masters/ds2482.c12
-rw-r--r--drivers/w1/masters/ds2490.c2
-rw-r--r--drivers/w1/slaves/Kconfig14
-rw-r--r--drivers/w1/slaves/Makefile2
-rw-r--r--drivers/w1/slaves/w1_bq27000.c117
-rw-r--r--drivers/w1/slaves/w1_ds2438.c9
-rw-r--r--drivers/w1/slaves/w1_ds2805.c313
-rw-r--r--drivers/w1/slaves/w1_therm.c164
-rw-r--r--drivers/w1/w1.c22
-rw-r--r--drivers/watchdog/asm9260_wdt.c4
-rw-r--r--drivers/watchdog/aspeed_wdt.c132
-rw-r--r--drivers/watchdog/bcm7038_wdt.c4
-rw-r--r--drivers/watchdog/cadence_wdt.c6
-rw-r--r--drivers/watchdog/coh901327_wdt.c2
-rw-r--r--drivers/watchdog/da9063_wdt.c67
-rw-r--r--drivers/watchdog/diag288_wdt.c2
-rw-r--r--drivers/watchdog/iTCO_wdt.c22
-rw-r--r--drivers/watchdog/it87_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c74
-rw-r--r--drivers/watchdog/max77620_wdt.c2
-rw-r--r--drivers/watchdog/mei_wdt.c2
-rw-r--r--drivers/watchdog/meson_wdt.c2
-rw-r--r--drivers/watchdog/mt7621_wdt.c4
-rw-r--r--drivers/watchdog/octeon-wdt-main.c354
-rw-r--r--drivers/watchdog/octeon-wdt-nmi.S42
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c83
-rw-r--r--drivers/watchdog/pcwd_usb.c2
-rw-r--r--drivers/watchdog/qcom-wdt.c2
-rw-r--r--drivers/watchdog/renesas_wdt.c80
-rw-r--r--drivers/watchdog/rt2880_wdt.c4
-rw-r--r--drivers/watchdog/sc1200wdt.c2
-rw-r--r--drivers/watchdog/sp805_wdt.c2
-rw-r--r--drivers/watchdog/stm32_iwdg.c2
-rw-r--r--drivers/watchdog/ts72xx_wdt.c2
-rw-r--r--drivers/watchdog/twl4030_wdt.c2
-rw-r--r--drivers/watchdog/w83627hf_wdt.c2
-rw-r--r--drivers/watchdog/ziirave_wdt.c2
-rw-r--r--drivers/watchdog/zx2967_wdt.c2
-rw-r--r--drivers/xen/Kconfig12
-rw-r--r--drivers/xen/Makefile4
-rw-r--r--drivers/xen/balloon.c8
-rw-r--r--drivers/xen/biomerge.c3
-rw-r--r--drivers/xen/events/events_base.c6
-rw-r--r--drivers/xen/events/events_fifo.c7
-rw-r--r--drivers/xen/gntalloc.c2
-rw-r--r--drivers/xen/gntdev.c8
-rw-r--r--drivers/xen/platform-pci.c2
-rw-r--r--drivers/xen/pvcalls-back.c1240
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c44
5740 files changed, 424794 insertions, 133456 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index dfdcda00bfe3..d90fdc413648 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -125,7 +125,6 @@ obj-$(CONFIG_ACCESSIBILITY) += accessibility/
obj-$(CONFIG_ISDN) += isdn/
obj-$(CONFIG_EDAC) += edac/
obj-$(CONFIG_EISA) += eisa/
-obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-y += mmc/
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index b1aacfc62b1f..90265ab4437a 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -50,6 +50,7 @@ acpi-$(CONFIG_ACPI_REDUCED_HARDWARE_ONLY) += evged.o
acpi-y += sysfs.o
acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
+acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-$(CONFIG_ACPI_NUMA) += numa.o
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
index c1c4877ca96c..2cd9f738812b 100644
--- a/drivers/acpi/acpi_lpat.c
+++ b/drivers/acpi/acpi_lpat.c
@@ -25,7 +25,7 @@
* @raw: the raw value, used as a key to get the temperature from the
* above mapping table
*
- * A positive converted temperarure value will be returned on success,
+ * A positive converted temperature value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
@@ -55,11 +55,11 @@ EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
* acpi_lpat_temp_to_raw(): Return raw value from temperature through
* LPAT conversion table
*
- * @lpat: the temperature_raw mapping table
+ * @lpat_table: the temperature_raw mapping table
* @temp: the temperature, used as a key to get the raw value from the
* above mapping table
*
- * A positive converted temperature value will be returned on success,
+ * The raw value will be returned on success,
* a negative errno will be returned in error cases.
*/
int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
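Aside: the two converters documented above walk an LPAT-style table of (temperature, raw) calibration pairs and interpolate between them. The standalone sketch below illustrates that idea only; the entry layout, names and rounding are assumptions for illustration, not the driver's actual implementation.

#include <stdio.h>

/* Hypothetical LPAT-style entry: a (temperature, raw) calibration pair. */
struct lpat_entry { int temp; int raw; };

/* Linear interpolation from a raw sensor value to a temperature.
 * Returns -1 when the value falls outside the table (illustrative only). */
static int raw_to_temp(const struct lpat_entry *t, int n, int raw)
{
	for (int i = 0; i < n - 1; i++) {
		if ((raw >= t[i].raw && raw <= t[i + 1].raw) ||
		    (raw <= t[i].raw && raw >= t[i + 1].raw)) {
			return t[i].temp + (raw - t[i].raw) *
			       (t[i + 1].temp - t[i].temp) /
			       (t[i + 1].raw - t[i].raw);
		}
	}
	return -1;
}

int main(void)
{
	const struct lpat_entry table[] = { { 200, 1024 }, { 400, 2048 } };

	printf("%d\n", raw_to_temp(table, 2, 1536));	/* midpoint -> 300 */
	return 0;
}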
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index f88caf5aab76..032ae44710e5 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -465,7 +465,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) {
- ret = -ENOMEM;
+ /* Skip the device, but continue the namespace scan. */
+ ret = 0;
goto err_out;
}
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index f098e25b6b41..86c10599d9f8 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -670,7 +670,7 @@ err:
}
-void __init acpi_processor_check_duplicates(void)
+static void __init acpi_processor_check_duplicates(void)
{
/* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index e88fe3632dd6..0972ec0e2eb8 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -418,7 +418,7 @@ static int video_set_report_key_events(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id video_dmi_table[] = {
+static const struct dmi_system_id video_dmi_table[] = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
*/
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index b125bdd3d58b..1709551bc4aa 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -18,6 +18,7 @@ acpi-y := \
dsmthdat.o \
dsobject.o \
dsopcode.o \
+ dspkginit.o \
dsutils.o \
dswexec.o \
dswload.o \
diff --git a/drivers/acpi/acpica/acapps.h b/drivers/acpi/acpica/acapps.h
index bb6a84b0b4b3..7a1a68b5ac5c 100644
--- a/drivers/acpi/acpica/acapps.h
+++ b/drivers/acpi/acpica/acapps.h
@@ -114,6 +114,8 @@ ac_get_all_tables_from_file(char *filename,
u8 get_only_aml_tables,
struct acpi_new_table_desc **return_list_head);
+void ac_delete_table_list(struct acpi_new_table_desc *list_head);
+
u8 ac_is_file_binary(FILE * file);
acpi_status ac_validate_table_header(FILE * file, long table_offset);
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 0d95c85cce06..f8f3a6e74128 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -237,6 +237,11 @@ acpi_ds_initialize_objects(u32 table_index,
* dsobject - Parser/Interpreter interface - object initialization and conversion
*/
acpi_status
+acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ union acpi_operand_object **obj_desc_ptr);
+
+acpi_status
acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
u32 buffer_length,
@@ -259,6 +264,14 @@ acpi_ds_create_node(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
/*
+ * dspkginit - Package object initialization
+ */
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context);
+
+/*
* dsutils - Parser/Interpreter interface utility routines
*/
void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 8ddd3b20e0c6..0d45b8bb1678 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -199,6 +199,7 @@ struct acpi_namespace_node {
#define ANOBJ_EVALUATED 0x20 /* Set on first evaluation of node */
#define ANOBJ_ALLOCATED_BUFFER 0x40 /* Method AML buffer is dynamic (install_method) */
+#define IMPLICIT_EXTERNAL 0x02 /* iASL only: This object created implicitly via External */
#define ANOBJ_IS_EXTERNAL 0x08 /* iASL only: This object created via External() */
#define ANOBJ_METHOD_NO_RETVAL 0x10 /* iASL only: Method has no return value */
#define ANOBJ_METHOD_SOME_NO_RETVAL 0x20 /* iASL only: Method has at least one return value */
@@ -604,7 +605,7 @@ struct acpi_update_state {
* Pkg state - used to traverse nested package structures
*/
struct acpi_pkg_state {
- ACPI_STATE_COMMON u16 index;
+ ACPI_STATE_COMMON u32 index;
union acpi_operand_object *source_object;
union acpi_operand_object *dest_object;
struct acpi_walk_state *walk_state;
@@ -867,7 +868,7 @@ struct acpi_parse_obj_named {
/* This version is used by the iASL compiler only */
-#define ACPI_MAX_PARSEOP_NAME 20
+#define ACPI_MAX_PARSEOP_NAME 20
struct acpi_parse_obj_asl {
ACPI_PARSE_COMMON union acpi_parse_object *child;
@@ -907,7 +908,7 @@ union acpi_parse_object {
struct asl_comment_state {
u8 comment_type;
u32 spaces_before;
- union acpi_parse_object *latest_parse_node;
+ union acpi_parse_object *latest_parse_op;
union acpi_parse_object *parsing_paren_brace_node;
u8 capture_comments;
};
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 27c3f982d810..5226146190bf 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -122,7 +122,9 @@ struct acpi_object_integer {
_type *pointer; \
u32 length;
-struct acpi_object_string { /* Null terminated, ASCII characters only */
+/* Null terminated, ASCII characters only */
+
+struct acpi_object_string {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_BUFFER_INFO(char) /* String in AML stream or allocated string */
};
@@ -211,7 +213,9 @@ struct acpi_object_method {
union acpi_operand_object *notify_list[2]; /* Handlers for system/device notifies */\
union acpi_operand_object *handler; /* Handler for Address space */
-struct acpi_object_notify_common { /* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+/* COMMON NOTIFY for POWER, PROCESSOR, DEVICE, and THERMAL */
+
+struct acpi_object_notify_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
struct acpi_object_device {
@@ -258,7 +262,9 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u8 access_length; /* For serial regions/fields */
-struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+/* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
+
+struct acpi_object_field_common {
ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Parent Operation Region object (REGION/BANK fields only) */
};
@@ -333,11 +339,12 @@ struct acpi_object_addr_handler {
struct acpi_object_reference {
ACPI_OBJECT_COMMON_HEADER u8 class; /* Reference Class */
u8 target_type; /* Used for Index Op */
- u8 reserved;
+ u8 resolved; /* Reference has been resolved to a value */
void *object; /* name_op=>HANDLE to obj, index_op=>union acpi_operand_object */
struct acpi_namespace_node *node; /* ref_of or Namepath */
union acpi_operand_object **where; /* Target of Index */
u8 *index_pointer; /* Used for Buffers and Strings */
+ u8 *aml; /* Used for deferred resolution of the ref */
u32 value; /* Used for Local/Arg/Index/ddb_handle */
};
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index c8da453bd960..84a3ceb6e384 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -76,7 +76,8 @@ void acpi_tb_release_temp_table(struct acpi_table_desc *table_desc);
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc);
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature);
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index);
u8 acpi_tb_is_table_loaded(u32 table_index);
@@ -132,6 +133,8 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
acpi_status acpi_tb_unload_table(u32 table_index);
+void acpi_tb_notify_table(u32 event, void *table);
+
void acpi_tb_terminate(void);
acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 2a3cc4296481..745134ade35f 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -516,7 +516,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index);
+ u32 index);
acpi_status
acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
@@ -538,6 +538,13 @@ acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder);
+acpi_status
+acpi_ut_short_multiply(u64 in_multiplicand, u32 multiplier, u64 *outproduct);
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result);
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result);
+
/*
* utmisc
*/
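Aside: the three new helpers declared above (short multiply and short shifts) presumably exist so the interpreter can perform 64-bit arithmetic even on toolchains without native 64-bit support. As a rough illustration of the idea, not the ACPICA implementation, a 64-bit left shift can be composed from two 32-bit halves:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: shift a 64-bit operand left by 'count' bits using
 * 32-bit halves, the way an interpreter might on a 32-bit-only target. */
static uint64_t shift_left_64(uint32_t lo, uint32_t hi, unsigned int count)
{
	if (count >= 64)
		return 0;
	if (count >= 32) {
		hi = lo << (count - 32);
		lo = 0;
	} else if (count) {
		hi = (hi << count) | (lo >> (32 - count));
		lo <<= count;
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	/* 0x0123456789abcdef << 8 == 0x23456789abcdef00 */
	printf("%llx\n",
	       (unsigned long long)shift_left_64(0x89abcdef, 0x01234567, 8));
	return 0;
}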
diff --git a/drivers/acpi/acpica/dbdisply.c b/drivers/acpi/acpica/dbdisply.c
index 46bf270ac525..5a606eac0c22 100644
--- a/drivers/acpi/acpica/dbdisply.c
+++ b/drivers/acpi/acpica/dbdisply.c
@@ -310,7 +310,7 @@ dump_node:
}
else {
- acpi_os_printf("Object (%p) Pathname: %s\n",
+ acpi_os_printf("Object %p: Namespace Node - Pathname: %s\n",
node, (char *)ret_buf.pointer);
}
@@ -326,7 +326,7 @@ dump_node:
obj_desc = acpi_ns_get_attached_object(node);
if (obj_desc) {
- acpi_os_printf("\nAttached Object (%p):\n", obj_desc);
+ acpi_os_printf("\nAttached Object %p:", obj_desc);
if (!acpi_os_readable
(obj_desc, sizeof(union acpi_operand_object))) {
acpi_os_printf
@@ -335,9 +335,36 @@ dump_node:
return;
}
- acpi_ut_debug_dump_buffer((void *)obj_desc,
- sizeof(union acpi_operand_object),
- display, ACPI_UINT32_MAX);
+ if (ACPI_GET_DESCRIPTOR_TYPE(((struct acpi_namespace_node *)
+ obj_desc)) ==
+ ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" Namespace Node - ");
+ status =
+ acpi_get_name((struct acpi_namespace_node *)
+ obj_desc,
+ ACPI_FULL_PATHNAME_NO_TRAILING,
+ &ret_buf);
+ if (ACPI_FAILURE(status)) {
+ acpi_os_printf
+ ("Could not convert name to pathname\n");
+ } else {
+ acpi_os_printf("Pathname: %s",
+ (char *)ret_buf.pointer);
+ }
+
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(struct
+ acpi_namespace_node),
+ display, ACPI_UINT32_MAX);
+ } else {
+ acpi_os_printf("\n");
+ acpi_ut_debug_dump_buffer((void *)obj_desc,
+ sizeof(union
+ acpi_operand_object),
+ display, ACPI_UINT32_MAX);
+ }
+
acpi_ex_dump_object_descriptor(obj_desc, 1);
}
}
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index c5dccc54307d..7bcf5f5ea029 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -184,6 +184,7 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
/* Execute flag should always be set when this function is entered */
if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+ ACPI_ERROR((AE_INFO, "Parse execute mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
@@ -556,6 +557,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
return_ACPI_STATUS(AE_OK);
}
+ ACPI_ERROR((AE_INFO, "Parse deferred mode is not set"));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 7df3152ed856..82448551781b 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -52,12 +52,6 @@
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsobject")
-/* Local prototypes */
-static acpi_status
-acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- union acpi_operand_object **obj_desc_ptr);
-
#ifndef ACPI_NO_METHOD_EXECUTION
/*******************************************************************************
*
@@ -73,15 +67,13 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
* Simple objects are any objects other than a package object!
*
******************************************************************************/
-
-static acpi_status
+acpi_status
acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
union acpi_parse_object *op,
union acpi_operand_object **obj_desc_ptr)
{
union acpi_operand_object *obj_desc;
acpi_status status;
- acpi_object_type type;
ACPI_FUNCTION_TRACE(ds_build_internal_object);
@@ -89,140 +81,47 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
if (op->common.aml_opcode == AML_INT_NAMEPATH_OP) {
/*
* This is a named object reference. If this name was
- * previously looked up in the namespace, it was stored in this op.
- * Otherwise, go ahead and look it up now
+ * previously looked up in the namespace, it was stored in
+ * this op. Otherwise, go ahead and look it up now
*/
if (!op->common.node) {
- status = acpi_ns_lookup(walk_state->scope_info,
- op->common.value.string,
- ACPI_TYPE_ANY,
- ACPI_IMODE_EXECUTE,
- ACPI_NS_SEARCH_PARENT |
- ACPI_NS_DONT_OPEN_SCOPE, NULL,
- ACPI_CAST_INDIRECT_PTR(struct
- acpi_namespace_node,
- &(op->
- common.
- node)));
- if (ACPI_FAILURE(status)) {
-
- /* Check if we are resolving a named reference within a package */
-
- if ((status == AE_NOT_FOUND)
- && (acpi_gbl_enable_interpreter_slack)
- &&
- ((op->common.parent->common.aml_opcode ==
- AML_PACKAGE_OP)
- || (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP))) {
- /*
- * We didn't find the target and we are populating elements
- * of a package - ignore if slack enabled. Some ASL code
- * contains dangling invalid references in packages and
- * expects that no exception will be issued. Leave the
- * element as a null element. It cannot be used, but it
- * can be overwritten by subsequent ASL code - this is
- * typically the case.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Ignoring unresolved reference in package [%4.4s]\n",
- walk_state->
- scope_info->scope.
- node->name.ascii));
-
- return_ACPI_STATUS(AE_OK);
- } else {
- ACPI_ERROR_NAMESPACE(op->common.value.
- string, status);
- }
-
- return_ACPI_STATUS(status);
- }
- }
-
- /* Special object resolution for elements of a package */
-
- if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (op->common.parent->common.aml_opcode ==
- AML_VARIABLE_PACKAGE_OP)) {
- /*
- * Attempt to resolve the node to a value before we insert it into
- * the package. If this is a reference to a common data type,
- * resolve it immediately. According to the ACPI spec, package
- * elements can only be "data objects" or method references.
- * Attempt to resolve to an Integer, Buffer, String or Package.
- * If cannot, return the named reference (for things like Devices,
- * Methods, etc.) Buffer Fields and Fields will resolve to simple
- * objects (int/buf/str/pkg).
- *
- * NOTE: References to things like Devices, Methods, Mutexes, etc.
- * will remain as named references. This behavior is not described
- * in the ACPI spec, but it appears to be an oversight.
- */
- obj_desc =
- ACPI_CAST_PTR(union acpi_operand_object,
- op->common.node);
-
- status =
- acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
- (struct
- acpi_namespace_node,
- &obj_desc),
- walk_state);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
- }
-
- /*
- * Special handling for Alias objects. We need to setup the type
- * and the Op->Common.Node to point to the Alias target. Note,
- * Alias has at most one level of indirection internally.
- */
- type = op->common.node->type;
- if (type == ACPI_TYPE_LOCAL_ALIAS) {
- type = obj_desc->common.type;
- op->common.node =
- ACPI_CAST_PTR(struct acpi_namespace_node,
- op->common.node->object);
- }
-
- switch (type) {
- /*
- * For these types, we need the actual node, not the subobject.
- * However, the subobject did not get an extra reference count above.
- *
- * TBD: should ex_resolve_node_to_value be changed to fix this?
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_THERMAL:
-
- acpi_ut_add_reference(op->common.node->object);
- /*lint -fallthrough */
- /*
- * For these types, we need the actual node, not the subobject.
- * The subobject got an extra reference count in ex_resolve_node_to_value.
- */
- case ACPI_TYPE_MUTEX:
- case ACPI_TYPE_METHOD:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_EVENT:
- case ACPI_TYPE_REGION:
-
- /* We will create a reference object for these types below */
- break;
+ /* Check if we are resolving a named reference within a package */
- default:
+ if ((op->common.parent->common.aml_opcode ==
+ AML_PACKAGE_OP)
+ || (op->common.parent->common.aml_opcode ==
+ AML_VARIABLE_PACKAGE_OP)) {
/*
- * All other types - the node was resolved to an actual
- * object, we are done.
+ * We won't resolve package elements here, we will do this
+ * after all ACPI tables are loaded into the namespace. This
+ * behavior supports both forward references to named objects
+ * and external references to objects in other tables.
*/
- goto exit;
+ goto create_new_object;
+ } else {
+ status = acpi_ns_lookup(walk_state->scope_info,
+ op->common.value.string,
+ ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT |
+ ACPI_NS_DONT_OPEN_SCOPE,
+ NULL,
+ ACPI_CAST_INDIRECT_PTR
+ (struct
+ acpi_namespace_node,
+ &(op->common.node)));
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(op->common.value.
+ string, status);
+ return_ACPI_STATUS(status);
+ }
}
}
}
+create_new_object:
+
/* Create and init a new internal ACPI object */
obj_desc = acpi_ut_create_internal_object((acpi_ps_get_opcode_info
@@ -240,7 +139,27 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(status);
}
-exit:
+ /*
+ * Handling for unresolved package reference elements.
+ * These are elements that are namepaths.
+ */
+ if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (op->common.parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ obj_desc->reference.resolved = TRUE;
+
+ if ((op->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+ !obj_desc->reference.node) {
+ /*
+ * Name was unresolved above.
+ * Get the prefix node for later lookup
+ */
+ obj_desc->reference.node =
+ walk_state->scope_info->scope.node;
+ obj_desc->reference.aml = op->common.aml;
+ obj_desc->reference.resolved = FALSE;
+ }
+ }
+
*obj_desc_ptr = obj_desc;
return_ACPI_STATUS(status);
}
@@ -351,200 +270,6 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
/*******************************************************************************
*
- * FUNCTION: acpi_ds_build_internal_package_obj
- *
- * PARAMETERS: walk_state - Current walk state
- * op - Parser object to be translated
- * element_count - Number of elements in the package - this is
- * the num_elements argument to Package()
- * obj_desc_ptr - Where the ACPI internal object is returned
- *
- * RETURN: Status
- *
- * DESCRIPTION: Translate a parser Op package object to the equivalent
- * namespace object
- *
- * NOTE: The number of elements in the package will be always be the num_elements
- * count, regardless of the number of elements in the package list. If
- * num_elements is smaller, only that many package list elements are used.
- * if num_elements is larger, the Package object is padded out with
- * objects of type Uninitialized (as per ACPI spec.)
- *
- * Even though the ASL compilers do not allow num_elements to be smaller
- * than the Package list length (for the fixed length package opcode), some
- * BIOS code modifies the AML on the fly to adjust the num_elements, and
- * this code compensates for that. This also provides compatibility with
- * other AML interpreters.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
- union acpi_parse_object *op,
- u32 element_count,
- union acpi_operand_object **obj_desc_ptr)
-{
- union acpi_parse_object *arg;
- union acpi_parse_object *parent;
- union acpi_operand_object *obj_desc = NULL;
- acpi_status status = AE_OK;
- u32 i;
- u16 index;
- u16 reference_count;
-
- ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
-
- /* Find the parent of a possibly nested package */
-
- parent = op->common.parent;
- while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
- (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
- parent = parent->common.parent;
- }
-
- /*
- * If we are evaluating a Named package object "Name (xxxx, Package)",
- * the package object already exists, otherwise it must be created.
- */
- obj_desc = *obj_desc_ptr;
- if (!obj_desc) {
- obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
- *obj_desc_ptr = obj_desc;
- if (!obj_desc) {
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.node = parent->common.node;
- }
-
- /*
- * Allocate the element array (array of pointers to the individual
- * objects) based on the num_elements parameter. Add an extra pointer slot
- * so that the list is always null terminated.
- */
- obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
- element_count +
- 1) * sizeof(void *));
-
- if (!obj_desc->package.elements) {
- acpi_ut_delete_object_desc(obj_desc);
- return_ACPI_STATUS(AE_NO_MEMORY);
- }
-
- obj_desc->package.count = element_count;
-
- /*
- * Initialize the elements of the package, up to the num_elements count.
- * Package is automatically padded with uninitialized (NULL) elements
- * if num_elements is greater than the package list length. Likewise,
- * Package is truncated if num_elements is less than the list length.
- */
- arg = op->common.value.arg;
- arg = arg->common.next;
- for (i = 0; arg && (i < element_count); i++) {
- if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
- if (arg->common.node->type == ACPI_TYPE_METHOD) {
- /*
- * A method reference "looks" to the parser to be a method
- * invocation, so we special case it here
- */
- arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
- status =
- acpi_ds_build_internal_object(walk_state,
- arg,
- &obj_desc->
- package.
- elements[i]);
- } else {
- /* This package element is already built, just get it */
-
- obj_desc->package.elements[i] =
- ACPI_CAST_PTR(union acpi_operand_object,
- arg->common.node);
- }
- } else {
- status =
- acpi_ds_build_internal_object(walk_state, arg,
- &obj_desc->package.
- elements[i]);
- }
-
- if (*obj_desc_ptr) {
-
- /* Existing package, get existing reference count */
-
- reference_count =
- (*obj_desc_ptr)->common.reference_count;
- if (reference_count > 1) {
-
- /* Make new element ref count match original ref count */
-
- for (index = 0; index < (reference_count - 1);
- index++) {
- acpi_ut_add_reference((obj_desc->
- package.
- elements[i]));
- }
- }
- }
-
- arg = arg->common.next;
- }
-
- /* Check for match between num_elements and actual length of package_list */
-
- if (arg) {
- /*
- * num_elements was exhausted, but there are remaining elements in the
- * package_list. Truncate the package to num_elements.
- *
- * Note: technically, this is an error, from ACPI spec: "It is an error
- * for NumElements to be less than the number of elements in the
- * PackageList". However, we just print a message and
- * no exception is returned. This provides Windows compatibility. Some
- * BIOSs will alter the num_elements on the fly, creating this type
- * of ill-formed package object.
- */
- while (arg) {
- /*
- * We must delete any package elements that were created earlier
- * and are not going to be used because of the package truncation.
- */
- if (arg->common.node) {
- acpi_ut_remove_reference(ACPI_CAST_PTR
- (union
- acpi_operand_object,
- arg->common.node));
- arg->common.node = NULL;
- }
-
- /* Find out how many elements there really are */
-
- i++;
- arg = arg->common.next;
- }
-
- ACPI_INFO(("Actual Package length (%u) is larger than "
- "NumElements field (%u), truncated",
- i, element_count));
- } else if (i < element_count) {
- /*
- * Arg list (elements) was exhausted, but we did not reach num_elements count.
- * Note: this is not an error, the package is padded out with NULLs.
- */
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Package List length (%u) smaller than NumElements "
- "count (%u), padded with null elements\n",
- i, element_count));
- }
-
- obj_desc->package.flags |= AOPOBJ_DATA_VALID;
- op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
- return_ACPI_STATUS(status);
-}
-
-/*******************************************************************************
- *
* FUNCTION: acpi_ds_create_node
*
* PARAMETERS: walk_state - Current walk state
@@ -662,11 +387,20 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
case ACPI_TYPE_PACKAGE:
/*
- * Defer evaluation of Package term_arg operand
+ * Defer evaluation of Package term_arg operand and all
+ * package elements. (01/2017): We defer the element
+ * resolution to allow forward references from the package
+ * in order to provide compatibility with other ACPI
+ * implementations.
*/
obj_desc->package.node =
ACPI_CAST_PTR(struct acpi_namespace_node,
walk_state->operands[0]);
+
+ if (!op->named.data) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
obj_desc->package.aml_start = op->named.data;
obj_desc->package.aml_length = op->named.length;
break;
@@ -818,9 +552,11 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
/* Node was saved in Op */
obj_desc->reference.node = op->common.node;
- obj_desc->reference.object =
- op->common.node->object;
obj_desc->reference.class = ACPI_REFCLASS_NAME;
+ if (op->common.node) {
+ obj_desc->reference.object =
+ op->common.node->object;
+ }
break;
case AML_DEBUG_OP:
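Aside: the net effect of the rewritten hunk above is that a named package element which is not resolved at table-load time is kept as a reference object carrying its prefix scope node and the raw AML namepath, with resolved = FALSE, so it can be looked up after all tables are in the namespace. A toy two-phase sketch of that idea follows; every name in it is a stand-in, not the real union acpi_operand_object.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for reference.node, reference.aml and reference.resolved. */
struct deferred_ref {
	const char *scope;	/* prefix node recorded at load time */
	const char *namepath;	/* raw AML namepath, looked up later */
	bool resolved;
};

/* Phase 1 (table load): do not resolve, just remember where to look. */
static struct deferred_ref defer(const char *scope, const char *namepath)
{
	return (struct deferred_ref){ scope, namepath, false };
}

/* Phase 2 (after all tables are loaded): perform the lookup. */
static void resolve(struct deferred_ref *ref)
{
	printf("looking up %s relative to %s\n", ref->namepath, ref->scope);
	ref->resolved = true;
}

int main(void)
{
	struct deferred_ref ref = defer("\\_SB.PCI0", "FWRD"); /* forward ref */

	resolve(&ref);
	return 0;
}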
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index dfc3c25a083d..0336df7ac47d 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -599,6 +599,15 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
*/
walk_state->operand_index = walk_state->num_operands;
+ /* Ignore if child is not valid */
+
+ if (!op->common.value.arg) {
+ ACPI_ERROR((AE_INFO,
+ "Dispatch: Missing child while executing TermArg for %X",
+ op->common.aml_opcode));
+ return_ACPI_STATUS(AE_OK);
+ }
+
status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/dspkginit.c b/drivers/acpi/acpica/dspkginit.c
new file mode 100644
index 000000000000..6d487edfe2de
--- /dev/null
+++ b/drivers/acpi/acpica/dspkginit.c
@@ -0,0 +1,496 @@
+/******************************************************************************
+ *
+ * Module Name: dspkginit - Completion of deferred package initialization
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2017, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+#include "amlcode.h"
+#include "acdispat.h"
+#include "acinterp.h"
+
+#define _COMPONENT ACPI_NAMESPACE
+ACPI_MODULE_NAME("dspkginit")
+
+/* Local prototypes */
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_build_internal_package_obj
+ *
+ * PARAMETERS: walk_state - Current walk state
+ * op - Parser object to be translated
+ * element_count - Number of elements in the package - this is
+ * the num_elements argument to Package()
+ * obj_desc_ptr - Where the ACPI internal object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Translate a parser Op package object to the equivalent
+ * namespace object
+ *
+ * NOTE: The number of elements in the package will always be the num_elements
+ * count, regardless of the number of elements in the package list. If
+ * num_elements is smaller, only that many package list elements are used.
+ *       If num_elements is larger, the Package object is padded out with
+ * objects of type Uninitialized (as per ACPI spec.)
+ *
+ * Even though the ASL compilers do not allow num_elements to be smaller
+ * than the Package list length (for the fixed length package opcode), some
+ * BIOS code modifies the AML on the fly to adjust the num_elements, and
+ * this code compensates for that. This also provides compatibility with
+ * other AML interpreters.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
+ union acpi_parse_object *op,
+ u32 element_count,
+ union acpi_operand_object **obj_desc_ptr)
+{
+ union acpi_parse_object *arg;
+ union acpi_parse_object *parent;
+ union acpi_operand_object *obj_desc = NULL;
+ acpi_status status = AE_OK;
+ u16 reference_count;
+ u32 index;
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
+
+ /* Find the parent of a possibly nested package */
+
+ parent = op->common.parent;
+ while ((parent->common.aml_opcode == AML_PACKAGE_OP) ||
+ (parent->common.aml_opcode == AML_VARIABLE_PACKAGE_OP)) {
+ parent = parent->common.parent;
+ }
+
+ /*
+ * If we are evaluating a Named package object of the form:
+ * Name (xxxx, Package)
+ * the package object already exists, otherwise it must be created.
+ */
+ obj_desc = *obj_desc_ptr;
+ if (!obj_desc) {
+ obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_PACKAGE);
+ *obj_desc_ptr = obj_desc;
+ if (!obj_desc) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.node = parent->common.node;
+ }
+
+ if (obj_desc->package.flags & AOPOBJ_DATA_VALID) { /* Just in case */
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /*
+ * Allocate the element array (array of pointers to the individual
+ * objects) based on the num_elements parameter. Add an extra pointer slot
+ * so that the list is always null terminated.
+ */
+ obj_desc->package.elements = ACPI_ALLOCATE_ZEROED(((acpi_size)
+ element_count +
+ 1) * sizeof(void *));
+
+ if (!obj_desc->package.elements) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ obj_desc->package.count = element_count;
+ arg = op->common.value.arg;
+ arg = arg->common.next;
+
+ if (arg) {
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ /*
+ * Initialize the elements of the package, up to the num_elements count.
+ * Package is automatically padded with uninitialized (NULL) elements
+ * if num_elements is greater than the package list length. Likewise,
+ * Package is truncated if num_elements is less than the list length.
+ */
+ for (i = 0; arg && (i < element_count); i++) {
+ if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
+ if (arg->common.node->type == ACPI_TYPE_METHOD) {
+ /*
+ * A method reference "looks" to the parser to be a method
+ * invocation, so we special case it here
+ */
+ arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+ status =
+ acpi_ds_build_internal_object(walk_state,
+ arg,
+ &obj_desc->
+ package.
+ elements[i]);
+ } else {
+ /* This package element is already built, just get it */
+
+ obj_desc->package.elements[i] =
+ ACPI_CAST_PTR(union acpi_operand_object,
+ arg->common.node);
+ }
+ } else {
+ status =
+ acpi_ds_build_internal_object(walk_state, arg,
+ &obj_desc->package.
+ elements[i]);
+ if (status == AE_NOT_FOUND) {
+ ACPI_ERROR((AE_INFO, "%-48s",
+ "****DS namepath not found"));
+ }
+
+ /*
+ * Initialize this package element. This function handles the
+ * resolution of named references within the package.
+ */
+ acpi_ds_init_package_element(0,
+ obj_desc->package.
+ elements[i], NULL,
+ &obj_desc->package.
+ elements[i]);
+ }
+
+ if (*obj_desc_ptr) {
+
+ /* Existing package, get existing reference count */
+
+ reference_count =
+ (*obj_desc_ptr)->common.reference_count;
+ if (reference_count > 1) {
+
+ /* Make new element ref count match original ref count */
+ /* TBD: Probably need an acpi_ut_add_references function */
+
+ for (index = 0;
+ index < ((u32)reference_count - 1);
+ index++) {
+ acpi_ut_add_reference((obj_desc->
+ package.
+ elements[i]));
+ }
+ }
+ }
+
+ arg = arg->common.next;
+ }
+
+ /* Check for match between num_elements and actual length of package_list */
+
+ if (arg) {
+ /*
+ * num_elements was exhausted, but there are remaining elements in
+ * the package_list. Truncate the package to num_elements.
+ *
+ * Note: technically, this is an error, from ACPI spec: "It is an
+ * error for NumElements to be less than the number of elements in
+ * the PackageList". However, we just print a message and no
+ * exception is returned. This provides compatibility with other
+ * ACPI implementations. Some firmware implementations will alter
+ * the num_elements on the fly, possibly creating this type of
+ * ill-formed package object.
+ */
+ while (arg) {
+ /*
+ * We must delete any package elements that were created earlier
+ * and are not going to be used because of the package truncation.
+ */
+ if (arg->common.node) {
+ acpi_ut_remove_reference(ACPI_CAST_PTR
+ (union
+ acpi_operand_object,
+ arg->common.node));
+ arg->common.node = NULL;
+ }
+
+ /* Find out how many elements there really are */
+
+ i++;
+ arg = arg->common.next;
+ }
+
+ ACPI_INFO(("Actual Package length (%u) is larger than "
+ "NumElements field (%u), truncated",
+ i, element_count));
+ } else if (i < element_count) {
+ /*
+ * Arg list (elements) was exhausted, but we did not reach
+ * num_elements count.
+ *
+ * Note: this is not an error, the package is padded out
+ * with NULLs.
+ */
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Package List length (%u) smaller than NumElements "
+ "count (%u), padded with null elements\n",
+ i, element_count));
+ }
+
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node, obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_init_package_element
+ *
+ * PARAMETERS: acpi_pkg_callback
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Resolve a named reference element within a package object
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_init_package_element(u8 object_type,
+ union acpi_operand_object *source_object,
+ union acpi_generic_state *state, void *context)
+{
+ union acpi_operand_object **element_ptr;
+
+ if (!source_object) {
+ return (AE_OK);
+ }
+
+ /*
+ * The following code is a bit of a hack to work around a (current)
+ * limitation of the acpi_pkg_callback interface. We need a pointer
+ * to the location within the element array because a new object
+ * may be created and stored there.
+ */
+ if (context) {
+
+ /* A direct call was made to this function */
+
+ element_ptr = (union acpi_operand_object **)context;
+ } else {
+ /* Call came from acpi_ut_walk_package_tree */
+
+ element_ptr = state->pkg.this_target_obj;
+ }
+
+ /* We are only interested in reference objects/elements */
+
+ if (source_object->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
+
+ /* Attempt to resolve the (named) reference to a namespace node */
+
+ acpi_ds_resolve_package_element(element_ptr);
+ } else if (source_object->common.type == ACPI_TYPE_PACKAGE) {
+ source_object->package.flags |= AOPOBJ_DATA_VALID;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ds_resolve_package_element
+ *
+ * PARAMETERS: element_ptr - Pointer to a reference object
+ *
+ * RETURN: Possible new element is stored to the indirect element_ptr
+ *
+ * DESCRIPTION: Resolve a package element that is a reference to a named
+ * object.
+ *
+ ******************************************************************************/
+
+static void
+acpi_ds_resolve_package_element(union acpi_operand_object **element_ptr)
+{
+ acpi_status status;
+ union acpi_generic_state scope_info;
+ union acpi_operand_object *element = *element_ptr;
+ struct acpi_namespace_node *resolved_node;
+ char *external_path = NULL;
+ acpi_object_type type;
+
+ ACPI_FUNCTION_TRACE(ds_resolve_package_element);
+
+ /* Check if reference element is already resolved */
+
+ if (element->reference.resolved) {
+ return_VOID;
+ }
+
+ /* Element must be a reference object of correct type */
+
+ scope_info.scope.node = element->reference.node; /* Prefix node */
+
+ status = acpi_ns_lookup(&scope_info, (char *)element->reference.aml, /* Pointer to AML path */
+ ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+ ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+ NULL, &resolved_node);
+ if (ACPI_FAILURE(status)) {
+ status = acpi_ns_externalize_name(ACPI_UINT32_MAX,
+ (char *)element->reference.
+ aml, NULL, &external_path);
+
+ ACPI_EXCEPTION((AE_INFO, status,
+ "Could not find/resolve named package element: %s",
+ external_path));
+
+ ACPI_FREE(external_path);
+ *element_ptr = NULL;
+ return_VOID;
+ } else if (resolved_node->type == ACPI_TYPE_ANY) {
+
+ /* Named reference not resolved, return a NULL package element */
+
+ ACPI_ERROR((AE_INFO,
+ "Could not resolve named package element [%4.4s] in [%4.4s]",
+ resolved_node->name.ascii,
+ scope_info.scope.node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#if 0
+ else if (resolved_node->flags & ANOBJ_TEMPORARY) {
+ /*
+ * A temporary node found here indicates that the reference is
+ * to a node that was created within this method. We are not
+ * going to allow it (especially if the package is returned
+ * from the method) -- the temporary node will be deleted out
+ * from under the method. (05/2017).
+ */
+ ACPI_ERROR((AE_INFO,
+ "Package element refers to a temporary name [%4.4s], "
+ "inserting a NULL element",
+ resolved_node->name.ascii));
+ *element_ptr = NULL;
+ return_VOID;
+ }
+#endif
+
+ /*
+ * Special handling for Alias objects. We need resolved_node to point
+ * to the Alias target. This effectively "resolves" the alias.
+ */
+ if (resolved_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+ resolved_node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ resolved_node->object);
+ }
+
+ /* Update the reference object */
+
+ element->reference.resolved = TRUE;
+ element->reference.node = resolved_node;
+ type = element->reference.node->type;
+
+ /*
+ * Attempt to resolve the node to a value before we insert it into
+ * the package. If this is a reference to a common data type,
+ * resolve it immediately. According to the ACPI spec, package
+ * elements can only be "data objects" or method references.
+ * Attempt to resolve to an Integer, Buffer, String or Package.
+ * If cannot, return the named reference (for things like Devices,
+ * Methods, etc.) Buffer Fields and Fields will resolve to simple
+ * objects (int/buf/str/pkg).
+ *
+ * NOTE: References to things like Devices, Methods, Mutexes, etc.
+ * will remain as named references. This behavior is not described
+ * in the ACPI spec, but it appears to be an oversight.
+ */
+ status = acpi_ex_resolve_node_to_value(&resolved_node, NULL);
+ if (ACPI_FAILURE(status)) {
+ return_VOID;
+ }
+#if 0
+/* TBD - alias support */
+ /*
+ * Special handling for Alias objects. We need to setup the type
+ * and the Op->Common.Node to point to the Alias target. Note,
+ * Alias has at most one level of indirection internally.
+ */
+ type = op->common.node->type;
+ if (type == ACPI_TYPE_LOCAL_ALIAS) {
+ type = obj_desc->common.type;
+ op->common.node = ACPI_CAST_PTR(struct acpi_namespace_node,
+ op->common.node->object);
+ }
+#endif
+
+ switch (type) {
+ /*
+ * These object types are a result of named references, so we will
+ * leave them as reference objects. In other words, these types
+ * have no intrinsic "value".
+ */
+ case ACPI_TYPE_DEVICE:
+ case ACPI_TYPE_THERMAL:
+
+ /* TBD: This may not be necessary */
+
+ acpi_ut_add_reference(resolved_node->object);
+ break;
+
+ case ACPI_TYPE_MUTEX:
+ case ACPI_TYPE_METHOD:
+ case ACPI_TYPE_POWER:
+ case ACPI_TYPE_PROCESSOR:
+ case ACPI_TYPE_EVENT:
+ case ACPI_TYPE_REGION:
+
+ break;
+
+ default:
+ /*
+ * For all other types - the node was resolved to an actual
+ * operand object with a value, return the object
+ */
+ *element_ptr = (union acpi_operand_object *)resolved_node;
+ break;
+ }
+
+ return_VOID;
+}
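Aside: as the NOTE at the top of the new file explains, the declared NumElements wins over the actual initializer list: shorter lists are padded with uninitialized (NULL) elements and longer lists are truncated. The standalone sketch below illustrates that contract only; it is not the kernel allocator or the ACPICA object model.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Build a NULL-terminated element array of 'declared' slots from a list
 * of 'provided' initializers (stand-in objects), mimicking the behavior
 * described in the NOTE. */
static void **build_elements(size_t declared, size_t provided)
{
	/* one extra slot so the array is always NULL terminated */
	void **elements = calloc(declared + 1, sizeof(*elements));
	size_t i;

	if (!elements)
		return NULL;

	for (i = 0; i < declared && i < provided; i++)
		elements[i] = (void *)(uintptr_t)(i + 1);

	if (provided > declared)
		printf("list longer than NumElements: truncated to %zu\n",
		       declared);
	else if (provided < declared)
		printf("%zu trailing elements left uninitialized (NULL)\n",
		       declared - provided);
	return elements;
}

int main(void)
{
	free(build_elements(4, 2));	/* padded with two NULL elements  */
	free(build_elements(2, 4));	/* extra initializers are dropped */
	return 0;
}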
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c941947a063..3a3cb8624f41 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *ignored)
{
acpi_status status;
+ acpi_event_status event_status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
+ gpe_number = gpe_block->block_base_number + gpe_index;
/*
* Ignore GPEs that have no corresponding _Lxx/_Exx method
- * and GPEs that are used to wake the system
+ * and GPEs that are used for wakeup
*/
- if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_NONE)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_HANDLER)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_RAW_HANDLER)
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_METHOD)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
+ event_status = 0;
+ (void)acpi_hw_get_gpe_status(gpe_event_info,
+ &event_status);
+
status = acpi_ev_add_gpe_reference(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index +
- gpe_block->block_base_number));
+ gpe_number));
continue;
}
+ gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
+
+ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
+ ACPI_INFO(("GPE 0x%02X active on init",
+ gpe_number));
+ (void)acpi_ev_gpe_dispatch(gpe_block->node,
+ gpe_event_info,
+ gpe_number);
+ }
+
gpe_enabled_count++;
}
}
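Aside: besides enabling GPEs that have _Lxx/_Exx methods, the hunk above now dispatches any GPE whose status bit is already set at enable time, so an event that fired before boot-time enabling is not lost. A generic check-then-enable sketch of that pattern, with illustrative names rather than ACPICA's:

#include <stdbool.h>
#include <stdio.h>

/* Generic model of an event line with a latched status bit and an enable. */
struct event_source {
	bool status_latched;
	bool enabled;
};

static void dispatch(struct event_source *ev)
{
	printf("handling event that fired while masked\n");
	ev->status_latched = false;
}

static void enable_with_catchup(struct event_source *ev)
{
	bool was_pending = ev->status_latched;	/* sample status first */

	ev->enabled = true;		/* analogous to adding the GPE reference */
	if (was_pending)
		dispatch(ev);		/* do not lose the pre-enable event */
}

int main(void)
{
	struct event_source gpe = { .status_latched = true, .enabled = false };

	enable_with_catchup(&gpe);
	return 0;
}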
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 57718a3e029a..67c7c4ce276c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
*/
gpe_event_info->flags =
(ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
+ } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
+ /*
+ * A reference to this GPE has been added during the GPE block
+ * initialization, so drop it now to prevent the GPE from being
+ * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
+ */
+ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
+ gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
}
/*
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index d43d7da4c734..b8adb11f1b07 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -87,68 +87,40 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
target_node->object);
}
- /*
- * For objects that can never change (i.e., the NS node will
- * permanently point to the same object), we can simply attach
- * the object to the new NS node. For other objects (such as
- * Integers, buffers, etc.), we have to point the Alias node
- * to the original Node.
- */
- switch (target_node->type) {
+ /* Ensure that the target node is valid */
- /* For these types, the sub-object can change dynamically via a Store */
+ if (!target_node) {
+ return_ACPI_STATUS(AE_NULL_OBJECT);
+ }
- case ACPI_TYPE_INTEGER:
- case ACPI_TYPE_STRING:
- case ACPI_TYPE_BUFFER:
- case ACPI_TYPE_PACKAGE:
- case ACPI_TYPE_BUFFER_FIELD:
- /*
- * These types open a new scope, so we need the NS node in order to access
- * any children.
- */
- case ACPI_TYPE_DEVICE:
- case ACPI_TYPE_POWER:
- case ACPI_TYPE_PROCESSOR:
- case ACPI_TYPE_THERMAL:
- case ACPI_TYPE_LOCAL_SCOPE:
- /*
- * The new alias has the type ALIAS and points to the original
- * NS node, not the object itself.
- */
- alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
- break;
+ /* Construct the alias object (a namespace node) */
+ switch (target_node->type) {
case ACPI_TYPE_METHOD:
/*
- * Control method aliases need to be differentiated
+ * Control method aliases need to be differentiated with
+ * a special type
*/
alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
- alias_node->object =
- ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
default:
-
- /* Attach the original source object to the new Alias Node */
-
/*
- * The new alias assumes the type of the target, and it points
- * to the same object. The reference count of the object has an
- * additional reference to prevent deletion out from under either the
- * target node or the alias Node
+ * All other object types.
+ *
+ * The new alias has the type ALIAS and points to the original
+ * NS node, not the object itself.
*/
- status = acpi_ns_attach_object(alias_node,
- acpi_ns_get_attached_object
- (target_node),
- target_node->type);
+ alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
break;
}
/* Since both operands are Nodes, we don't need to delete them */
+ alias_node->object =
+ ACPI_CAST_PTR(union acpi_operand_object, target_node);
return_ACPI_STATUS(status);
}
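Aside: after this change an alias (other than a method alias) is simply a namespace node whose object pointer refers to the target node, with at most one level of indirection. A minimal sketch of how a consumer chases that link; the field names are stand-ins, not the real struct acpi_namespace_node.

#include <stdio.h>

/* Illustrative alias node: 'object' points at the target node, never at
 * another alias, so one dereference is always enough. */
struct node {
	const char *name;
	int type;			/* 0 = plain object, 1 = alias */
	struct node *object;		/* alias target when type == 1 */
};

static struct node *deref_alias(struct node *n)
{
	return (n->type == 1) ? n->object : n;
}

int main(void)
{
	struct node target = { "TGT0", 0, NULL };
	struct node alias  = { "ALI0", 1, &target };

	printf("%s -> %s\n", alias.name, deref_alias(&alias)->name);
	return 0;
}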
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 44092f744477..83398dc4b7c2 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -102,7 +102,7 @@ static struct acpi_exdump_info acpi_ex_dump_package[6] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_package), NULL},
{ACPI_EXD_NODE, ACPI_EXD_OFFSET(package.node), "Parent Node"},
{ACPI_EXD_UINT8, ACPI_EXD_OFFSET(package.flags), "Flags"},
- {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Elements"},
+ {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(package.count), "Element Count"},
{ACPI_EXD_POINTER, ACPI_EXD_OFFSET(package.elements), "Element List"},
{ACPI_EXD_PACKAGE, 0, NULL}
};
@@ -384,6 +384,10 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
count = info->offset;
while (count) {
+ if (!obj_desc) {
+ return;
+ }
+
target = ACPI_ADD_PTR(u8, obj_desc, info->offset);
name = info->name;
@@ -469,9 +473,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
start = *ACPI_CAST_PTR(void *, target);
next = start;
- acpi_os_printf("%20s : %p", name, next);
+ acpi_os_printf("%20s : %p ", name, next);
if (next) {
- acpi_os_printf("(%s %2.2X)",
+ acpi_os_printf("%s (Type %2.2X)",
acpi_ut_get_object_type_name
(next), next->common.type);
@@ -493,6 +497,8 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
break;
}
}
+ } else {
+ acpi_os_printf("- No attached objects");
}
acpi_os_printf("\n");
@@ -1129,7 +1135,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
default:
- acpi_os_printf("[Unknown Type] %X\n", obj_desc->common.type);
+ acpi_os_printf("[%s] Type: %2.2X\n",
+ acpi_ut_get_type_name(obj_desc->common.type),
+ obj_desc->common.type);
break;
}
}
@@ -1167,11 +1175,17 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
acpi_ex_dump_namespace_node((struct acpi_namespace_node *)
obj_desc, flags);
- acpi_os_printf("\nAttached Object (%p):\n",
- ((struct acpi_namespace_node *)obj_desc)->
- object);
-
obj_desc = ((struct acpi_namespace_node *)obj_desc)->object;
+ if (!obj_desc) {
+ return_VOID;
+ }
+
+ acpi_os_printf("\nAttached Object %p", obj_desc);
+ if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) == ACPI_DESC_TYPE_NAMED) {
+ acpi_os_printf(" - Namespace Node");
+ }
+
+ acpi_os_printf(":\n");
goto dump_object;
}
@@ -1191,6 +1205,10 @@ acpi_ex_dump_object_descriptor(union acpi_operand_object *obj_desc, u32 flags)
dump_object:
+ if (!obj_desc) {
+ return_VOID;
+ }
+
/* Common Fields */
acpi_ex_dump_object(obj_desc, acpi_ex_dump_common);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index f222a80ca38e..1e7649ce0a7b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -265,6 +265,8 @@ acpi_ex_do_logical_numeric_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid numeric logical opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -345,6 +347,9 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type for logical operator: %X",
+ operand0->common.type));
status = AE_AML_INTERNAL;
break;
}
@@ -388,6 +393,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
@@ -456,6 +463,8 @@ acpi_ex_do_logical_op(u16 opcode,
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid comparison opcode: %X", opcode));
status = AE_AML_INTERNAL;
break;
}
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index eecb3bff7fd7..57980b7d3594 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -414,6 +414,9 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
default:
+ ACPI_ERROR((AE_INFO,
+ "Invalid object type: %X",
+ (operand[0])->common.type));
status = AE_AML_INTERNAL;
goto cleanup;
}
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index de74a4c25085..acb417b58bbb 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -107,7 +107,7 @@ acpi_hw_get_access_bit_width(u64 address,
ACPI_IS_ALIGNED(reg->bit_width, 8)) {
access_bit_width = reg->bit_width;
} else if (reg->access_width) {
- access_bit_width = (1 << (reg->access_width + 2));
+ access_bit_width = ACPI_ACCESS_BIT_WIDTH(reg->access_width);
} else {
access_bit_width =
ACPI_ROUND_UP_POWER_OF_TWO_8(reg->bit_offset +
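Aside: the GAS access_width field encodes the access size as 1 = byte, 2 = word, 3 = dword, 4 = qword, so the old open-coded 1 << (access_width + 2) and the new macro compute the same bit widths. The macro definition shown below is an assumption for illustration, not quoted from the patch.

#include <stdio.h>

/* Assumed to match ACPI_ACCESS_BIT_WIDTH(); shown only to make the
 * encoding visible: 1 -> 8 bits, 2 -> 16, 3 -> 32, 4 -> 64. */
#define ACCESS_BIT_WIDTH(size)	(1u << ((size) + 2))

int main(void)
{
	for (unsigned int size = 1; size <= 4; size++)
		printf("access_width %u -> %u bits\n",
		       size, ACCESS_BIT_WIDTH(size));
	return 0;
}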
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 7ef13934968f..e5c095ca6083 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -72,13 +72,16 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id);
static struct acpi_sleep_functions acpi_sleep_dispatch[] = {
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_sleep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_sleep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_sleep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake_prep)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake_prep) },
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake_prep)},
{ACPI_STRUCT_INIT(legacy_function,
ACPI_HW_OPTIONAL_FUNCTION(acpi_hw_legacy_wake)),
- ACPI_STRUCT_INIT(extended_function, acpi_hw_extended_wake) }
+ ACPI_STRUCT_INIT(extended_function,
+ acpi_hw_extended_wake)}
};
/*
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index e5f4fa496572..f2733f51ca8d 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -292,6 +292,7 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
{
acpi_status status;
char *path = pathname;
+ char *external_path;
struct acpi_namespace_node *prefix_node;
struct acpi_namespace_node *current_node = NULL;
struct acpi_namespace_node *this_node = NULL;
@@ -427,13 +428,22 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
num_carats++;
this_node = this_node->parent;
if (!this_node) {
+ /*
+ * Current scope has no parent scope. Externalize
+ * the internal path for error message.
+ */
+ status =
+ acpi_ns_externalize_name
+ (ACPI_UINT32_MAX, pathname, NULL,
+ &external_path);
+ if (ACPI_SUCCESS(status)) {
+ ACPI_ERROR((AE_INFO,
+ "%s: Path has too many parent prefixes (^)",
+ external_path));
+
+ ACPI_FREE(external_path);
+ }
- /* Current scope has no parent scope */
-
- ACPI_ERROR((AE_INFO,
- "%s: Path has too many parent prefixes (^) "
- "- reached beyond root node",
- pathname));
return_ACPI_STATUS(AE_NOT_FOUND);
}
}
@@ -634,6 +644,12 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
this_node->object;
}
}
+#ifdef ACPI_ASL_COMPILER
+ if (!acpi_gbl_disasm_flag &&
+ (this_node->flags & ANOBJ_IS_EXTERNAL)) {
+ this_node->flags |= IMPLICIT_EXTERNAL;
+ }
+#endif
}
/* Special handling for the last segment (num_segments == 0) */
diff --git a/drivers/acpi/acpica/nsarguments.c b/drivers/acpi/acpica/nsarguments.c
index 9095d51f6b37..67b7370dcae5 100644
--- a/drivers/acpi/acpica/nsarguments.c
+++ b/drivers/acpi/acpica/nsarguments.c
@@ -69,9 +69,14 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
u8 user_arg_type;
u32 i;
- /* If not a predefined name, cannot typecheck args */
-
- if (!info->predefined) {
+ /*
+ * If not a predefined name, cannot typecheck args, because
+ * we have no idea what argument types are expected.
+ * Also, ignore typecheck warnings/errors if this method
+ * has already been evaluated at least once -- in order
+ * to suppress repetitive messages.
+ */
+ if (!info->predefined || (info->node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -93,6 +98,10 @@ void acpi_ns_check_argument_types(struct acpi_evaluate_info *info)
acpi_ut_get_type_name
(user_arg_type),
acpi_ut_get_type_name(arg_type)));
+
+ /* Prevent any additional typechecking for this method */
+
+ info->node->flags |= ANOBJ_EVALUATED;
}
}
}
@@ -121,7 +130,7 @@ acpi_ns_check_acpi_compliance(char *pathname,
u32 aml_param_count;
u32 required_param_count;
- if (!predefined) {
+ if (!predefined || (node->flags & ANOBJ_EVALUATED)) {
return;
}
@@ -215,6 +224,10 @@ acpi_ns_check_argument_count(char *pathname,
u32 aml_param_count;
u32 required_param_count;
+ if (node->flags & ANOBJ_EVALUATED) {
+ return;
+ }
+
if (!predefined) {
/*
* Not a predefined name. Check the incoming user argument count
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index ce33e7297ea7..9c6297949712 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -396,6 +396,20 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
info->package_init++;
status = acpi_ds_get_package_arguments(obj_desc);
+ if (ACPI_FAILURE(status)) {
+ break;
+ }
+
+ /*
+ * Resolve all named references in package objects (and all
+ * sub-packages). This action has been deferred until the entire
+ * namespace has been loaded, in order to support external and
+ * forward references from individual package elements (05/2017).
+ */
+ status = acpi_ut_walk_package_tree(obj_desc, NULL,
+ acpi_ds_init_package_element,
+ NULL);
+ obj_desc->package.flags |= AOPOBJ_DATA_VALID;
break;
default:
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index aa16aeaa8937..a410760a0308 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -89,7 +89,14 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
{
acpi_size size;
- ACPI_FUNCTION_ENTRY();
+ /* Validate the Node */
+
+ if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/cached reference target node: %p, descriptor type %d",
+ node, ACPI_GET_DESCRIPTOR_TYPE(node)));
+ return (0);
+ }
size = acpi_ns_build_normalized_path(node, NULL, 0, FALSE);
return (size);
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c
index 4954cb6c9090..a8ea8fb1d299 100644
--- a/drivers/acpi/acpica/nsprepkg.c
+++ b/drivers/acpi/acpica/nsprepkg.c
@@ -614,6 +614,8 @@ acpi_ns_check_package_list(struct acpi_evaluate_info *info,
default: /* Should not get here, type was validated by caller */
+ ACPI_ERROR((AE_INFO, "Invalid Package type: %X",
+ package->ret_info.type));
return (AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index 538c61677c10..783f4c838aee 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -100,9 +100,13 @@ acpi_evaluate_object_typed(acpi_handle handle,
free_buffer_on_error = TRUE;
}
- status = acpi_get_handle(handle, pathname, &target_handle);
- if (ACPI_FAILURE(status)) {
- return_ACPI_STATUS(status);
+ if (pathname) {
+ status = acpi_get_handle(handle, pathname, &target_handle);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ } else {
+ target_handle = handle;
}
full_pathname = acpi_ns_get_external_pathname(target_handle);
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index b4224005783c..bb04dec168ad 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -164,6 +164,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
INCREMENT_ARG_LIST(walk_state->arg_types);
}
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Final argument count: %u pass %u\n",
+ walk_state->arg_count,
+ walk_state->pass_number));
+
/*
* Handle executable code at "module-level". This refers to
* executable opcodes that appear outside of any control method.
@@ -277,6 +282,11 @@ acpi_ps_get_arguments(struct acpi_walk_state *walk_state,
AML_NAME_OP)
&& (walk_state->pass_number <=
ACPI_IMODE_LOAD_PASS2)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Setup Package/Buffer: Pass %u, AML Ptr: %p\n",
+ walk_state->pass_number,
+ aml_op_start));
+
/*
* Skip parsing of Buffers and Packages because we don't have
* enough info in the first pass to parse them correctly.
@@ -570,6 +580,10 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
/* Check for arguments that need to be processed */
+ ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
+ "Parseloop: argument count: %u\n",
+ walk_state->arg_count));
+
if (walk_state->arg_count) {
/*
* There are arguments (complex ones), push Op and
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index ef6384e374fc..0bef6df71bba 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -359,6 +359,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
acpi_ps_build_named_op(walk_state, aml_op_start, op,
&named_op);
acpi_ps_free_op(op);
+
+#ifdef ACPI_ASL_COMPILER
+ if (acpi_gbl_disasm_flag
+ && walk_state->opcode == AML_EXTERNAL_OP
+ && status == AE_NOT_FOUND) {
+ /*
+ * If parsing of AML_EXTERNAL_OP's name path fails, then skip
+ * past this opcode and keep parsing. This is a much better
+ * alternative than to abort the entire disassembler. At this
+ * point, the parser_state is at the end of the namepath of the
+ * external declaration opcode. Setting walk_state->Aml to
+ * walk_state->parser_state.Aml + 2 moves increments the
+			 * walk_state->parser_state.Aml + 2 increments the
+ * external opcode. For the error message, only print the AML
+ * offset. We could attempt to print the name but this may cause
+ * a segmentation fault when printing the namepath because the
+ * AML may be incorrect.
+ */
+ acpi_os_printf
+ ("// Invalid external declaration at AML offset 0x%x.\n",
+ walk_state->aml -
+ walk_state->parser_state.aml_start);
+ walk_state->aml = walk_state->parser_state.aml + 2;
+ return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
+ }
+#endif
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index 59a4f9ed06a7..be65e65e216e 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -615,7 +615,7 @@ ACPI_EXPORT_SYMBOL(acpi_walk_resource_buffer)
* device we are querying
* name - Method name of the resources we want.
* (METHOD_NAME__CRS, METHOD_NAME__PRS, or
- * METHOD_NAME__AEI)
+ * METHOD_NAME__AEI or METHOD_NAME__DMA)
* user_function - Called for each resource
* context - Passed to user_function
*
@@ -641,11 +641,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
!ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__DMA))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS/_PRS/_AEI resource list */
+ /* Get the _CRS/_PRS/_AEI/_DMA resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index c9d6fa6d7cc6..b19a2f0ea331 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -50,6 +50,57 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbdata")
+/* Local prototypes */
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index);
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_compare_tables
+ *
+ * PARAMETERS: table_desc - Table 1 descriptor to be compared
+ * table_index - Index of table 2 to be compared
+ *
+ * RETURN: TRUE if both tables are identical.
+ *
+ * DESCRIPTION: This function compares a table with another table that has
+ * already been installed in the root table list.
+ *
+ ******************************************************************************/
+
+static u8
+acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
+{
+ acpi_status status = AE_OK;
+ u8 is_identical;
+ struct acpi_table_header *table;
+ u32 table_length;
+ u8 table_flags;
+
+ status =
+ acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
+ &table, &table_length, &table_flags);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ is_identical = (u8)((table_desc->length != table_length ||
+ memcmp(table_desc->pointer, table, table_length)) ?
+ FALSE : TRUE);
+
+ /* Release the acquired table */
+
+ acpi_tb_release_table(table, table_length, table_flags);
+ return (is_identical);
+}
+
/*******************************************************************************
*
* FUNCTION: acpi_tb_init_table_descriptor
@@ -64,6 +115,7 @@ ACPI_MODULE_NAME("tbdata")
* DESCRIPTION: Initialize a new table descriptor
*
******************************************************************************/
+
void
acpi_tb_init_table_descriptor(struct acpi_table_desc *table_desc,
acpi_physical_address address,
@@ -338,7 +390,7 @@ void acpi_tb_invalidate_table(struct acpi_table_desc *table_desc)
acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
{
- if (!table_desc->pointer && !acpi_gbl_verify_table_checksum) {
+ if (!table_desc->pointer && !acpi_gbl_enable_table_validation) {
/*
* Only validates the header of the table.
* Note that Length contains the size of the mapping after invoking
@@ -354,22 +406,100 @@ acpi_status acpi_tb_validate_temp_table(struct acpi_table_desc *table_desc)
return (acpi_tb_validate_table(table_desc));
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_check_duplication
+ *
+ * PARAMETERS: table_desc - Table descriptor
+ * table_index - Where the table index is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Avoid installing duplicated tables. However, table override and
+ *              user-aided dynamic table loads are allowed, so comparing the
+ *              address of the table is not sufficient; the entire
+ *              table content must be checked.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_tb_check_duplication(struct acpi_table_desc *table_desc, u32 *table_index)
+{
+ u32 i;
+
+ ACPI_FUNCTION_TRACE(tb_check_duplication);
+
+ /* Check if table is already registered */
+
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
+
+ /* Do not compare with unverified tables */
+
+ if (!
+ (acpi_gbl_root_table_list.tables[i].
+ flags & ACPI_TABLE_IS_VERIFIED)) {
+ continue;
+ }
+
+ /*
+ * Check for a table match on the entire table length,
+ * not just the header.
+ */
+ if (!acpi_tb_compare_tables(table_desc, i)) {
+ continue;
+ }
+
+ /*
+ * Note: the current mechanism does not unregister a table if it is
+ * dynamically unloaded. The related namespace entries are deleted,
+ * but the table remains in the root table list.
+ *
+ * The assumption here is that the number of different tables that
+ * will be loaded is actually small, and there is minimal overhead
+ * in just keeping the table in case it is needed again.
+ *
+ * If this assumption changes in the future (perhaps on large
+ * machines with many table load/unload operations), tables will
+ * need to be unregistered when they are unloaded, and slots in the
+ * root table list should be reused when empty.
+ */
+ if (acpi_gbl_root_table_list.tables[i].flags &
+ ACPI_TABLE_IS_LOADED) {
+
+ /* Table is still loaded, this is an error */
+
+ return_ACPI_STATUS(AE_ALREADY_EXISTS);
+ } else {
+ *table_index = i;
+ return_ACPI_STATUS(AE_CTRL_TERMINATE);
+ }
+ }
+
+ /* Indicate no duplication to the caller */
+
+ return_ACPI_STATUS(AE_OK);
+}
+
/******************************************************************************
*
* FUNCTION: acpi_tb_verify_temp_table
*
* PARAMETERS: table_desc - Table descriptor
* signature - Table signature to verify
+ * table_index - Where the table index is returned
*
* RETURN: Status
*
* DESCRIPTION: This function is called to validate and verify the table, the
* returned table descriptor is in "VALIDATED" state.
+ *              Note that 'TableIndex' must be non-NULL to enable the
+ *              duplication check.
*
*****************************************************************************/
acpi_status
-acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
+acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc,
+ char *signature, u32 *table_index)
{
acpi_status status = AE_OK;
@@ -392,9 +522,10 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
- /* Verify the checksum */
+ if (acpi_gbl_enable_table_validation) {
+
+ /* Verify the checksum */
- if (acpi_gbl_verify_table_checksum) {
status =
acpi_tb_verify_checksum(table_desc->pointer,
table_desc->length);
@@ -411,9 +542,34 @@ acpi_tb_verify_temp_table(struct acpi_table_desc *table_desc, char *signature)
goto invalidate_and_exit;
}
+
+ /* Avoid duplications */
+
+ if (table_index) {
+ status =
+ acpi_tb_check_duplication(table_desc, table_index);
+ if (ACPI_FAILURE(status)) {
+ if (status != AE_CTRL_TERMINATE) {
+ ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
+ "%4.4s 0x%8.8X%8.8X"
+ " Table is duplicated",
+ acpi_ut_valid_nameseg
+ (table_desc->signature.
+ ascii) ? table_desc->
+ signature.
+ ascii : "????",
+ ACPI_FORMAT_UINT64
+ (table_desc->address)));
+ }
+
+ goto invalidate_and_exit;
+ }
+ }
+
+ table_desc->flags |= ACPI_TABLE_IS_VERIFIED;
}
- return_ACPI_STATUS(AE_OK);
+ return_ACPI_STATUS(status);
invalidate_and_exit:
acpi_tb_invalidate_table(table_desc);
@@ -436,6 +592,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
{
struct acpi_table_desc *tables;
u32 table_count;
+ u32 current_table_count, max_table_count;
+ u32 i;
ACPI_FUNCTION_TRACE(tb_resize_root_table_list);
@@ -455,8 +613,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
table_count = acpi_gbl_root_table_list.current_table_count;
}
- tables = ACPI_ALLOCATE_ZEROED(((acpi_size)table_count +
- ACPI_ROOT_TABLE_SIZE_INCREMENT) *
+ max_table_count = table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ tables = ACPI_ALLOCATE_ZEROED(((acpi_size)max_table_count) *
sizeof(struct acpi_table_desc));
if (!tables) {
ACPI_ERROR((AE_INFO,
@@ -466,9 +624,16 @@ acpi_status acpi_tb_resize_root_table_list(void)
/* Copy and free the previous table array */
+ current_table_count = 0;
if (acpi_gbl_root_table_list.tables) {
- memcpy(tables, acpi_gbl_root_table_list.tables,
- (acpi_size)table_count * sizeof(struct acpi_table_desc));
+ for (i = 0; i < table_count; i++) {
+ if (acpi_gbl_root_table_list.tables[i].address) {
+ memcpy(tables + current_table_count,
+ acpi_gbl_root_table_list.tables + i,
+ sizeof(struct acpi_table_desc));
+ current_table_count++;
+ }
+ }
if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -476,8 +641,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
}
acpi_gbl_root_table_list.tables = tables;
- acpi_gbl_root_table_list.max_table_count =
- table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT;
+ acpi_gbl_root_table_list.max_table_count = max_table_count;
+ acpi_gbl_root_table_list.current_table_count = current_table_count;
acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
return_ACPI_STATUS(AE_OK);
@@ -818,13 +983,9 @@ acpi_tb_load_table(u32 table_index, struct acpi_namespace_node *parent_node)
acpi_ev_update_gpes(owner_id);
}
- /* Invoke table handler if present */
-
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
- acpi_gbl_table_handler_context);
- }
+ /* Invoke table handler */
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_LOAD, table);
return_ACPI_STATUS(status);
}
@@ -894,15 +1055,11 @@ acpi_status acpi_tb_unload_table(u32 table_index)
return_ACPI_STATUS(AE_NOT_EXIST);
}
- /* Invoke table handler if present */
+ /* Invoke table handler */
- if (acpi_gbl_table_handler) {
- status = acpi_get_table_by_index(table_index, &table);
- if (ACPI_SUCCESS(status)) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
- table,
- acpi_gbl_table_handler_context);
- }
+ status = acpi_get_table_by_index(table_index, &table);
+ if (ACPI_SUCCESS(status)) {
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_UNLOAD, table);
}
/* Delete the portion of the namespace owned by this table */
@@ -918,3 +1075,26 @@ acpi_status acpi_tb_unload_table(u32 table_index)
}
ACPI_EXPORT_SYMBOL(acpi_tb_unload_table)
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_tb_notify_table
+ *
+ * PARAMETERS: event - Table event
+ * table - Validated table pointer
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Notify the registered table handler of a table event.
+ *
+ ******************************************************************************/
+
+void acpi_tb_notify_table(u32 event, void *table)
+{
+ /* Invoke table handler if present */
+
+ if (acpi_gbl_table_handler) {
+ (void)acpi_gbl_table_handler(event, table,
+ acpi_gbl_table_handler_context);
+ }
+}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 4620f3c68c13..0dfc0ac3c141 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -48,54 +48,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbinstal")
-/* Local prototypes */
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index);
-
-/*******************************************************************************
- *
- * FUNCTION: acpi_tb_compare_tables
- *
- * PARAMETERS: table_desc - Table 1 descriptor to be compared
- * table_index - Index of table 2 to be compared
- *
- * RETURN: TRUE if both tables are identical.
- *
- * DESCRIPTION: This function compares a table with another table that has
- * already been installed in the root table list.
- *
- ******************************************************************************/
-
-static u8
-acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
-{
- acpi_status status = AE_OK;
- u8 is_identical;
- struct acpi_table_header *table;
- u32 table_length;
- u8 table_flags;
-
- status =
- acpi_tb_acquire_table(&acpi_gbl_root_table_list.tables[table_index],
- &table, &table_length, &table_flags);
- if (ACPI_FAILURE(status)) {
- return (FALSE);
- }
-
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- is_identical = (u8)((table_desc->length != table_length ||
- memcmp(table_desc->pointer, table, table_length)) ?
- FALSE : TRUE);
-
- /* Release the acquired table */
-
- acpi_tb_release_table(table, table_length, table_flags);
- return (is_identical);
-}
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_install_table_with_override
@@ -112,7 +64,6 @@ acpi_tb_compare_tables(struct acpi_table_desc *table_desc, u32 table_index)
* table array.
*
******************************************************************************/
-
void
acpi_tb_install_table_with_override(struct acpi_table_desc *new_table_desc,
u8 override, u32 *table_index)
@@ -210,95 +161,29 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
- /* Validate and verify a table before installation */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
- if (ACPI_FAILURE(status)) {
- goto release_and_exit;
- }
-
/* Acquire the table lock */
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
- if (reload) {
- /*
- * Validate the incoming table signature.
- *
- * 1) Originally, we checked the table signature for "SSDT" or "PSDT".
- * 2) We added support for OEMx tables, signature "OEM".
- * 3) Valid tables were encountered with a null signature, so we just
- * gave up on validating the signature, (05/2008).
- * 4) We encountered non-AML tables such as the MADT, which caused
- * interpreter errors and kernel faults. So now, we once again allow
- * only "SSDT", "OEMx", and now, also a null signature. (05/2011).
- */
- if ((new_table_desc.signature.ascii[0] != 0x00) &&
- (!ACPI_COMPARE_NAME
- (&new_table_desc.signature, ACPI_SIG_SSDT))
- && (strncmp(new_table_desc.signature.ascii, "OEM", 3))) {
- ACPI_BIOS_ERROR((AE_INFO,
- "Table has invalid signature [%4.4s] (0x%8.8X), "
- "must be SSDT or OEMx",
- acpi_ut_valid_nameseg(new_table_desc.
- signature.
- ascii) ?
- new_table_desc.signature.
- ascii : "????",
- new_table_desc.signature.integer));
-
- status = AE_BAD_SIGNATURE;
- goto unlock_and_exit;
- }
-
- /* Check if table is already registered */
-
- for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
- ++i) {
- /*
- * Check for a table match on the entire table length,
- * not just the header.
- */
- if (!acpi_tb_compare_tables(&new_table_desc, i)) {
- continue;
- }
+ /* Validate and verify a table before installation */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, &i);
+ if (ACPI_FAILURE(status)) {
+ if (status == AE_CTRL_TERMINATE) {
/*
- * Note: the current mechanism does not unregister a table if it is
- * dynamically unloaded. The related namespace entries are deleted,
- * but the table remains in the root table list.
- *
- * The assumption here is that the number of different tables that
- * will be loaded is actually small, and there is minimal overhead
- * in just keeping the table in case it is needed again.
- *
- * If this assumption changes in the future (perhaps on large
- * machines with many table load/unload operations), tables will
- * need to be unregistered when they are unloaded, and slots in the
- * root table list should be reused when empty.
+ * Table was unloaded, allow it to be reloaded.
+ * As we are going to return AE_OK to the caller, we should
+ * take the responsibility of freeing the input descriptor.
+ * Refill the input descriptor to ensure
+ * acpi_tb_install_table_with_override() can be called again to
+ * indicate the re-installation.
*/
- if (acpi_gbl_root_table_list.tables[i].flags &
- ACPI_TABLE_IS_LOADED) {
-
- /* Table is still loaded, this is an error */
-
- status = AE_ALREADY_EXISTS;
- goto unlock_and_exit;
- } else {
- /*
- * Table was unloaded, allow it to be reloaded.
- * As we are going to return AE_OK to the caller, we should
- * take the responsibility of freeing the input descriptor.
- * Refill the input descriptor to ensure
- * acpi_tb_install_table_with_override() can be called again to
- * indicate the re-installation.
- */
- acpi_tb_uninstall_table(&new_table_desc);
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- *table_index = i;
- return_ACPI_STATUS(AE_OK);
- }
+ acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
+ *table_index = i;
+ return_ACPI_STATUS(AE_OK);
}
+ goto unlock_and_exit;
}
/* Add the table to the global root table list */
@@ -306,14 +191,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
acpi_tb_install_table_with_override(&new_table_desc, override,
table_index);
- /* Invoke table handler if present */
+ /* Invoke table handler */
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
- if (acpi_gbl_table_handler) {
- (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
- new_table_desc.pointer,
- acpi_gbl_table_handler_context);
- }
+ acpi_tb_notify_table(ACPI_TABLE_EVENT_INSTALL, new_table_desc.pointer);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
unlock_and_exit:
@@ -382,9 +263,11 @@ void acpi_tb_override_table(struct acpi_table_desc *old_table_desc)
finish_override:
- /* Validate and verify a table before overriding */
-
- status = acpi_tb_verify_temp_table(&new_table_desc, NULL);
+ /*
+ * Validate and verify a table before overriding, no nested table
+ * duplication check as it's too complicated and unnecessary.
+ */
+ status = acpi_tb_verify_temp_table(&new_table_desc, NULL, NULL);
if (ACPI_FAILURE(status)) {
return;
}
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 010b1c43df92..26ad596c973e 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -167,7 +167,8 @@ ACPI_EXPORT_SYMBOL_INIT(acpi_initialize_tables)
acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
{
acpi_status status;
- u32 i;
+ struct acpi_table_desc *table_desc;
+ u32 i, j;
ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
@@ -179,6 +180,8 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
return_ACPI_STATUS(AE_SUPPORT);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
/*
* Ensure OS early boot logic, which is required by some hosts. If the
* table state is reported to be wrong, developers should fix the
@@ -186,17 +189,39 @@ acpi_status ACPI_INIT_FUNCTION acpi_reallocate_root_table(void)
* early stage.
*/
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
- if (acpi_gbl_root_table_list.tables[i].pointer) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (table_desc->pointer) {
ACPI_ERROR((AE_INFO,
"Table [%4.4s] is not invalidated during early boot stage",
- acpi_gbl_root_table_list.tables[i].
- signature.ascii));
+ table_desc->signature.ascii));
}
}
- acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
+ if (!acpi_gbl_enable_table_validation) {
+ /*
+ * Now it's safe to do full table validation. We can do deferred
+		 * table initialization here once the flag is set.
+ */
+ acpi_gbl_enable_table_validation = TRUE;
+ for (i = 0; i < acpi_gbl_root_table_list.current_table_count;
+ ++i) {
+ table_desc = &acpi_gbl_root_table_list.tables[i];
+ if (!(table_desc->flags & ACPI_TABLE_IS_VERIFIED)) {
+ status =
+ acpi_tb_verify_temp_table(table_desc, NULL,
+ &j);
+ if (ACPI_FAILURE(status)) {
+ acpi_tb_uninstall_table(table_desc);
+ }
+ }
+ }
+ }
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE;
status = acpi_tb_resize_root_table_list();
+ acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED;
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
@@ -369,6 +394,10 @@ void acpi_put_table(struct acpi_table_header *table)
ACPI_FUNCTION_TRACE(acpi_put_table);
+ if (!table) {
+ return_VOID;
+ }
+
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
/* Walk the root table list */
diff --git a/drivers/acpi/acpica/tbxfload.c b/drivers/acpi/acpica/tbxfload.c
index b71ce3b817ea..d81f442228b8 100644
--- a/drivers/acpi/acpica/tbxfload.c
+++ b/drivers/acpi/acpica/tbxfload.c
@@ -206,7 +206,7 @@ acpi_status acpi_tb_load_namespace(void)
for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
table = &acpi_gbl_root_table_list.tables[i];
- if (!acpi_gbl_root_table_list.tables[i].address ||
+ if (!table->address ||
(!ACPI_COMPARE_NAME(table->signature.ascii, ACPI_SIG_SSDT)
&& !ACPI_COMPARE_NAME(table->signature.ascii,
ACPI_SIG_PSDT)
diff --git a/drivers/acpi/acpica/uthex.c b/drivers/acpi/acpica/uthex.c
index 6600bc257516..fb406daf47fa 100644
--- a/drivers/acpi/acpica/uthex.c
+++ b/drivers/acpi/acpica/uthex.c
@@ -69,8 +69,10 @@ static const char acpi_gbl_hex_to_ascii[] = {
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{
+ u64 index;
- return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
+ acpi_ut_short_shift_right(integer, position, &index);
+ return (acpi_gbl_hex_to_ascii[index & 0xF]);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index aa0502d1d019..5f9c680076c4 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -47,15 +47,6 @@
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utmath")
-/*
- * Optional support for 64-bit double-precision integer divide. This code
- * is configurable and is implemented in order to support 32-bit kernel
- * environments where a 64-bit double-precision math library is not available.
- *
- * Support for a more normal 64-bit divide/modulo (with check for a divide-
- * by-zero) appears after this optional section of code.
- */
-#ifndef ACPI_USE_NATIVE_DIVIDE
/* Structures used only for 64-bit divide */
typedef struct uint64_struct {
u32 lo;
@@ -69,6 +60,217 @@ typedef union uint64_overlay {
} uint64_overlay;
+/*
+ * Optional support for 64-bit double-precision integer multiply and shift.
+ * This code is configurable and is implemented in order to support 32-bit
+ * kernel environments where a 64-bit double-precision math library is not
+ * available.
+ */
+#ifndef ACPI_USE_NATIVE_MATH64
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: multiplicand - 64-bit multiplicand
+ * multiplier - 32-bit multiplier
+ * out_product - Pointer to where the product is returned
+ *
+ * DESCRIPTION: Perform a short multiply.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+ union uint64_overlay multiplicand_ovl;
+ union uint64_overlay product;
+ u32 carry32;
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ multiplicand_ovl.full = multiplicand;
+
+ /*
+ * The Product is 64 bits, the carry is always 32 bits,
+ * and is generated by the second multiply.
+ */
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.hi, multiplier,
+ product.part.hi, carry32);
+
+ ACPI_MUL_64_BY_32(0, multiplicand_ovl.part.lo, multiplier,
+ product.part.lo, carry32);
+
+ product.part.hi += carry32;
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = product.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short left shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.hi = operand_ovl.part.lo;
+ operand_ovl.part.lo ^= operand_ovl.part.lo;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_LEFT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: operand - 64-bit shift operand
+ * count - 32-bit shift count
+ * out_result - Pointer to where the result is returned
+ *
+ * DESCRIPTION: Perform a short right shift.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+ union uint64_overlay operand_ovl;
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ operand_ovl.full = operand;
+
+ if ((count & 63) >= 32) {
+ operand_ovl.part.lo = operand_ovl.part.hi;
+ operand_ovl.part.hi ^= operand_ovl.part.hi;
+ count = (count & 63) - 32;
+ }
+ ACPI_SHIFT_RIGHT_64_BY_32(operand_ovl.part.hi,
+ operand_ovl.part.lo, count);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand_ovl.full;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#else
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_multiply
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_multiply function.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ut_short_multiply(u64 multiplicand, u32 multiplier, u64 *out_product)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_multiply);
+
+ /* Return only what was requested */
+
+ if (out_product) {
+ *out_product = multiplicand * multiplier;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_left
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_left function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_left(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_left);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand << count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_short_shift_right
+ *
+ * PARAMETERS: See function headers above
+ *
+ * DESCRIPTION: Native version of the ut_short_shift_right function.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_short_shift_right(u64 operand, u32 count, u64 *out_result)
+{
+
+ ACPI_FUNCTION_TRACE(ut_short_shift_right);
+
+ /* Return only what was requested */
+
+ if (out_result) {
+ *out_result = operand >> count;
+ }
+
+ return_ACPI_STATUS(AE_OK);
+}
+#endif
+
+/*
+ * Optional support for 64-bit double-precision integer divide. This code
+ * is configurable and is implemented in order to support 32-bit kernel
+ * environments where a 64-bit double-precision math library is not available.
+ *
+ * Support for a more normal 64-bit divide/modulo (with check for a divide-
+ * by-zero) appears after this optional section of code.
+ */
+#ifndef ACPI_USE_NATIVE_DIVIDE
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide
@@ -258,6 +460,7 @@ acpi_ut_divide(u64 in_dividend,
}
#else
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_short_divide, acpi_ut_divide
@@ -272,6 +475,7 @@ acpi_ut_divide(u64 in_dividend,
* perform the divide.
*
******************************************************************************/
+
acpi_status
acpi_ut_short_divide(u64 in_dividend,
u32 divisor, u64 *out_quotient, u32 *out_remainder)
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 443ffad01209..45c78c2adbf0 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -224,7 +224,7 @@ acpi_ut_create_update_state_and_push(union acpi_operand_object *object,
*
* RETURN: Status
*
- * DESCRIPTION: Walk through a package
+ * DESCRIPTION: Walk through a package, including subpackages
*
******************************************************************************/
@@ -236,8 +236,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
acpi_status status = AE_OK;
union acpi_generic_state *state_list = NULL;
union acpi_generic_state *state;
- u32 this_index;
union acpi_operand_object *this_source_obj;
+ u32 this_index;
ACPI_FUNCTION_TRACE(ut_walk_package_tree);
@@ -251,8 +251,10 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* Get one element of the package */
this_index = state->pkg.index;
- this_source_obj = (union acpi_operand_object *)
+ this_source_obj =
state->pkg.source_object->package.elements[this_index];
+ state->pkg.this_target_obj =
+ &state->pkg.source_object->package.elements[this_index];
/*
* Check for:
@@ -339,6 +341,8 @@ acpi_ut_walk_package_tree(union acpi_operand_object *source_object,
/* We should never get here */
+ ACPI_ERROR((AE_INFO, "State list did not terminate correctly"));
+
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 64e6641bfe82..cb3db9fed50d 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -483,6 +483,11 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
/* A namespace node should never get here */
+ ACPI_ERROR((AE_INFO,
+ "Received a namespace node [%4.4s] "
+ "where an operand object is required",
+ ACPI_CAST_PTR(struct acpi_namespace_node,
+ internal_object)->name.ascii));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c
index 7e6e1ae6140f..c008589b41bd 100644
--- a/drivers/acpi/acpica/utprint.c
+++ b/drivers/acpi/acpica/utprint.c
@@ -176,7 +176,7 @@ const char *acpi_ut_scan_number(const char *string, u64 *number_ptr)
u64 number = 0;
while (isdigit((int)*string)) {
- number *= 10;
+ acpi_ut_short_multiply(number, 10, &number);
number += *(string++) - '0';
}
@@ -286,7 +286,7 @@ static char *acpi_ut_format_number(char *string,
/* Generate full string in reverse order */
pos = acpi_ut_put_number(reversed_string, number, base, upper);
- i = ACPI_PTR_DIFF(pos, reversed_string);
+ i = (s32)ACPI_PTR_DIFF(pos, reversed_string);
/* Printing 100 using %2d gives "100", not "00" */
@@ -475,7 +475,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
if (!s) {
s = "<NULL>";
}
- length = acpi_ut_bound_string_length(s, precision);
+ length = (s32)acpi_ut_bound_string_length(s, precision);
if (!(type & ACPI_FORMAT_LEFT)) {
while (length < width--) {
pos =
@@ -579,7 +579,7 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args)
}
}
- return (ACPI_PTR_DIFF(pos, string));
+ return ((int)ACPI_PTR_DIFF(pos, string));
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 70f78a4bf13b..f9801d13547f 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -237,6 +237,13 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
+ /*
+ * Don't attempt to perform any validation on the 2nd byte.
+ * Although all known ASL compilers insert a zero for the 2nd
+ * byte, it can also be a checksum (as per the ACPI spec),
+ * and this is occasionally seen in the field. July 2017.
+ */
+
/* Return the pointer to the end_tag if requested */
if (!user_function) {
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 64308c304ade..eafabcd2fada 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -226,7 +226,7 @@ union acpi_generic_state *acpi_ut_create_update_state(union acpi_operand_object
union acpi_generic_state *acpi_ut_create_pkg_state(void *internal_object,
void *external_object,
- u16 index)
+ u32 index)
{
union acpi_generic_state *state;
diff --git a/drivers/acpi/acpica/utstrtoul64.c b/drivers/acpi/acpica/utstrtoul64.c
index f42be01d99fd..9633ee142855 100644
--- a/drivers/acpi/acpica/utstrtoul64.c
+++ b/drivers/acpi/acpica/utstrtoul64.c
@@ -276,8 +276,8 @@ static u64 acpi_ut_strtoul_base10(char *string, u32 flags)
/* Convert and insert (add) the decimal digit */
- next_value =
- (return_value * 10) + (ascii_digit - ACPI_ASCII_ZERO);
+ acpi_ut_short_multiply(return_value, 10, &next_value);
+ next_value += (ascii_digit - ACPI_ASCII_ZERO);
/* Check for overflow (32 or 64 bit) - return current converted value */
@@ -335,9 +335,8 @@ static u64 acpi_ut_strtoul_base16(char *string, u32 flags)
/* Convert and insert the hex digit */
- return_value =
- (return_value << 4) |
- acpi_ut_ascii_char_to_hex(ascii_digit);
+ acpi_ut_short_shift_left(return_value, 4, &return_value);
+ return_value |= acpi_ut_ascii_char_to_hex(ascii_digit);
string++;
valid_digits++;
diff --git a/drivers/acpi/acpica/uttrack.c b/drivers/acpi/acpica/uttrack.c
index 9a07a42cae34..3c8de88ecbd5 100644
--- a/drivers/acpi/acpica/uttrack.c
+++ b/drivers/acpi/acpica/uttrack.c
@@ -591,6 +591,10 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
return_VOID;
}
+ if (!acpi_gbl_global_list) {
+ goto exit;
+ }
+
element = acpi_gbl_global_list->list_head;
while (element) {
if ((element->component & component) &&
@@ -602,7 +606,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (element->size <
sizeof(struct acpi_common_descriptor)) {
- acpi_os_printf("%p Length 0x%04X %9.9s-%u "
+ acpi_os_printf("%p Length 0x%04X %9.9s-%4.4u "
"[Not a Descriptor - too small]\n",
descriptor, element->size,
element->module, element->line);
@@ -612,7 +616,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) !=
ACPI_DESC_TYPE_CACHED) {
acpi_os_printf
- ("%p Length 0x%04X %9.9s-%u [%s] ",
+ ("%p Length 0x%04X %9.9s-%4.4u [%s] ",
descriptor, element->size,
element->module, element->line,
acpi_ut_get_descriptor_name
@@ -705,6 +709,7 @@ void acpi_ut_dump_allocations(u32 component, const char *module)
element = element->next;
}
+exit:
(void)acpi_ut_release_mutex(ACPI_MTX_MEMORY);
/* Print summary */
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index 6e9f14c0a71b..cb4126051f62 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -120,11 +120,6 @@ int apei_exec_collect_resources(struct apei_exec_context *ctx,
struct dentry;
struct dentry *apei_get_debugfs_dir(void);
-#define apei_estatus_for_each_section(estatus, section) \
- for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
- (void *)section - (void *)estatus < estatus->data_length; \
- section = (void *)(section+1) + section->error_data_length)
-
static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
{
if (estatus->raw_data_length)
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index ec50c32ea3da..b38737c83a24 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -281,7 +281,7 @@ static struct acpi_generic_address *einj_get_trigger_parameter_region(
((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
for (i = 0; i < trigger_tab->entry_count; i++) {
if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
- entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+ entry->instruction <= ACPI_EINJ_WRITE_REGISTER_VALUE &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY &&
(entry->register_region.address & param2) == (param1 & param2))
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index d661d452b238..077f9bad6f44 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1157,7 +1157,8 @@ static int ghes_probe(struct platform_device *ghes_dev)
generic->header.source_id);
goto err_edac_unreg;
}
- rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
+ rc = request_irq(ghes->irq, ghes_irq_func, IRQF_SHARED,
+ "GHES IRQ", ghes);
if (rc) {
pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
generic->header.source_id);
@@ -1265,9 +1266,14 @@ static int __init ghes_init(void)
if (acpi_disabled)
return -ENODEV;
- if (hest_disable) {
+ switch (hest_disable) {
+ case HEST_NOT_FOUND:
+ return -ENODEV;
+ case HEST_DISABLED:
pr_info(GHES_PFX "HEST is not enabled!\n");
return -EINVAL;
+ default:
+ break;
}
if (ghes_disable) {
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 456b488eb1df..9cb74115a43d 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -37,7 +37,7 @@
#define HEST_PFX "HEST: "
-bool hest_disable;
+int hest_disable;
EXPORT_SYMBOL_GPL(hest_disable);
/* HEST table parsing */
@@ -213,7 +213,7 @@ err:
static int __init setup_hest_disable(char *str)
{
- hest_disable = 1;
+ hest_disable = HEST_DISABLED;
return 0;
}
@@ -232,9 +232,10 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND)
- goto err;
- else if (ACPI_FAILURE(status)) {
+ if (status == AE_NOT_FOUND) {
+ hest_disable = HEST_NOT_FOUND;
+ return;
+ } else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
@@ -257,5 +258,5 @@ void __init acpi_hest_init(void)
pr_info(HEST_PFX "Table parsing has been initialized.\n");
return;
err:
- hest_disable = 1;
+ hest_disable = HEST_DISABLED;
}
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index a3215ee671c1..9565d572f8dd 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -588,7 +588,8 @@ void acpi_configure_pmsi_domain(struct device *dev)
dev_set_msi_domain(dev, msi_domain);
}
-static int __get_pci_rid(struct pci_dev *pdev, u16 alias, void *data)
+static int __maybe_unused __get_pci_rid(struct pci_dev *pdev, u16 alias,
+ void *data)
{
u32 *rid = data;
@@ -633,8 +634,7 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{
int err = 0;
- if (!IS_ERR_OR_NULL(ops) && ops->add_device && dev->bus &&
- !dev->iommu_group)
+ if (ops->add_device && dev->bus && !dev->iommu_group)
err = ops->add_device(dev);
return err;
@@ -648,45 +648,81 @@ int iort_add_device_replay(const struct iommu_ops *ops, struct device *dev)
{ return 0; }
#endif
-static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
- struct acpi_iort_node *node,
- u32 streamid)
+static int iort_iommu_xlate(struct device *dev, struct acpi_iort_node *node,
+ u32 streamid)
{
- const struct iommu_ops *ops = NULL;
- int ret = -ENODEV;
+ const struct iommu_ops *ops;
struct fwnode_handle *iort_fwnode;
- if (node) {
- iort_fwnode = iort_get_fwnode(node);
- if (!iort_fwnode)
- return NULL;
+ if (!node)
+ return -ENODEV;
- ops = iommu_ops_from_fwnode(iort_fwnode);
- /*
- * If the ops look-up fails, this means that either
- * the SMMU drivers have not been probed yet or that
- * the SMMU drivers are not built in the kernel;
- * Depending on whether the SMMU drivers are built-in
- * in the kernel or not, defer the IOMMU configuration
- * or just abort it.
- */
- if (!ops)
- return iort_iommu_driver_enabled(node->type) ?
- ERR_PTR(-EPROBE_DEFER) : NULL;
+ iort_fwnode = iort_get_fwnode(node);
+ if (!iort_fwnode)
+ return -ENODEV;
- ret = arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
- }
+ /*
+ * If the ops look-up fails, this means that either
+ * the SMMU drivers have not been probed yet or that
+ * the SMMU drivers are not built in the kernel;
+ * Depending on whether the SMMU drivers are built-in
+ * in the kernel or not, defer the IOMMU configuration
+ * or just abort it.
+ */
+ ops = iommu_ops_from_fwnode(iort_fwnode);
+ if (!ops)
+ return iort_iommu_driver_enabled(node->type) ?
+ -EPROBE_DEFER : -ENODEV;
+
+ return arm_smmu_iort_xlate(dev, streamid, iort_fwnode, ops);
+}
+
+struct iort_pci_alias_info {
+ struct device *dev;
+ struct acpi_iort_node *node;
+};
+
+static int iort_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
+{
+ struct iort_pci_alias_info *info = data;
+ struct acpi_iort_node *parent;
+ u32 streamid;
- return ret ? NULL : ops;
+ parent = iort_node_map_id(info->node, alias, &streamid,
+ IORT_IOMMU_TYPE);
+ return iort_iommu_xlate(info->dev, parent, streamid);
+}
+
+static int nc_dma_get_range(struct device *dev, u64 *size)
+{
+ struct acpi_iort_node *node;
+ struct acpi_iort_named_component *ncomp;
+
+ node = iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
+ iort_match_node_callback, dev);
+ if (!node)
+ return -ENODEV;
+
+ ncomp = (struct acpi_iort_named_component *)node->node_data;
+
+ *size = ncomp->memory_address_limit >= 64 ? U64_MAX :
+ 1ULL<<ncomp->memory_address_limit;
+
+ return 0;
}
/**
- * iort_set_dma_mask - Set-up dma mask for a device.
+ * iort_dma_setup() - Set-up device DMA parameters.
*
* @dev: device to configure
+ * @dma_addr: device DMA address result pointer
+ * @size: DMA range size result pointer
*/
-void iort_set_dma_mask(struct device *dev)
+void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
{
+ u64 mask, dmaaddr = 0, size = 0, offset = 0;
+ int ret, msb;
+
/*
* Set default coherent_dma_mask to 32 bit. Drivers are expected to
* setup the correct supported mask.
@@ -700,6 +736,36 @@ void iort_set_dma_mask(struct device *dev)
*/
if (!dev->dma_mask)
dev->dma_mask = &dev->coherent_dma_mask;
+
+ size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
+
+ if (dev_is_pci(dev))
+ ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+ else
+ ret = nc_dma_get_range(dev, &size);
+
+ if (!ret) {
+ msb = fls64(dmaaddr + size - 1);
+ /*
+ * Round-up to the power-of-two mask or set
+ * the mask to the whole 64-bit address space
+ * in case the DMA region covers the full
+ * memory window.
+ */
+ mask = msb == 64 ? U64_MAX : (1ULL << msb) - 1;
+ /*
+ * Limit coherent and dma mask based on size
+ * retrieved from firmware.
+ */
+ dev->coherent_dma_mask = mask;
+ *dev->dma_mask = mask;
+ }
+
+ *dma_addr = dmaaddr;
+ *dma_size = size;
+
+ dev->dma_pfn_offset = PFN_DOWN(offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08llx)\n", offset);
}
/**
@@ -713,9 +779,9 @@ void iort_set_dma_mask(struct device *dev)
const struct iommu_ops *iort_iommu_configure(struct device *dev)
{
struct acpi_iort_node *node, *parent;
- const struct iommu_ops *ops = NULL;
+ const struct iommu_ops *ops;
u32 streamid = 0;
- int err;
+ int err = -ENODEV;
/*
* If we already translated the fwspec there
@@ -727,21 +793,16 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_bus *bus = to_pci_dev(dev)->bus;
- u32 rid;
-
- pci_for_each_dma_alias(to_pci_dev(dev), __get_pci_rid,
- &rid);
+ struct iort_pci_alias_info info = { .dev = dev };
node = iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
iort_match_node_callback, &bus->dev);
if (!node)
return NULL;
- parent = iort_node_map_id(node, rid, &streamid,
- IORT_IOMMU_TYPE);
-
- ops = iort_iommu_xlate(dev, parent, streamid);
-
+ info.node = node;
+ err = pci_for_each_dma_alias(to_pci_dev(dev),
+ iort_pci_iommu_init, &info);
} else {
int i = 0;
@@ -750,31 +811,30 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
if (!node)
return NULL;
- parent = iort_node_map_platform_id(node, &streamid,
- IORT_IOMMU_TYPE, i++);
-
- while (parent) {
- ops = iort_iommu_xlate(dev, parent, streamid);
- if (IS_ERR_OR_NULL(ops))
- return ops;
-
+ do {
parent = iort_node_map_platform_id(node, &streamid,
IORT_IOMMU_TYPE,
i++);
- }
+
+ if (parent)
+ err = iort_iommu_xlate(dev, parent, streamid);
+ } while (parent && !err);
}
/*
* If we have reason to believe the IOMMU driver missed the initial
* add_device callback for dev, replay it to get things in order.
*/
- err = iort_add_device_replay(ops, dev);
- if (err)
- ops = ERR_PTR(err);
+ if (!err) {
+ ops = iort_fwspec_iommu_ops(dev->iommu_fwspec);
+ err = iort_add_device_replay(ops, dev);
+ }
/* Ignore all other errors apart from EPROBE_DEFER */
- if (IS_ERR(ops) && (PTR_ERR(ops) != -EPROBE_DEFER)) {
- dev_dbg(dev, "Adding to IOMMU failed: %ld\n", PTR_ERR(ops));
+ if (err == -EPROBE_DEFER) {
+ ops = ERR_PTR(err);
+ } else if (err) {
+ dev_dbg(dev, "Adding to IOMMU failed: %d\n", err);
ops = NULL;
}
@@ -908,6 +968,27 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
return smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE;
}
+#if defined(CONFIG_ACPI_NUMA) && defined(ACPI_IORT_SMMU_V3_PXM_VALID)
+/*
+ * Set the NUMA proximity domain for an SMMUv3 device.
+ */
+static void __init arm_smmu_v3_set_proximity(struct device *dev,
+ struct acpi_iort_node *node)
+{
+ struct acpi_iort_smmu_v3 *smmu;
+
+ smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
+ if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
+ set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
+ pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
+ smmu->base_address,
+ smmu->pxm);
+ }
+}
+#else
+#define arm_smmu_v3_set_proximity NULL
+#endif
+
static int __init arm_smmu_count_resources(struct acpi_iort_node *node)
{
struct acpi_iort_smmu *smmu;
@@ -977,13 +1058,16 @@ struct iort_iommu_config {
int (*iommu_count_resources)(struct acpi_iort_node *node);
void (*iommu_init_resources)(struct resource *res,
struct acpi_iort_node *node);
+ void (*iommu_set_proximity)(struct device *dev,
+ struct acpi_iort_node *node);
};
static const struct iort_iommu_config iort_arm_smmu_v3_cfg __initconst = {
.name = "arm-smmu-v3",
.iommu_is_coherent = arm_smmu_v3_is_coherent,
.iommu_count_resources = arm_smmu_v3_count_resources,
- .iommu_init_resources = arm_smmu_v3_init_resources
+ .iommu_init_resources = arm_smmu_v3_init_resources,
+ .iommu_set_proximity = arm_smmu_v3_set_proximity,
};
static const struct iort_iommu_config iort_arm_smmu_cfg __initconst = {
@@ -1028,6 +1112,9 @@ static int __init iort_add_smmu_platform_device(struct acpi_iort_node *node)
if (!pdev)
return -ENOMEM;
+ if (ops->iommu_set_proximity)
+ ops->iommu_set_proximity(&pdev->dev, node);
+
count = ops->iommu_count_resources(node);
r = kcalloc(count, sizeof(*r), GFP_KERNEL);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 1cbb88d938e5..13e7b56e33ae 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -620,7 +620,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static struct device_attribute alarm_attr = {
+static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index bb542acc0574..995c4d8922b1 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -30,30 +30,13 @@
#include "internal.h"
-enum acpi_blacklist_predicates {
- all_versions,
- less_than_or_equal,
- equal,
- greater_than_or_equal,
-};
-
-struct acpi_blacklist_item {
- char oem_id[7];
- char oem_table_id[9];
- u32 oem_revision;
- char *table;
- enum acpi_blacklist_predicates oem_revision_predicate;
- char *reason;
- u32 is_critical_error;
-};
-
-static struct dmi_system_id acpi_rev_dmi_table[] __initdata;
+static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
* If they are critical errors, mark it critical, and abort driver load.
*/
-static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
+static struct acpi_platform_list acpi_blacklist[] __initdata = {
/* Compaq Presario 1700 */
{"PTLTD ", " DSDT ", 0x06040000, ACPI_SIG_DSDT, less_than_or_equal,
"Multiple problems", 1},
@@ -67,65 +50,27 @@ static struct acpi_blacklist_item acpi_blacklist[] __initdata = {
{"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal,
"Incorrect _ADR", 1},
- {""}
+ { }
};
int __init acpi_blacklisted(void)
{
- int i = 0;
+ int i;
int blacklisted = 0;
- struct acpi_table_header table_header;
-
- while (acpi_blacklist[i].oem_id[0] != '\0') {
- if (acpi_get_table_header(acpi_blacklist[i].table, 0, &table_header)) {
- i++;
- continue;
- }
-
- if (strncmp(acpi_blacklist[i].oem_id, table_header.oem_id, 6)) {
- i++;
- continue;
- }
-
- if (strncmp
- (acpi_blacklist[i].oem_table_id, table_header.oem_table_id,
- 8)) {
- i++;
- continue;
- }
-
- if ((acpi_blacklist[i].oem_revision_predicate == all_versions)
- || (acpi_blacklist[i].oem_revision_predicate ==
- less_than_or_equal
- && table_header.oem_revision <=
- acpi_blacklist[i].oem_revision)
- || (acpi_blacklist[i].oem_revision_predicate ==
- greater_than_or_equal
- && table_header.oem_revision >=
- acpi_blacklist[i].oem_revision)
- || (acpi_blacklist[i].oem_revision_predicate == equal
- && table_header.oem_revision ==
- acpi_blacklist[i].oem_revision)) {
- printk(KERN_ERR PREFIX
- "Vendor \"%6.6s\" System \"%8.8s\" "
- "Revision 0x%x has a known ACPI BIOS problem.\n",
- acpi_blacklist[i].oem_id,
- acpi_blacklist[i].oem_table_id,
- acpi_blacklist[i].oem_revision);
+ i = acpi_match_platform_list(acpi_blacklist);
+ if (i >= 0) {
+ pr_err(PREFIX "Vendor \"%6.6s\" System \"%8.8s\" Revision 0x%x has a known ACPI BIOS problem.\n",
+ acpi_blacklist[i].oem_id,
+ acpi_blacklist[i].oem_table_id,
+ acpi_blacklist[i].oem_revision);
- printk(KERN_ERR PREFIX
- "Reason: %s. This is a %s error\n",
- acpi_blacklist[i].reason,
- (acpi_blacklist[i].
- is_critical_error ? "non-recoverable" :
- "recoverable"));
+ pr_err(PREFIX "Reason: %s. This is a %s error\n",
+ acpi_blacklist[i].reason,
+ (acpi_blacklist[i].data ?
+ "non-recoverable" : "recoverable"));
- blacklisted = acpi_blacklist[i].is_critical_error;
- break;
- } else {
- i++;
- }
+ blacklisted = acpi_blacklist[i].data;
}
(void)early_acpi_osi_init();
@@ -144,7 +89,7 @@ static int __init dmi_enable_rev_override(const struct dmi_system_id *d)
}
#endif
-static struct dmi_system_id acpi_rev_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_rev_dmi_table[] __initconst = {
#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
/*
* DELL XPS 13 (2015) switches sound between HDA and I2S
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index af74b420ec83..4d0979e02a28 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -67,7 +67,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
}
#endif
-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
/*
* Invoke DSDT corruption work-around on all Toshiba Satellite.
* https://bugzilla.kernel.org/show_bug.cgi?id=14679
@@ -83,7 +83,7 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
{}
};
#else
-static struct dmi_system_id dsdt_dmi_table[] __initdata = {
+static const struct dmi_system_id dsdt_dmi_table[] __initconst = {
{}
};
#endif
@@ -995,9 +995,6 @@ void __init acpi_early_init(void)
printk(KERN_INFO PREFIX "Core revision %08x\n", ACPI_CA_VERSION);
- /* It's safe to verify table checksums during late stage */
- acpi_gbl_verify_table_checksum = TRUE;
-
/* enable workarounds, unless strict ACPI spec. compliance */
if (!acpi_strict)
acpi_gbl_enable_interpreter_slack = TRUE;
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 2ed6935d4483..fbcc73f7a099 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -401,6 +401,8 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (val != ACPI_NOTIFY_DEVICE_WAKE)
return;
+ acpi_handle_debug(handle, "Wake notify\n");
+
adev = acpi_bus_get_acpi_device(handle);
if (!adev)
return;
@@ -409,8 +411,12 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
if (adev->wakeup.flags.notifier_present) {
pm_wakeup_ws_event(adev->wakeup.ws, 0, acpi_s2idle_wakeup());
- if (adev->wakeup.context.func)
+ if (adev->wakeup.context.func) {
+ acpi_handle_debug(handle, "Running %pF for %s\n",
+ adev->wakeup.context.func,
+ dev_name(adev->wakeup.context.dev));
adev->wakeup.context.func(&adev->wakeup.context);
+ }
}
mutex_unlock(&acpi_pm_notifier_lock);
@@ -682,55 +688,88 @@ static void acpi_pm_notify_work_func(struct acpi_device_wakeup_context *context)
}
}
+static DEFINE_MUTEX(acpi_wakeup_lock);
+
+static int __acpi_device_wakeup_enable(struct acpi_device *adev,
+ u32 target_state, int max_count)
+{
+ struct acpi_device_wakeup *wakeup = &adev->wakeup;
+ acpi_status status;
+ int error = 0;
+
+ mutex_lock(&acpi_wakeup_lock);
+
+ if (wakeup->enable_count >= max_count)
+ goto out;
+
+ if (wakeup->enable_count > 0)
+ goto inc;
+
+ error = acpi_enable_wakeup_device_power(adev, target_state);
+ if (error)
+ goto out;
+
+ status = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ if (ACPI_FAILURE(status)) {
+ acpi_disable_wakeup_device_power(adev);
+ error = -EIO;
+ goto out;
+ }
+
+inc:
+ wakeup->enable_count++;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
+ return error;
+}
+
/**
- * acpi_device_wakeup - Enable/disable wakeup functionality for device.
- * @adev: ACPI device to enable/disable wakeup functionality for.
+ * acpi_device_wakeup_enable - Enable wakeup functionality for device.
+ * @adev: ACPI device to enable wakeup functionality for.
* @target_state: State the system is transitioning into.
- * @enable: Whether to enable or disable the wakeup functionality.
*
- * Enable/disable the GPE associated with @adev so that it can generate
- * wakeup signals for the device in response to external (remote) events and
- * enable/disable device wakeup power.
+ * Enable the GPE associated with @adev so that it can generate wakeup signals
+ * for the device in response to external (remote) events and enable wakeup
+ * power for it.
+ *
+ * Callers must ensure that @adev is a valid ACPI device node before executing
+ * this function.
+ */
+static int acpi_device_wakeup_enable(struct acpi_device *adev, u32 target_state)
+{
+ return __acpi_device_wakeup_enable(adev, target_state, 1);
+}
+
+/**
+ * acpi_device_wakeup_disable - Disable wakeup functionality for device.
+ * @adev: ACPI device to disable wakeup functionality for.
+ *
+ * Disable the GPE associated with @adev and disable wakeup power for it.
*
* Callers must ensure that @adev is a valid ACPI device node before executing
* this function.
*/
-static int acpi_device_wakeup(struct acpi_device *adev, u32 target_state,
- bool enable)
+static void acpi_device_wakeup_disable(struct acpi_device *adev)
{
struct acpi_device_wakeup *wakeup = &adev->wakeup;
- if (enable) {
- acpi_status res;
- int error;
+ mutex_lock(&acpi_wakeup_lock);
- if (adev->wakeup.flags.enabled)
- return 0;
+ if (!wakeup->enable_count)
+ goto out;
- error = acpi_enable_wakeup_device_power(adev, target_state);
- if (error)
- return error;
+ acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
+ acpi_disable_wakeup_device_power(adev);
- res = acpi_enable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- if (ACPI_FAILURE(res)) {
- acpi_disable_wakeup_device_power(adev);
- return -EIO;
- }
- adev->wakeup.flags.enabled = 1;
- } else if (adev->wakeup.flags.enabled) {
- acpi_disable_gpe(wakeup->gpe_device, wakeup->gpe_number);
- acpi_disable_wakeup_device_power(adev);
- adev->wakeup.flags.enabled = 0;
- }
- return 0;
+ wakeup->enable_count--;
+
+out:
+ mutex_unlock(&acpi_wakeup_lock);
}
-/**
- * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
- * @dev: Device to enable/disable to generate wakeup events.
- * @enable: Whether to enable or disable the wakeup functionality.
- */
-int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+static int __acpi_pm_set_device_wakeup(struct device *dev, bool enable,
+ int max_count)
{
struct acpi_device *adev;
int error;
@@ -744,13 +783,41 @@ int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
if (!acpi_device_can_wakeup(adev))
return -EINVAL;
- error = acpi_device_wakeup(adev, acpi_target_system_state(), enable);
+ if (!enable) {
+ acpi_device_wakeup_disable(adev);
+ dev_dbg(dev, "Wakeup disabled by ACPI\n");
+ return 0;
+ }
+
+ error = __acpi_device_wakeup_enable(adev, acpi_target_system_state(),
+ max_count);
if (!error)
- dev_dbg(dev, "Wakeup %s by ACPI\n", enable ? "enabled" : "disabled");
+ dev_dbg(dev, "Wakeup enabled by ACPI\n");
return error;
}
-EXPORT_SYMBOL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_device_wakeup - Enable/disable remote wakeup for given device.
+ * @dev: Device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_device_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, 1);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_device_wakeup);
+
+/**
+ * acpi_pm_set_bridge_wakeup - Enable/disable remote wakeup for given bridge.
+ * @dev: Bridge device to enable/disable to generate wakeup events.
+ * @enable: Whether to enable or disable the wakeup functionality.
+ */
+int acpi_pm_set_bridge_wakeup(struct device *dev, bool enable)
+{
+ return __acpi_pm_set_device_wakeup(dev, enable, INT_MAX);
+}
+EXPORT_SYMBOL_GPL(acpi_pm_set_bridge_wakeup);
/**
* acpi_dev_pm_low_power - Put ACPI device into a low-power state.
@@ -800,13 +867,15 @@ int acpi_dev_runtime_suspend(struct device *dev)
remote_wakeup = dev_pm_qos_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP) >
PM_QOS_FLAGS_NONE;
- error = acpi_device_wakeup(adev, ACPI_STATE_S0, remote_wakeup);
- if (remote_wakeup && error)
- return -EAGAIN;
+ if (remote_wakeup) {
+ error = acpi_device_wakeup_enable(adev, ACPI_STATE_S0);
+ if (error)
+ return -EAGAIN;
+ }
error = acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ if (error && remote_wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -829,7 +898,7 @@ int acpi_dev_runtime_resume(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_runtime_resume);
@@ -884,13 +953,15 @@ int acpi_dev_suspend_late(struct device *dev)
target_state = acpi_target_system_state();
wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
- error = acpi_device_wakeup(adev, target_state, wakeup);
- if (wakeup && error)
- return error;
+ if (wakeup) {
+ error = acpi_device_wakeup_enable(adev, target_state);
+ if (error)
+ return error;
+ }
error = acpi_dev_pm_low_power(dev, adev, target_state);
- if (error)
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ if (error && wakeup)
+ acpi_device_wakeup_disable(adev);
return error;
}
@@ -913,7 +984,7 @@ int acpi_dev_resume_early(struct device *dev)
return 0;
error = acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_UNKNOWN, false);
+ acpi_device_wakeup_disable(adev);
return error;
}
EXPORT_SYMBOL_GPL(acpi_dev_resume_early);
@@ -1056,7 +1127,7 @@ static void acpi_dev_pm_detach(struct device *dev, bool power_off)
*/
dev_pm_qos_hide_latency_limit(dev);
dev_pm_qos_hide_flags(dev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
acpi_dev_pm_low_power(dev, adev, ACPI_STATE_S0);
}
}
@@ -1100,7 +1171,7 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on)
dev_pm_domain_set(dev, &acpi_general_pm_domain);
if (power_on) {
acpi_dev_pm_full_power(adev);
- acpi_device_wakeup(adev, ACPI_STATE_S0, false);
+ acpi_device_wakeup_disable(adev);
}
dev->pm_domain->detach = acpi_dev_pm_detach;
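The device_pm.c rework above replaces the single wakeup.flags.enabled bit with an enable_count protected by acpi_wakeup_lock, so a bridge (via acpi_pm_set_bridge_wakeup(), max_count = INT_MAX) and an ordinary device (max_count = 1) can request wakeup independently without clobbering each other. A minimal, hedged sketch of a driver-side caller of the exported helper; the function name and error handling are illustrative and not part of this patch:

/* Illustrative sketch: request ACPI remote wakeup from a driver suspend path. */
static int example_driver_suspend(struct device *dev)
{
	int error;

	if (device_may_wakeup(dev)) {
		/* Enables the wakeup GPE and wakeup power, refcounted by ACPI. */
		error = acpi_pm_set_device_wakeup(dev, true);
		if (error)
			return error;
	}

	return 0;
}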
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 0c00208b423e..2305e1ab978e 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -585,7 +585,7 @@ static struct attribute *dock_attributes[] = {
NULL
};
-static struct attribute_group dock_attribute_group = {
+static const struct attribute_group dock_attribute_group = {
.attrs = dock_attributes
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 62068a5e814f..236b14324780 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -112,8 +112,7 @@ enum {
EC_FLAGS_EVT_HANDLER_INSTALLED, /* _Qxx handlers installed */
EC_FLAGS_STARTED, /* Driver is started */
EC_FLAGS_STOPPED, /* Driver is stopped */
- EC_FLAGS_COMMAND_STORM, /* GPE storms occurred to the
- * current command processing */
+ EC_FLAGS_GPE_MASKED, /* GPE masked */
};
#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
@@ -425,19 +424,19 @@ static void acpi_ec_complete_request(struct acpi_ec *ec)
wake_up(&ec->wait);
}
-static void acpi_ec_set_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_mask_gpe(struct acpi_ec *ec)
{
- if (!test_bit(flag, &ec->flags)) {
+ if (!test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
acpi_ec_disable_gpe(ec, false);
ec_dbg_drv("Polling enabled");
- set_bit(flag, &ec->flags);
+ set_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
}
}
-static void acpi_ec_clear_storm(struct acpi_ec *ec, u8 flag)
+static void acpi_ec_unmask_gpe(struct acpi_ec *ec)
{
- if (test_bit(flag, &ec->flags)) {
- clear_bit(flag, &ec->flags);
+ if (test_bit(EC_FLAGS_GPE_MASKED, &ec->flags)) {
+ clear_bit(EC_FLAGS_GPE_MASKED, &ec->flags);
acpi_ec_enable_gpe(ec, false);
ec_dbg_drv("Polling disabled");
}
@@ -464,7 +463,7 @@ static bool acpi_ec_submit_flushable_request(struct acpi_ec *ec)
static void acpi_ec_submit_query(struct acpi_ec *ec)
{
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
if (!acpi_ec_event_enabled(ec))
return;
if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
@@ -480,7 +479,7 @@ static void acpi_ec_complete_query(struct acpi_ec *ec)
if (test_and_clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
ec_dbg_evt("Command(%s) unblocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
}
static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
@@ -700,7 +699,7 @@ err:
++t->irq_count;
/* Allow triggering on 0 threshold */
if (t->irq_count == ec_storm_threshold)
- acpi_ec_set_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_mask_gpe(ec);
}
}
out:
@@ -798,7 +797,7 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
spin_lock_irqsave(&ec->lock, tmp);
if (t->irq_count == ec_storm_threshold)
- acpi_ec_clear_storm(ec, EC_FLAGS_COMMAND_STORM);
+ acpi_ec_unmask_gpe(ec);
ec_dbg_req("Command(%s) stopped", acpi_ec_cmd_string(t->command));
ec->curr = NULL;
/* Disable GPE for command processing (IBF=0/OBF=1) */
@@ -1586,9 +1585,7 @@ static bool acpi_is_boot_ec(struct acpi_ec *ec)
{
if (!boot_ec)
return false;
- if (ec->handle == boot_ec->handle &&
- ec->gpe == boot_ec->gpe &&
- ec->command_addr == boot_ec->command_addr &&
+ if (ec->command_addr == boot_ec->command_addr &&
ec->data_addr == boot_ec->data_addr)
return true;
return false;
@@ -1613,6 +1610,13 @@ static int acpi_ec_add(struct acpi_device *device)
if (acpi_is_boot_ec(ec)) {
boot_ec_is_ecdt = false;
+ /*
+ * Trust PNP0C09 namespace location rather than ECDT ID.
+ *
+ * But trust ECDT GPE rather than _GPE because of ASUS quirks,
+ * so do not change boot_ec->gpe to ec->gpe.
+ */
+ boot_ec->handle = ec->handle;
acpi_handle_debug(ec->handle, "duplicated.\n");
acpi_ec_free(ec);
ec = boot_ec;
@@ -1741,24 +1745,26 @@ error:
* functioning ECDT EC first in order to handle the events.
* https://bugzilla.kernel.org/show_bug.cgi?id=115021
*/
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
{
acpi_handle handle;
if (!boot_ec)
return -ENODEV;
- /*
- * The DSDT EC should have already been started in
- * acpi_ec_add().
- */
+ /* In case acpi_ec_ecdt_start() is called after acpi_ec_add() */
if (!boot_ec_is_ecdt)
return -ENODEV;
/*
* At this point, the namespace and the GPE is initialized, so
* start to find the namespace objects and handle the events.
+ *
+ * Note: ec->handle can be valid if this function is called after
+ * acpi_ec_add(), hence the fast path.
*/
- if (!acpi_ec_ecdt_get_handle(&handle))
+ if (boot_ec->handle != ACPI_ROOT_OBJECT)
+ handle = boot_ec->handle;
+ else if (!acpi_ec_ecdt_get_handle(&handle))
return -ENODEV;
return acpi_config_boot_ec(boot_ec, handle, true, true);
}
@@ -1803,7 +1809,7 @@ static int ec_honor_ecdt_gpe(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id ec_dmi_table[] __initdata = {
+static const struct dmi_system_id ec_dmi_table[] __initconst = {
{
ec_correct_ecdt, "MSI MS-171F", {
DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star"),
@@ -2003,20 +2009,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
int result;
+ int ecdt_fail, dsdt_fail;
/* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init();
if (result)
- goto err_exit;
- /* Now register the driver for the EC */
- result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result)
- goto err_exit;
+ return result;
-err_exit:
- if (result)
- acpi_ec_query_exit();
- return result;
+ /* Drivers must be started after acpi_ec_query_init() */
+ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ ecdt_fail = acpi_ec_ecdt_start();
+ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
/* EC driver currently not unloadable */
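With acpi_ec_ecdt_start() made static and invoked from acpi_ec_init(), the DSDT driver registration and the ECDT start become two alternative ways to obtain a working EC. The new return expression only fails when both fail; an informal truth table derived from the code above:

/*  dsdt_fail  ecdt_fail   acpi_ec_init()
 *      0          0           0        (both paths usable)
 *      0          1           0        (DSDT EC driver registered)
 *      1          0           0        (ECDT EC started)
 *      1          1        -ENODEV     (no usable EC)
 */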
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 58dd7ab3c653..4361c4415b4f 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
@@ -233,6 +232,12 @@ static inline void suspend_nvs_restore(void) {}
void acpi_init_properties(struct acpi_device *adev);
void acpi_free_properties(struct acpi_device *adev);
+#ifdef CONFIG_X86
+void acpi_extract_apple_properties(struct acpi_device *adev);
+#else
+static inline void acpi_extract_apple_properties(struct acpi_device *adev) {}
+#endif
+
/*--------------------------------------------------------------------------
Watchdog
-------------------------------------------------------------------------- */
diff --git a/drivers/acpi/nfit/Kconfig b/drivers/acpi/nfit/Kconfig
index 6d3351452ea2..929ba4da0b30 100644
--- a/drivers/acpi/nfit/Kconfig
+++ b/drivers/acpi/nfit/Kconfig
@@ -2,7 +2,7 @@ config ACPI_NFIT
tristate "ACPI NVDIMM Firmware Interface Table (NFIT)"
depends on PHYS_ADDR_T_64BIT
depends on BLK_DEV
- depends on ARCH_HAS_MMIO_FLUSH
+ depends on ARCH_HAS_PMEM_API
select LIBNVDIMM
help
Infrastructure to probe ACPI 6 compliant platforms for
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 19182d091587..9c2c49b6a240 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -228,6 +228,10 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
if (cmd == ND_CMD_CALL) {
call_pkg = buf;
func = call_pkg->nd_command;
+
+ for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
+ if (call_pkg->nd_reserved2[i])
+ return -EINVAL;
}
if (nvdimm) {
@@ -1674,8 +1678,19 @@ static ssize_t range_index_show(struct device *dev,
}
static DEVICE_ATTR_RO(range_index);
+static ssize_t ecc_unit_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);
+
+ return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
+}
+static DEVICE_ATTR_RO(ecc_unit_size);
+
static struct attribute *acpi_nfit_region_attributes[] = {
&dev_attr_range_index.attr,
+ &dev_attr_ecc_unit_size.attr,
NULL,
};
@@ -1804,6 +1819,7 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
spa->range_index, i);
+ struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
if (!memdev || !nfit_mem->dcr) {
dev_err(dev, "%s: failed to find DCR\n", __func__);
@@ -1811,13 +1827,13 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
}
map->region_offset = memdev->region_offset;
- map->serial_number = nfit_mem->dcr->serial_number;
+ map->serial_number = dcr->serial_number;
map2->region_offset = memdev->region_offset;
- map2->serial_number = nfit_mem->dcr->serial_number;
- map2->vendor_id = nfit_mem->dcr->vendor_id;
- map2->manufacturing_date = nfit_mem->dcr->manufacturing_date;
- map2->manufacturing_location = nfit_mem->dcr->manufacturing_location;
+ map2->serial_number = dcr->serial_number;
+ map2->vendor_id = dcr->vendor_id;
+ map2->manufacturing_date = dcr->manufacturing_date;
+ map2->manufacturing_location = dcr->manufacturing_location;
}
/* v1.1 namespaces */
@@ -1835,6 +1851,28 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
cmp_map_compat, NULL);
nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);
+ /* record the result of the sort for the mapping position */
+ for (i = 0; i < nr; i++) {
+ struct nfit_set_info_map2 *map2 = &info2->mapping[i];
+ int j;
+
+ for (j = 0; j < nr; j++) {
+ struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
+ struct nvdimm *nvdimm = mapping->nvdimm;
+ struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
+ struct acpi_nfit_control_region *dcr = nfit_mem->dcr;
+
+ if (map2->serial_number == dcr->serial_number &&
+ map2->vendor_id == dcr->vendor_id &&
+ map2->manufacturing_date == dcr->manufacturing_date &&
+ map2->manufacturing_location
+ == dcr->manufacturing_location) {
+ mapping->position = i;
+ break;
+ }
+ }
+ }
+
ndr_desc->nd_set = nd_set;
devm_kfree(dev, info);
devm_kfree(dev, info2);
@@ -1930,7 +1968,7 @@ static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
else {
if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
- mmio_flush_range((void __force *)
+ arch_invalidate_pmem((void __force *)
mmio->addr.aperture + offset, c);
memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
@@ -2884,7 +2922,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
* need to be interruptible while waiting.
*/
INIT_WORK_ONSTACK(&flush.work, flush_probe);
- COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
+ init_completion(&flush.cmp);
queue_work(nfit_wq, &flush.work);
mutex_unlock(&acpi_desc->init_mutex);
diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c
index 723bee58bbcf..76998a51bf99 100644
--- a/drivers/acpi/osi.c
+++ b/drivers/acpi/osi.c
@@ -27,6 +27,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "internal.h"
@@ -257,12 +258,11 @@ bool acpi_osi_is_win8(void)
}
EXPORT_SYMBOL(acpi_osi_is_win8);
-static void __init acpi_osi_dmi_darwin(bool enable,
- const struct dmi_system_id *d)
+static void __init acpi_osi_dmi_darwin(void)
{
- pr_notice("DMI detected to setup _OSI(\"Darwin\"): %s\n", d->ident);
+ pr_notice("DMI detected to setup _OSI(\"Darwin\"): Apple hardware\n");
osi_config.darwin_dmi = 1;
- __acpi_osi_setup_darwin(enable);
+ __acpi_osi_setup_darwin(true);
}
static void __init acpi_osi_dmi_linux(bool enable,
@@ -273,13 +273,6 @@ static void __init acpi_osi_dmi_linux(bool enable,
__acpi_osi_setup_linux(enable);
}
-static int __init dmi_enable_osi_darwin(const struct dmi_system_id *d)
-{
- acpi_osi_dmi_darwin(true, d);
-
- return 0;
-}
-
static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
{
acpi_osi_dmi_linux(true, d);
@@ -319,7 +312,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
* Note that _OSI("Linux")/_OSI("Darwin") determined here can be overridden
* by acpi_osi=!Linux/acpi_osi=!Darwin command line options.
*/
-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
{
.callback = dmi_disable_osi_vista,
.ident = "Fujitsu Siemens",
@@ -481,30 +474,16 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "1015PX"),
},
},
-
- /*
- * Enable _OSI("Darwin") for all apple platforms.
- */
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- },
- },
- {
- .callback = dmi_enable_osi_darwin,
- .ident = "Apple hardware",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
- },
- },
{}
};
static __init void acpi_osi_dmi_blacklisted(void)
{
dmi_check_system(acpi_osi_dmi_table);
+
+ /* Enable _OSI("Darwin") for Apple platforms. */
+ if (x86_apple_machine)
+ acpi_osi_dmi_darwin();
}
int __init early_acpi_osi_init(void)
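Several files in this series replace Apple-specific DMI tables with the shared x86_apple_machine flag from <linux/platform_data/x86/apple.h>. A hedged sketch of the resulting pattern; the quirk body and function name are placeholders, not code from this patch:

static void example_apply_apple_quirk(void)
{
	/* x86_apple_machine is set early on Apple x86 systems. */
	if (x86_apple_machine)
		pr_info("applying Apple-specific ACPI quirk\n");
}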
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 9eec3095e6c3..6fc204a52493 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
#include <linux/acpi.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include <acpi/apei.h> /* for acpi_hest_init() */
#include "internal.h"
@@ -431,8 +432,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm)
* been called successfully. We know the feature set supported by the
* platform, so avoid calling _OSC at all
*/
-
- if (dmi_match(DMI_SYS_VENDOR, "Apple Inc.")) {
+ if (x86_apple_machine) {
root->osc_control_set = ~OSC_PCI_EXPRESS_PME_CONTROL;
decode_osc_control(root, "OS assumes control of",
root->osc_control_set);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index f62c68e24317..e90b61f7d2db 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -174,7 +174,7 @@ static int do_sta_before_sun(const struct dmi_system_id *d)
return 0;
}
-static struct dmi_system_id acpi_pci_slot_dmi_table[] __initdata = {
+static const struct dmi_system_id acpi_pci_slot_dmi_table[] __initconst = {
/*
* Fujitsu Primequest machines will return 1023 to indicate an
* error if the _SUN method is evaluated on SxFy objects that
diff --git a/drivers/acpi/pmic/intel_pmic_xpower.c b/drivers/acpi/pmic/intel_pmic_xpower.c
index 3b7d5be5b7ed..6c99d3f81095 100644
--- a/drivers/acpi/pmic/intel_pmic_xpower.c
+++ b/drivers/acpi/pmic/intel_pmic_xpower.c
@@ -27,6 +27,9 @@
#define GPI1_LDO_ON (3 << 0)
#define GPI1_LDO_OFF (4 << 0)
+#define AXP288_ADC_TS_PIN_GPADC 0xf2
+#define AXP288_ADC_TS_PIN_ON 0xf3
+
static struct pmic_table power_table[] = {
{
.address = 0x00,
@@ -209,11 +212,23 @@ static int intel_xpower_pmic_update_power(struct regmap *regmap, int reg,
static int intel_xpower_pmic_get_raw_temp(struct regmap *regmap, int reg)
{
u8 buf[2];
+ int ret;
- if (regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2))
- return -EIO;
+ ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_PIN_GPADC);
+ if (ret)
+ return ret;
+
+ /* After switching to the GPADC pin give things some time to settle */
+ usleep_range(6000, 10000);
+
+ ret = regmap_bulk_read(regmap, AXP288_GP_ADC_H, buf, 2);
+ if (ret == 0)
+ ret = (buf[0] << 4) + ((buf[1] >> 4) & 0x0f);
+
+ regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON);
- return (buf[0] << 4) + ((buf[1] >> 4) & 0x0F);
+ return ret;
}
static struct intel_pmic_opregion_data intel_xpower_pmic_opregion_data = {
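The reworked intel_xpower_pmic_get_raw_temp() now switches the TS pin to GPADC mode, waits for the input to settle, reads the two ADC registers and assembles a 12-bit raw value before restoring the pin. A worked example of the bit arithmetic, with values chosen purely for illustration:

/* buf[0] carries ADC bits 11..4, the high nibble of buf[1] carries bits 3..0:
 *   buf[0] = 0xAB, buf[1] = 0xC5
 *   raw = (0xAB << 4) + ((0xC5 >> 4) & 0x0f) = 0xAB0 + 0xC = 0xABC = 2748
 */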
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 591d1dd3f04e..9d6aff22684e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -237,7 +237,7 @@ static int __acpi_processor_start(struct acpi_device *device)
result = acpi_cppc_processor_probe(pr);
if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
- dev_warn(&device->dev, "CPPC data invalid or not present\n");
+ dev_dbg(&device->dev, "CPPC data invalid or not present\n");
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5c8aa9cf62d7..2736e25e9dc6 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -48,6 +48,8 @@
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
+#define ACPI_IDLE_STATE_START (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
+
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
@@ -708,8 +710,6 @@ static DEFINE_RAW_SPINLOCK(c3_lock);
static void acpi_idle_enter_bm(struct acpi_processor *pr,
struct acpi_processor_cx *cx, bool timer_bc)
{
- acpi_unlazy_tlb(smp_processor_id());
-
/*
* Must be done before busmaster disable as we might need to
* access HPET !
@@ -761,7 +761,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
if (cx->type != ACPI_STATE_C1) {
if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
- index = CPUIDLE_DRIVER_STATE_START;
+ index = ACPI_IDLE_STATE_START;
cx = per_cpu(acpi_cstate[index], dev->cpu);
} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
@@ -789,7 +789,7 @@ static int acpi_idle_enter(struct cpuidle_device *dev,
return index;
}
-static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
+static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
@@ -813,7 +813,7 @@ static void acpi_idle_enter_freeze(struct cpuidle_device *dev,
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
struct cpuidle_device *dev)
{
- int i, count = CPUIDLE_DRIVER_STATE_START;
+ int i, count = ACPI_IDLE_STATE_START;
struct acpi_processor_cx *cx;
if (max_cstate == 0)
@@ -840,7 +840,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
- int i, count = CPUIDLE_DRIVER_STATE_START;
+ int i, count;
struct acpi_processor_cx *cx;
struct cpuidle_state *state;
struct cpuidle_driver *drv = &acpi_idle_driver;
@@ -848,6 +848,13 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
if (max_cstate == 0)
max_cstate = 1;
+ if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
+ cpuidle_poll_state_init(drv);
+ count = 1;
+ } else {
+ count = 0;
+ }
+
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
cx = &pr->power.states[i];
@@ -867,14 +874,14 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
drv->safe_state_index = count;
}
/*
- * Halt-induced C1 is not good for ->enter_freeze, because it
+ * Halt-induced C1 is not good for ->enter_s2idle, because it
* re-enables interrupts on exit. Moreover, C1 is generally not
* particularly interesting from the suspend-to-idle angle, so
* avoid C1 and the situations in which we may need to fall back
* to it altogether.
*/
if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
- state->enter_freeze = acpi_idle_enter_freeze;
+ state->enter_s2idle = acpi_idle_enter_s2idle;
count++;
if (count == CPUIDLE_STATE_MAX)
@@ -1291,7 +1298,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
return -EINVAL;
drv->safe_state_index = -1;
- for (i = CPUIDLE_DRIVER_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
+ for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
drv->states[i].name[0] = '\0';
drv->states[i].desc[0] = '\0';
}
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
index 7cfbda4d7c51..74f738cb6073 100644
--- a/drivers/acpi/processor_pdc.c
+++ b/drivers/acpi/processor_pdc.c
@@ -173,7 +173,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
+static const struct dmi_system_id processor_idle_dmi_table[] __initconst = {
{
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 917c789f953d..c1c216163de3 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -19,21 +19,19 @@
#include "internal.h"
-static int acpi_data_get_property_array(struct acpi_device_data *data,
+static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj);
-/* ACPI _DSD device properties UUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
-static const u8 prp_uuid[16] = {
- 0x14, 0xd8, 0xff, 0xda, 0xba, 0x6e, 0x8c, 0x4d,
- 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01
-};
-/* ACPI _DSD data subnodes UUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
-static const u8 ads_uuid[16] = {
- 0xe6, 0xe3, 0xb8, 0xdb, 0x86, 0x58, 0xa6, 0x4b,
- 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b
-};
+/* ACPI _DSD device properties GUID: daffd814-6eba-4d8c-8a91-bc9bbf4aa301 */
+static const guid_t prp_guid =
+ GUID_INIT(0xdaffd814, 0x6eba, 0x4d8c,
+ 0x8a, 0x91, 0xbc, 0x9b, 0xbf, 0x4a, 0xa3, 0x01);
+/* ACPI _DSD data subnodes GUID: dbb8e3e6-5886-4ba6-8795-1319f52a966b */
+static const guid_t ads_guid =
+ GUID_INIT(0xdbb8e3e6, 0x5886, 0x4ba6,
+ 0x87, 0x95, 0x13, 0x19, 0xf5, 0x2a, 0x96, 0x6b);
static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
const union acpi_object *desc,
@@ -56,8 +54,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc,
return false;
dn->name = link->package.elements[0].string.pointer;
- dn->fwnode.type = FWNODE_ACPI_DATA;
- dn->fwnode.ops = &acpi_fwnode_ops;
+ dn->fwnode.ops = &acpi_data_fwnode_ops;
dn->parent = parent;
INIT_LIST_HEAD(&dn->data.subnodes);
@@ -190,22 +187,23 @@ static bool acpi_enumerate_nondev_subnodes(acpi_handle scope,
{
int i;
- /* Look for the ACPI data subnodes UUID. */
+ /* Look for the ACPI data subnodes GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *uuid, *links;
+ const union acpi_object *guid, *links;
- uuid = &desc->package.elements[i];
+ guid = &desc->package.elements[i];
links = &desc->package.elements[i + 1];
/*
- * The first element must be a UUID and the second one must be
+ * The first element must be a GUID and the second one must be
* a package.
*/
- if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
- || links->type != ACPI_TYPE_PACKAGE)
+ if (guid->type != ACPI_TYPE_BUFFER ||
+ guid->buffer.length != 16 ||
+ links->type != ACPI_TYPE_PACKAGE)
break;
- if (memcmp(uuid->buffer.pointer, ads_uuid, sizeof(ads_uuid)))
+ if (!guid_equal((guid_t *)guid->buffer.pointer, &ads_guid))
continue;
return acpi_add_nondev_subnodes(scope, links, &data->subnodes,
@@ -298,26 +296,27 @@ static bool acpi_extract_properties(const union acpi_object *desc,
if (desc->package.count % 2)
return false;
- /* Look for the device properties UUID. */
+ /* Look for the device properties GUID. */
for (i = 0; i < desc->package.count; i += 2) {
- const union acpi_object *uuid, *properties;
+ const union acpi_object *guid, *properties;
- uuid = &desc->package.elements[i];
+ guid = &desc->package.elements[i];
properties = &desc->package.elements[i + 1];
/*
- * The first element must be a UUID and the second one must be
+ * The first element must be a GUID and the second one must be
* a package.
*/
- if (uuid->type != ACPI_TYPE_BUFFER || uuid->buffer.length != 16
- || properties->type != ACPI_TYPE_PACKAGE)
+ if (guid->type != ACPI_TYPE_BUFFER ||
+ guid->buffer.length != 16 ||
+ properties->type != ACPI_TYPE_PACKAGE)
break;
- if (memcmp(uuid->buffer.pointer, prp_uuid, sizeof(prp_uuid)))
+ if (!guid_equal((guid_t *)guid->buffer.pointer, &prp_guid))
continue;
/*
- * We found the matching UUID. Now validate the format of the
+ * We found the matching GUID. Now validate the format of the
* package immediately following it.
*/
if (!acpi_properties_format_valid(properties))
@@ -339,6 +338,9 @@ void acpi_init_properties(struct acpi_device *adev)
INIT_LIST_HEAD(&adev->data.subnodes);
+ if (!adev->handle)
+ return;
+
/*
	 * Check if ACPI_DT_NAMESPACE_HID is present and in that case we fill in
* Device Tree compatible properties for this device.
@@ -373,6 +375,9 @@ void acpi_init_properties(struct acpi_device *adev)
if (acpi_of && !adev->flags.of_compatible_ok)
acpi_handle_info(adev->handle,
ACPI_DT_NAMESPACE_HID " requires 'compatible' property\n");
+
+ if (!adev->data.pointer)
+ acpi_extract_apple_properties(adev);
}
static void acpi_destroy_nondev_subnodes(struct list_head *list)
@@ -418,7 +423,7 @@ void acpi_free_properties(struct acpi_device *adev)
* %-EINVAL if the property doesn't exist,
* %-EPROTO if the property value type doesn't match @type.
*/
-static int acpi_data_get_property(struct acpi_device_data *data,
+static int acpi_data_get_property(const struct acpi_device_data *data,
const char *name, acpi_object_type type,
const union acpi_object **obj)
{
@@ -460,20 +465,21 @@ static int acpi_data_get_property(struct acpi_device_data *data,
* @type: Expected property type.
* @obj: Location to store the property value (if not %NULL).
*/
-int acpi_dev_get_property(struct acpi_device *adev, const char *name,
+int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj)
{
return adev ? acpi_data_get_property(&adev->data, name, type, obj) : -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_dev_get_property);
-static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *fwnode)
+static const struct acpi_device_data *
+acpi_device_data_of_node(const struct fwnode_handle *fwnode)
{
- if (fwnode->type == FWNODE_ACPI) {
- struct acpi_device *adev = to_acpi_device_node(fwnode);
+ if (is_acpi_device_node(fwnode)) {
+ const struct acpi_device *adev = to_acpi_device_node(fwnode);
return &adev->data;
- } else if (fwnode->type == FWNODE_ACPI_DATA) {
- struct acpi_data_node *dn = to_acpi_data_node(fwnode);
+ } else if (is_acpi_data_node(fwnode)) {
+ const struct acpi_data_node *dn = to_acpi_data_node(fwnode);
return &dn->data;
}
return NULL;
@@ -485,8 +491,8 @@ static struct acpi_device_data *acpi_device_data_of_node(struct fwnode_handle *f
* @propname: Name of the property.
* @valptr: Location to store a pointer to the property value (if not %NULL).
*/
-int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
- void **valptr)
+int acpi_node_prop_get(const struct fwnode_handle *fwnode,
+ const char *propname, void **valptr)
{
return acpi_data_get_property(acpi_device_data_of_node(fwnode),
propname, ACPI_TYPE_ANY,
@@ -512,7 +518,7 @@ int acpi_node_prop_get(struct fwnode_handle *fwnode, const char *propname,
* %-EPROTO if the property is not a package or the type of its elements
* doesn't match @type.
*/
-static int acpi_data_get_property_array(struct acpi_device_data *data,
+static int acpi_data_get_property_array(const struct acpi_device_data *data,
const char *name,
acpi_object_type type,
const union acpi_object **obj)
@@ -572,13 +578,13 @@ static int acpi_data_get_property_array(struct acpi_device_data *data,
*
* Return: %0 on success, negative error code on failure.
*/
-int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
+int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *propname, size_t index, size_t num_args,
struct acpi_reference_args *args)
{
const union acpi_object *element, *end;
const union acpi_object *obj;
- struct acpi_device_data *data;
+ const struct acpi_device_data *data;
struct acpi_device *device;
int ret, idx = 0;
@@ -674,7 +680,7 @@ int __acpi_node_get_property_reference(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference);
-static int acpi_data_prop_read_single(struct acpi_device_data *data,
+static int acpi_data_prop_read_single(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype, void *val)
{
@@ -813,7 +819,7 @@ static int acpi_copy_property_array_string(const union acpi_object *items,
return nval;
}
-static int acpi_data_prop_read(struct acpi_device_data *data,
+static int acpi_data_prop_read(const struct acpi_device_data *data,
const char *propname,
enum dev_prop_type proptype,
void *val, size_t nval)
@@ -867,7 +873,7 @@ static int acpi_data_prop_read(struct acpi_device_data *data,
return ret;
}
-int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
+int acpi_dev_prop_read(const struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval)
{
return adev ? acpi_data_prop_read(&adev->data, propname, proptype, val, nval) : -EINVAL;
@@ -885,8 +891,9 @@ int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
* of the property. Otherwise, read at most @nval values to the array at the
* location pointed to by @val.
*/
-int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
- enum dev_prop_type proptype, void *val, size_t nval)
+int acpi_node_prop_read(const struct fwnode_handle *fwnode,
+ const char *propname, enum dev_prop_type proptype,
+ void *val, size_t nval)
{
return acpi_data_prop_read(acpi_device_data_of_node(fwnode),
propname, proptype, val, nval);
@@ -897,13 +904,15 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the device's child nodes or a null handle.
*/
-struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child)
{
- struct acpi_device *adev = to_acpi_device_node(fwnode);
- struct list_head *head, *next;
+ const struct acpi_device *adev = to_acpi_device_node(fwnode);
+ struct acpi_device *child_adev = NULL;
+ const struct list_head *head;
+ struct list_head *next;
- if (!child || child->type == FWNODE_ACPI) {
+ if (!child || is_acpi_device_node(child)) {
if (adev)
head = &adev->children;
else
@@ -913,26 +922,27 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
goto nondev;
if (child) {
- adev = to_acpi_device_node(child);
- next = adev->node.next;
+ child_adev = to_acpi_device_node(child);
+ next = child_adev->node.next;
if (next == head) {
child = NULL;
goto nondev;
}
- adev = list_entry(next, struct acpi_device, node);
+ child_adev = list_entry(next, struct acpi_device, node);
} else {
- adev = list_first_entry(head, struct acpi_device, node);
+ child_adev = list_first_entry(head, struct acpi_device,
+ node);
}
- return acpi_fwnode_handle(adev);
+ return acpi_fwnode_handle(child_adev);
}
nondev:
- if (!child || child->type == FWNODE_ACPI_DATA) {
- struct acpi_data_node *data = to_acpi_data_node(fwnode);
+ if (!child || is_acpi_data_node(child)) {
+ const struct acpi_data_node *data = to_acpi_data_node(fwnode);
struct acpi_data_node *dn;
- if (adev)
- head = &adev->data.subnodes;
+ if (child_adev)
+ head = &child_adev->data.subnodes;
else if (data)
head = &data->data.subnodes;
else
@@ -963,7 +973,7 @@ struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
* Returns parent node of an ACPI device or data firmware node or %NULL if
* not available.
*/
-struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode)
+struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
{
if (is_acpi_data_node(fwnode)) {
/* All data nodes have parent pointer so just return that */
@@ -992,8 +1002,8 @@ struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode)
* %NULL if there is no next endpoint, ERR_PTR() in case of error. In case
* of success the next endpoint is returned.
*/
-struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
- struct fwnode_handle *prev)
+struct fwnode_handle *acpi_graph_get_next_endpoint(
+ const struct fwnode_handle *fwnode, struct fwnode_handle *prev)
{
struct fwnode_handle *port = NULL;
struct fwnode_handle *endpoint;
@@ -1040,14 +1050,15 @@ struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
* the child node on success, NULL otherwise.
*/
static struct fwnode_handle *acpi_graph_get_child_prop_value(
- struct fwnode_handle *fwnode, const char *prop_name, unsigned int val)
+ const struct fwnode_handle *fwnode, const char *prop_name,
+ unsigned int val)
{
struct fwnode_handle *child;
fwnode_for_each_child_node(fwnode, child) {
u32 nr;
- if (!fwnode_property_read_u32(fwnode, prop_name, &nr))
+ if (fwnode_property_read_u32(child, prop_name, &nr))
continue;
if (val == nr)
@@ -1069,17 +1080,18 @@ static struct fwnode_handle *acpi_graph_get_child_prop_value(
* fields requested by the caller. Returns %0 in case of success and
* negative errno otherwise.
*/
-int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+int acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode,
struct fwnode_handle **parent,
struct fwnode_handle **port,
struct fwnode_handle **endpoint)
{
+ struct fwnode_handle *fwnode;
unsigned int port_nr, endpoint_nr;
struct acpi_reference_args args;
int ret;
memset(&args, 0, sizeof(args));
- ret = acpi_node_get_property_reference(fwnode, "remote-endpoint", 0,
+ ret = acpi_node_get_property_reference(__fwnode, "remote-endpoint", 0,
&args);
if (ret)
return ret;
@@ -1121,7 +1133,7 @@ int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
return 0;
}
-static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode)
+static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
if (!is_acpi_device_node(fwnode))
return false;
@@ -1129,16 +1141,17 @@ static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode)
return acpi_device_is_present(to_acpi_device_node(fwnode));
}
-static bool acpi_fwnode_property_present(struct fwnode_handle *fwnode,
+static bool acpi_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !acpi_node_prop_get(fwnode, propname, NULL);
}
-static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode,
- const char *propname,
- unsigned int elem_size,
- void *val, size_t nval)
+static int
+acpi_fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ unsigned int elem_size, void *val,
+ size_t nval)
{
enum dev_prop_type type;
@@ -1162,16 +1175,17 @@ static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode,
return acpi_node_prop_read(fwnode, propname, type, val, nval);
}
-static int acpi_fwnode_property_read_string_array(struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
+static int
+acpi_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname, const char **val,
+ size_t nval)
{
return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING,
val, nval);
}
static struct fwnode_handle *
-acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+acpi_fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
const char *childname)
{
struct fwnode_handle *child;
@@ -1187,8 +1201,34 @@ acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode,
return NULL;
}
+static int
+acpi_fwnode_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int args_count, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ struct acpi_reference_args acpi_args;
+ unsigned int i;
+ int ret;
+
+ ret = __acpi_node_get_property_reference(fwnode, prop, index,
+ args_count, &acpi_args);
+ if (ret < 0)
+ return ret;
+ if (!args)
+ return 0;
+
+ args->nargs = acpi_args.nargs;
+ args->fwnode = acpi_fwnode_handle(acpi_args.adev);
+
+ for (i = 0; i < NR_FWNODE_REFERENCE_ARGS; i++)
+ args->args[i] = i < acpi_args.nargs ? acpi_args.args[i] : 0;
+
+ return 0;
+}
+
static struct fwnode_handle *
-acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+acpi_fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
struct fwnode_handle *endpoint;
@@ -1201,7 +1241,7 @@ acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
}
static struct fwnode_handle *
-acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
+acpi_fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint = NULL;
@@ -1210,7 +1250,13 @@ acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
return endpoint;
}
-static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+static struct fwnode_handle *
+acpi_fwnode_get_parent(struct fwnode_handle *fwnode)
+{
+ return acpi_node_get_parent(fwnode);
+}
+
+static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode);
@@ -1223,16 +1269,27 @@ static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
return 0;
}
-const struct fwnode_operations acpi_fwnode_ops = {
- .device_is_available = acpi_fwnode_device_is_available,
- .property_present = acpi_fwnode_property_present,
- .property_read_int_array = acpi_fwnode_property_read_int_array,
- .property_read_string_array = acpi_fwnode_property_read_string_array,
- .get_parent = acpi_node_get_parent,
- .get_next_child_node = acpi_get_next_subnode,
- .get_named_child_node = acpi_fwnode_get_named_child_node,
- .graph_get_next_endpoint = acpi_fwnode_graph_get_next_endpoint,
- .graph_get_remote_endpoint = acpi_fwnode_graph_get_remote_endpoint,
- .graph_get_port_parent = acpi_node_get_parent,
- .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint,
-};
+#define DECLARE_ACPI_FWNODE_OPS(ops) \
+ const struct fwnode_operations ops = { \
+ .device_is_available = acpi_fwnode_device_is_available, \
+ .property_present = acpi_fwnode_property_present, \
+ .property_read_int_array = \
+ acpi_fwnode_property_read_int_array, \
+ .property_read_string_array = \
+ acpi_fwnode_property_read_string_array, \
+ .get_parent = acpi_node_get_parent, \
+ .get_next_child_node = acpi_get_next_subnode, \
+ .get_named_child_node = acpi_fwnode_get_named_child_node, \
+ .get_reference_args = acpi_fwnode_get_reference_args, \
+ .graph_get_next_endpoint = \
+ acpi_fwnode_graph_get_next_endpoint, \
+ .graph_get_remote_endpoint = \
+ acpi_fwnode_graph_get_remote_endpoint, \
+ .graph_get_port_parent = acpi_fwnode_get_parent, \
+ .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, \
+ }; \
+ EXPORT_SYMBOL_GPL(ops)
+
+DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
+DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
+const struct fwnode_operations acpi_static_fwnode_ops;
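The property.c changes constify the fwnode and property accessors, switch from raw UUID byte arrays to guid_t, and split the shared fwnode_operations into device and data-node variants. None of this changes the consumer-facing API, so a driver still reads _DSD properties through the generic property helpers; a minimal, hedged sketch in which the property name is made up:

/* Illustrative only: read a _DSD integer property via the fwnode layer. */
static int example_read_dsd_prop(struct device *dev)
{
	u32 val;
	int ret;

	ret = device_property_read_u32(dev, "vendor,example-prop", &val);
	if (ret)
		return ret;

	dev_info(dev, "example-prop = %u\n", val);
	return 0;
}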
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index cd4c4271dc4c..d85e010ee2cc 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,6 +573,35 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares,
return AE_OK;
}
+static int __acpi_dev_get_resources(struct acpi_device *adev,
+ struct list_head *list,
+ int (*preproc)(struct acpi_resource *, void *),
+ void *preproc_data, char *method)
+{
+ struct res_proc_context c;
+ acpi_status status;
+
+ if (!adev || !adev->handle || !list_empty(list))
+ return -EINVAL;
+
+ if (!acpi_has_method(adev->handle, method))
+ return 0;
+
+ c.list = list;
+ c.preproc = preproc;
+ c.preproc_data = preproc_data;
+ c.count = 0;
+ c.error = 0;
+ status = acpi_walk_resources(adev->handle, method,
+ acpi_dev_process_resource, &c);
+ if (ACPI_FAILURE(status)) {
+ acpi_dev_free_resource_list(list);
+ return c.error ? c.error : -EIO;
+ }
+
+ return c.count;
+}
+
/**
* acpi_dev_get_resources - Get current resources of a device.
* @adev: ACPI device node to get the resources for.
@@ -601,30 +630,45 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
int (*preproc)(struct acpi_resource *, void *),
void *preproc_data)
{
- struct res_proc_context c;
- acpi_status status;
+ return __acpi_dev_get_resources(adev, list, preproc, preproc_data,
+ METHOD_NAME__CRS);
+}
+EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
- if (!adev || !adev->handle || !list_empty(list))
- return -EINVAL;
+static int is_memory(struct acpi_resource *ares, void *not_used)
+{
+ struct resource_win win;
+ struct resource *res = &win.res;
- if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
- return 0;
+ memset(&win, 0, sizeof(win));
- c.list = list;
- c.preproc = preproc;
- c.preproc_data = preproc_data;
- c.count = 0;
- c.error = 0;
- status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
- acpi_dev_process_resource, &c);
- if (ACPI_FAILURE(status)) {
- acpi_dev_free_resource_list(list);
- return c.error ? c.error : -EIO;
- }
+ return !(acpi_dev_resource_memory(ares, res)
+ || acpi_dev_resource_address_space(ares, &win)
+ || acpi_dev_resource_ext_address_space(ares, &win));
+}
- return c.count;
+/**
+ * acpi_dev_get_dma_resources - Get current DMA resources of a device.
+ * @adev: ACPI device node to get the resources for.
+ * @list: Head of the resultant list of resources (must be empty).
+ *
+ * Evaluate the _DMA method for the given device node and process its
+ * output.
+ *
+ * The resultant struct resource objects are put on the list pointed to
+ * by @list, that must be empty initially, as members of struct
+ * resource_entry objects. Callers of this routine should use
+ * %acpi_dev_free_resource_list() to free that list.
+ *
+ * The number of resources in the output list is returned on success,
+ * an error code reflecting the error condition is returned otherwise.
+ */
+int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
+{
+ return __acpi_dev_get_resources(adev, list, is_memory, NULL,
+ METHOD_NAME__DMA);
}
-EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
+EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource
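The new acpi_dev_get_dma_resources() mirrors acpi_dev_get_resources() but evaluates _DMA instead of _CRS and keeps only memory and address-space descriptors. A hedged usage sketch that follows the list handling the kernel-doc above prescribes; the function name is illustrative:

/* Illustrative only: collect and print the _DMA windows of an ACPI device. */
static int example_dump_dma_windows(struct acpi_device *adev)
{
	struct resource_entry *rentry;
	LIST_HEAD(list);
	int count;

	count = acpi_dev_get_dma_resources(adev, &list);
	if (count <= 0)
		return count;	/* 0: no _DMA method, < 0: error */

	list_for_each_entry(rentry, &list, node)
		pr_info("DMA window %pR, offset %#llx\n", rentry->res,
			(unsigned long long)rentry->offset);

	acpi_dev_free_resource_list(&list);
	return 0;
}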
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index ad0b13ad4bbb..a2428e9462dd 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -31,7 +31,7 @@
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/power_supply.h>
-#include <linux/dmi.h>
+#include <linux/platform_data/x86/apple.h>
#include "sbshc.h"
#include "battery.h"
@@ -58,8 +58,6 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-static bool sbs_manager_broken;
-
#define MAX_SBS_BAT 4
#define ACPI_SBS_BLOCK_MAX 32
@@ -476,7 +474,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
return count;
}
-static struct device_attribute alarm_attr = {
+static const struct device_attribute alarm_attr = {
.attr = {.name = "alarm", .mode = 0644},
.show = acpi_battery_alarm_show,
.store = acpi_battery_alarm_store,
@@ -632,31 +630,12 @@ static void acpi_sbs_callback(void *context)
}
}
-static int disable_sbs_manager(const struct dmi_system_id *d)
-{
- sbs_manager_broken = true;
- return 0;
-}
-
-static struct dmi_system_id acpi_sbs_dmi_table[] = {
- {
- .callback = disable_sbs_manager,
- .ident = "Apple",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc.")
- },
- },
- { },
-};
-
static int acpi_sbs_add(struct acpi_device *device)
{
struct acpi_sbs *sbs;
int result = 0;
int id;
- dmi_check_system(acpi_sbs_dmi_table);
-
sbs = kzalloc(sizeof(struct acpi_sbs), GFP_KERNEL);
if (!sbs) {
result = -ENOMEM;
@@ -677,7 +656,7 @@ static int acpi_sbs_add(struct acpi_device *device)
result = 0;
- if (!sbs_manager_broken) {
+ if (!x86_apple_machine) {
result = acpi_manager_get_info(sbs);
if (!result) {
sbs->manager_present = 1;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 33897298f03e..602f8ff212f2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <linux/nls.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/x86/apple.h>
#include <asm/pgtable.h>
@@ -1360,6 +1361,85 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
}
/**
+ * acpi_dma_get_range() - Get device DMA parameters.
+ *
+ * @dev: device to configure
+ * @dma_addr: pointer device DMA address result
+ * @offset: pointer to the DMA offset result
+ * @size: pointer to DMA range size result
+ *
+ * Evaluate DMA regions and return respectively DMA region start, offset
+ * and size in dma_addr, offset and size on parsing success; it does not
+ * update the passed in values on failure.
+ *
+ * Return 0 on success, < 0 on failure.
+ */
+int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
+ u64 *size)
+{
+ struct acpi_device *adev;
+ LIST_HEAD(list);
+ struct resource_entry *rentry;
+ int ret;
+ struct device *dma_dev = dev;
+ u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
+
+ /*
+ * Walk the device tree chasing an ACPI companion with a _DMA
+ * object while we go. Stop if we find a device with an ACPI
+ * companion containing a _DMA method.
+ */
+ do {
+ adev = ACPI_COMPANION(dma_dev);
+ if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
+ break;
+
+ dma_dev = dma_dev->parent;
+ } while (dma_dev);
+
+ if (!dma_dev)
+ return -ENODEV;
+
+ if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
+ acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
+ return -EINVAL;
+ }
+
+ ret = acpi_dev_get_dma_resources(adev, &list);
+ if (ret > 0) {
+ list_for_each_entry(rentry, &list, node) {
+ if (dma_offset && rentry->offset != dma_offset) {
+ ret = -EINVAL;
+ dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
+ goto out;
+ }
+ dma_offset = rentry->offset;
+
+ /* Take lower and upper limits */
+ if (rentry->res->start < dma_start)
+ dma_start = rentry->res->start;
+ if (rentry->res->end > dma_end)
+ dma_end = rentry->res->end;
+ }
+
+ if (dma_start >= dma_end) {
+ ret = -EINVAL;
+ dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
+ goto out;
+ }
+
+ *dma_addr = dma_start - dma_offset;
+ len = dma_end - dma_start;
+ *size = max(len, len + 1);
+ *offset = dma_offset;
+ }
+ out:
+ acpi_dev_free_resource_list(&list);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+/**
* acpi_dma_configure - Set-up DMA configuration for the device.
* @dev: The pointer to the device
* @attr: device dma attributes
@@ -1367,20 +1447,16 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
{
const struct iommu_ops *iommu;
- u64 size;
+ u64 dma_addr = 0, size = 0;
- iort_set_dma_mask(dev);
+ iort_dma_setup(dev, &dma_addr, &size);
iommu = iort_iommu_configure(dev);
if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER;
- size = max(dev->coherent_dma_mask, dev->coherent_dma_mask + 1);
- /*
- * Assume dma valid range starts at 0 and covers the whole
- * coherent_dma_mask.
- */
- arch_setup_dma_ops(dev, 0, size, iommu, attr == DEV_DMA_COHERENT);
+ arch_setup_dma_ops(dev, dma_addr, size,
+ iommu, attr == DEV_DMA_COHERENT);
return 0;
}
@@ -1452,6 +1528,12 @@ static bool acpi_is_spi_i2c_slave(struct acpi_device *device)
struct list_head resource_list;
bool is_spi_i2c_slave = false;
+ /* Macs use device properties in lieu of _CRS resources */
+ if (x86_apple_machine &&
+ (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
+ fwnode_property_present(&device->fwnode, "i2cAddress")))
+ return true;
+
INIT_LIST_HEAD(&resource_list);
acpi_dev_get_resources(device, &resource_list, acpi_check_spi_i2c_slave,
&is_spi_i2c_slave);
@@ -1467,8 +1549,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device->device_type = type;
device->handle = handle;
device->parent = acpi_bus_get_parent(handle);
- device->fwnode.type = FWNODE_ACPI;
- device->fwnode.ops = &acpi_fwnode_ops;
+ device->fwnode.ops = &acpi_device_fwnode_ops;
acpi_set_device_status(device, sta);
acpi_device_get_busid(device);
acpi_set_pnp_ids(handle, &device->pnp, type);
@@ -2058,6 +2139,9 @@ int __init acpi_scan_init(void)
acpi_get_spcr_uart_addr();
}
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
@@ -2082,10 +2166,6 @@ int __init acpi_scan_init(void)
}
}
- acpi_gpe_apply_masked_gpes();
- acpi_update_all_gpes();
- acpi_ec_ecdt_start();
-
acpi_scan_initialized = true;
out:
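acpi_dma_get_range() walks up the device hierarchy until it finds an ACPI companion with a _DMA object, then derives the DMA address, offset and size that acpi_dma_configure() now passes to arch_setup_dma_ops() instead of assuming a zero-based range covering the coherent DMA mask. A minimal, hedged caller sketch; the wrapper function is hypothetical:

/* Illustrative only: report the _DMA-derived window for a device. */
static void example_report_dma_range(struct device *dev)
{
	u64 dma_addr = 0, offset = 0, size = 0;

	if (acpi_dma_get_range(dev, &dma_addr, &offset, &size))
		return;		/* no _DMA information available */

	dev_dbg(dev, "DMA addr %#llx, offset %#llx, size %#llx\n",
		dma_addr, offset, size);
}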
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fa8243c5c062..6804ddab3052 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -160,7 +160,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
-static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
+static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
{
.callback = init_old_suspend_ordering,
.ident = "Abit KN9 (nForce4 variant)",
@@ -669,6 +669,7 @@ static const struct acpi_device_id lps0_device_ids[] = {
#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
#define ACPI_LPS0_SCREEN_OFF 3
#define ACPI_LPS0_SCREEN_ON 4
#define ACPI_LPS0_ENTRY 5
@@ -680,6 +681,166 @@ static acpi_handle lps0_device_handle;
static guid_t lps0_dsm_guid;
static char lps0_dsm_func_mask;
+/* Device constraint entry structure */
+struct lpi_device_info {
+ char *name;
+ int enabled;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint {
+ int uid;
+ int min_dstate;
+ int function_states;
+};
+
+struct lpi_constraints {
+ acpi_handle handle;
+ int min_dstate;
+};
+
+static struct lpi_constraints *lpi_constraints_table;
+static int lpi_constraints_table_size;
+
+static void lpi_device_get_constraints(void)
+{
+ union acpi_object *out_obj;
+ int i;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
+ if (!out_obj)
+ return;
+
+ lpi_constraints_table = kcalloc(out_obj->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ struct lpi_constraints *constraint;
+ acpi_status status;
+ union acpi_object *package = &out_obj->package.elements[i];
+ struct lpi_device_info info = { };
+ int package_count = 0, j;
+
+ if (!package)
+ continue;
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *element =
+ &(package->package.elements[j]);
+
+ switch (element->type) {
+ case ACPI_TYPE_INTEGER:
+ info.enabled = element->integer.value;
+ break;
+ case ACPI_TYPE_STRING:
+ info.name = element->string.pointer;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ package_count = element->package.count;
+ info.package = element->package.elements;
+ break;
+ }
+ }
+
+ if (!info.enabled || !info.package || !info.name)
+ continue;
+
+ constraint = &lpi_constraints_table[lpi_constraints_table_size];
+
+ status = acpi_get_handle(NULL, info.name, &constraint->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "index:%d Name:%s\n", i, info.name);
+
+ constraint->min_dstate = -1;
+
+ for (j = 0; j < package_count; ++j) {
+ union acpi_object *info_obj = &info.package[j];
+ union acpi_object *cnstr_pkg;
+ union acpi_object *obj;
+ struct lpi_device_constraint dev_info;
+
+ switch (info_obj->type) {
+ case ACPI_TYPE_INTEGER:
+ /* version */
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (info_obj->package.count < 2)
+ break;
+
+ cnstr_pkg = info_obj->package.elements;
+ obj = &cnstr_pkg[0];
+ dev_info.uid = obj->integer.value;
+ obj = &cnstr_pkg[1];
+ dev_info.min_dstate = obj->integer.value;
+
+ acpi_handle_debug(lps0_device_handle,
+ "uid:%d min_dstate:%s\n",
+ dev_info.uid,
+ acpi_power_state_string(dev_info.min_dstate));
+
+ constraint->min_dstate = dev_info.min_dstate;
+ break;
+ }
+ }
+
+ if (constraint->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+
+ lpi_constraints_table_size++;
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_check_constraints(void)
+{
+ int i;
+
+ for (i = 0; i < lpi_constraints_table_size; ++i) {
+ struct acpi_device *adev;
+
+ if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev))
+ continue;
+
+ acpi_handle_debug(adev->handle,
+ "LPI: required min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+
+ if (!adev->flags.power_manageable) {
+ acpi_handle_info(adev->handle, "LPI: Device not power manageable\n");
+ continue;
+ }
+
+ if (adev->power.state < lpi_constraints_table[i].min_dstate)
+ acpi_handle_info(adev->handle,
+ "LPI: Constraint not met; min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+ }
+}
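With pm_debug_messages enabled, the loop above emits one line per constrained device following the format strings used here; for example (device and power states purely hypothetical):

    LPI: required min power state:D3hot current power state:D0
    LPI: Constraint not met; min power state:D3hot current power state:D0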
+
static void acpi_sleep_run_lps0_dsm(unsigned int func)
{
union acpi_object *out_obj;
@@ -714,6 +875,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) {
lps0_dsm_func_mask = bitmask;
lps0_device_handle = adev->handle;
+ /*
+ * Use suspend-to-idle by default if the default
+ * suspend mode was not set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
}
acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
@@ -723,6 +890,9 @@ static int lps0_device_attach(struct acpi_device *adev,
"_DSM function 0 evaluation failed\n");
}
ACPI_FREE(out_obj);
+
+ lpi_device_get_constraints();
+
return 0;
}
@@ -731,14 +901,14 @@ static struct acpi_scan_handler lps0_handler = {
.attach = lps0_device_attach,
};
-static int acpi_freeze_begin(void)
+static int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
s2idle_in_progress = true;
return 0;
}
-static int acpi_freeze_prepare(void)
+static int acpi_s2idle_prepare(void)
{
if (lps0_device_handle) {
acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
@@ -758,8 +928,12 @@ static int acpi_freeze_prepare(void)
return 0;
}
-static void acpi_freeze_wake(void)
+static void acpi_s2idle_wake(void)
{
+
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
/*
* If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means
* that the SCI has triggered while suspended, so cancel the wakeup in
@@ -772,7 +946,7 @@ static void acpi_freeze_wake(void)
}
}
-static void acpi_freeze_sync(void)
+static void acpi_s2idle_sync(void)
{
/*
* Process all pending events in case there are any wakeup ones.
@@ -785,7 +959,7 @@ static void acpi_freeze_sync(void)
s2idle_wakeup = false;
}
-static void acpi_freeze_restore(void)
+static void acpi_s2idle_restore(void)
{
if (acpi_sci_irq_valid())
disable_irq_wake(acpi_sci_irq);
@@ -798,19 +972,19 @@ static void acpi_freeze_restore(void)
}
}
-static void acpi_freeze_end(void)
+static void acpi_s2idle_end(void)
{
s2idle_in_progress = false;
acpi_scan_lock_release();
}
-static const struct platform_freeze_ops acpi_freeze_ops = {
- .begin = acpi_freeze_begin,
- .prepare = acpi_freeze_prepare,
- .wake = acpi_freeze_wake,
- .sync = acpi_freeze_sync,
- .restore = acpi_freeze_restore,
- .end = acpi_freeze_end,
+static const struct platform_s2idle_ops acpi_s2idle_ops = {
+ .begin = acpi_s2idle_begin,
+ .prepare = acpi_s2idle_prepare,
+ .wake = acpi_s2idle_wake,
+ .sync = acpi_s2idle_sync,
+ .restore = acpi_s2idle_restore,
+ .end = acpi_s2idle_end,
};
static void acpi_sleep_suspend_setup(void)
@@ -825,7 +999,7 @@ static void acpi_sleep_suspend_setup(void)
&acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_scan_add_handler(&lps0_handler);
- freeze_set_ops(&acpi_freeze_ops);
+ s2idle_set_ops(&acpi_s2idle_ops);
}
#else /* !CONFIG_SUSPEND */
@@ -870,7 +1044,7 @@ static struct syscore_ops acpi_sleep_syscore_ops = {
.resume = acpi_restore_bm_rld,
};
-void acpi_sleep_syscore_init(void)
+static void acpi_sleep_syscore_init(void)
{
register_syscore_ops(&acpi_sleep_syscore_ops);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 98aa8c808a33..324b35bfe781 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -53,17 +53,24 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h)
*/
static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb)
{
+ bool xgene_8250 = false;
+
if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE)
return false;
- if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE))
+ if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE) &&
+ memcmp(tb->header.oem_id, "HPE ", ACPI_OEM_ID_SIZE))
return false;
if (!memcmp(tb->header.oem_table_id, "XGENESPC",
ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0)
- return true;
+ xgene_8250 = true;
- return false;
+ if (!memcmp(tb->header.oem_table_id, "ProLiant",
+ ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 1)
+ xgene_8250 = true;
+
+ return xgene_8250;
}
/**
@@ -105,16 +112,17 @@ int __init parse_spcr(bool earlycon)
}
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- switch (table->serial_port.access_width) {
+ switch (ACPI_ACCESS_BIT_WIDTH((
+ table->serial_port.access_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
- case ACPI_ACCESS_SIZE_BYTE:
+ case 8:
iotype = "mmio";
break;
- case ACPI_ACCESS_SIZE_WORD:
+ case 16:
iotype = "mmio16";
break;
- case ACPI_ACCESS_SIZE_DWORD:
+ case 32:
iotype = "mmio32";
break;
}
@@ -181,11 +189,19 @@ int __init parse_spcr(bool earlycon)
uart = "qdf2400_e44";
}
- if (xgene_8250_erratum_present(table))
+ if (xgene_8250_erratum_present(table)) {
iotype = "mmio32";
- snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
- table->serial_port.address, baud_rate);
+ /* For X-Gene v1 and v2 we don't know the clock rate of the
+ * UART, so don't attempt to switch to the baud rate stated
+ * in the table because the driver cannot calculate the dividers.
+ */
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
+ table->serial_port.address);
+ } else {
+ snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype,
+ table->serial_port.address, baud_rate);
+ }
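For example (driver name, address and rate hypothetical), the X-Gene path builds a console option string such as "uart,mmio32,0x1c020000" with no baud rate, while the normal path keeps it, e.g. "uart,mmio32,0x1c020000,115200".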
pr_info("console: %s\n", opts);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index e414fabf7315..78a5a23010ab 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -2,6 +2,8 @@
* sysfs.c - ACPI sysfs interface to userspace.
*/
+#define pr_fmt(fmt) "ACPI: " fmt
+
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
@@ -306,11 +308,13 @@ module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
* ACPI table sysfs I/F:
* /sys/firmware/acpi/tables/
+ * /sys/firmware/acpi/tables/data/
* /sys/firmware/acpi/tables/dynamic/
*/
static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
+static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;
@@ -325,6 +329,11 @@ struct acpi_table_attr {
struct list_head node;
};
+struct acpi_data_attr {
+ struct bin_attribute attr;
+ u64 addr;
+};
+
static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr, char *buf,
loff_t offset, size_t count)
@@ -420,6 +429,70 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
return AE_OK;
}
+static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct acpi_data_attr *data_attr;
+ void __iomem *base;
+ ssize_t rc;
+
+ data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
+
+ base = acpi_os_map_memory(data_attr->addr, data_attr->attr.size);
+ if (!base)
+ return -ENOMEM;
+ rc = memory_read_from_buffer(buf, count, &offset, base,
+ data_attr->attr.size);
+ acpi_os_unmap_memory(base, data_attr->attr.size);
+
+ return rc;
+}
+
+static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
+{
+ struct acpi_table_bert *bert = th;
+
+ if (bert->header.length < sizeof(struct acpi_table_bert) ||
+ bert->region_length < sizeof(struct acpi_hest_generic_status)) {
+ kfree(data_attr);
+ return -EINVAL;
+ }
+ data_attr->addr = bert->address;
+ data_attr->attr.size = bert->region_length;
+ data_attr->attr.attr.name = "BERT";
+
+ return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
+}
+
+static struct acpi_data_obj {
+ char *name;
+ int (*fn)(void *, struct acpi_data_attr *);
+} acpi_data_objs[] = {
+ { ACPI_SIG_BERT, acpi_bert_data_init },
+};
+
+#define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
+
+static int acpi_table_data_init(struct acpi_table_header *th)
+{
+ struct acpi_data_attr *data_attr;
+ int i;
+
+ for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
+ if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
+ data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
+ if (!data_attr)
+ return -ENOMEM;
+ sysfs_attr_init(&data_attr->attr.attr);
+ data_attr->attr.read = acpi_data_show;
+ data_attr->attr.attr.mode = 0400;
+ return acpi_data_objs[i].fn(th, data_attr);
+ }
+ }
+ return 0;
+}
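The net effect is a root-only binary attribute at /sys/firmware/acpi/tables/data/BERT exposing the raw boot error region. A hypothetical userspace reader (sketch only, not part of this patch) could treat it like any other sysfs binary file:

    /* Sketch: read the BERT boot error region from sysfs (requires root, mode 0400). */
    #include <fcntl.h>
    #include <unistd.h>

    static ssize_t read_bert(char *buf, size_t len)
    {
            int fd = open("/sys/firmware/acpi/tables/data/BERT", O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return -1;
            n = read(fd, buf, len);         /* raw error status records */
            close(fd);
            return n;
    }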
+
static int acpi_tables_sysfs_init(void)
{
struct acpi_table_attr *table_attr;
@@ -432,6 +505,10 @@ static int acpi_tables_sysfs_init(void)
if (!tables_kobj)
goto err;
+ tables_data_kobj = kobject_create_and_add("data", tables_kobj);
+ if (!tables_data_kobj)
+ goto err_tables_data;
+
dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
if (!dynamic_tables_kobj)
goto err_dynamic_tables;
@@ -456,13 +533,17 @@ static int acpi_tables_sysfs_init(void)
return ret;
}
list_add_tail(&table_attr->node, &acpi_table_attr_list);
+ acpi_table_data_init(table_header);
}
kobject_uevent(tables_kobj, KOBJ_ADD);
+ kobject_uevent(tables_data_kobj, KOBJ_ADD);
kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
return 0;
err_dynamic_tables:
+ kobject_put(tables_data_kobj);
+err_tables_data:
kobject_put(tables_kobj);
err:
return -ENOMEM;
@@ -552,11 +633,15 @@ static void fixed_event_count(u32 event_number)
static void acpi_global_event_handler(u32 event_type, acpi_handle device,
u32 event_number, void *context)
{
- if (event_type == ACPI_EVENT_TYPE_GPE)
+ if (event_type == ACPI_EVENT_TYPE_GPE) {
gpe_count(event_number);
-
- if (event_type == ACPI_EVENT_TYPE_FIXED)
+ pr_debug("GPE event 0x%02x\n", event_number);
+ } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
fixed_event_count(event_number);
+ pr_debug("Fixed event 0x%02x\n", event_number);
+ } else {
+ pr_debug("Other event 0x%02x\n", event_number);
+ }
}
static int get_status(u32 index, acpi_event_status *status,
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index ff425390bfa8..80ce2a7d224b 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -740,10 +740,10 @@ int __init acpi_table_init(void)
if (acpi_verify_table_checksum) {
pr_info("Early table checksum verification enabled\n");
- acpi_gbl_verify_table_checksum = TRUE;
+ acpi_gbl_enable_table_validation = TRUE;
} else {
pr_info("Early table checksum verification disabled\n");
- acpi_gbl_verify_table_checksum = FALSE;
+ acpi_gbl_enable_table_validation = FALSE;
}
status = acpi_initialize_tables(initial_tables, ACPI_MAX_TABLES, 0);
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 1d0417b87cb7..551b71a24b85 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -1209,7 +1209,7 @@ static int thermal_psv(const struct dmi_system_id *d) {
return 0;
}
-static struct dmi_system_id thermal_dmi_table[] __initdata = {
+static const struct dmi_system_id thermal_dmi_table[] __initconst = {
/*
* Award BIOS on this AOpen makes thermal control almost worthless.
* http://bugzilla.kernel.org/show_bug.cgi?id=8842
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index b9d956c916f5..0a9e5979aaa9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -816,3 +816,39 @@ static int __init acpi_backlight(char *str)
return 1;
}
__setup("acpi_backlight=", acpi_backlight);
+
+/**
+ * acpi_match_platform_list - Check if the system matches with a given list
+ * @plat: pointer to acpi_platform_list table terminated by a NULL entry
+ *
+ * Return the matched index if the system is found in the platform list.
+ * Otherwise, return a negative error code.
+ */
+int acpi_match_platform_list(const struct acpi_platform_list *plat)
+{
+ struct acpi_table_header hdr;
+ int idx = 0;
+
+ if (acpi_disabled)
+ return -ENODEV;
+
+ for (; plat->oem_id[0]; plat++, idx++) {
+ if (ACPI_FAILURE(acpi_get_table_header(plat->table, 0, &hdr)))
+ continue;
+
+ if (strncmp(plat->oem_id, hdr.oem_id, ACPI_OEM_ID_SIZE))
+ continue;
+
+ if (strncmp(plat->oem_table_id, hdr.oem_table_id, ACPI_OEM_TABLE_ID_SIZE))
+ continue;
+
+ if ((plat->pred == all_versions) ||
+ (plat->pred == less_than_or_equal && hdr.oem_revision <= plat->oem_revision) ||
+ (plat->pred == greater_than_or_equal && hdr.oem_revision >= plat->oem_revision) ||
+ (plat->pred == equal && hdr.oem_revision == plat->oem_revision))
+ return idx;
+ }
+
+ return -ENODEV;
+}
+EXPORT_SYMBOL(acpi_match_platform_list);
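A minimal usage sketch (names and OEM strings hypothetical; only the acpi_platform_list fields referenced above are assumed):

    #include <linux/acpi.h>

    static const struct acpi_platform_list quirk_platforms[] __initconst = {
            /* Hypothetical: FADT from "VENDOR"/"TABLE_ID" at OEM revision >= 3 */
            { .oem_id = "VENDOR", .oem_table_id = "TABLE_ID",
              .oem_revision = 3, .table = ACPI_SIG_FADT,
              .pred = greater_than_or_equal },
            { }     /* terminator */
    };

    static int __init apply_platform_quirk(void)
    {
            if (acpi_match_platform_list(quirk_platforms) < 0)
                    return 0;       /* not one of the listed platforms */

            /* ... enable the quirk for the matched platform ... */
            return 0;
    }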
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index d179e8d9177d..601e5d372887 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -103,6 +103,12 @@ static int video_detect_force_native(const struct dmi_system_id *d)
return 0;
}
+static int video_detect_force_none(const struct dmi_system_id *d)
+{
+ acpi_backlight_dmi = acpi_backlight_none;
+ return 0;
+}
+
static const struct dmi_system_id video_detect_dmi_table[] = {
/* On Samsung X360, the BIOS will set a flag (VDRV) if generic
* ACPI backlight device is used. This flag will definitively break
@@ -313,6 +319,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
},
},
+ {
+ .callback = video_detect_force_none,
+ .ident = "Dell OptiPlex 9020M",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 9020M"),
+ },
+ },
{ },
};
diff --git a/drivers/acpi/x86/apple.c b/drivers/acpi/x86/apple.c
new file mode 100644
index 000000000000..51b4cf9f25da
--- /dev/null
+++ b/drivers/acpi/x86/apple.c
@@ -0,0 +1,141 @@
+/*
+ * apple.c - Apple ACPI quirks
+ * Copyright (C) 2017 Lukas Wunner <lukas@wunner.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2) as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitmap.h>
+#include <linux/platform_data/x86/apple.h>
+#include <linux/uuid.h>
+
+/* Apple _DSM device properties GUID */
+static const guid_t apple_prp_guid =
+ GUID_INIT(0xa0b5b7c6, 0x1318, 0x441c,
+ 0xb0, 0xc9, 0xfe, 0x69, 0x5e, 0xaf, 0x94, 0x9b);
+
+/**
+ * acpi_extract_apple_properties - retrieve and convert Apple _DSM properties
+ * @adev: ACPI device for which to retrieve the properties
+ *
+ * Invoke Apple's custom _DSM once to check the protocol version and once more
+ * to retrieve the properties. They are marshalled up in a single package as
+ * alternating key/value elements, unlike _DSD which stores them as a package
+ * of 2-element packages. Convert to _DSD format and make them available under
+ * the primary fwnode.
+ */
+void acpi_extract_apple_properties(struct acpi_device *adev)
+{
+ unsigned int i, j = 0, newsize = 0, numprops, numvalid;
+ union acpi_object *props, *newprops;
+ unsigned long *valid = NULL;
+ void *free_space;
+
+ if (!x86_apple_machine)
+ return;
+
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 0,
+ NULL, ACPI_TYPE_BUFFER);
+ if (!props)
+ return;
+
+ if (!props->buffer.length)
+ goto out_free;
+
+ if (props->buffer.pointer[0] != 3) {
+ acpi_handle_info(adev->handle, FW_INFO
+ "unsupported properties version %*ph\n",
+ props->buffer.length, props->buffer.pointer);
+ goto out_free;
+ }
+
+ ACPI_FREE(props);
+ props = acpi_evaluate_dsm_typed(adev->handle, &apple_prp_guid, 1, 1,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!props)
+ return;
+
+ numprops = props->package.count / 2;
+ if (!numprops)
+ goto out_free;
+
+ valid = kcalloc(BITS_TO_LONGS(numprops), sizeof(long), GFP_KERNEL);
+ if (!valid)
+ goto out_free;
+
+ /* newsize = key length + value length of each tuple */
+ for (i = 0; i < numprops; i++) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+
+ if (key->type != ACPI_TYPE_STRING ||
+ (val->type != ACPI_TYPE_INTEGER &&
+ val->type != ACPI_TYPE_BUFFER))
+ continue; /* skip invalid properties */
+
+ __set_bit(i, valid);
+ newsize += key->string.length + 1;
+ if (val->type == ACPI_TYPE_BUFFER)
+ newsize += val->buffer.length;
+ }
+
+ numvalid = bitmap_weight(valid, numprops);
+ if (numprops > numvalid)
+ acpi_handle_info(adev->handle, FW_INFO
+ "skipped %u properties: wrong type\n",
+ numprops - numvalid);
+ if (numvalid == 0)
+ goto out_free;
+
+ /* newsize += top-level package + 3 objects for each key/value tuple */
+ newsize += (1 + 3 * numvalid) * sizeof(union acpi_object);
+ newprops = ACPI_ALLOCATE_ZEROED(newsize);
+ if (!newprops)
+ goto out_free;
+
+ /* layout: top-level package | packages | key/value tuples | strings */
+ newprops->type = ACPI_TYPE_PACKAGE;
+ newprops->package.count = numvalid;
+ newprops->package.elements = &newprops[1];
+ free_space = &newprops[1 + 3 * numvalid];
+
+ for_each_set_bit(i, valid, numprops) {
+ union acpi_object *key = &props->package.elements[i * 2];
+ union acpi_object *val = &props->package.elements[i * 2 + 1];
+ unsigned int k = 1 + numvalid + j * 2; /* index into newprops */
+ unsigned int v = k + 1;
+
+ newprops[1 + j].type = ACPI_TYPE_PACKAGE;
+ newprops[1 + j].package.count = 2;
+ newprops[1 + j].package.elements = &newprops[k];
+
+ newprops[k].type = ACPI_TYPE_STRING;
+ newprops[k].string.length = key->string.length;
+ newprops[k].string.pointer = free_space;
+ memcpy(free_space, key->string.pointer, key->string.length);
+ free_space += key->string.length + 1;
+
+ newprops[v].type = val->type;
+ if (val->type == ACPI_TYPE_INTEGER) {
+ newprops[v].integer.value = val->integer.value;
+ } else {
+ newprops[v].buffer.length = val->buffer.length;
+ newprops[v].buffer.pointer = free_space;
+ memcpy(free_space, val->buffer.pointer,
+ val->buffer.length);
+ free_space += val->buffer.length;
+ }
+ j++; /* count valid properties */
+ }
+ WARN_ON(free_space != (void *)newprops + newsize);
+
+ adev->data.properties = newprops;
+ adev->data.pointer = newprops;
+
+out_free:
+ ACPI_FREE(props);
+ kfree(valid);
+}
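After this conversion, the Apple properties are visible through the generic device-property API. A hypothetical consumer (property name taken from the SPI/I2C check added to scan.c in this same diff; u32 type assumed) might do:

    #include <linux/acpi.h>
    #include <linux/property.h>

    /* Sketch: fetch the Apple-provided I2C address from an ACPI device, if any. */
    static int apple_i2c_addr(struct acpi_device *adev, u32 *addr)
    {
            if (!fwnode_property_present(&adev->fwnode, "i2cAddress"))
                    return -ENODEV;

            return fwnode_property_read_u32(&adev->fwnode, "i2cAddress", addr);
    }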
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 832e885349b1..9801d852bd56 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@ config ANDROID_BINDER_IPC
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
depends on ANDROID_BINDER_IPC
- default "binder,hwbinder"
+ default "binder,hwbinder,vndbinder"
---help---
Default value for the binder.devices parameter.
@@ -32,7 +32,7 @@ config ANDROID_BINDER_DEVICES
therefore logically separated from the other devices.
config ANDROID_BINDER_IPC_32BIT
- bool
+ bool "Use old (Android 4.4 and earlier) 32-bit binder API"
depends on !64BIT && ANDROID_BINDER_IPC
default y
---help---
@@ -44,6 +44,16 @@ config ANDROID_BINDER_IPC_32BIT
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows the binder selftest to run.
+
+ The binder selftest exhaustively checks the allocation and freeing
+ of binder buffers with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 3b7e4b072c58..a01254c43ee3 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f7665c31feca..d055b3f2a207 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -15,6 +15,40 @@
*
*/
+/*
+ * Locking overview
+ *
+ * There are 3 main spinlocks which must be acquired in the
+ * order shown:
+ *
+ * 1) proc->outer_lock : protects binder_ref
+ * binder_proc_lock() and binder_proc_unlock() are
+ * used to acq/rel.
+ * 2) node->lock : protects most fields of binder_node.
+ * binder_node_lock() and binder_node_unlock() are
+ * used to acq/rel
+ * 3) proc->inner_lock : protects the thread and node lists
+ * (proc->threads, proc->waiting_threads, proc->nodes)
+ * and all todo lists associated with the binder_proc
+ * (proc->todo, thread->todo, proc->delivered_death and
+ * node->async_todo), as well as thread->transaction_stack
+ * binder_inner_proc_lock() and binder_inner_proc_unlock()
+ * are used to acq/rel
+ *
+ * Any lock under procA must never be nested under any lock at the same
+ * level or below on procB.
+ *
+ * Functions that require a lock to be held on entry indicate the
+ * required lock in the suffix of the function name:
+ *
+ * foo_olocked() : requires proc->outer_lock
+ * foo_nlocked() : requires node->lock
+ * foo_ilocked() : requires proc->inner_lock
+ * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
+ * foo_nilocked(): requires node->lock and proc->inner_lock
+ * ...
+ */
+
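A minimal sketch of the nesting order described above, using the lock helpers introduced later in this patch (real bookkeeping omitted):

    /* Sketch only: acquire 1) proc->outer_lock, 2) node->lock, 3) proc->inner_lock. */
    static void example_locked_update(struct binder_proc *proc,
                                      struct binder_node *node)
    {
            binder_proc_lock(proc);         /* 1) protects binder_ref structures */
            binder_node_lock(node);         /* 2) protects binder_node fields */
            binder_inner_proc_lock(proc);   /* 3) protects todo/thread/node lists */

            /* ... update refs, node state and worklists here ... */

            binder_inner_proc_unlock(proc);
            binder_node_unlock(node);
            binder_proc_unlock(proc);
    }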
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/cacheflush.h>
@@ -24,7 +58,6 @@
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
-#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
@@ -35,30 +68,31 @@
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif
#include <uapi/linux/android/binder.h>
+#include "binder_alloc.h"
#include "binder_trace.h"
-static DEFINE_MUTEX(binder_main_lock);
+static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
+static DEFINE_MUTEX(binder_procs_lock);
+
static HLIST_HEAD(binder_dead_nodes);
+static DEFINE_SPINLOCK(binder_dead_nodes_lock);
static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
-static int binder_last_id;
+static atomic_t binder_last_id;
#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -88,8 +122,6 @@ BINDER_DEBUG_ENTRY(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
enum {
BINDER_DEBUG_USER_ERROR = 1U << 0,
BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
@@ -104,17 +136,13 @@ enum {
BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
BINDER_DEBUG_FREE_BUFFER = 1U << 11,
BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
- BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
+ BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
@@ -171,26 +199,27 @@ enum binder_stat_types {
};
struct binder_stats {
- int br[_IOC_NR(BR_FAILED_REPLY) + 1];
- int bc[_IOC_NR(BC_REPLY_SG) + 1];
- int obj_created[BINDER_STAT_COUNT];
- int obj_deleted[BINDER_STAT_COUNT];
+ atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
+ atomic_t obj_created[BINDER_STAT_COUNT];
+ atomic_t obj_deleted[BINDER_STAT_COUNT];
};
static struct binder_stats binder_stats;
static inline void binder_stats_deleted(enum binder_stat_types type)
{
- binder_stats.obj_deleted[type]++;
+ atomic_inc(&binder_stats.obj_deleted[type]);
}
static inline void binder_stats_created(enum binder_stat_types type)
{
- binder_stats.obj_created[type]++;
+ atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
int debug_id;
+ int debug_id_done;
int call_type;
int from_proc;
int from_thread;
@@ -200,11 +229,14 @@ struct binder_transaction_log_entry {
int to_node;
int data_size;
int offsets_size;
+ int return_error_line;
+ uint32_t return_error;
+ uint32_t return_error_param;
const char *context_name;
};
struct binder_transaction_log {
- int next;
- int full;
+ atomic_t cur;
+ bool full;
struct binder_transaction_log_entry entry[32];
};
static struct binder_transaction_log binder_transaction_log;
@@ -214,19 +246,26 @@ static struct binder_transaction_log_entry *binder_transaction_log_add(
struct binder_transaction_log *log)
{
struct binder_transaction_log_entry *e;
+ unsigned int cur = atomic_inc_return(&log->cur);
- e = &log->entry[log->next];
- memset(e, 0, sizeof(*e));
- log->next++;
- if (log->next == ARRAY_SIZE(log->entry)) {
- log->next = 0;
+ if (cur >= ARRAY_SIZE(log->entry))
log->full = 1;
- }
+ e = &log->entry[cur % ARRAY_SIZE(log->entry)];
+ WRITE_ONCE(e->debug_id_done, 0);
+ /*
+ * write-barrier to synchronize access to e->debug_id_done.
+ * We make sure the initialized 0 value is seen before
+ * the other fields are zeroed by memset().
+ */
+ smp_wmb();
+ memset(e, 0, sizeof(*e));
return e;
}
struct binder_context {
struct binder_node *binder_context_mgr_node;
+ struct mutex context_mgr_node_lock;
+
kuid_t binder_context_mgr_uid;
const char *name;
};
@@ -237,11 +276,20 @@ struct binder_device {
struct binder_context context;
};
+/**
+ * struct binder_work - work enqueued on a worklist
+ * @entry: node enqueued on list
+ * @type: type of work to be performed
+ *
+ * There are separate work lists for proc, thread, and node (async).
+ */
struct binder_work {
struct list_head entry;
+
enum {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
@@ -249,8 +297,72 @@ struct binder_work {
} type;
};
+struct binder_error {
+ struct binder_work work;
+ uint32_t cmd;
+};
+
+/**
+ * struct binder_node - binder node bookkeeping
+ * @debug_id: unique ID for debugging
+ * (invariant after initialized)
+ * @lock: lock for node fields
+ * @work: worklist element for node work
+ * (protected by @proc->inner_lock)
+ * @rb_node: element for proc->nodes tree
+ * (protected by @proc->inner_lock)
+ * @dead_node: element for binder_dead_nodes list
+ * (protected by binder_dead_nodes_lock)
+ * @proc: binder_proc that owns this node
+ * (invariant after initialized)
+ * @refs: list of references on this node
+ * (protected by @lock)
+ * @internal_strong_refs: used to take strong references when
+ * initiating a transaction
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_weak_refs: weak user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @local_strong_refs: strong user refs from local process
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @tmp_refs: temporary kernel refs
+ * (protected by @proc->inner_lock while @proc
+ * is valid, and by binder_dead_nodes_lock
+ * if @proc is NULL. During inc/dec and node release
+ * it is also protected by @lock to provide safety
+ * as the node dies and @proc becomes NULL)
+ * @ptr: userspace pointer for node
+ * (invariant, no lock needed)
+ * @cookie: userspace cookie for node
+ * (invariant, no lock needed)
+ * @has_strong_ref: userspace notified of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_strong_ref: userspace has acked notification of strong ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_weak_ref: userspace notified of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @pending_weak_ref: userspace has acked notification of weak ref
+ * (protected by @proc->inner_lock if @proc
+ * and by @lock)
+ * @has_async_transaction: async transaction to node in progress
+ * (protected by @lock)
+ * @accept_fds: file descriptor operations supported for node
+ * (invariant after initialized)
+ * @min_priority: minimum scheduling priority
+ * (invariant after initialized)
+ * @async_todo: list of async work items
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder nodes.
+ */
struct binder_node {
int debug_id;
+ spinlock_t lock;
struct binder_work work;
union {
struct rb_node rb_node;
@@ -261,88 +373,167 @@ struct binder_node {
int internal_strong_refs;
int local_weak_refs;
int local_strong_refs;
+ int tmp_refs;
binder_uintptr_t ptr;
binder_uintptr_t cookie;
- unsigned has_strong_ref:1;
- unsigned pending_strong_ref:1;
- unsigned has_weak_ref:1;
- unsigned pending_weak_ref:1;
- unsigned has_async_transaction:1;
- unsigned accept_fds:1;
- unsigned min_priority:8;
+ struct {
+ /*
+ * bitfield elements protected by
+ * proc inner_lock
+ */
+ u8 has_strong_ref:1;
+ u8 pending_strong_ref:1;
+ u8 has_weak_ref:1;
+ u8 pending_weak_ref:1;
+ };
+ struct {
+ /*
+ * invariant after initialization
+ */
+ u8 accept_fds:1;
+ u8 min_priority;
+ };
+ bool has_async_transaction;
struct list_head async_todo;
};
struct binder_ref_death {
+ /**
+ * @work: worklist element for death notifications
+ * (protected by inner_lock of the proc that
+ * this ref belongs to)
+ */
struct binder_work work;
binder_uintptr_t cookie;
};
+/**
+ * struct binder_ref_data - binder_ref counts and id
+ * @debug_id: unique ID for the ref
+ * @desc: unique userspace handle for ref
+ * @strong: strong ref count (debugging only if not locked)
+ * @weak: weak ref count (debugging only if not locked)
+ *
+ * Structure to hold ref count and ref id information. Since
+ * the actual ref can only be accessed with a lock, this structure
+ * is used to return information about the ref to callers of
+ * ref inc/dec functions.
+ */
+struct binder_ref_data {
+ int debug_id;
+ uint32_t desc;
+ int strong;
+ int weak;
+};
+
+/**
+ * struct binder_ref - struct to track references on nodes
+ * @data: binder_ref_data containing id, handle, and current refcounts
+ * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
+ * @rb_node_node: node for lookup by @node in proc's rb_tree
+ * @node_entry: list entry for node->refs list in target node
+ * (protected by @node->lock)
+ * @proc: binder_proc containing ref
+ * @node: binder_node of target node. When cleaning up a
+ * ref for deletion in binder_cleanup_ref, a non-NULL
+ * @node indicates the node must be freed
+ * @death: pointer to death notification (ref_death) if requested
+ * (protected by @node->lock)
+ *
+ * Structure to track references from procA to target node (on procB). This
+ * structure is unsafe to access without holding @proc->outer_lock.
+ */
struct binder_ref {
/* Lookups needed: */
/* node + proc => ref (transaction) */
/* desc + proc => ref (transaction, inc/dec ref) */
/* node => refs + procs (proc exit) */
- int debug_id;
+ struct binder_ref_data data;
struct rb_node rb_node_desc;
struct rb_node rb_node_node;
struct hlist_node node_entry;
struct binder_proc *proc;
struct binder_node *node;
- uint32_t desc;
- int strong;
- int weak;
struct binder_ref_death *death;
};
-struct binder_buffer {
- struct list_head entry; /* free and allocated entries by address */
- struct rb_node rb_node; /* free entry by size or allocated entry */
- /* by address */
- unsigned free:1;
- unsigned allow_user_free:1;
- unsigned async_transaction:1;
- unsigned debug_id:29;
-
- struct binder_transaction *transaction;
-
- struct binder_node *target_node;
- size_t data_size;
- size_t offsets_size;
- size_t extra_buffers_size;
- uint8_t data[0];
-};
-
enum binder_deferred_state {
BINDER_DEFERRED_PUT_FILES = 0x01,
BINDER_DEFERRED_FLUSH = 0x02,
BINDER_DEFERRED_RELEASE = 0x04,
};
+/**
+ * struct binder_proc - binder process bookkeeping
+ * @proc_node: element for binder_procs list
+ * @threads: rbtree of binder_threads in this proc
+ * (protected by @inner_lock)
+ * @nodes: rbtree of binder nodes associated with
+ * this proc ordered by node->ptr
+ * (protected by @inner_lock)
+ * @refs_by_desc: rbtree of refs ordered by ref->desc
+ * (protected by @outer_lock)
+ * @refs_by_node: rbtree of refs ordered by ref->node
+ * (protected by @outer_lock)
+ * @waiting_threads: threads currently waiting for proc work
+ * (protected by @inner_lock)
+ * @pid: PID of group_leader of process
+ * (invariant after initialized)
+ * @tsk: task_struct for group_leader of process
+ * (invariant after initialized)
+ * @files: files_struct for process
+ * (invariant after initialized)
+ * @deferred_work_node: element for binder_deferred_list
+ * (protected by binder_deferred_lock)
+ * @deferred_work: bitmap of deferred work to perform
+ * (protected by binder_deferred_lock)
+ * @is_dead: process is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @inner_lock)
+ * @todo: list of work for this process
+ * (protected by @inner_lock)
+ * @wait: wait queue head to wait for proc work
+ * (invariant after initialized)
+ * @stats: per-process binder statistics
+ * (atomics, no lock needed)
+ * @delivered_death: list of delivered death notification
+ * (protected by @inner_lock)
+ * @max_threads: cap on number of binder threads
+ * (protected by @inner_lock)
+ * @requested_threads: number of binder threads requested but not
+ * yet started. In current implementation, can
+ * only be 0 or 1.
+ * (protected by @inner_lock)
+ * @requested_threads_started: number of binder threads started
+ * (protected by @inner_lock)
+ * @tmp_ref: temporary reference to indicate proc is in use
+ * (protected by @inner_lock)
+ * @default_priority: default scheduler priority
+ * (invariant after initialized)
+ * @debugfs_entry: debugfs node
+ * @alloc: binder allocator bookkeeping
+ * @context: binder_context for this proc
+ * (invariant after initialized)
+ * @inner_lock: can nest under outer_lock and/or node lock
+ * @outer_lock: no nesting under inner or node lock
+ * Lock order: 1) outer, 2) node, 3) inner
+ *
+ * Bookkeeping structure for binder processes
+ */
struct binder_proc {
struct hlist_node proc_node;
struct rb_root threads;
struct rb_root nodes;
struct rb_root refs_by_desc;
struct rb_root refs_by_node;
+ struct list_head waiting_threads;
int pid;
- struct vm_area_struct *vma;
- struct mm_struct *vma_vm_mm;
struct task_struct *tsk;
struct files_struct *files;
struct hlist_node deferred_work_node;
int deferred_work;
- void *buffer;
- ptrdiff_t user_buffer_offset;
-
- struct list_head buffers;
- struct rb_root free_buffers;
- struct rb_root allocated_buffers;
- size_t free_async_space;
+ bool is_dead;
- struct page **pages;
- size_t buffer_size;
- uint32_t buffer_free;
struct list_head todo;
wait_queue_head_t wait;
struct binder_stats stats;
@@ -350,10 +541,13 @@ struct binder_proc {
int max_threads;
int requested_threads;
int requested_threads_started;
- int ready_threads;
+ int tmp_ref;
long default_priority;
struct dentry *debugfs_entry;
+ struct binder_alloc alloc;
struct binder_context *context;
+ spinlock_t inner_lock;
+ spinlock_t outer_lock;
};
enum {
@@ -362,22 +556,58 @@ enum {
BINDER_LOOPER_STATE_EXITED = 0x04,
BINDER_LOOPER_STATE_INVALID = 0x08,
BINDER_LOOPER_STATE_WAITING = 0x10,
- BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+ BINDER_LOOPER_STATE_POLL = 0x20,
};
+/**
+ * struct binder_thread - binder thread bookkeeping
+ * @proc: binder process for this thread
+ * (invariant after initialization)
+ * @rb_node: element for proc->threads rbtree
+ * (protected by @proc->inner_lock)
+ * @waiting_thread_node: element for @proc->waiting_threads list
+ * (protected by @proc->inner_lock)
+ * @pid: PID for this thread
+ * (invariant after initialization)
+ * @looper: bitmap of looping state
+ * (only accessed by this thread)
+ * @looper_need_return: looping thread needs to exit the driver
+ * (no lock needed)
+ * @transaction_stack: stack of in-progress transactions for this thread
+ * (protected by @proc->inner_lock)
+ * @todo: list of work to do for this thread
+ * (protected by @proc->inner_lock)
+ * @return_error: transaction errors reported by this thread
+ * (only accessed by this thread)
+ * @reply_error: transaction errors reported by target thread
+ * (protected by @proc->inner_lock)
+ * @wait: wait queue for thread work
+ * @stats: per-thread statistics
+ * (atomics, no lock needed)
+ * @tmp_ref: temporary reference to indicate thread is in use
+ * (atomic since @proc->inner_lock cannot
+ * always be acquired)
+ * @is_dead: thread is dead and awaiting free
+ * when outstanding transactions are cleaned up
+ * (protected by @proc->inner_lock)
+ *
+ * Bookkeeping structure for binder threads.
+ */
struct binder_thread {
struct binder_proc *proc;
struct rb_node rb_node;
+ struct list_head waiting_thread_node;
int pid;
- int looper;
+ int looper; /* only modified by this thread */
+ bool looper_need_return; /* can be written by other thread */
struct binder_transaction *transaction_stack;
struct list_head todo;
- uint32_t return_error; /* Write failed, return error code in read buf */
- uint32_t return_error2; /* Write failed, return error code in read */
- /* buffer. Used when sending a reply to a dead process that */
- /* we are also waiting on */
+ struct binder_error return_error;
+ struct binder_error reply_error;
wait_queue_head_t wait;
struct binder_stats stats;
+ atomic_t tmp_ref;
+ bool is_dead;
};
struct binder_transaction {
@@ -397,10 +627,253 @@ struct binder_transaction {
long priority;
long saved_priority;
kuid_t sender_euid;
+ /**
+ * @lock: protects @from, @to_proc, and @to_thread
+ *
+ * @from, @to_proc, and @to_thread can be set to NULL
+ * during thread teardown
+ */
+ spinlock_t lock;
};
+/**
+ * binder_proc_lock() - Acquire outer lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->outer_lock. Used to protect binder_ref
+ * structures associated with the given proc.
+ */
+#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
+static void
+_binder_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->outer_lock);
+}
+
+/**
+ * binder_proc_unlock() - Release spinlock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_proc_lock()
+ */
+#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
+static void
+_binder_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->outer_lock);
+}
+
+/**
+ * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Acquires proc->inner_lock. Used to protect todo lists
+ */
+#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
+static void
+_binder_inner_proc_lock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&proc->inner_lock);
+}
+
+/**
+ * binder_inner_proc_unlock() - Release inner lock for given binder_proc
+ * @proc: struct binder_proc to acquire
+ *
+ * Release lock acquired via binder_inner_proc_lock()
+ */
+#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
+static void
+_binder_inner_proc_unlock(struct binder_proc *proc, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&proc->inner_lock);
+}
+
+/**
+ * binder_node_lock() - Acquire spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. Used to protect binder_node fields
+ */
+#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
+static void
+_binder_node_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+}
+
+/**
+ * binder_node_unlock() - Release spinlock for given binder_node
+ * @node: struct binder_node to acquire
+ *
+ * Release lock acquired via binder_node_lock()
+ */
+#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
+static void
+_binder_node_unlock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_unlock(&node->lock);
+}
+
+/**
+ * binder_node_inner_lock() - Acquire node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Acquires node->lock. If node->proc is non-NULL, also acquires
+ * proc->inner_lock. Used to protect binder_node fields.
+ */
+#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
+static void
+_binder_node_inner_lock(struct binder_node *node, int line)
+{
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ spin_lock(&node->lock);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+}
+
+/**
+ * binder_node_inner_unlock() - Release node and inner locks
+ * @node: struct binder_node to acquire
+ *
+ * Release locks acquired via binder_node_inner_lock()
+ */
+#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
+static void
+_binder_node_inner_unlock(struct binder_node *node, int line)
+{
+ struct binder_proc *proc = node->proc;
+
+ binder_debug(BINDER_DEBUG_SPINLOCKS,
+ "%s: line=%d\n", __func__, line);
+ if (proc)
+ binder_inner_proc_unlock(proc);
+ spin_unlock(&node->lock);
+}
+
+static bool binder_worklist_empty_ilocked(struct list_head *list)
+{
+ return list_empty(list);
+}
+
+/**
+ * binder_worklist_empty() - Check if no items on the work list
+ * @proc: binder_proc associated with list
+ * @list: list to check
+ *
+ * Return: true if there are no items on list, else false
+ */
+static bool binder_worklist_empty(struct binder_proc *proc,
+ struct list_head *list)
+{
+ bool ret;
+
+ binder_inner_proc_lock(proc);
+ ret = binder_worklist_empty_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return ret;
+}
+
+static void
+binder_enqueue_work_ilocked(struct binder_work *work,
+ struct list_head *target_list)
+{
+ BUG_ON(target_list == NULL);
+ BUG_ON(work->entry.next && !list_empty(&work->entry));
+ list_add_tail(&work->entry, target_list);
+}
+
+/**
+ * binder_enqueue_work() - Add an item to the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to add to list
+ * @target_list: list to add work to
+ *
+ * Adds the work to the specified list. Asserts that work
+ * is not already on a list.
+ */
+static void
+binder_enqueue_work(struct binder_proc *proc,
+ struct binder_work *work,
+ struct list_head *target_list)
+{
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(work, target_list);
+ binder_inner_proc_unlock(proc);
+}
+
+static void
+binder_dequeue_work_ilocked(struct binder_work *work)
+{
+ list_del_init(&work->entry);
+}
+
+/**
+ * binder_dequeue_work() - Removes an item from the work list
+ * @proc: binder_proc associated with list
+ * @work: struct binder_work to remove from list
+ *
+ * Removes the specified work item from whatever list it is on.
+ * Can safely be called if work is not on any list.
+ */
+static void
+binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
+{
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(work);
+ binder_inner_proc_unlock(proc);
+}
+
+static struct binder_work *binder_dequeue_work_head_ilocked(
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ w = list_first_entry_or_null(list, struct binder_work, entry);
+ if (w)
+ list_del_init(&w->entry);
+ return w;
+}
+
+/**
+ * binder_dequeue_work_head() - Dequeues the item at head of list
+ * @proc: binder_proc associated with list
+ * @list: list to dequeue head
+ *
+ * Removes the head of the list if there are items on the list
+ *
+ * Return: pointer to the dequeued binder_work, or NULL if the list was empty
+ */
+static struct binder_work *binder_dequeue_work_head(
+ struct binder_proc *proc,
+ struct list_head *list)
+{
+ struct binder_work *w;
+
+ binder_inner_proc_lock(proc);
+ w = binder_dequeue_work_head_ilocked(list);
+ binder_inner_proc_unlock(proc);
+ return w;
+}
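To illustrate the locked/_ilocked pairing above (sketch only; the surrounding context is hypothetical):

    /* Sketch: queue a work item depending on whether proc->inner_lock is held. */
    static void example_queue_work(struct binder_proc *proc,
                                   struct binder_work *work,
                                   bool inner_locked)
    {
            if (!inner_locked) {
                    /* Wrapper takes and drops proc->inner_lock internally. */
                    binder_enqueue_work(proc, work, &proc->todo);
                    return;
            }

            /* Caller already holds proc->inner_lock (an *_ilocked path). */
            binder_enqueue_work_ilocked(work, &proc->todo);
    }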
+
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+static void binder_free_thread(struct binder_thread *thread);
+static void binder_free_proc(struct binder_proc *proc);
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
@@ -451,462 +924,159 @@ static long task_close_fd(struct binder_proc *proc, unsigned int fd)
return retval;
}
-static inline void binder_lock(const char *tag)
-{
- trace_binder_lock(tag);
- mutex_lock(&binder_main_lock);
- trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
- trace_binder_unlock(tag);
- mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
- long min_nice;
-
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
- return;
- }
- min_nice = rlimit_to_nice(current->signal->rlim[RLIMIT_NICE].rlim_cur);
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
- return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static bool binder_has_work_ilocked(struct binder_thread *thread,
+ bool do_proc_work)
{
- if (list_is_last(&buffer->entry, &proc->buffers))
- return proc->buffer + proc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return !binder_worklist_empty_ilocked(&thread->todo) ||
+ thread->looper_need_return ||
+ (do_proc_work &&
+ !binder_worklist_empty_ilocked(&thread->proc->todo));
}
-static void binder_insert_free_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
- struct rb_node **p = &proc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
+ bool has_work;
- BUG_ON(!new_buffer->free);
+ binder_inner_proc_lock(thread->proc);
+ has_work = binder_has_work_ilocked(thread, do_proc_work);
+ binder_inner_proc_unlock(thread->proc);
- new_buffer_size = binder_buffer_size(proc, new_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %p\n",
- proc->pid, new_buffer_size, new_buffer);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+ return has_work;
}
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
- struct binder_buffer *new_buffer)
+static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
- struct rb_node **p = &proc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
-
- BUG_ON(new_buffer->free);
-
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
-
- if (new_buffer < buffer)
- p = &parent->rb_left;
- else if (new_buffer > buffer)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+ return !thread->transaction_stack &&
+ binder_worklist_empty_ilocked(&thread->todo) &&
+ (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
+ BINDER_LOOPER_STATE_REGISTERED));
}
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
- uintptr_t user_ptr)
+static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
+ bool sync)
{
- struct rb_node *n = proc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
-
- kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
-
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
+ struct rb_node *n;
+ struct binder_thread *thread;
- if (kern_ptr < buffer)
- n = n->rb_left;
- else if (kern_ptr > buffer)
- n = n->rb_right;
- else
- return buffer;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ thread = rb_entry(n, struct binder_thread, rb_node);
+ if (thread->looper & BINDER_LOOPER_STATE_POLL &&
+ binder_available_for_proc_work_ilocked(thread)) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
+ else
+ wake_up_interruptible(&thread->wait);
+ }
}
- return NULL;
}
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+/**
+ * binder_select_thread_ilocked() - selects a thread for doing proc work.
+ * @proc: process to select a thread from
+ *
+ * Note that calling this function moves the thread off the waiting_threads
+ * list, so it can only be woken up by the caller of this function, or a
+ * signal. Therefore, callers *should* always wake up the thread this function
+ * returns.
+ *
+ * Return: If there's a thread currently waiting for process work,
+ * returns that thread. Otherwise returns NULL.
+ */
+static struct binder_thread *
+binder_select_thread_ilocked(struct binder_proc *proc)
{
- void *page_addr;
- unsigned long user_page_addr;
- struct page **page;
- struct mm_struct *mm;
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %p-%p\n", proc->pid,
- allocate ? "allocate" : "free", start, end);
-
- if (end <= start)
- return 0;
-
- trace_binder_update_page_range(proc, allocate, start, end);
-
- if (vma)
- mm = NULL;
- else
- mm = get_task_mm(proc->tsk);
-
- if (mm) {
- down_write(&mm->mmap_sem);
- vma = proc->vma;
- if (vma && mm != proc->vma_vm_mm) {
- pr_err("%d: vma mm and task mm mismatch\n",
- proc->pid);
- vma = NULL;
- }
- }
-
- if (allocate == 0)
- goto free_range;
-
- if (vma == NULL) {
- pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- proc->pid);
- goto err_no_vma;
- }
-
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
+ struct binder_thread *thread;
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ assert_spin_locked(&proc->inner_lock);
+ thread = list_first_entry_or_null(&proc->waiting_threads,
+ struct binder_thread,
+ waiting_thread_node);
- BUG_ON(*page);
- *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
- if (*page == NULL) {
- pr_err("%d: binder_alloc_buf failed for page at %p\n",
- proc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- ret = map_kernel_range_noflush((unsigned long)page_addr,
- PAGE_SIZE, PAGE_KERNEL, page);
- flush_cache_vmap((unsigned long)page_addr,
- (unsigned long)page_addr + PAGE_SIZE);
- if (ret != 1) {
- pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
- proc->pid, page_addr);
- goto err_map_kernel_failed;
- }
- user_page_addr =
- (uintptr_t)page_addr + proc->user_buffer_offset;
- ret = vm_insert_page(vma, user_page_addr, page[0]);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- proc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- /* vm_insert_page does not seem to increment the refcount */
- }
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return 0;
+ if (thread)
+ list_del_init(&thread->waiting_thread_node);
-free_range:
- for (page_addr = end - PAGE_SIZE; page_addr >= start;
- page_addr -= PAGE_SIZE) {
- page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
- if (vma)
- zap_page_range(vma, (uintptr_t)page_addr +
- proc->user_buffer_offset, PAGE_SIZE);
-err_vm_insert_page_failed:
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
- __free_page(*page);
- *page = NULL;
-err_alloc_page_failed:
- ;
- }
-err_no_vma:
- if (mm) {
- up_write(&mm->mmap_sem);
- mmput(mm);
- }
- return -ENOMEM;
+ return thread;
}
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
+/**
+ * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
+ * @proc: process to wake up a thread in
+ * @thread: specific thread to wake-up (may be NULL)
+ * @sync: whether to do a synchronous wake-up
+ *
+ * This function wakes up a thread in the @proc process.
+ * The caller may provide a specific thread to wake-up in
+ * the @thread parameter. If @thread is NULL, this function
+ * will wake up threads that have called poll().
+ *
+ * Note that for this function to work as expected, callers
+ * should first call binder_select_thread() to find a thread
+ * to handle the work (if they don't have a thread already),
+ * and pass the result into the @thread parameter.
+ */
+static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
+ struct binder_thread *thread,
+ bool sync)
{
- struct rb_node *n = proc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void *has_page_addr;
- void *end_page_addr;
- size_t size, data_offsets_size;
-
- if (proc->vma == NULL) {
- pr_err("%d: binder_alloc_buf, no vma\n",
- proc->pid);
- return NULL;
- }
-
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
-
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
- proc->pid, data_size, offsets_size);
- return NULL;
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_user_error("%d: got transaction with invalid extra_buffers_size %zd\n",
- proc->pid, extra_buffers_size);
- return NULL;
- }
- if (is_async &&
- proc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- proc->pid, size);
- return NULL;
- }
+ assert_spin_locked(&proc->inner_lock);
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_buffer_size(proc, buffer);
-
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
- }
- }
- if (best_fit == NULL) {
- pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
- proc->pid, size);
- return NULL;
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_buffer_size(proc, buffer);
- }
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
- proc->pid, size, buffer, buffer_size);
-
- has_page_addr =
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
+ if (thread) {
+ if (sync)
+ wake_up_interruptible_sync(&thread->wait);
else
- buffer_size = size + sizeof(struct binder_buffer);
- }
- end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- if (binder_update_page_range(proc, 1,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
- return NULL;
-
- rb_erase(best_fit, &proc->free_buffers);
- buffer->free = 0;
- binder_insert_allocated_buffer(proc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(proc, new_buffer);
- }
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %p\n",
- proc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->extra_buffers_size = extra_buffers_size;
- buffer->async_transaction = is_async;
- if (is_async) {
- proc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
+ wake_up_interruptible(&thread->wait);
+ return;
}
- return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ /* Didn't find a thread waiting for proc work; this can happen
+ * in two scenarios:
+ * 1. All threads are busy handling transactions
+ * In that case, one of those threads should call back into
+ * the kernel driver soon and pick up this work.
+ * 2. Threads are using the (e)poll interface, in which case
+ * they may be blocked on the waitqueue without having been
+ * added to waiting_threads. For this case, we just iterate
+ * over all threads not handling transaction work, and
+ * wake them all up. We wake all because we don't know whether
+ * a thread that called into (e)poll is handling non-binder
+ * work currently.
+ */
+ binder_wakeup_poll_threads_ilocked(proc, sync);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
+ struct binder_thread *thread = binder_select_thread_ilocked(proc);
-static void binder_delete_free_buffer(struct binder_proc *proc,
- struct binder_buffer *buffer)
-{
- struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
- BUG_ON(proc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
- BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
-
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p share page with %p\n",
- proc->pid, buffer, prev);
- }
- }
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
- proc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(proc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
- }
+ binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
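
As a hedged illustration (not part of the patch): the select/wake protocol described in the kernel-doc above reduces to the pattern below. binder_wakeup_proc_ilocked() is the asynchronous case; a purely hypothetical synchronous variant would differ only in the sync flag.

/*
 * Illustrative sketch only, not part of this patch.
 * Caller holds proc->inner_lock.
 */
static void example_wakeup_proc_sync_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	/* wake the selected waiter (or fall back to poll threads) */
	binder_wakeup_thread_ilocked(proc, thread, /* sync = */true);
}
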
-static void binder_free_buf(struct binder_proc *proc,
- struct binder_buffer *buffer)
+static void binder_set_nice(long nice)
{
- size_t size, buffer_size;
-
- buffer_size = binder_buffer_size(proc, buffer);
-
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %p size %zd buffer_size %zd\n",
- proc->pid, buffer, size, buffer_size);
-
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < proc->buffer);
- BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
-
- if (buffer->async_transaction) {
- proc->free_async_space += size + sizeof(struct binder_buffer);
-
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- proc->pid, size, proc->free_async_space);
- }
+ long min_nice;
- binder_update_page_range(proc, 0,
- (void *)PAGE_ALIGN((uintptr_t)buffer->data),
- (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
- NULL);
- rb_erase(&buffer->rb_node, &proc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &proc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
-
- if (next->free) {
- rb_erase(&next->rb_node, &proc->free_buffers);
- binder_delete_free_buffer(proc, next);
- }
- }
- if (proc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
-
- if (prev->free) {
- binder_delete_free_buffer(proc, buffer);
- rb_erase(&prev->rb_node, &proc->free_buffers);
- buffer = prev;
- }
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
}
- binder_insert_free_buffer(proc, buffer);
+ min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: nice value %ld not allowed use %ld instead\n",
+ current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice <= MAX_NICE)
+ return;
+ binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
-static struct binder_node *binder_get_node(struct binder_proc *proc,
- binder_uintptr_t ptr)
+static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
+ binder_uintptr_t ptr)
{
struct rb_node *n = proc->nodes.rb_node;
struct binder_node *node;
+ assert_spin_locked(&proc->inner_lock);
+
while (n) {
node = rb_entry(n, struct binder_node, rb_node);
@@ -914,21 +1084,46 @@ static struct binder_node *binder_get_node(struct binder_proc *proc,
n = n->rb_left;
else if (ptr > node->ptr)
n = n->rb_right;
- else
+ else {
+ /*
+ * take an implicit weak reference
+ * to ensure node stays alive until
+ * call to binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
return node;
+ }
}
return NULL;
}
-static struct binder_node *binder_new_node(struct binder_proc *proc,
- binder_uintptr_t ptr,
- binder_uintptr_t cookie)
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ binder_uintptr_t ptr)
+{
+ struct binder_node *node;
+
+ binder_inner_proc_lock(proc);
+ node = binder_get_node_ilocked(proc, ptr);
+ binder_inner_proc_unlock(proc);
+ return node;
+}
+
+static struct binder_node *binder_init_node_ilocked(
+ struct binder_proc *proc,
+ struct binder_node *new_node,
+ struct flat_binder_object *fp)
{
struct rb_node **p = &proc->nodes.rb_node;
struct rb_node *parent = NULL;
struct binder_node *node;
+ binder_uintptr_t ptr = fp ? fp->binder : 0;
+ binder_uintptr_t cookie = fp ? fp->cookie : 0;
+ __u32 flags = fp ? fp->flags : 0;
+
+ assert_spin_locked(&proc->inner_lock);
while (*p) {
+
parent = *p;
node = rb_entry(parent, struct binder_node, rb_node);
@@ -936,33 +1131,74 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
p = &(*p)->rb_left;
else if (ptr > node->ptr)
p = &(*p)->rb_right;
- else
- return NULL;
+ else {
+ /*
+ * A matching node is already in
+ * the rb tree. Abandon the init
+ * and return it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ return node;
+ }
}
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL)
- return NULL;
+ node = new_node;
binder_stats_created(BINDER_STAT_NODE);
+ node->tmp_refs++;
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, &proc->nodes);
- node->debug_id = ++binder_last_id;
+ node->debug_id = atomic_inc_return(&binder_last_id);
node->proc = proc;
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
+ node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d:%d node %d u%016llx c%016llx created\n",
proc->pid, current->pid, node->debug_id,
(u64)node->ptr, (u64)node->cookie);
+
return node;
}
-static int binder_inc_node(struct binder_node *node, int strong, int internal,
- struct list_head *target_list)
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ struct flat_binder_object *fp)
{
+ struct binder_node *node;
+ struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);
+
+ if (!new_node)
+ return NULL;
+ binder_inner_proc_lock(proc);
+ node = binder_init_node_ilocked(proc, new_node, fp);
+ binder_inner_proc_unlock(proc);
+ if (node != new_node)
+ /*
+ * The node was already added by another thread
+ */
+ kfree(new_node);
+
+ return node;
+}
+
+static void binder_free_node(struct binder_node *node)
+{
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+}
+
+static int binder_inc_node_nilocked(struct binder_node *node, int strong,
+ int internal,
+ struct list_head *target_list)
+{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal) {
if (target_list == NULL &&
@@ -978,8 +1214,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
} else
node->local_strong_refs++;
if (!node->has_strong_ref && target_list) {
- list_del_init(&node->work.entry);
- list_add_tail(&node->work.entry, target_list);
+ binder_dequeue_work_ilocked(&node->work);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
} else {
if (!internal)
@@ -990,58 +1226,169 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
node->debug_id);
return -EINVAL;
}
- list_add_tail(&node->work.entry, target_list);
+ binder_enqueue_work_ilocked(&node->work, target_list);
}
}
return 0;
}
-static int binder_dec_node(struct binder_node *node, int strong, int internal)
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ int ret;
+
+ binder_node_inner_lock(node);
+ ret = binder_inc_node_nilocked(node, strong, internal, target_list);
+ binder_node_inner_unlock(node);
+
+ return ret;
+}
+
+static bool binder_dec_node_nilocked(struct binder_node *node,
+ int strong, int internal)
{
+ struct binder_proc *proc = node->proc;
+
+ assert_spin_locked(&node->lock);
+ if (proc)
+ assert_spin_locked(&proc->inner_lock);
if (strong) {
if (internal)
node->internal_strong_refs--;
else
node->local_strong_refs--;
if (node->local_strong_refs || node->internal_strong_refs)
- return 0;
+ return false;
} else {
if (!internal)
node->local_weak_refs--;
- if (node->local_weak_refs || !hlist_empty(&node->refs))
- return 0;
+ if (node->local_weak_refs || node->tmp_refs ||
+ !hlist_empty(&node->refs))
+ return false;
}
- if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+
+ if (proc && (node->has_strong_ref || node->has_weak_ref)) {
if (list_empty(&node->work.entry)) {
- list_add_tail(&node->work.entry, &node->proc->todo);
- wake_up_interruptible(&node->proc->wait);
+ binder_enqueue_work_ilocked(&node->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
} else {
if (hlist_empty(&node->refs) && !node->local_strong_refs &&
- !node->local_weak_refs) {
- list_del_init(&node->work.entry);
- if (node->proc) {
- rb_erase(&node->rb_node, &node->proc->nodes);
+ !node->local_weak_refs && !node->tmp_refs) {
+ if (proc) {
+ binder_dequeue_work_ilocked(&node->work);
+ rb_erase(&node->rb_node, &proc->nodes);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"refless node %d deleted\n",
node->debug_id);
} else {
+ BUG_ON(!list_empty(&node->work.entry));
+ spin_lock(&binder_dead_nodes_lock);
+ /*
+ * tmp_refs could have changed so
+ * check it again
+ */
+ if (node->tmp_refs) {
+ spin_unlock(&binder_dead_nodes_lock);
+ return false;
+ }
hlist_del(&node->dead_node);
+ spin_unlock(&binder_dead_nodes_lock);
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"dead node %d deleted\n",
node->debug_id);
}
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ return true;
}
}
+ return false;
+}
- return 0;
+static void binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ free_node = binder_dec_node_nilocked(node, strong, internal);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
+}
+
+static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
+{
+ /*
+ * No call to binder_inc_node() is needed since we
+ * don't need to inform userspace of any changes to
+ * tmp_refs
+ */
+ node->tmp_refs++;
+}
+
+/**
+ * binder_inc_node_tmpref() - take a temporary reference on node
+ * @node: node to reference
+ *
+ * Take reference on node to prevent the node from being freed
+ * while referenced only by a local variable. The inner lock is
+ * needed to serialize with the node work on the queue (which
+ * isn't needed after the node is dead). If the node is dead
+ * (node->proc is NULL), use binder_dead_nodes_lock to protect
+ * node->tmp_refs against dead-node-only cases where the node
+ * lock cannot be acquired (e.g. traversing the dead node list to
+ * print nodes)
+ */
+static void binder_inc_node_tmpref(struct binder_node *node)
+{
+ binder_node_lock(node);
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ else
+ spin_lock(&binder_dead_nodes_lock);
+ binder_inc_node_tmpref_ilocked(node);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ else
+ spin_unlock(&binder_dead_nodes_lock);
+ binder_node_unlock(node);
+}
+
+/**
+ * binder_dec_node_tmpref() - remove a temporary reference on node
+ * @node: node to reference
+ *
+ * Release temporary reference on node taken via binder_inc_node_tmpref()
+ */
+static void binder_dec_node_tmpref(struct binder_node *node)
+{
+ bool free_node;
+
+ binder_node_inner_lock(node);
+ if (!node->proc)
+ spin_lock(&binder_dead_nodes_lock);
+ node->tmp_refs--;
+ BUG_ON(node->tmp_refs < 0);
+ if (!node->proc)
+ spin_unlock(&binder_dead_nodes_lock);
+ /*
+ * Call binder_dec_node() to check if all refcounts are 0
+ * and cleanup is needed. Calling with strong=0 and internal=1
+ * causes no actual reference to be released in binder_dec_node().
+ * If that changes, a change is needed here too.
+ */
+ free_node = binder_dec_node_nilocked(node, 0, 1);
+ binder_node_inner_unlock(node);
+ if (free_node)
+ binder_free_node(node);
}
+static void binder_put_node(struct binder_node *node)
+{
+ binder_dec_node_tmpref(node);
+}
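
A minimal usage sketch (illustrative only, not patch content) of the temporary-reference lifetime described above: binder_get_node() returns with node->tmp_refs already incremented, and the caller releases it with binder_put_node().

/* Hypothetical caller, for illustration only. */
static void example_lookup_node(struct binder_proc *proc, binder_uintptr_t ptr)
{
	struct binder_node *node = binder_get_node(proc, ptr);

	if (!node)
		return;
	/* node->tmp_refs keeps the node alive across this region */
	binder_put_node(node);
}
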
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- u32 desc, bool need_strong_ref)
+static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
+ u32 desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1049,11 +1396,11 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc) {
+ if (desc < ref->data.desc) {
n = n->rb_left;
- } else if (desc > ref->desc) {
+ } else if (desc > ref->data.desc) {
n = n->rb_right;
- } else if (need_strong_ref && !ref->strong) {
+ } else if (need_strong_ref && !ref->data.strong) {
binder_user_error("tried to use weak ref as strong ref\n");
return NULL;
} else {
@@ -1063,14 +1410,34 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
return NULL;
}
-static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
- struct binder_node *node)
+/**
+ * binder_get_ref_for_node_olocked() - get the ref associated with given node
+ * @proc: binder_proc that owns the ref
+ * @node: binder_node of target
+ * @new_ref: newly allocated binder_ref to be initialized or %NULL
+ *
+ * Look up the ref for the given node and return it if it exists
+ *
+ * If it doesn't exist and the caller provides a newly allocated
+ * ref, initialize the fields of the newly allocated ref and insert
+ * into the given proc rb_trees and node refs list.
+ *
+ * Return: the ref for node. It is possible that another thread
+ * allocated/initialized the ref first, in which case the
+ * returned ref would be different from the passed-in
+ * new_ref. new_ref must be kfree'd by the caller in
+ * this case.
+ */
+static struct binder_ref *binder_get_ref_for_node_olocked(
+ struct binder_proc *proc,
+ struct binder_node *node,
+ struct binder_ref *new_ref)
{
- struct rb_node *n;
+ struct binder_context *context = proc->context;
struct rb_node **p = &proc->refs_by_node.rb_node;
struct rb_node *parent = NULL;
- struct binder_ref *ref, *new_ref;
- struct binder_context *context = proc->context;
+ struct binder_ref *ref;
+ struct rb_node *n;
while (*p) {
parent = *p;
@@ -1083,22 +1450,22 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
else
return ref;
}
- new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
- if (new_ref == NULL)
+ if (!new_ref)
return NULL;
+
binder_stats_created(BINDER_STAT_REF);
- new_ref->debug_id = ++binder_last_id;
+ new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
new_ref->proc = proc;
new_ref->node = node;
rb_link_node(&new_ref->rb_node_node, parent, p);
rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
- new_ref->desc = (node == context->binder_context_mgr_node) ? 0 : 1;
+ new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (ref->desc > new_ref->desc)
+ if (ref->data.desc > new_ref->data.desc)
break;
- new_ref->desc = ref->desc + 1;
+ new_ref->data.desc = ref->data.desc + 1;
}
p = &proc->refs_by_desc.rb_node;
@@ -1106,121 +1473,423 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
parent = *p;
ref = rb_entry(parent, struct binder_ref, rb_node_desc);
- if (new_ref->desc < ref->desc)
+ if (new_ref->data.desc < ref->data.desc)
p = &(*p)->rb_left;
- else if (new_ref->desc > ref->desc)
+ else if (new_ref->data.desc > ref->data.desc)
p = &(*p)->rb_right;
else
BUG();
}
rb_link_node(&new_ref->rb_node_desc, parent, p);
rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
- if (node) {
- hlist_add_head(&new_ref->node_entry, &node->refs);
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for node %d\n",
- proc->pid, new_ref->debug_id, new_ref->desc,
- node->debug_id);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d new ref %d desc %d for dead node\n",
- proc->pid, new_ref->debug_id, new_ref->desc);
- }
+ binder_node_lock(node);
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d new ref %d desc %d for node %d\n",
+ proc->pid, new_ref->data.debug_id, new_ref->data.desc,
+ node->debug_id);
+ binder_node_unlock(node);
return new_ref;
}
-static void binder_delete_ref(struct binder_ref *ref)
+static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
+ bool delete_node = false;
+
binder_debug(BINDER_DEBUG_INTERNAL_REFS,
"%d delete ref %d desc %d for node %d\n",
- ref->proc->pid, ref->debug_id, ref->desc,
+ ref->proc->pid, ref->data.debug_id, ref->data.desc,
ref->node->debug_id);
rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
- if (ref->strong)
- binder_dec_node(ref->node, 1, 1);
+
+ binder_node_inner_lock(ref->node);
+ if (ref->data.strong)
+ binder_dec_node_nilocked(ref->node, 1, 1);
+
hlist_del(&ref->node_entry);
- binder_dec_node(ref->node, 0, 1);
+ delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
+ binder_node_inner_unlock(ref->node);
+ /*
+ * Clear ref->node unless we want the caller to free the node
+ */
+ if (!delete_node) {
+ /*
+ * The caller uses ref->node to determine
+ * whether the node needs to be freed. Clear
+ * it since the node is still alive.
+ */
+ ref->node = NULL;
+ }
+
if (ref->death) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d delete ref %d desc %d has death notification\n",
- ref->proc->pid, ref->debug_id, ref->desc);
- list_del(&ref->death->work.entry);
- kfree(ref->death);
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc);
+ binder_dequeue_work(ref->proc, &ref->death->work);
binder_stats_deleted(BINDER_STAT_DEATH);
}
- kfree(ref);
binder_stats_deleted(BINDER_STAT_REF);
}
-static int binder_inc_ref(struct binder_ref *ref, int strong,
- struct list_head *target_list)
+/**
+ * binder_inc_ref_olocked() - increment the ref for given handle
+ * @ref: ref to be incremented
+ * @strong: if true, strong increment, else weak
+ * @target_list: list to queue node work on
+ *
+ * Increment the ref. @ref->proc->outer_lock must be held on entry
+ *
+ * Return: 0, if successful, else errno
+ */
+static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
{
int ret;
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
ret = binder_inc_node(ref->node, 1, 1, target_list);
if (ret)
return ret;
}
- ref->strong++;
+ ref->data.strong++;
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
ret = binder_inc_node(ref->node, 0, 1, target_list);
if (ret)
return ret;
}
- ref->weak++;
+ ref->data.weak++;
}
return 0;
}
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
+/**
+ * binder_dec_ref_olocked() - dec the ref for given handle
+ * @ref: ref to be decremented
+ * @strong: if true, strong decrement, else weak
+ *
+ * Decrement the ref.
+ *
+ * Return: true if ref is cleaned up and ready to be freed
+ */
+static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
if (strong) {
- if (ref->strong == 0) {
+ if (ref->data.strong == 0) {
binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
- }
- ref->strong--;
- if (ref->strong == 0) {
- int ret;
-
- ret = binder_dec_node(ref->node, strong, 1);
- if (ret)
- return ret;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
+ ref->data.strong--;
+ if (ref->data.strong == 0)
+ binder_dec_node(ref->node, strong, 1);
} else {
- if (ref->weak == 0) {
+ if (ref->data.weak == 0) {
binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
- ref->proc->pid, ref->debug_id,
- ref->desc, ref->strong, ref->weak);
- return -EINVAL;
+ ref->proc->pid, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak);
+ return false;
}
- ref->weak--;
+ ref->data.weak--;
}
- if (ref->strong == 0 && ref->weak == 0)
- binder_delete_ref(ref);
- return 0;
+ if (ref->data.strong == 0 && ref->data.weak == 0) {
+ binder_cleanup_ref_olocked(ref);
+ return true;
+ }
+ return false;
}
-static void binder_pop_transaction(struct binder_thread *target_thread,
- struct binder_transaction *t)
+/**
+ * binder_get_node_from_ref() - get the node from the given proc/desc
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @need_strong_ref: if true, only return node if ref is strong
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, return the associated binder_node
+ *
+ * Return: a binder_node, or NULL if not found or not strong when a strong ref is required
+ */
+static struct binder_node *binder_get_node_from_ref(
+ struct binder_proc *proc,
+ u32 desc, bool need_strong_ref,
+ struct binder_ref_data *rdata)
{
- if (target_thread) {
- BUG_ON(target_thread->transaction_stack != t);
- BUG_ON(target_thread->transaction_stack->from != target_thread);
- target_thread->transaction_stack =
- target_thread->transaction_stack->from_parent;
- t->from = NULL;
+ struct binder_node *node;
+ struct binder_ref *ref;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
+ if (!ref)
+ goto err_no_ref;
+ node = ref->node;
+ /*
+ * Take an implicit reference on the node to ensure
+ * it stays alive until the call to binder_put_node()
+ */
+ binder_inc_node_tmpref(node);
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ return node;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return NULL;
+}
+
+/**
+ * binder_free_ref() - free the binder_ref
+ * @ref: ref to free
+ *
+ * Free the binder_ref. Free the binder_node indicated by ref->node
+ * (if non-NULL) and the binder_ref_death indicated by ref->death.
+ */
+static void binder_free_ref(struct binder_ref *ref)
+{
+ if (ref->node)
+ binder_free_node(ref->node);
+ kfree(ref->death);
+ kfree(ref);
+}
+
+/**
+ * binder_update_ref_for_handle() - inc/dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @increment: true=inc reference, false=dec reference
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and ref handle, increment or decrement the ref
+ * according to "increment" arg.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_update_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool increment, bool strong,
+ struct binder_ref_data *rdata)
+{
+ int ret = 0;
+ struct binder_ref *ref;
+ bool delete_ref = false;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, desc, strong);
+ if (!ref) {
+ ret = -EINVAL;
+ goto err_no_ref;
+ }
+ if (increment)
+ ret = binder_inc_ref_olocked(ref, strong, NULL);
+ else
+ delete_ref = binder_dec_ref_olocked(ref, strong);
+
+ if (rdata)
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+
+ if (delete_ref)
+ binder_free_ref(ref);
+ return ret;
+
+err_no_ref:
+ binder_proc_unlock(proc);
+ return ret;
+}
+
+/**
+ * binder_dec_ref_for_handle() - dec the ref for given handle
+ * @proc: proc containing the ref
+ * @desc: the handle associated with the ref
+ * @strong: true=strong reference, false=weak reference
+ * @rdata: the id/refcount data for the ref
+ *
+ * Just calls binder_update_ref_for_handle() to decrement the ref.
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_dec_ref_for_handle(struct binder_proc *proc,
+ uint32_t desc, bool strong, struct binder_ref_data *rdata)
+{
+ return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
+}
+
+
+/**
+ * binder_inc_ref_for_node() - increment the ref for given proc/node
+ * @proc: proc containing the ref
+ * @node: target node
+ * @strong: true=strong reference, false=weak reference
+ * @target_list: worklist to use if node is incremented
+ * @rdata: the id/refcount data for the ref
+ *
+ * Given a proc and node, increment the ref. Create the ref if it
+ * doesn't already exist
+ *
+ * Return: 0 if successful, else errno
+ */
+static int binder_inc_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node,
+ bool strong,
+ struct list_head *target_list,
+ struct binder_ref_data *rdata)
+{
+ struct binder_ref *ref;
+ struct binder_ref *new_ref = NULL;
+ int ret = 0;
+
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, NULL);
+ if (!ref) {
+ binder_proc_unlock(proc);
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (!new_ref)
+ return -ENOMEM;
+ binder_proc_lock(proc);
+ ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
+ }
+ ret = binder_inc_ref_olocked(ref, strong, target_list);
+ *rdata = ref->data;
+ binder_proc_unlock(proc);
+ if (new_ref && ref != new_ref)
+ /*
+ * Another thread created the ref first so
+ * free the one we allocated
+ */
+ kfree(new_ref);
+ return ret;
+}
+
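A rough caller sketch (assumed names, illustrative only, not part of the patch) for the allocate-outside-the-lock retry implemented above: binder_translate_binder() further down uses exactly this helper and reads the resulting handle out of @rdata.

/* Illustration only: give @target_proc a strong handle on @node. */
static int example_make_handle(struct binder_proc *target_proc,
			       struct binder_node *node,
			       struct binder_thread *thread,
			       struct flat_binder_object *fp)
{
	struct binder_ref_data rdata;
	int ret;

	ret = binder_inc_ref_for_node(target_proc, node, true,
				      &thread->todo, &rdata);
	if (ret)
		return ret;
	fp->handle = rdata.desc;	/* descriptor handed to userspace */
	return 0;
}
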
+static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ BUG_ON(!target_thread);
+ assert_spin_locked(&target_thread->proc->inner_lock);
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+}
+
+/**
+ * binder_thread_dec_tmpref() - decrement thread->tmp_ref
+ * @thread: thread to decrement
+ *
+ * A thread needs to be kept alive while being used to create or
+ * handle a transaction. binder_get_txn_from() is used to safely
+ * extract t->from from a binder_transaction and keep the thread
+ * indicated by t->from from being freed. When done with that
+ * binder_thread, this function is called to decrement the
+ * tmp_ref and free if appropriate (thread has been released
+ * and no transaction being processed by the driver)
+ */
+static void binder_thread_dec_tmpref(struct binder_thread *thread)
+{
+ /*
+ * atomic is used to protect the counter value while
+ * it cannot reach zero or thread->is_dead is false
+ */
+ binder_inner_proc_lock(thread->proc);
+ atomic_dec(&thread->tmp_ref);
+ if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
+ binder_inner_proc_unlock(thread->proc);
+ binder_free_thread(thread);
+ return;
+ }
+ binder_inner_proc_unlock(thread->proc);
+}
+
+/**
+ * binder_proc_dec_tmpref() - decrement proc->tmp_ref
+ * @proc: proc to decrement
+ *
+ * A binder_proc needs to be kept alive while being used to create or
+ * handle a transaction. proc->tmp_ref is incremented when
+ * creating a new transaction or the binder_proc is currently in-use
+ * by threads that are being released. When done with the binder_proc,
+ * this function is called to decrement the counter and free the
+ * proc if appropriate (proc has been released, all threads have
+ * been released and not currently in use to process a transaction).
+ */
+static void binder_proc_dec_tmpref(struct binder_proc *proc)
+{
+ binder_inner_proc_lock(proc);
+ proc->tmp_ref--;
+ if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
+ !proc->tmp_ref) {
+ binder_inner_proc_unlock(proc);
+ binder_free_proc(proc);
+ return;
+ }
+ binder_inner_proc_unlock(proc);
+}
+
+/**
+ * binder_get_txn_from() - safely extract the "from" thread in transaction
+ * @t: binder transaction for t->from
+ *
+ * Atomically return the "from" thread and increment the tmp_ref
+ * count for the thread to ensure it stays alive until
+ * binder_thread_dec_tmpref() is called.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ spin_lock(&t->lock);
+ from = t->from;
+ if (from)
+ atomic_inc(&from->tmp_ref);
+ spin_unlock(&t->lock);
+ return from;
+}
+
+/**
+ * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
+ * @t: binder transaction for t->from
+ *
+ * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
+ * to guarantee that the thread cannot be released while operating on it.
+ * The caller must call binder_inner_proc_unlock() to release the inner lock
+ * as well as call binder_thread_dec_tmpref() to release the reference.
+ *
+ * Return: the value of t->from
+ */
+static struct binder_thread *binder_get_txn_from_and_acq_inner(
+ struct binder_transaction *t)
+{
+ struct binder_thread *from;
+
+ from = binder_get_txn_from(t);
+ if (!from)
+ return NULL;
+ binder_inner_proc_lock(from->proc);
+ if (t->from) {
+ BUG_ON(from != t->from);
+ return from;
}
- t->need_reply = 0;
+ binder_inner_proc_unlock(from->proc);
+ binder_thread_dec_tmpref(from);
+ return NULL;
+}
+
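An illustrative pairing (not part of the patch) of the two helpers above: whoever takes the sending thread via binder_get_txn_from_and_acq_inner() must drop both the inner lock and the tmp_ref, as binder_send_failed_reply() does below.

/* Hypothetical caller, for illustration only. */
static void example_peek_sender(struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from_and_acq_inner(t);
	if (!from)
		return;	/* sending thread already gone */
	/* from->proc->inner_lock is held; from cannot be freed here */
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
}
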
+static void binder_free_transaction(struct binder_transaction *t)
+{
if (t->buffer)
t->buffer->transaction = NULL;
kfree(t);
@@ -1235,30 +1904,28 @@ static void binder_send_failed_reply(struct binder_transaction *t,
BUG_ON(t->flags & TF_ONE_WAY);
while (1) {
- target_thread = t->from;
+ target_thread = binder_get_txn_from_and_acq_inner(t);
if (target_thread) {
- if (target_thread->return_error != BR_OK &&
- target_thread->return_error2 == BR_OK) {
- target_thread->return_error2 =
- target_thread->return_error;
- target_thread->return_error = BR_OK;
- }
- if (target_thread->return_error == BR_OK) {
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "send failed reply for transaction %d to %d:%d\n",
- t->debug_id,
- target_thread->proc->pid,
- target_thread->pid);
-
- binder_pop_transaction(target_thread, t);
- target_thread->return_error = error_code;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "send failed reply for transaction %d to %d:%d\n",
+ t->debug_id,
+ target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction_ilocked(target_thread, t);
+ if (target_thread->reply_error.cmd == BR_OK) {
+ target_thread->reply_error.cmd = error_code;
+ binder_enqueue_work_ilocked(
+ &target_thread->reply_error.work,
+ &target_thread->todo);
wake_up_interruptible(&target_thread->wait);
} else {
- pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
- target_thread->proc->pid,
- target_thread->pid,
- target_thread->return_error);
+ WARN(1, "Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
+ binder_inner_proc_unlock(target_thread->proc);
+ binder_thread_dec_tmpref(target_thread);
+ binder_free_transaction(t);
return;
}
next = t->from_parent;
@@ -1267,7 +1934,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
"send failed reply for transaction %d, target dead\n",
t->debug_id);
- binder_pop_transaction(target_thread, t);
+ binder_free_transaction(t);
if (next == NULL) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"reply failed, no target thread at root\n");
@@ -1476,24 +2143,26 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
node->debug_id, (u64)node->ptr);
binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
0);
+ binder_put_node(node);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
- struct binder_ref *ref;
+ struct binder_ref_data rdata;
+ int ret;
fp = to_flat_binder_object(hdr);
- ref = binder_get_ref(proc, fp->handle,
- hdr->type == BINDER_TYPE_HANDLE);
- if (ref == NULL) {
- pr_err("transaction release %d bad handle %d\n",
- debug_id, fp->handle);
+ ret = binder_dec_ref_for_handle(proc, fp->handle,
+ hdr->type == BINDER_TYPE_HANDLE, &rdata);
+
+ if (ret) {
+ pr_err("transaction release %d bad handle %d, ret = %d\n",
+ debug_id, fp->handle, ret);
break;
}
binder_debug(BINDER_DEBUG_TRANSACTION,
- " ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, ref->node->debug_id);
- binder_dec_ref(ref, hdr->type == BINDER_TYPE_HANDLE);
+ " ref %d desc %d\n",
+ rdata.debug_id, rdata.desc);
} break;
case BINDER_TYPE_FD: {
@@ -1532,7 +2201,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
* back to kernel address space to access it
*/
parent_buffer = parent->buffer -
- proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &proc->alloc);
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -1564,102 +2234,122 @@ static int binder_translate_binder(struct flat_binder_object *fp,
struct binder_thread *thread)
{
struct binder_node *node;
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_ref_data rdata;
+ int ret = 0;
node = binder_get_node(proc, fp->binder);
if (!node) {
- node = binder_new_node(proc, fp->binder, fp->cookie);
+ node = binder_new_node(proc, fp);
if (!node)
return -ENOMEM;
-
- node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
- node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
}
if (fp->cookie != node->cookie) {
binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid, (u64)fp->binder,
node->debug_id, (u64)fp->cookie,
(u64)node->cookie);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
+ }
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
- ref = binder_get_ref_for_node(target_proc, node);
- if (!ref)
- return -EINVAL;
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ &thread->todo, &rdata);
+ if (ret)
+ goto done;
if (fp->hdr.type == BINDER_TYPE_BINDER)
fp->hdr.type = BINDER_TYPE_HANDLE;
else
fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
fp->binder = 0;
- fp->handle = ref->desc;
+ fp->handle = rdata.desc;
fp->cookie = 0;
- binder_inc_ref(ref, fp->hdr.type == BINDER_TYPE_HANDLE, &thread->todo);
- trace_binder_transaction_node_to_ref(t, node, ref);
+ trace_binder_transaction_node_to_ref(t, node, &rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" node %d u%016llx -> ref %d desc %d\n",
node->debug_id, (u64)node->ptr,
- ref->debug_id, ref->desc);
-
- return 0;
+ rdata.debug_id, rdata.desc);
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_handle(struct flat_binder_object *fp,
struct binder_transaction *t,
struct binder_thread *thread)
{
- struct binder_ref *ref;
struct binder_proc *proc = thread->proc;
struct binder_proc *target_proc = t->to_proc;
+ struct binder_node *node;
+ struct binder_ref_data src_rdata;
+ int ret = 0;
- ref = binder_get_ref(proc, fp->handle,
- fp->hdr.type == BINDER_TYPE_HANDLE);
- if (!ref) {
+ node = binder_get_node_from_ref(proc, fp->handle,
+ fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
+ if (!node) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk))
- return -EPERM;
+ if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ ret = -EPERM;
+ goto done;
+ }
- if (ref->node->proc == target_proc) {
+ binder_node_lock(node);
+ if (node->proc == target_proc) {
if (fp->hdr.type == BINDER_TYPE_HANDLE)
fp->hdr.type = BINDER_TYPE_BINDER;
else
fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
- fp->binder = ref->node->ptr;
- fp->cookie = ref->node->cookie;
- binder_inc_node(ref->node, fp->hdr.type == BINDER_TYPE_BINDER,
- 0, NULL);
- trace_binder_transaction_ref_to_node(t, ref);
+ fp->binder = node->ptr;
+ fp->cookie = node->cookie;
+ if (node->proc)
+ binder_inner_proc_lock(node->proc);
+ binder_inc_node_nilocked(node,
+ fp->hdr.type == BINDER_TYPE_BINDER,
+ 0, NULL);
+ if (node->proc)
+ binder_inner_proc_unlock(node->proc);
+ trace_binder_transaction_ref_to_node(t, node, &src_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> node %d u%016llx\n",
- ref->debug_id, ref->desc, ref->node->debug_id,
- (u64)ref->node->ptr);
+ src_rdata.debug_id, src_rdata.desc, node->debug_id,
+ (u64)node->ptr);
+ binder_node_unlock(node);
} else {
- struct binder_ref *new_ref;
+ int ret;
+ struct binder_ref_data dest_rdata;
- new_ref = binder_get_ref_for_node(target_proc, ref->node);
- if (!new_ref)
- return -EINVAL;
+ binder_node_unlock(node);
+ ret = binder_inc_ref_for_node(target_proc, node,
+ fp->hdr.type == BINDER_TYPE_HANDLE,
+ NULL, &dest_rdata);
+ if (ret)
+ goto done;
fp->binder = 0;
- fp->handle = new_ref->desc;
+ fp->handle = dest_rdata.desc;
fp->cookie = 0;
- binder_inc_ref(new_ref, fp->hdr.type == BINDER_TYPE_HANDLE,
- NULL);
- trace_binder_transaction_ref_to_ref(t, ref, new_ref);
+ trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
+ &dest_rdata);
binder_debug(BINDER_DEBUG_TRANSACTION,
" ref %d desc %d -> ref %d desc %d (node %d)\n",
- ref->debug_id, ref->desc, new_ref->debug_id,
- new_ref->desc, ref->node->debug_id);
+ src_rdata.debug_id, src_rdata.desc,
+ dest_rdata.debug_id, dest_rdata.desc,
+ node->debug_id);
}
- return 0;
+done:
+ binder_put_node(node);
+ return ret;
}
static int binder_translate_fd(int fd,
@@ -1750,7 +2440,8 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
* Since the parent was already fixed up, convert it
* back to the kernel address space to access it
*/
- parent_buffer = parent->buffer - target_proc->user_buffer_offset;
+ parent_buffer = parent->buffer -
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
fd_array = (u32 *)(parent_buffer + fda->parent_offset);
if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
@@ -1818,12 +2509,80 @@ static int binder_fixup_parent(struct binder_transaction *t,
return -EINVAL;
}
parent_buffer = (u8 *)(parent->buffer -
- target_proc->user_buffer_offset);
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc));
*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
return 0;
}
+/**
+ * binder_proc_transaction() - sends a transaction to a process and wakes it up
+ * @t: transaction to send
+ * @proc: process to send the transaction to
+ * @thread: thread in @proc to send the transaction to (may be NULL)
+ *
+ * This function queues a transaction to the specified process. It will try
+ * to find a thread in the target process to handle the transaction and
+ * wake it up. If no thread is found, the work is queued to the proc
+ * waitqueue.
+ *
+ * If the @thread parameter is not NULL, the transaction is always queued
+ * to the waitlist of that specific thread.
+ *
+ * Return: true if the transaction was successfully queued
+ * false if the target process or thread is dead
+ */
+static bool binder_proc_transaction(struct binder_transaction *t,
+ struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct list_head *target_list = NULL;
+ struct binder_node *node = t->buffer->target_node;
+ bool oneway = !!(t->flags & TF_ONE_WAY);
+ bool wakeup = true;
+
+ BUG_ON(!node);
+ binder_node_lock(node);
+ if (oneway) {
+ BUG_ON(thread);
+ if (node->has_async_transaction) {
+ target_list = &node->async_todo;
+ wakeup = false;
+ } else {
+ node->has_async_transaction = 1;
+ }
+ }
+
+ binder_inner_proc_lock(proc);
+
+ if (proc->is_dead || (thread && thread->is_dead)) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ return false;
+ }
+
+ if (!thread && !target_list)
+ thread = binder_select_thread_ilocked(proc);
+
+ if (thread)
+ target_list = &thread->todo;
+ else if (!target_list)
+ target_list = &proc->todo;
+ else
+ BUG_ON(target_list != &node->async_todo);
+
+ binder_enqueue_work_ilocked(&t->work, target_list);
+
+ if (wakeup)
+ binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
+
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+
+ return true;
+}
+
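For illustration only (assumed caller, not patch content): a sender hands the finished transaction to binder_proc_transaction() and treats a false return as a dead target, mirroring the err_dead_proc_or_thread path further down.

/* Illustration only, not part of this patch. */
static bool example_queue_transaction(struct binder_transaction *t,
				      struct binder_proc *target_proc,
				      struct binder_thread *target_thread)
{
	if (t->flags & TF_ONE_WAY)
		/* oneway work may land on node->async_todo; no thread hint */
		return binder_proc_transaction(t, target_proc, NULL);
	/* synchronous work may be steered to a specific thread */
	return binder_proc_transaction(t, target_proc, target_thread);
}
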
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -1835,19 +2594,21 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
- struct binder_proc *target_proc;
+ struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
- struct list_head *target_list;
- wait_queue_head_t *target_wait;
struct binder_transaction *in_reply_to = NULL;
struct binder_transaction_log_entry *e;
- uint32_t return_error;
+ uint32_t return_error = 0;
+ uint32_t return_error_param = 0;
+ uint32_t return_error_line = 0;
struct binder_buffer_object *last_fixup_obj = NULL;
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
+ int t_debug_id = atomic_inc_return(&binder_last_id);
e = binder_transaction_log_add(&binder_transaction_log);
+ e->debug_id = t_debug_id;
e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
e->from_proc = proc->pid;
e->from_thread = thread->pid;
@@ -1857,29 +2618,40 @@ static void binder_transaction(struct binder_proc *proc,
e->context_name = proc->context->name;
if (reply) {
+ binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
+ binder_inner_proc_unlock(proc);
binder_user_error("%d:%d got reply transaction with no transaction stack\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_empty_call_stack;
}
- binder_set_nice(in_reply_to->saved_priority);
if (in_reply_to->to_thread != thread) {
+ spin_lock(&in_reply_to->lock);
binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, in_reply_to->debug_id,
in_reply_to->to_proc ?
in_reply_to->to_proc->pid : 0,
in_reply_to->to_thread ?
in_reply_to->to_thread->pid : 0);
+ spin_unlock(&in_reply_to->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
goto err_bad_call_stack;
}
thread->transaction_stack = in_reply_to->to_parent;
- target_thread = in_reply_to->from;
+ binder_inner_proc_unlock(proc);
+ binder_set_nice(in_reply_to->saved_priority);
+ target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
if (target_thread->transaction_stack != in_reply_to) {
@@ -1888,89 +2660,137 @@ static void binder_transaction(struct binder_proc *proc,
target_thread->transaction_stack ?
target_thread->transaction_stack->debug_id : 0,
in_reply_to->debug_id);
+ binder_inner_proc_unlock(target_thread->proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
in_reply_to = NULL;
target_thread = NULL;
goto err_dead_binder;
}
target_proc = target_thread->proc;
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_thread->proc);
} else {
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle, true);
- if (ref == NULL) {
+ /*
+ * There must already be a strong ref
+ * on this node. If so, do a strong
+ * increment on the node to ensure it
+ * stays alive until the transaction is
+ * done.
+ */
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, tr->target.handle,
+ true);
+ if (ref) {
+ binder_inc_node(ref->node, 1, 0, NULL);
+ target_node = ref->node;
+ }
+ binder_proc_unlock(proc);
+ if (target_node == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- target_node = ref->node;
} else {
+ mutex_lock(&context->context_mgr_node_lock);
target_node = context->binder_context_mgr_node;
if (target_node == NULL) {
return_error = BR_DEAD_REPLY;
+ mutex_unlock(&context->context_mgr_node_lock);
+ return_error_line = __LINE__;
goto err_no_context_mgr_node;
}
+ binder_inc_node(target_node, 1, 0, NULL);
+ mutex_unlock(&context->context_mgr_node_lock);
}
e->to_node = target_node->debug_id;
+ binder_node_lock(target_node);
target_proc = target_node->proc;
if (target_proc == NULL) {
+ binder_node_unlock(target_node);
return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
goto err_dead_binder;
}
+ binder_inner_proc_lock(target_proc);
+ target_proc->tmp_ref++;
+ binder_inner_proc_unlock(target_proc);
+ binder_node_unlock(target_node);
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPERM;
+ return_error_line = __LINE__;
goto err_invalid_target_handle;
}
+ binder_inner_proc_lock(proc);
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
+ spin_lock(&tmp->lock);
binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
proc->pid, thread->pid, tmp->debug_id,
tmp->to_proc ? tmp->to_proc->pid : 0,
tmp->to_thread ?
tmp->to_thread->pid : 0);
+ spin_unlock(&tmp->lock);
+ binder_inner_proc_unlock(proc);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EPROTO;
+ return_error_line = __LINE__;
goto err_bad_call_stack;
}
while (tmp) {
- if (tmp->from && tmp->from->proc == target_proc)
- target_thread = tmp->from;
+ struct binder_thread *from;
+
+ spin_lock(&tmp->lock);
+ from = tmp->from;
+ if (from && from->proc == target_proc) {
+ atomic_inc(&from->tmp_ref);
+ target_thread = from;
+ spin_unlock(&tmp->lock);
+ break;
+ }
+ spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
+ binder_inner_proc_unlock(proc);
}
- if (target_thread) {
+ if (target_thread)
e->to_thread = target_thread->pid;
- target_list = &target_thread->todo;
- target_wait = &target_thread->wait;
- } else {
- target_list = &target_proc->todo;
- target_wait = &target_proc->wait;
- }
e->to_proc = target_proc->pid;
/* TODO: reuse incoming transaction for reply */
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (t == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_t_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
goto err_alloc_tcomplete_failed;
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = ++binder_last_id;
- e->debug_id = t->debug_id;
+ t->debug_id = t_debug_id;
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
@@ -2004,11 +2824,18 @@ static void binder_transaction(struct binder_proc *proc,
trace_binder_transaction(reply, t, target_node);
- t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
- if (t->buffer == NULL) {
- return_error = BR_FAILED_REPLY;
+ if (IS_ERR(t->buffer)) {
+ /*
+ * -ESRCH indicates VMA cleared. The target is dying.
+ */
+ return_error_param = PTR_ERR(t->buffer);
+ return_error = return_error_param == -ESRCH ?
+ BR_DEAD_REPLY : BR_FAILED_REPLY;
+ return_error_line = __LINE__;
+ t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
t->buffer->allow_user_free = 0;
@@ -2016,9 +2843,6 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = t;
t->buffer->target_node = target_node;
trace_binder_transaction_alloc_buf(t->buffer);
- if (target_node)
- binder_inc_node(target_node, 1, 0, NULL);
-
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
@@ -2028,6 +2852,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid data ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (copy_from_user(offp, (const void __user *)(uintptr_t)
@@ -2035,12 +2861,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
proc->pid, thread->pid, (u64)tr->offsets_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
@@ -2048,6 +2878,8 @@ static void binder_transaction(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)extra_buffers_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
off_end = (void *)off_start + tr->offsets_size;
@@ -2064,6 +2896,8 @@ static void binder_transaction(struct binder_proc *proc,
(u64)off_min,
(u64)t->buffer->data_size);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
@@ -2078,6 +2912,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_binder(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2089,6 +2925,8 @@ static void binder_transaction(struct binder_proc *proc,
ret = binder_translate_handle(fp, t, thread);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
} break;
@@ -2100,6 +2938,8 @@ static void binder_transaction(struct binder_proc *proc,
if (target_fd < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = target_fd;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
fp->pad_binder = 0;
@@ -2116,6 +2956,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
if (!binder_validate_fixup(t->buffer, off_start,
@@ -2125,12 +2967,16 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_parent;
}
ret = binder_translate_fd_array(fda, parent, t, thread,
in_reply_to);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = parent;
@@ -2146,6 +2992,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with too large buffer\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_offset;
}
if (copy_from_user(sg_bufp,
@@ -2153,12 +3001,15 @@ static void binder_transaction(struct binder_proc *proc,
bp->buffer, bp->length)) {
binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
proc->pid, thread->pid);
+ return_error_param = -EFAULT;
return_error = BR_FAILED_REPLY;
+ return_error_line = __LINE__;
goto err_copy_data_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)sg_bufp +
- target_proc->user_buffer_offset;
+ binder_alloc_get_user_buffer_offset(
+ &target_proc->alloc);
sg_bufp += ALIGN(bp->length, sizeof(u64));
ret = binder_fixup_parent(t, thread, bp, off_start,
@@ -2167,6 +3018,8 @@ static void binder_transaction(struct binder_proc *proc,
last_fixup_min_off);
if (ret < 0) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
goto err_translate_failed;
}
last_fixup_obj = bp;
@@ -2176,38 +3029,60 @@ static void binder_transaction(struct binder_proc *proc,
binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, hdr->type);
return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
goto err_bad_object_type;
}
}
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ binder_enqueue_work(proc, tcomplete, &thread->todo);
+ t->work.type = BINDER_WORK_TRANSACTION;
+
if (reply) {
+ binder_inner_proc_lock(target_proc);
+ if (target_thread->is_dead) {
+ binder_inner_proc_unlock(target_proc);
+ goto err_dead_proc_or_thread;
+ }
BUG_ON(t->buffer->async_transaction != 0);
- binder_pop_transaction(target_thread, in_reply_to);
+ binder_pop_transaction_ilocked(target_thread, in_reply_to);
+ binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
+ binder_inner_proc_unlock(target_proc);
+ wake_up_interruptible_sync(&target_thread->wait);
+ binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
+ binder_inner_proc_lock(proc);
t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(proc);
+ if (!binder_proc_transaction(t, target_proc, target_thread)) {
+ binder_inner_proc_lock(proc);
+ binder_pop_transaction_ilocked(thread, t);
+ binder_inner_proc_unlock(proc);
+ goto err_dead_proc_or_thread;
+ }
} else {
BUG_ON(target_node == NULL);
BUG_ON(t->buffer->async_transaction != 1);
- if (target_node->has_async_transaction) {
- target_list = &target_node->async_todo;
- target_wait = NULL;
- } else
- target_node->has_async_transaction = 1;
- }
- t->work.type = BINDER_WORK_TRANSACTION;
- list_add_tail(&t->work.entry, target_list);
- tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait) {
- if (reply || !(t->flags & TF_ONE_WAY))
- wake_up_interruptible_sync(target_wait);
- else
- wake_up_interruptible(target_wait);
+ if (!binder_proc_transaction(t, target_proc, NULL))
+ goto err_dead_proc_or_thread;
}
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ binder_proc_dec_tmpref(target_proc);
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
return;
+err_dead_proc_or_thread:
+ return_error = BR_DEAD_REPLY;
+ return_error_line = __LINE__;
err_translate_failed:
err_bad_object_type:
err_bad_offset:
@@ -2215,8 +3090,9 @@ err_bad_parent:
err_copy_data_failed:
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ target_node = NULL;
t->buffer->transaction = NULL;
- binder_free_buf(target_proc, t->buffer);
+ binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
@@ -2229,24 +3105,49 @@ err_empty_call_stack:
err_dead_binder:
err_invalid_target_handle:
err_no_context_mgr_node:
+ if (target_thread)
+ binder_thread_dec_tmpref(target_thread);
+ if (target_proc)
+ binder_proc_dec_tmpref(target_proc);
+ if (target_node)
+ binder_dec_node(target_node, 1, 0);
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d transaction failed %d, size %lld-%lld\n",
- proc->pid, thread->pid, return_error,
- (u64)tr->data_size, (u64)tr->offsets_size);
+ "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
+ proc->pid, thread->pid, return_error, return_error_param,
+ (u64)tr->data_size, (u64)tr->offsets_size,
+ return_error_line);
{
struct binder_transaction_log_entry *fe;
+ e->return_error = return_error;
+ e->return_error_param = return_error_param;
+ e->return_error_line = return_error_line;
fe = binder_transaction_log_add(&binder_transaction_log_failed);
*fe = *e;
+ /*
+ * write barrier to synchronize with initialization
+ * of log entry
+ */
+ smp_wmb();
+ WRITE_ONCE(e->debug_id_done, t_debug_id);
+ WRITE_ONCE(fe->debug_id_done, t_debug_id);
}
- BUG_ON(thread->return_error != BR_OK);
+ BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
- thread->return_error = BR_TRANSACTION_COMPLETE;
+ thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
binder_send_failed_reply(in_reply_to, return_error);
- } else
- thread->return_error = return_error;
+ } else {
+ thread->return_error.cmd = return_error;
+ binder_enqueue_work(thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ }
}
static int binder_thread_write(struct binder_proc *proc,
@@ -2260,15 +3161,17 @@ static int binder_thread_write(struct binder_proc *proc,
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
- while (ptr < end && thread->return_error == BR_OK) {
+ while (ptr < end && thread->return_error.cmd == BR_OK) {
+ int ret;
+
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
- binder_stats.bc[_IOC_NR(cmd)]++;
- proc->stats.bc[_IOC_NR(cmd)]++;
- thread->stats.bc[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
@@ -2276,53 +3179,61 @@ static int binder_thread_write(struct binder_proc *proc,
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
- struct binder_ref *ref;
const char *debug_string;
+ bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
+ bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
+ struct binder_ref_data rdata;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
+
ptr += sizeof(uint32_t);
- if (target == 0 && context->binder_context_mgr_node &&
- (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
- ref = binder_get_ref_for_node(proc,
- context->binder_context_mgr_node);
- if (ref->desc != target) {
- binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
- proc->pid, thread->pid,
- ref->desc);
- }
- } else
- ref = binder_get_ref(proc, target,
- cmd == BC_ACQUIRE ||
- cmd == BC_RELEASE);
- if (ref == NULL) {
- binder_user_error("%d:%d refcount change on invalid ref %d\n",
- proc->pid, thread->pid, target);
- break;
+ ret = -1;
+ if (increment && !target) {
+ struct binder_node *ctx_mgr_node;
+ mutex_lock(&context->context_mgr_node_lock);
+ ctx_mgr_node = context->binder_context_mgr_node;
+ if (ctx_mgr_node)
+ ret = binder_inc_ref_for_node(
+ proc, ctx_mgr_node,
+ strong, NULL, &rdata);
+ mutex_unlock(&context->context_mgr_node_lock);
+ }
+ if (ret)
+ ret = binder_update_ref_for_handle(
+ proc, target, increment, strong,
+ &rdata);
+ if (!ret && rdata.desc != target) {
+ binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
+ proc->pid, thread->pid,
+ target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
- binder_inc_ref(ref, 0, NULL);
break;
case BC_ACQUIRE:
debug_string = "Acquire";
- binder_inc_ref(ref, 1, NULL);
break;
case BC_RELEASE:
debug_string = "Release";
- binder_dec_ref(ref, 1);
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
- binder_dec_ref(ref, 0);
+ break;
+ }
+ if (ret) {
+ binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
+ proc->pid, thread->pid, debug_string,
+ strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
- proc->pid, thread->pid, debug_string, ref->debug_id,
- ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ "%d:%d %s ref %d desc %d s %d w %d\n",
+ proc->pid, thread->pid, debug_string,
+ rdata.debug_id, rdata.desc, rdata.strong,
+ rdata.weak);
break;
}
case BC_INCREFS_DONE:
@@ -2330,6 +3241,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
+ bool free_node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
@@ -2354,13 +3266,17 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
+ binder_put_node(node);
break;
}
+ binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
@@ -2369,16 +3285,23 @@ static int binder_thread_write(struct binder_proc *proc,
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
- binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ free_node = binder_dec_node_nilocked(node,
+ cmd == BC_ACQUIRE_DONE, 0);
+ WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s node %d ls %d lw %d\n",
+ "%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
- node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ node->debug_id, node->local_strong_refs,
+ node->local_weak_refs, node->tmp_refs);
+ binder_node_inner_unlock(node);
+ binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
@@ -2396,7 +3319,8 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- buffer = binder_buffer_lookup(proc, data_ptr);
+ buffer = binder_alloc_prepare_to_free(&proc->alloc,
+ data_ptr);
if (buffer == NULL) {
binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid, (u64)data_ptr);
@@ -2418,15 +3342,27 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->transaction = NULL;
}
if (buffer->async_transaction && buffer->target_node) {
- BUG_ON(!buffer->target_node->has_async_transaction);
- if (list_empty(&buffer->target_node->async_todo))
- buffer->target_node->has_async_transaction = 0;
- else
- list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ struct binder_node *buf_node;
+ struct binder_work *w;
+
+ buf_node = buffer->target_node;
+ binder_node_inner_lock(buf_node);
+ BUG_ON(!buf_node->has_async_transaction);
+ BUG_ON(buf_node->proc != proc);
+ w = binder_dequeue_work_head_ilocked(
+ &buf_node->async_todo);
+ if (!w) {
+ buf_node->has_async_transaction = 0;
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ }
+ binder_node_inner_unlock(buf_node);
}
trace_binder_transaction_buffer_release(buffer);
binder_transaction_buffer_release(proc, buffer, NULL);
- binder_free_buf(proc, buffer);
+ binder_alloc_free_buf(&proc->alloc, buffer);
break;
}
@@ -2457,6 +3393,7 @@ static int binder_thread_write(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
+ binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
@@ -2470,6 +3407,7 @@ static int binder_thread_write(struct binder_proc *proc,
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
@@ -2494,7 +3432,7 @@ static int binder_thread_write(struct binder_proc *proc,
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
- struct binder_ref_death *death;
+ struct binder_ref_death *death = NULL;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2502,7 +3440,29 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target, false);
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ /*
+ * Allocate memory for death notification
+ * before taking lock
+ */
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ WARN_ON(thread->return_error.cmd !=
+ BR_OK);
+ thread->return_error.cmd = BR_ERROR;
+ binder_enqueue_work(
+ thread->proc,
+ &thread->return_error.work,
+ &thread->todo);
+ binder_debug(
+ BINDER_DEBUG_FAILED_TRANSACTION,
+ "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ }
+ binder_proc_lock(proc);
+ ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
@@ -2510,6 +3470,8 @@ static int binder_thread_write(struct binder_proc *proc,
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
@@ -2519,21 +3481,18 @@ static int binder_thread_write(struct binder_proc *proc,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
- (u64)cookie, ref->debug_id, ref->desc,
- ref->strong, ref->weak, ref->node->debug_id);
+ (u64)cookie, ref->data.debug_id,
+ ref->data.desc, ref->data.strong,
+ ref->data.weak, ref->node->debug_id);
+ binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
- break;
- }
- death = kzalloc(sizeof(*death), GFP_KERNEL);
- if (death == NULL) {
- thread->return_error = BR_ERROR;
- binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
- "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
- proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
+ kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
@@ -2542,17 +3501,19 @@ static int binder_thread_write(struct binder_proc *proc,
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&ref->death->work.entry, &thread->todo);
- } else {
- list_add_tail(&ref->death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
- }
+
+ binder_inner_proc_lock(proc);
+ binder_enqueue_work_ilocked(
+ &ref->death->work, &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
death = ref->death;
@@ -2561,22 +3522,35 @@ static int binder_thread_write(struct binder_proc *proc,
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
break;
}
ref->death = NULL;
+ binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(
+ proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
+ binder_inner_proc_unlock(proc);
}
+ binder_node_unlock(ref->node);
+ binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
@@ -2587,8 +3561,13 @@ static int binder_thread_write(struct binder_proc *proc,
return -EFAULT;
ptr += sizeof(cookie);
- list_for_each_entry(w, &proc->delivered_death, entry) {
- struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ binder_inner_proc_lock(proc);
+ list_for_each_entry(w, &proc->delivered_death,
+ entry) {
+ struct binder_ref_death *tmp_death =
+ container_of(w,
+ struct binder_ref_death,
+ work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
@@ -2602,19 +3581,25 @@ static int binder_thread_write(struct binder_proc *proc,
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
+ binder_inner_proc_unlock(proc);
break;
}
-
- list_del_init(&death->work.entry);
+ binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
- if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
- list_add_tail(&death->work.entry, &thread->todo);
- } else {
- list_add_tail(&death->work.entry, &proc->todo);
- wake_up_interruptible(&proc->wait);
+ if (thread->looper &
+ (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ binder_enqueue_work_ilocked(
+ &death->work, &thread->todo);
+ else {
+ binder_enqueue_work_ilocked(
+ &death->work,
+ &proc->todo);
+ binder_wakeup_proc_ilocked(proc);
}
}
+ binder_inner_proc_unlock(proc);
} break;
default:
@@ -2632,23 +3617,79 @@ static void binder_stat_br(struct binder_proc *proc,
{
trace_binder_return(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
- binder_stats.br[_IOC_NR(cmd)]++;
- proc->stats.br[_IOC_NR(cmd)]++;
- thread->stats.br[_IOC_NR(cmd)]++;
+ atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
+ atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
}
}
-static int binder_has_proc_work(struct binder_proc *proc,
- struct binder_thread *thread)
+static int binder_has_thread_work(struct binder_thread *thread)
{
- return !list_empty(&proc->todo) ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ return !binder_worklist_empty(thread->proc, &thread->todo) ||
+ thread->looper_need_return;
}
-static int binder_has_thread_work(struct binder_thread *thread)
+static int binder_put_node_cmd(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user **ptrp,
+ binder_uintptr_t node_ptr,
+ binder_uintptr_t node_cookie,
+ int node_debug_id,
+ uint32_t cmd, const char *cmd_name)
{
- return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
- (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+ void __user *ptr = *ptrp;
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
+ proc->pid, thread->pid, cmd_name, node_debug_id,
+ (u64)node_ptr, (u64)node_cookie);
+
+ *ptrp = ptr;
+ return 0;
+}
+
+static int binder_wait_for_work(struct binder_thread *thread,
+ bool do_proc_work)
+{
+ DEFINE_WAIT(wait);
+ struct binder_proc *proc = thread->proc;
+ int ret = 0;
+
+ freezer_do_not_count();
+ binder_inner_proc_lock(proc);
+ for (;;) {
+ prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
+ if (binder_has_work_ilocked(thread, do_proc_work))
+ break;
+ if (do_proc_work)
+ list_add(&thread->waiting_thread_node,
+ &proc->waiting_threads);
+ binder_inner_proc_unlock(proc);
+ schedule();
+ binder_inner_proc_lock(proc);
+ list_del_init(&thread->waiting_thread_node);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ }
+ finish_wait(&thread->wait, &wait);
+ binder_inner_proc_unlock(proc);
+ freezer_count();
+
+ return ret;
}
static int binder_thread_read(struct binder_proc *proc,
@@ -2670,37 +3711,15 @@ static int binder_thread_read(struct binder_proc *proc,
}
retry:
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo);
-
- if (thread->return_error != BR_OK && ptr < end) {
- if (thread->return_error2 != BR_OK) {
- if (put_user(thread->return_error2, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error2);
- if (ptr == end)
- goto done;
- thread->return_error2 = BR_OK;
- }
- if (put_user(thread->return_error, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- binder_stat_br(proc, thread, thread->return_error);
- thread->return_error = BR_OK;
- goto done;
- }
-
+ binder_inner_proc_lock(proc);
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
+ binder_inner_proc_unlock(proc);
thread->looper |= BINDER_LOOPER_STATE_WAITING;
- if (wait_for_proc_work)
- proc->ready_threads++;
-
- binder_unlock(__func__);
trace_binder_wait_for_work(wait_for_proc_work,
!!thread->transaction_stack,
- !list_empty(&thread->todo));
+ !binder_worklist_empty(proc, &thread->todo));
if (wait_for_proc_work) {
if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))) {
@@ -2710,23 +3729,15 @@ retry:
binder_stop_on_user_error < 2);
}
binder_set_nice(proc->default_priority);
- if (non_block) {
- if (!binder_has_proc_work(proc, thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
- } else {
- if (non_block) {
- if (!binder_has_thread_work(thread))
- ret = -EAGAIN;
- } else
- ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
}
- binder_lock(__func__);
+ if (non_block) {
+ if (!binder_has_work(thread, wait_for_proc_work))
+ ret = -EAGAIN;
+ } else {
+ ret = binder_wait_for_work(thread, wait_for_proc_work);
+ }
- if (wait_for_proc_work)
- proc->ready_threads--;
thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
if (ret)
@@ -2735,31 +3746,52 @@ retry:
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
- struct binder_work *w;
+ struct binder_work *w = NULL;
+ struct list_head *list = NULL;
struct binder_transaction *t = NULL;
+ struct binder_thread *t_from;
+
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&thread->todo))
+ list = &thread->todo;
+ else if (!binder_worklist_empty_ilocked(&proc->todo) &&
+ wait_for_proc_work)
+ list = &proc->todo;
+ else {
+ binder_inner_proc_unlock(proc);
- if (!list_empty(&thread->todo)) {
- w = list_first_entry(&thread->todo, struct binder_work,
- entry);
- } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
- w = list_first_entry(&proc->todo, struct binder_work,
- entry);
- } else {
/* no data added */
- if (ptr - buffer == 4 &&
- !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+ if (ptr - buffer == 4 && !thread->looper_need_return)
goto retry;
break;
}
- if (end - ptr < sizeof(tr) + 4)
+ if (end - ptr < sizeof(tr) + 4) {
+ binder_inner_proc_unlock(proc);
break;
+ }
+ w = binder_dequeue_work_head_ilocked(list);
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
+ binder_inner_proc_unlock(proc);
t = container_of(w, struct binder_transaction, work);
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ WARN_ON(e->cmd == BR_OK);
+ binder_inner_proc_unlock(proc);
+ if (put_user(e->cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ e->cmd = BR_OK;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
+ binder_inner_proc_unlock(proc);
cmd = BR_TRANSACTION_COMPLETE;
if (put_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
@@ -2769,113 +3801,134 @@ retry:
binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
"%d:%d BR_TRANSACTION_COMPLETE\n",
proc->pid, thread->pid);
-
- list_del(&w->entry);
kfree(w);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
} break;
case BINDER_WORK_NODE: {
struct binder_node *node = container_of(w, struct binder_node, work);
- uint32_t cmd = BR_NOOP;
- const char *cmd_name;
- int strong = node->internal_strong_refs || node->local_strong_refs;
- int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-
- if (weak && !node->has_weak_ref) {
- cmd = BR_INCREFS;
- cmd_name = "BR_INCREFS";
+ int strong, weak;
+ binder_uintptr_t node_ptr = node->ptr;
+ binder_uintptr_t node_cookie = node->cookie;
+ int node_debug_id = node->debug_id;
+ int has_weak_ref;
+ int has_strong_ref;
+ void __user *orig_ptr = ptr;
+
+ BUG_ON(proc != node->proc);
+ strong = node->internal_strong_refs ||
+ node->local_strong_refs;
+ weak = !hlist_empty(&node->refs) ||
+ node->local_weak_refs ||
+ node->tmp_refs || strong;
+ has_strong_ref = node->has_strong_ref;
+ has_weak_ref = node->has_weak_ref;
+
+ if (weak && !has_weak_ref) {
node->has_weak_ref = 1;
node->pending_weak_ref = 1;
node->local_weak_refs++;
- } else if (strong && !node->has_strong_ref) {
- cmd = BR_ACQUIRE;
- cmd_name = "BR_ACQUIRE";
+ }
+ if (strong && !has_strong_ref) {
node->has_strong_ref = 1;
node->pending_strong_ref = 1;
node->local_strong_refs++;
- } else if (!strong && node->has_strong_ref) {
- cmd = BR_RELEASE;
- cmd_name = "BR_RELEASE";
+ }
+ if (!strong && has_strong_ref)
node->has_strong_ref = 0;
- } else if (!weak && node->has_weak_ref) {
- cmd = BR_DECREFS;
- cmd_name = "BR_DECREFS";
+ if (!weak && has_weak_ref)
node->has_weak_ref = 0;
- }
- if (cmd != BR_NOOP) {
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(node->ptr,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- if (put_user(node->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
-
- binder_stat_br(proc, thread, cmd);
- binder_debug(BINDER_DEBUG_USER_REFS,
- "%d:%d %s %d u%016llx c%016llx\n",
- proc->pid, thread->pid, cmd_name,
- node->debug_id,
- (u64)node->ptr, (u64)node->cookie);
- } else {
- list_del_init(&w->entry);
- if (!weak && !strong) {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx deleted\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- rb_erase(&node->rb_node, &proc->nodes);
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
- } else {
- binder_debug(BINDER_DEBUG_INTERNAL_REFS,
- "%d:%d node %d u%016llx c%016llx state unchanged\n",
- proc->pid, thread->pid,
- node->debug_id,
- (u64)node->ptr,
- (u64)node->cookie);
- }
- }
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx deleted\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
+ binder_node_lock(node);
+ /*
+ * Acquire the node lock before freeing the
+ * node to serialize with other threads that
+ * may have been holding the node lock while
+ * decrementing this node (avoids race where
+ * this thread frees while the other thread
+ * is unlocking the node after the final
+ * decrement)
+ */
+ binder_node_unlock(node);
+ binder_free_node(node);
+ } else
+ binder_inner_proc_unlock(proc);
+
+ if (weak && !has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_INCREFS, "BR_INCREFS");
+ if (!ret && strong && !has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_ACQUIRE, "BR_ACQUIRE");
+ if (!ret && !strong && has_strong_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_RELEASE, "BR_RELEASE");
+ if (!ret && !weak && has_weak_ref)
+ ret = binder_put_node_cmd(
+ proc, thread, &ptr, node_ptr,
+ node_cookie, node_debug_id,
+ BR_DECREFS, "BR_DECREFS");
+ if (orig_ptr == ptr)
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "%d:%d node %d u%016llx c%016llx state unchanged\n",
+ proc->pid, thread->pid,
+ node_debug_id,
+ (u64)node_ptr,
+ (u64)node_cookie);
+ if (ret)
+ return ret;
} break;
case BINDER_WORK_DEAD_BINDER:
case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
struct binder_ref_death *death;
uint32_t cmd;
+ binder_uintptr_t cookie;
death = container_of(w, struct binder_ref_death, work);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
else
cmd = BR_DEAD_BINDER;
- if (put_user(cmd, (uint32_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(uint32_t);
- if (put_user(death->cookie,
- (binder_uintptr_t __user *)ptr))
- return -EFAULT;
- ptr += sizeof(binder_uintptr_t);
- binder_stat_br(proc, thread, cmd);
+ cookie = death->cookie;
+
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx\n",
proc->pid, thread->pid,
cmd == BR_DEAD_BINDER ?
"BR_DEAD_BINDER" :
"BR_CLEAR_DEATH_NOTIFICATION_DONE",
- (u64)death->cookie);
-
+ (u64)cookie);
if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
- list_del(&w->entry);
+ binder_inner_proc_unlock(proc);
kfree(death);
binder_stats_deleted(BINDER_STAT_DEATH);
- } else
- list_move(&w->entry, &proc->delivered_death);
+ } else {
+ binder_enqueue_work_ilocked(
+ w, &proc->delivered_death);
+ binder_inner_proc_unlock(proc);
+ }
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(cookie,
+ (binder_uintptr_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(binder_uintptr_t);
+ binder_stat_br(proc, thread, cmd);
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
@@ -2907,8 +3960,9 @@ retry:
tr.flags = t->flags;
tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
- if (t->from) {
- struct task_struct *sender = t->from->proc->tsk;
+ t_from = binder_get_txn_from(t);
+ if (t_from) {
+ struct task_struct *sender = t_from->proc->tsk;
tr.sender_pid = task_tgid_nr_ns(sender,
task_active_pid_ns(current));
@@ -2918,18 +3972,24 @@ retry:
tr.data_size = t->buffer->data_size;
tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)(
- (uintptr_t)t->buffer->data +
- proc->user_buffer_offset);
+ tr.data.ptr.buffer = (binder_uintptr_t)
+ ((uintptr_t)t->buffer->data +
+ binder_alloc_get_user_buffer_offset(&proc->alloc));
tr.data.ptr.offsets = tr.data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
- if (put_user(cmd, (uint32_t __user *)ptr))
+ if (put_user(cmd, (uint32_t __user *)ptr)) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr)))
+ if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
return -EFAULT;
+ }
ptr += sizeof(tr);
trace_binder_transaction_received(t);
@@ -2939,21 +3999,22 @@ retry:
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
"BR_REPLY",
- t->debug_id, t->from ? t->from->proc->pid : 0,
- t->from ? t->from->pid : 0, cmd,
+ t->debug_id, t_from ? t_from->proc->pid : 0,
+ t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
(u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
- list_del(&t->work.entry);
+ if (t_from)
+ binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
thread->transaction_stack = t;
+ binder_inner_proc_unlock(thread->proc);
} else {
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
break;
}
@@ -2961,29 +4022,36 @@ retry:
done:
*consumed = ptr - buffer;
- if (proc->requested_threads + proc->ready_threads == 0 &&
+ binder_inner_proc_lock(proc);
+ if (proc->requested_threads == 0 &&
+ list_empty(&thread->proc->waiting_threads) &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
proc->requested_threads++;
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BR_SPAWN_LOOPER\n",
proc->pid, thread->pid);
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
- }
+ } else
+ binder_inner_proc_unlock(proc);
return 0;
}
-static void binder_release_work(struct list_head *list)
+static void binder_release_work(struct binder_proc *proc,
+ struct list_head *list)
{
struct binder_work *w;
- while (!list_empty(list)) {
- w = list_first_entry(list, struct binder_work, entry);
- list_del_init(&w->entry);
+ while (1) {
+ w = binder_dequeue_work_head(proc, list);
+ if (!w)
+ return;
+
switch (w->type) {
case BINDER_WORK_TRANSACTION: {
struct binder_transaction *t;
@@ -2996,11 +4064,17 @@ static void binder_release_work(struct list_head *list)
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered transaction %d\n",
t->debug_id);
- t->buffer->transaction = NULL;
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ binder_free_transaction(t);
}
} break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "undelivered TRANSACTION_ERROR: %u\n",
+ e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE: {
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"undelivered TRANSACTION_COMPLETE\n");
@@ -3027,7 +4101,8 @@ static void binder_release_work(struct list_head *list)
}
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+static struct binder_thread *binder_get_thread_ilocked(
+ struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
@@ -3042,38 +4117,99 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
- break;
+ return thread;
}
- if (*p == NULL) {
- thread = kzalloc(sizeof(*thread), GFP_KERNEL);
- if (thread == NULL)
+ if (!new_thread)
+ return NULL;
+ thread = new_thread;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ atomic_set(&thread->tmp_ref, 0);
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper_need_return = true;
+ thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->return_error.cmd = BR_OK;
+ thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
+ thread->reply_error.cmd = BR_OK;
+ INIT_LIST_HEAD(&new_thread->waiting_thread_node);
+ return thread;
+}
+
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread;
+ struct binder_thread *new_thread;
+
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, NULL);
+ binder_inner_proc_unlock(proc);
+ if (!thread) {
+ new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (new_thread == NULL)
return NULL;
- binder_stats_created(BINDER_STAT_THREAD);
- thread->proc = proc;
- thread->pid = current->pid;
- init_waitqueue_head(&thread->wait);
- INIT_LIST_HEAD(&thread->todo);
- rb_link_node(&thread->rb_node, parent, p);
- rb_insert_color(&thread->rb_node, &proc->threads);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
- thread->return_error = BR_OK;
- thread->return_error2 = BR_OK;
+ binder_inner_proc_lock(proc);
+ thread = binder_get_thread_ilocked(proc, new_thread);
+ binder_inner_proc_unlock(proc);
+ if (thread != new_thread)
+ kfree(new_thread);
}
return thread;
}
-static int binder_free_thread(struct binder_proc *proc,
- struct binder_thread *thread)
+static void binder_free_proc(struct binder_proc *proc)
+{
+ BUG_ON(!list_empty(&proc->todo));
+ BUG_ON(!list_empty(&proc->delivered_death));
+ binder_alloc_deferred_release(&proc->alloc);
+ put_task_struct(proc->tsk);
+ binder_stats_deleted(BINDER_STAT_PROC);
+ kfree(proc);
+}
+
+static void binder_free_thread(struct binder_thread *thread)
+{
+ BUG_ON(!list_empty(&thread->todo));
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_proc_dec_tmpref(thread->proc);
+ kfree(thread);
+}
+
+static int binder_thread_release(struct binder_proc *proc,
+ struct binder_thread *thread)
{
struct binder_transaction *t;
struct binder_transaction *send_reply = NULL;
int active_transactions = 0;
+ struct binder_transaction *last_t = NULL;
+ binder_inner_proc_lock(thread->proc);
+ /*
+ * take a ref on the proc so it survives
+ * after we remove this thread from proc->threads.
+ * The corresponding dec is when we actually
+ * free the thread in binder_free_thread()
+ */
+ proc->tmp_ref++;
+ /*
+ * take a ref on this thread to ensure it
+ * survives while we are releasing it
+ */
+ atomic_inc(&thread->tmp_ref);
rb_erase(&thread->rb_node, &proc->threads);
t = thread->transaction_stack;
- if (t && t->to_thread == thread)
- send_reply = t;
+ if (t) {
+ spin_lock(&t->lock);
+ if (t->to_thread == thread)
+ send_reply = t;
+ }
+ thread->is_dead = true;
+
while (t) {
+ last_t = t;
active_transactions++;
binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
"release %d:%d transaction %d %s, still active\n",
@@ -3094,12 +4230,16 @@ static int binder_free_thread(struct binder_proc *proc,
t = t->from_parent;
} else
BUG();
+ spin_unlock(&last_t->lock);
+ if (t)
+ spin_lock(&t->lock);
}
+ binder_inner_proc_unlock(thread->proc);
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
- binder_release_work(&thread->todo);
- kfree(thread);
- binder_stats_deleted(BINDER_STAT_THREAD);
+ binder_release_work(proc, &thread->todo);
+ binder_thread_dec_tmpref(thread);
return active_transactions;
}
@@ -3108,30 +4248,24 @@ static unsigned int binder_poll(struct file *filp,
{
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread = NULL;
- int wait_for_proc_work;
-
- binder_lock(__func__);
+ bool wait_for_proc_work;
thread = binder_get_thread(proc);
- wait_for_proc_work = thread->transaction_stack == NULL &&
- list_empty(&thread->todo) && thread->return_error == BR_OK;
+ binder_inner_proc_lock(thread->proc);
+ thread->looper |= BINDER_LOOPER_STATE_POLL;
+ wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
- binder_unlock(__func__);
+ binder_inner_proc_unlock(thread->proc);
+
+ if (binder_has_work(thread, wait_for_proc_work))
+ return POLLIN;
+
+ poll_wait(filp, &thread->wait, wait);
+
+ if (binder_has_thread_work(thread))
+ return POLLIN;
- if (wait_for_proc_work) {
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- poll_wait(filp, &proc->wait, wait);
- if (binder_has_proc_work(proc, thread))
- return POLLIN;
- } else {
- if (binder_has_thread_work(thread))
- return POLLIN;
- poll_wait(filp, &thread->wait, wait);
- if (binder_has_thread_work(thread))
- return POLLIN;
- }
return 0;
}
@@ -3178,8 +4312,10 @@ static int binder_ioctl_write_read(struct file *filp,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
- if (!list_empty(&proc->todo))
- wake_up_interruptible(&proc->wait);
+ binder_inner_proc_lock(proc);
+ if (!binder_worklist_empty_ilocked(&proc->todo))
+ binder_wakeup_proc_ilocked(proc);
+ binder_inner_proc_unlock(proc);
if (ret < 0) {
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
@@ -3204,9 +4340,10 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
int ret = 0;
struct binder_proc *proc = filp->private_data;
struct binder_context *context = proc->context;
-
+ struct binder_node *new_node;
kuid_t curr_euid = current_euid();
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node) {
pr_err("BINDER_SET_CONTEXT_MGR already set\n");
ret = -EBUSY;
@@ -3227,19 +4364,49 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- context->binder_context_mgr_node = binder_new_node(proc, 0, 0);
- if (!context->binder_context_mgr_node) {
+ new_node = binder_new_node(proc, NULL);
+ if (!new_node) {
ret = -ENOMEM;
goto out;
}
- context->binder_context_mgr_node->local_weak_refs++;
- context->binder_context_mgr_node->local_strong_refs++;
- context->binder_context_mgr_node->has_strong_ref = 1;
- context->binder_context_mgr_node->has_weak_ref = 1;
+ binder_node_lock(new_node);
+ new_node->local_weak_refs++;
+ new_node->local_strong_refs++;
+ new_node->has_strong_ref = 1;
+ new_node->has_weak_ref = 1;
+ context->binder_context_mgr_node = new_node;
+ binder_node_unlock(new_node);
+ binder_put_node(new_node);
out:
+ mutex_unlock(&context->context_mgr_node_lock);
return ret;
}
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info)
+{
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_inner_proc_lock(proc);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_inner_proc_unlock(proc);
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -3251,13 +4418,14 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
- binder_lock(__func__);
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
@@ -3270,12 +4438,19 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (ret)
goto err;
break;
- case BINDER_SET_MAX_THREADS:
- if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ case BINDER_SET_MAX_THREADS: {
+ int max_threads;
+
+ if (copy_from_user(&max_threads, ubuf,
+ sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
+ binder_inner_proc_lock(proc);
+ proc->max_threads = max_threads;
+ binder_inner_proc_unlock(proc);
break;
+ }
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp);
if (ret)
@@ -3284,7 +4459,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
- binder_free_thread(proc, thread);
+ binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
@@ -3301,6 +4476,24 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
break;
}
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -3308,8 +4501,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
ret = 0;
err:
if (thread)
- thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
- binder_unlock(__func__);
+ thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -ERESTARTSYS)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
@@ -3338,8 +4530,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
- proc->vma = NULL;
- proc->vma_vm_mm = NULL;
+ binder_alloc_vma_close(&proc->alloc);
binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
@@ -3357,20 +4548,18 @@ static const struct vm_operations_struct binder_vm_ops = {
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
- struct vm_struct *area;
struct binder_proc *proc = filp->private_data;
const char *failure_string;
- struct binder_buffer *buffer;
- if (proc->tsk != current)
+ if (proc->tsk != current->group_leader)
return -EINVAL;
if ((vma->vm_end - vma->vm_start) > SZ_4M)
vma->vm_end = vma->vm_start + SZ_4M;
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
- proc->pid, vma->vm_start, vma->vm_end,
+ "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ __func__, proc->pid, vma->vm_start, vma->vm_end,
(vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
(unsigned long)pgprot_val(vma->vm_page_prot));
@@ -3380,73 +4569,15 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
goto err_bad_arg;
}
vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
- mutex_lock(&binder_mmap_lock);
- if (proc->buffer) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
-
- area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
- if (area == NULL) {
- ret = -ENOMEM;
- failure_string = "get_vm_area";
- goto err_get_vm_area_failed;
- }
- proc->buffer = area->addr;
- proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
- mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
- if (cache_is_vipt_aliasing()) {
- while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
- pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
- vma->vm_start += PAGE_SIZE;
- }
- }
-#endif
- proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
- if (proc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- proc->buffer_size = vma->vm_end - vma->vm_start;
-
vma->vm_ops = &binder_vm_ops;
vma->vm_private_data = proc;
- if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
- ret = -ENOMEM;
- failure_string = "alloc small buf";
- goto err_alloc_small_buf_failed;
- }
- buffer = proc->buffer;
- INIT_LIST_HEAD(&proc->buffers);
- list_add(&buffer->entry, &proc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(proc, buffer);
- proc->free_async_space = proc->buffer_size / 2;
- barrier();
+ ret = binder_alloc_mmap_handler(&proc->alloc, vma);
+ if (ret)
+ return ret;
proc->files = get_files_struct(current);
- proc->vma = vma;
- proc->vma_vm_mm = vma->vm_mm;
-
- /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
- proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
return 0;
-err_alloc_small_buf_failed:
- kfree(proc->pages);
- proc->pages = NULL;
-err_alloc_pages_failed:
- mutex_lock(&binder_mmap_lock);
- vfree(proc->buffer);
- proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
- mutex_unlock(&binder_mmap_lock);
err_bad_arg:
pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
@@ -3464,24 +4595,26 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
+ spin_lock_init(&proc->inner_lock);
+ spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
- init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
binder_dev = container_of(filp->private_data, struct binder_device,
miscdev);
proc->context = &binder_dev->context;
-
- binder_lock(__func__);
+ binder_alloc_init(&proc->alloc);
binder_stats_created(BINDER_STAT_PROC);
- hlist_add_head(&proc->proc_node, &binder_procs);
proc->pid = current->group_leader->pid;
INIT_LIST_HEAD(&proc->delivered_death);
+ INIT_LIST_HEAD(&proc->waiting_threads);
filp->private_data = proc;
- binder_unlock(__func__);
+ mutex_lock(&binder_procs_lock);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ mutex_unlock(&binder_procs_lock);
if (binder_debugfs_dir_entry_proc) {
char strbuf[11];
@@ -3517,16 +4650,17 @@ static void binder_deferred_flush(struct binder_proc *proc)
struct rb_node *n;
int wake_count = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
- thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->looper_need_return = true;
if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
wake_up_interruptible(&thread->wait);
wake_count++;
}
}
- wake_up_interruptible_all(&proc->wait);
+ binder_inner_proc_unlock(proc);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
"binder_flush: %d woke %d threads\n", proc->pid,
@@ -3547,13 +4681,21 @@ static int binder_node_release(struct binder_node *node, int refs)
{
struct binder_ref *ref;
int death = 0;
+ struct binder_proc *proc = node->proc;
- list_del_init(&node->work.entry);
- binder_release_work(&node->async_todo);
+ binder_release_work(proc, &node->async_todo);
- if (hlist_empty(&node->refs)) {
- kfree(node);
- binder_stats_deleted(BINDER_STAT_NODE);
+ binder_node_lock(node);
+ binder_inner_proc_lock(proc);
+ binder_dequeue_work_ilocked(&node->work);
+ /*
+	 * The caller must have taken a temporary ref on the node.
+ */
+ BUG_ON(!node->tmp_refs);
+ if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
+ binder_inner_proc_unlock(proc);
+ binder_node_unlock(node);
+ binder_free_node(node);
return refs;
}
@@ -3561,45 +4703,58 @@ static int binder_node_release(struct binder_node *node, int refs)
node->proc = NULL;
node->local_strong_refs = 0;
node->local_weak_refs = 0;
+ binder_inner_proc_unlock(proc);
+
+ spin_lock(&binder_dead_nodes_lock);
hlist_add_head(&node->dead_node, &binder_dead_nodes);
+ spin_unlock(&binder_dead_nodes_lock);
hlist_for_each_entry(ref, &node->refs, node_entry) {
refs++;
-
- if (!ref->death)
+ /*
+ * Need the node lock to synchronize
+ * with new notification requests and the
+ * inner lock to synchronize with queued
+ * death notifications.
+ */
+ binder_inner_proc_lock(ref->proc);
+ if (!ref->death) {
+ binder_inner_proc_unlock(ref->proc);
continue;
+ }
death++;
- if (list_empty(&ref->death->work.entry)) {
- ref->death->work.type = BINDER_WORK_DEAD_BINDER;
- list_add_tail(&ref->death->work.entry,
- &ref->proc->todo);
- wake_up_interruptible(&ref->proc->wait);
- } else
- BUG();
+ BUG_ON(!list_empty(&ref->death->work.entry));
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ binder_enqueue_work_ilocked(&ref->death->work,
+ &ref->proc->todo);
+ binder_wakeup_proc_ilocked(ref->proc);
+ binder_inner_proc_unlock(ref->proc);
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
+ binder_node_unlock(node);
+ binder_put_node(node);
return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
- struct binder_transaction *t;
struct binder_context *context = proc->context;
struct rb_node *n;
- int threads, nodes, incoming_refs, outgoing_refs, buffers,
- active_transactions, page_count;
+ int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
- BUG_ON(proc->vma);
BUG_ON(proc->files);
+ mutex_lock(&binder_procs_lock);
hlist_del(&proc->proc_node);
+ mutex_unlock(&binder_procs_lock);
+ mutex_lock(&context->context_mgr_node_lock);
if (context->binder_context_mgr_node &&
context->binder_context_mgr_node->proc == proc) {
binder_debug(BINDER_DEBUG_DEAD_BINDER,
@@ -3607,15 +4762,25 @@ static void binder_deferred_release(struct binder_proc *proc)
__func__, proc->pid);
context->binder_context_mgr_node = NULL;
}
+ mutex_unlock(&context->context_mgr_node_lock);
+ binder_inner_proc_lock(proc);
+ /*
+ * Make sure proc stays alive after we
+ * remove all the threads
+ */
+ proc->tmp_ref++;
+ proc->is_dead = true;
threads = 0;
active_transactions = 0;
while ((n = rb_first(&proc->threads))) {
struct binder_thread *thread;
thread = rb_entry(n, struct binder_thread, rb_node);
+ binder_inner_proc_unlock(proc);
threads++;
- active_transactions += binder_free_thread(proc, thread);
+ active_transactions += binder_thread_release(proc, thread);
+ binder_inner_proc_lock(proc);
}
nodes = 0;
@@ -3625,73 +4790,42 @@ static void binder_deferred_release(struct binder_proc *proc)
node = rb_entry(n, struct binder_node, rb_node);
nodes++;
+ /*
+ * take a temporary ref on the node before
+ * calling binder_node_release() which will either
+ * kfree() the node or call binder_put_node()
+ */
+ binder_inc_node_tmpref_ilocked(node);
rb_erase(&node->rb_node, &proc->nodes);
+ binder_inner_proc_unlock(proc);
incoming_refs = binder_node_release(node, incoming_refs);
+ binder_inner_proc_lock(proc);
}
+ binder_inner_proc_unlock(proc);
outgoing_refs = 0;
+ binder_proc_lock(proc);
while ((n = rb_first(&proc->refs_by_desc))) {
struct binder_ref *ref;
ref = rb_entry(n, struct binder_ref, rb_node_desc);
outgoing_refs++;
- binder_delete_ref(ref);
+ binder_cleanup_ref_olocked(ref);
+ binder_proc_unlock(proc);
+ binder_free_ref(ref);
+ binder_proc_lock(proc);
}
+ binder_proc_unlock(proc);
- binder_release_work(&proc->todo);
- binder_release_work(&proc->delivered_death);
-
- buffers = 0;
- while ((n = rb_first(&proc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
- buffer = rb_entry(n, struct binder_buffer, rb_node);
-
- t = buffer->transaction;
- if (t) {
- t->buffer = NULL;
- buffer->transaction = NULL;
- pr_err("release proc %d, transaction %d, not freed\n",
- proc->pid, t->debug_id);
- /*BUG();*/
- }
-
- binder_free_buf(proc, buffer);
- buffers++;
- }
-
- binder_stats_deleted(BINDER_STAT_PROC);
-
- page_count = 0;
- if (proc->pages) {
- int i;
-
- for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
- void *page_addr;
-
- if (!proc->pages[i])
- continue;
-
- page_addr = proc->buffer + i * PAGE_SIZE;
- binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %p not freed\n",
- __func__, proc->pid, i, page_addr);
- unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
- __free_page(proc->pages[i]);
- page_count++;
- }
- kfree(proc->pages);
- vfree(proc->buffer);
- }
-
- put_task_struct(proc->tsk);
+ binder_release_work(proc, &proc->todo);
+ binder_release_work(proc, &proc->delivered_death);
binder_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+ "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
__func__, proc->pid, threads, nodes, incoming_refs,
- outgoing_refs, active_transactions, buffers, page_count);
+ outgoing_refs, active_transactions);
- kfree(proc);
+ binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
@@ -3702,7 +4836,6 @@ static void binder_deferred_func(struct work_struct *work)
int defer;
do {
- binder_lock(__func__);
mutex_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
@@ -3729,7 +4862,6 @@ static void binder_deferred_func(struct work_struct *work)
if (defer & BINDER_DEFERRED_RELEASE)
binder_deferred_release(proc); /* frees proc */
- binder_unlock(__func__);
if (files)
put_files_struct(files);
} while (proc);
@@ -3749,41 +4881,51 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
mutex_unlock(&binder_deferred_lock);
}
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
- struct binder_transaction *t)
+static void print_binder_transaction_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ struct binder_transaction *t)
{
+ struct binder_proc *to_proc;
+ struct binder_buffer *buffer = t->buffer;
+
+ spin_lock(&t->lock);
+ to_proc = t->to_proc;
seq_printf(m,
"%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
- t->to_proc ? t->to_proc->pid : 0,
+ to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
t->code, t->flags, t->priority, t->need_reply);
- if (t->buffer == NULL) {
+ spin_unlock(&t->lock);
+
+ if (proc != to_proc) {
+ /*
+ * Can only safely deref buffer if we are holding the
+ * correct proc inner lock for this node
+ */
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (buffer == NULL) {
seq_puts(m, " buffer free\n");
return;
}
- if (t->buffer->target_node)
- seq_printf(m, " node %d",
- t->buffer->target_node->debug_id);
+ if (buffer->target_node)
+ seq_printf(m, " node %d", buffer->target_node->debug_id);
seq_printf(m, " size %zd:%zd data %p\n",
- t->buffer->data_size, t->buffer->offsets_size,
- t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
-{
- seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->data,
buffer->data_size, buffer->offsets_size,
- buffer->transaction ? "active" : "delivered");
+ buffer->data);
}
-static void print_binder_work(struct seq_file *m, const char *prefix,
- const char *transaction_prefix,
- struct binder_work *w)
+static void print_binder_work_ilocked(struct seq_file *m,
+ struct binder_proc *proc,
+ const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
{
struct binder_node *node;
struct binder_transaction *t;
@@ -3791,8 +4933,16 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
switch (w->type) {
case BINDER_WORK_TRANSACTION:
t = container_of(w, struct binder_transaction, work);
- print_binder_transaction(m, transaction_prefix, t);
+ print_binder_transaction_ilocked(
+ m, proc, transaction_prefix, t);
break;
+ case BINDER_WORK_RETURN_ERROR: {
+ struct binder_error *e = container_of(
+ w, struct binder_error, work);
+
+ seq_printf(m, "%stransaction error: %u\n",
+ prefix, e->cmd);
+ } break;
case BINDER_WORK_TRANSACTION_COMPLETE:
seq_printf(m, "%stransaction complete\n", prefix);
break;
@@ -3817,40 +4967,46 @@ static void print_binder_work(struct seq_file *m, const char *prefix,
}
}
-static void print_binder_thread(struct seq_file *m,
- struct binder_thread *thread,
- int print_always)
+static void print_binder_thread_ilocked(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
{
struct binder_transaction *t;
struct binder_work *w;
size_t start_pos = m->count;
size_t header_pos;
- seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
+ thread->pid, thread->looper,
+ thread->looper_need_return,
+ atomic_read(&thread->tmp_ref));
header_pos = m->count;
t = thread->transaction_stack;
while (t) {
if (t->from == thread) {
- print_binder_transaction(m,
- " outgoing transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " outgoing transaction", t);
t = t->from_parent;
} else if (t->to_thread == thread) {
- print_binder_transaction(m,
+ print_binder_transaction_ilocked(m, thread->proc,
" incoming transaction", t);
t = t->to_parent;
} else {
- print_binder_transaction(m, " bad transaction", t);
+ print_binder_transaction_ilocked(m, thread->proc,
+ " bad transaction", t);
t = NULL;
}
}
list_for_each_entry(w, &thread->todo, entry) {
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, thread->proc, " ",
+ " pending transaction", w);
}
if (!print_always && m->count == header_pos)
m->count = start_pos;
}
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
+static void print_binder_node_nilocked(struct seq_file *m,
+ struct binder_node *node)
{
struct binder_ref *ref;
struct binder_work *w;
@@ -3860,27 +5016,34 @@ static void print_binder_node(struct seq_file *m, struct binder_node *node)
hlist_for_each_entry(ref, &node->refs, node_entry)
count++;
- seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+ seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
node->debug_id, (u64)node->ptr, (u64)node->cookie,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
- node->internal_strong_refs, count);
+ node->internal_strong_refs, count, node->tmp_refs);
if (count) {
seq_puts(m, " proc");
hlist_for_each_entry(ref, &node->refs, node_entry)
seq_printf(m, " %d", ref->proc->pid);
}
seq_puts(m, "\n");
- list_for_each_entry(w, &node->async_todo, entry)
- print_binder_work(m, " ",
- " pending async transaction", w);
+ if (node->proc) {
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work_ilocked(m, node->proc, " ",
+ " pending async transaction", w);
+ }
}
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+static void print_binder_ref_olocked(struct seq_file *m,
+ struct binder_ref *ref)
{
- seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
- ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
- ref->node->debug_id, ref->strong, ref->weak, ref->death);
+ binder_node_lock(ref->node);
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
+ ref->data.debug_id, ref->data.desc,
+ ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->data.strong,
+ ref->data.weak, ref->death);
+ binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
@@ -3890,36 +5053,60 @@ static void print_binder_proc(struct seq_file *m,
struct rb_node *n;
size_t start_pos = m->count;
size_t header_pos;
+ struct binder_node *last_node = NULL;
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
header_pos = m->count;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
- print_binder_thread(m, rb_entry(n, struct binder_thread,
+ print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
rb_node), print_all);
+
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
struct binder_node *node = rb_entry(n, struct binder_node,
rb_node);
- if (print_all || node->has_async_transaction)
- print_binder_node(m, node);
- }
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the tree
+ * while we print it.
+ */
+ binder_inc_node_tmpref_ilocked(node);
+ /* Need to drop inner lock to take node lock */
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_inner_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_inner_unlock(node);
+ last_node = node;
+ binder_inner_proc_lock(proc);
+ }
+ binder_inner_proc_unlock(proc);
+ if (last_node)
+ binder_put_node(last_node);
+
if (print_all) {
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc);
n != NULL;
n = rb_next(n))
- print_binder_ref(m, rb_entry(n, struct binder_ref,
- rb_node_desc));
+ print_binder_ref_olocked(m, rb_entry(n,
+ struct binder_ref,
+ rb_node_desc));
+ binder_proc_unlock(proc);
}
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
+ binder_alloc_print_allocated(m, &proc->alloc);
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry)
- print_binder_work(m, " ", " pending transaction", w);
+ print_binder_work_ilocked(m, proc, " ",
+ " pending transaction", w);
list_for_each_entry(w, &proc->delivered_death, entry) {
seq_puts(m, " has delivered dead binder\n");
break;
}
+ binder_inner_proc_unlock(proc);
if (!print_all && m->count == header_pos)
m->count = start_pos;
}
@@ -3985,17 +5172,21 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
ARRAY_SIZE(binder_command_strings));
for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
- if (stats->bc[i])
+ int temp = atomic_read(&stats->bc[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_command_strings[i], stats->bc[i]);
+ binder_command_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
ARRAY_SIZE(binder_return_strings));
for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
- if (stats->br[i])
+ int temp = atomic_read(&stats->br[i]);
+
+ if (temp)
seq_printf(m, "%s%s: %d\n", prefix,
- binder_return_strings[i], stats->br[i]);
+ binder_return_strings[i], temp);
}
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
@@ -4003,11 +5194,15 @@ static void print_binder_stats(struct seq_file *m, const char *prefix,
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
ARRAY_SIZE(stats->obj_deleted));
for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
- if (stats->obj_created[i] || stats->obj_deleted[i])
- seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ int created = atomic_read(&stats->obj_created[i]);
+ int deleted = atomic_read(&stats->obj_deleted[i]);
+
+ if (created || deleted)
+ seq_printf(m, "%s%s: active %d total %d\n",
+ prefix,
binder_objstat_strings[i],
- stats->obj_created[i] - stats->obj_deleted[i],
- stats->obj_created[i]);
+ created - deleted,
+ created);
}
}
@@ -4015,51 +5210,61 @@ static void print_binder_proc_stats(struct seq_file *m,
struct binder_proc *proc)
{
struct binder_work *w;
+ struct binder_thread *thread;
struct rb_node *n;
- int count, strong, weak;
+ int count, strong, weak, ready_threads;
+ size_t free_async_space =
+ binder_alloc_get_free_async_space(&proc->alloc);
seq_printf(m, "proc %d\n", proc->pid);
seq_printf(m, "context %s\n", proc->context->name);
count = 0;
+ ready_threads = 0;
+ binder_inner_proc_lock(proc);
for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
count++;
+
+ list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
+ ready_threads++;
+
seq_printf(m, " threads: %d\n", count);
seq_printf(m, " requested threads: %d+%d/%d\n"
" ready threads %d\n"
" free async space %zd\n", proc->requested_threads,
proc->requested_threads_started, proc->max_threads,
- proc->ready_threads, proc->free_async_space);
+ ready_threads,
+ free_async_space);
count = 0;
for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
count++;
+ binder_inner_proc_unlock(proc);
seq_printf(m, " nodes: %d\n", count);
count = 0;
strong = 0;
weak = 0;
+ binder_proc_lock(proc);
for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
struct binder_ref *ref = rb_entry(n, struct binder_ref,
rb_node_desc);
count++;
- strong += ref->strong;
- weak += ref->weak;
+ strong += ref->data.strong;
+ weak += ref->data.weak;
}
+ binder_proc_unlock(proc);
seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
- count = 0;
- for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
+ count = binder_alloc_get_allocated_count(&proc->alloc);
seq_printf(m, " buffers: %d\n", count);
+ binder_alloc_print_pages(m, &proc->alloc);
+
count = 0;
+ binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->todo, entry) {
- switch (w->type) {
- case BINDER_WORK_TRANSACTION:
+ if (w->type == BINDER_WORK_TRANSACTION)
count++;
- break;
- default:
- break;
- }
}
+ binder_inner_proc_unlock(proc);
seq_printf(m, " pending transactions: %d\n", count);
print_binder_stats(m, " ", &proc->stats);
@@ -4070,57 +5275,67 @@ static int binder_state_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
struct binder_node *node;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ struct binder_node *last_node = NULL;
seq_puts(m, "binder state:\n");
+ spin_lock(&binder_dead_nodes_lock);
if (!hlist_empty(&binder_dead_nodes))
seq_puts(m, "dead nodes:\n");
- hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
- print_binder_node(m, node);
-
+ hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
+ /*
+ * take a temporary reference on the node so it
+ * survives and isn't removed from the list
+ * while we print it.
+ */
+ node->tmp_refs++;
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+ binder_node_lock(node);
+ print_binder_node_nilocked(m, node);
+ binder_node_unlock(node);
+ last_node = node;
+ spin_lock(&binder_dead_nodes_lock);
+ }
+ spin_unlock(&binder_dead_nodes_lock);
+ if (last_node)
+ binder_put_node(last_node);
+
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 1);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder stats:\n");
print_binder_stats(m, "", &binder_stats);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc_stats(m, proc);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
struct binder_proc *proc;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
seq_puts(m, "binder transactions:\n");
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(proc, &binder_procs, proc_node)
print_binder_proc(m, proc, 0);
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
@@ -4128,44 +5343,63 @@ static int binder_proc_show(struct seq_file *m, void *unused)
{
struct binder_proc *itr;
int pid = (unsigned long)m->private;
- int do_lock = !binder_debug_no_lock;
-
- if (do_lock)
- binder_lock(__func__);
+ mutex_lock(&binder_procs_lock);
hlist_for_each_entry(itr, &binder_procs, proc_node) {
if (itr->pid == pid) {
seq_puts(m, "binder proc state:\n");
print_binder_proc(m, itr, 1);
}
}
- if (do_lock)
- binder_unlock(__func__);
+ mutex_unlock(&binder_procs_lock);
+
return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
struct binder_transaction_log_entry *e)
{
+ int debug_id = READ_ONCE(e->debug_id_done);
+ /*
+ * read barrier to guarantee debug_id_done read before
+ * we print the log values
+ */
+ smp_rmb();
seq_printf(m,
- "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d\n",
+ "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
e->debug_id, (e->call_type == 2) ? "reply" :
((e->call_type == 1) ? "async" : "call "), e->from_proc,
e->from_thread, e->to_proc, e->to_thread, e->context_name,
- e->to_node, e->target_handle, e->data_size, e->offsets_size);
+ e->to_node, e->target_handle, e->data_size, e->offsets_size,
+ e->return_error, e->return_error_param,
+ e->return_error_line);
+ /*
+ * read-barrier to guarantee read of debug_id_done after
+ * done printing the fields of the entry
+ */
+ smp_rmb();
+ seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
+ "\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
struct binder_transaction_log *log = m->private;
+ unsigned int log_cur = atomic_read(&log->cur);
+ unsigned int count;
+ unsigned int cur;
int i;
- if (log->full) {
- for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
+ count = log_cur + 1;
+ cur = count < ARRAY_SIZE(log->entry) && !log->full ?
+ 0 : count % ARRAY_SIZE(log->entry);
+ if (count > ARRAY_SIZE(log->entry) || log->full)
+ count = ARRAY_SIZE(log->entry);
+ for (i = 0; i < count; i++) {
+ unsigned int index = cur++ % ARRAY_SIZE(log->entry);
+
+ print_binder_transaction_log_entry(m, &log->entry[index]);
}
- for (i = 0; i < log->next; i++)
- print_binder_transaction_log_entry(m, &log->entry[i]);
return 0;
}
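+/*
+ * Writer-side pairing (illustrative sketch; the store side is not shown in
+ * this hunk): an entry is marked incomplete, filled in, and only then
+ * stamped with its debug_id, which is what lets the smp_rmb() pair above
+ * detect entries that were overwritten while being printed:
+ *
+ *	WRITE_ONCE(e->debug_id_done, 0);
+ *	smp_wmb();
+ *	(fill in e->debug_id, e->from_proc and the remaining fields)
+ *	smp_wmb();
+ *	WRITE_ONCE(e->debug_id_done, t_debug_id);
+ */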
@@ -4200,6 +5434,7 @@ static int __init init_binder_device(const char *name)
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
+ mutex_init(&binder_device->context.context_mgr_node_lock);
ret = misc_register(&binder_device->miscdev);
if (ret < 0) {
@@ -4215,10 +5450,15 @@ static int __init init_binder_device(const char *name)
static int __init binder_init(void)
{
int ret;
- char *device_name, *device_names;
+ char *device_name, *device_names, *device_tmp;
struct binder_device *device;
struct hlist_node *tmp;
+ binder_alloc_shrinker_init();
+
+ atomic_set(&binder_transaction_log.cur, ~0U);
+ atomic_set(&binder_transaction_log_failed.cur, ~0U);
+
binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
if (binder_debugfs_dir_entry_root)
binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
@@ -4263,7 +5503,8 @@ static int __init binder_init(void)
}
strcpy(device_names, binder_devices_param);
- while ((device_name = strsep(&device_names, ","))) {
+ device_tmp = device_names;
+ while ((device_name = strsep(&device_tmp, ","))) {
ret = init_binder_device(device_name);
if (ret)
goto err_init_binder_device_failed;
@@ -4277,6 +5518,9 @@ err_init_binder_device_failed:
hlist_del(&device->hlist);
kfree(device);
}
+
+ kfree(device_names);
+
err_alloc_device_names_failed:
debugfs_remove_recursive(binder_debugfs_dir_entry_root);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
new file mode 100644
index 000000000000..8fe165844e47
--- /dev/null
+++ b/drivers/android/binder_alloc.c
@@ -0,0 +1,1009 @@
+/* binder_alloc.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/list.h>
+#include <linux/sched/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/rbtree.h>
+#include <linux/seq_file.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/list_lru.h>
+#include "binder_alloc.h"
+#include "binder_trace.h"
+
+struct list_lru binder_alloc_lru;
+
+static DEFINE_MUTEX(binder_alloc_mmap_lock);
+
+enum {
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
+};
+static uint32_t binder_alloc_debug_mask;
+
+module_param_named(debug_mask, binder_alloc_debug_mask,
+ uint, 0644);
+
+#define binder_alloc_debug(mask, x...) \
+ do { \
+ if (binder_alloc_debug_mask & mask) \
+ pr_info(x); \
+ } while (0)
+
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
+static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &alloc->buffers))
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
+}
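+/*
+ * Note: a buffer's usable size is implied by its neighbour rather than
+ * stored. For example (illustrative offsets), with this buffer's data at
+ * alloc->buffer + 0x20 and the next buffer's data at alloc->buffer + 0x60,
+ * binder_alloc_buffer_size() returns 0x40; the last buffer extends to the
+ * end of the mapped area.
+ */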
+
+static void binder_insert_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: add free buffer, size %zd, at %pK\n",
+ alloc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer_locked(
+ struct binder_alloc *alloc, struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &alloc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer->data < buffer->data)
+ p = &parent->rb_left;
+ else if (new_buffer->data > buffer->data)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_alloc_prepare_to_free_locked(
+ struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct rb_node *n = alloc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ void *kern_ptr;
+
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer->data)
+ n = n->rb_left;
+ else if (kern_ptr > buffer->data)
+ n = n->rb_right;
+ else {
+ /*
+ * Guard against user threads attempting to
+ * free the buffer twice
+ */
+ if (buffer->free_in_progress) {
+ pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
+ alloc->pid, current->pid, (u64)user_ptr);
+ return NULL;
+ }
+ buffer->free_in_progress = 1;
+ return buffer;
+ }
+ }
+ return NULL;
+}
+
+/**
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
+ * @alloc: binder_alloc for this proc
+ * @user_ptr: User pointer to buffer data
+ *
+ * Validate userspace pointer to buffer data and return buffer corresponding to
+ * that user pointer. Search the rb tree for a buffer that matches the
+ * user data pointer.
+ *
+ * Return: Pointer to buffer or NULL
+ */
+struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
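+/*
+ * Usage sketch (hypothetical caller; proc and data_ptr are illustrative
+ * names): the BC_FREE_BUFFER path looks the buffer up from the user
+ * pointer, bails out on a NULL result (invalid or already-freed pointer)
+ * and otherwise frees it:
+ *
+ *	struct binder_buffer *buffer;
+ *
+ *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
+ *	if (buffer == NULL)
+ *		return;
+ *	binder_alloc_free_buf(&proc->alloc, buffer);
+ */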
+
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct binder_lru_page *page;
+ struct mm_struct *mm = NULL;
+ bool need_mm = false;
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: %s pages %pK-%pK\n", alloc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ trace_binder_update_page_range(alloc, allocate, start, end);
+
+ if (allocate == 0)
+ goto free_range;
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
+ if (!page->page_ptr) {
+ need_mm = true;
+ break;
+ }
+ }
+
+ if (!vma && need_mm)
+ mm = get_task_mm(alloc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = alloc->vma;
+ if (vma && mm != alloc->vma_vm_mm) {
+ pr_err("%d: vma mm and task mm mismatch\n",
+ alloc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (!vma && need_mm) {
+ pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+ alloc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ bool on_lru;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ if (page->page_ptr) {
+ trace_binder_alloc_lru_start(alloc, index);
+
+ on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
+ WARN_ON(!on_lru);
+
+ trace_binder_alloc_lru_end(alloc, index);
+ continue;
+ }
+
+ if (WARN_ON(!vma))
+ goto err_page_ptr_cleared;
+
+ trace_binder_alloc_page_start(alloc, index);
+ page->page_ptr = alloc_page(GFP_KERNEL |
+ __GFP_HIGHMEM |
+ __GFP_ZERO);
+ if (!page->page_ptr) {
+ pr_err("%d: binder_alloc_buf failed for page at %pK\n",
+ alloc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ page->alloc = alloc;
+ INIT_LIST_HEAD(&page->lru);
+
+ ret = map_kernel_range_noflush((unsigned long)page_addr,
+ PAGE_SIZE, PAGE_KERNEL,
+ &page->page_ptr);
+ flush_cache_vmap((unsigned long)page_addr,
+ (unsigned long)page_addr + PAGE_SIZE);
+ if (ret != 1) {
+ pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
+ alloc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + alloc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
+ if (ret) {
+ pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+ alloc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+
+ trace_binder_alloc_page_end(alloc, index);
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ bool ret;
+ size_t index;
+
+ index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ page = &alloc->pages[index];
+
+ trace_binder_free_lru_start(alloc, index);
+
+ ret = list_lru_add(&binder_alloc_lru, &page->lru);
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
+ continue;
+
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+err_alloc_page_failed:
+err_page_ptr_cleared:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return vma ? -ENOMEM : -ESRCH;
+}
+
+struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct rb_node *n = alloc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size, data_offsets_size;
+ int ret;
+
+ if (alloc->vma == NULL) {
+ pr_err("%d: binder_alloc_buf, no vma\n",
+ alloc->pid);
+ return ERR_PTR(-ESRCH);
+ }
+
+ data_offsets_size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid size %zd-%zd\n",
+ alloc->pid, data_size, offsets_size);
+ return ERR_PTR(-EINVAL);
+ }
+ size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
+ if (size < data_offsets_size || size < extra_buffers_size) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: got transaction with invalid extra_buffers_size %zd\n",
+ alloc->pid, extra_buffers_size);
+ return ERR_PTR(-EINVAL);
+ }
+ if (is_async &&
+ alloc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd failed, no async space left\n",
+ alloc->pid, size);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ size_t allocated_buffers = 0;
+ size_t largest_alloc_size = 0;
+ size_t total_alloc_size = 0;
+ size_t free_buffers = 0;
+ size_t largest_free_size = 0;
+ size_t total_free_size = 0;
+
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ allocated_buffers++;
+ total_alloc_size += buffer_size;
+ if (buffer_size > largest_alloc_size)
+ largest_alloc_size = buffer_size;
+ }
+ for (n = rb_first(&alloc->free_buffers); n != NULL;
+ n = rb_next(n)) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ free_buffers++;
+ total_free_size += buffer_size;
+ if (buffer_size > largest_free_size)
+ largest_free_size = buffer_size;
+ }
+ pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+ alloc->pid, size);
+ pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
+ total_alloc_size, allocated_buffers, largest_alloc_size,
+ total_free_size, free_buffers, largest_free_size);
+ return ERR_PTR(-ENOSPC);
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+ }
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
+ alloc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ WARN_ON(n && buffer_size != size);
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ ret = binder_update_page_range(alloc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer;
+
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
+
+ rb_erase(best_fit, &alloc->free_buffers);
+ buffer->free = 0;
+ buffer->free_in_progress = 0;
+ binder_insert_allocated_buffer_locked(alloc, buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_alloc_buf size %zd got %pK\n",
+ alloc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ buffer->extra_buffers_size = extra_buffers_size;
+ if (is_async) {
+ alloc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_alloc_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+ return buffer;
+
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr, NULL);
+ return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * binder_alloc_new_buf() - Allocate a new binder buffer
+ * @alloc: binder_alloc for this proc
+ * @data_size: size of user data buffer
+ * @offsets_size: user-specified size of the offsets buffer
+ * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
+ * @is_async: buffer for async transaction
+ *
+ * Allocate a new buffer given the requested sizes. Returns
+ * the kernel version of the buffer pointer. The size allocated
+ * is the sum of the three given sizes (each rounded up to
+ * pointer-sized boundary)
+ *
+ * Return: The allocated buffer or an ERR_PTR() value on failure
+ */
+struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async)
+{
+ struct binder_buffer *buffer;
+
+ mutex_lock(&alloc->mutex);
+ buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
+ extra_buffers_size, is_async);
+ mutex_unlock(&alloc->mutex);
+ return buffer;
+}
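+/*
+ * Usage sketch (illustrative only; t, tr, target_proc, reply, ret and
+ * extra_buffers_size are hypothetical caller state): failures are reported
+ * as ERR_PTR values, so callers must use IS_ERR()/PTR_ERR() rather than
+ * checking for NULL:
+ *
+ *	t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
+ *					 tr->offsets_size, extra_buffers_size,
+ *					 !reply && (t->flags & TF_ONE_WAY));
+ *	if (IS_ERR(t->buffer)) {
+ *		ret = PTR_ERR(t->buffer);
+ *		t->buffer = NULL;
+ *		goto err_binder_alloc_buf_failed;
+ *	}
+ */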
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
+}
+
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
+}
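+/*
+ * Example (illustrative, 4K pages): for a buffer whose data starts at
+ * 0x...1000, buffer_start_page() returns the 0x...1000 page while
+ * prev_buffer_end_page() returns the 0x...0000 page (the page holding the
+ * byte just before the buffer). binder_delete_free_buffer() below compares
+ * such pages to decide whether a freed buffer shares a page with its
+ * neighbours.
+ */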
+
+static void binder_delete_free_buffer(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ bool to_free = true;
+ BUG_ON(alloc->buffers.next == &buffer->entry);
+ prev = binder_buffer_prev(buffer);
+ BUG_ON(!prev->free);
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
+ }
+
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
+ }
+ }
+
+ if (PAGE_ALIGNED(buffer->data)) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
+ }
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE,
+ NULL);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
+}
+
+static void binder_free_buf_locked(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_alloc_buffer_size(alloc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *)) +
+ ALIGN(buffer->extra_buffers_size, sizeof(void *));
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
+ alloc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
+
+ if (buffer->async_transaction) {
+ alloc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "%d: binder_free_buf size %zd async free %zd\n",
+ alloc->pid, size, alloc->free_async_space);
+ }
+
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+
+ rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &alloc->buffers)) {
+ struct binder_buffer *next = binder_buffer_next(buffer);
+
+ if (next->free) {
+ rb_erase(&next->rb_node, &alloc->free_buffers);
+ binder_delete_free_buffer(alloc, next);
+ }
+ }
+ if (alloc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
+
+ if (prev->free) {
+ binder_delete_free_buffer(alloc, buffer);
+ rb_erase(&prev->rb_node, &alloc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(alloc, buffer);
+}
+
+/**
+ * binder_alloc_free_buf() - free a binder buffer
+ * @alloc: binder_alloc for this proc
+ * @buffer: kernel pointer to buffer
+ *
+ * Free the buffer allocated via binder_alloc_new_buf()
+ */
+void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer)
+{
+ mutex_lock(&alloc->mutex);
+ binder_free_buf_locked(alloc, buffer);
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_mmap_handler() - map virtual address space for proc
+ * @alloc: alloc structure for this proc
+ * @vma: vma passed to mmap()
+ *
+ * Called by binder_mmap() to initialize the space specified in
+ * vma for allocating binder buffers
+ *
+ * Return:
+ * 0 = success
+ * -EBUSY = address space already mapped
+ * -ENOMEM = failed to map memory to given address space
+ */
+int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+ if (alloc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ alloc->buffer = area->addr;
+ alloc->user_buffer_offset =
+ vma->vm_start - (uintptr_t)alloc->buffer;
+ mutex_unlock(&binder_alloc_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR(
+ (vma->vm_start ^ (uint32_t)alloc->buffer))) {
+ pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
+ __func__, alloc->pid, vma->vm_start,
+ vma->vm_end, alloc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
+ ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
+ GFP_KERNEL);
+ if (alloc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ alloc->buffer_size = vma->vm_end - vma->vm_start;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
+ }
+
+ buffer->data = alloc->buffer;
+ list_add(&buffer->entry, &alloc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(alloc, buffer);
+ alloc->free_async_space = alloc->buffer_size / 2;
+ barrier();
+ alloc->vma = vma;
+ alloc->vma_vm_mm = vma->vm_mm;
+
+ return 0;
+
+err_alloc_buf_struct_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_alloc_mmap_lock);
+ vfree(alloc->buffer);
+ alloc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
+ alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
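+/*
+ * Call-site sketch (assumes the usual binder_mmap() validation of the vma
+ * size and protection bits has already been done):
+ *
+ *	static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+ *	{
+ *		struct binder_proc *proc = filp->private_data;
+ *		...
+ *		return binder_alloc_mmap_handler(&proc->alloc, vma);
+ *	}
+ */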
+
+void binder_alloc_deferred_release(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int buffers, page_count;
+ struct binder_buffer *buffer;
+
+ BUG_ON(alloc->vma);
+
+ buffers = 0;
+ mutex_lock(&alloc->mutex);
+ while ((n = rb_first(&alloc->allocated_buffers))) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+ /* Transaction should already have been freed */
+ BUG_ON(buffer->transaction);
+
+ binder_free_buf_locked(alloc, buffer);
+ buffers++;
+ }
+
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
+ page_count = 0;
+ if (alloc->pages) {
+ int i;
+
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ void *page_addr;
+ bool on_lru;
+
+ if (!alloc->pages[i].page_ptr)
+ continue;
+
+ on_lru = list_lru_del(&binder_alloc_lru,
+ &alloc->pages[i].lru);
+ page_addr = alloc->buffer + i * PAGE_SIZE;
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%s: %d: page %d at %pK %s\n",
+ __func__, alloc->pid, i, page_addr,
+ on_lru ? "on lru" : "active");
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+ __free_page(alloc->pages[i].page_ptr);
+ page_count++;
+ }
+ kfree(alloc->pages);
+ vfree(alloc->buffer);
+ }
+ mutex_unlock(&alloc->mutex);
+
+ binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "%s: %d buffers %d, pages %d\n",
+ __func__, alloc->pid, buffers, page_count);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->extra_buffers_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+/**
+ * binder_alloc_print_allocated() - print buffer info
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ *
+ * Prints information about every buffer associated with
+ * the binder_alloc state to the given seq_file
+ */
+void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ mutex_unlock(&alloc->mutex);
+}
+
+/**
+ * binder_alloc_print_pages() - print page usage
+ * @m: seq_file for output via seq_printf()
+ * @alloc: binder_alloc for this proc
+ */
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc)
+{
+ struct binder_lru_page *page;
+ int i;
+ int active = 0;
+ int lru = 0;
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+ page = &alloc->pages[i];
+ if (!page->page_ptr)
+ free++;
+ else if (list_empty(&page->lru))
+ active++;
+ else
+ lru++;
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+}
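+/*
+ * Example output line (values illustrative):
+ *	pages: 4:2:10
+ * i.e. 4 pages currently backing buffers, 2 parked on the shrinker lru and
+ * 10 never allocated (or already reclaimed).
+ */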
+
+/**
+ * binder_alloc_get_allocated_count() - return count of buffers
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: count of allocated buffers
+ */
+int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
+{
+ struct rb_node *n;
+ int count = 0;
+
+ mutex_lock(&alloc->mutex);
+ for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ mutex_unlock(&alloc->mutex);
+ return count;
+}
+
+/**
+ * binder_alloc_vma_close() - invalidate address space
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_vma_close() when releasing address space.
+ * Clears alloc->vma to prevent new incoming transactions from
+ * allocating more buffers.
+ */
+void binder_alloc_vma_close(struct binder_alloc *alloc)
+{
+ WRITE_ONCE(alloc->vma, NULL);
+ WRITE_ONCE(alloc->vma_vm_mm, NULL);
+}
+
+/**
+ * binder_alloc_free_page() - shrinker callback to free pages
+ * @item: item to free
+ * @lru: list_lru instance of the item
+ * @lock: lock protecting the item
+ * @cb_arg: callback argument
+ *
+ * Called from list_lru_walk() in binder_shrink_scan() to free
+ * up pages when the system is under memory pressure.
+ */
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock,
+ void *cb_arg)
+{
+ struct mm_struct *mm = NULL;
+ struct binder_lru_page *page = container_of(item,
+ struct binder_lru_page,
+ lru);
+ struct binder_alloc *alloc;
+ uintptr_t page_addr;
+ size_t index;
+
+ alloc = page->alloc;
+ if (!mutex_trylock(&alloc->mutex))
+ goto err_get_alloc_mutex_failed;
+
+ if (!page->page_ptr)
+ goto err_page_already_freed;
+
+ index = page - alloc->pages;
+ page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+ if (alloc->vma) {
+ mm = get_task_mm(alloc->tsk);
+ if (!mm)
+ goto err_get_task_mm_failed;
+ if (!down_write_trylock(&mm->mmap_sem))
+ goto err_down_write_mmap_sem_failed;
+
+ trace_binder_unmap_user_start(alloc, index);
+
+ zap_page_range(alloc->vma,
+ page_addr + alloc->user_buffer_offset,
+ PAGE_SIZE);
+
+ trace_binder_unmap_user_end(alloc, index);
+
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+
+ trace_binder_unmap_kernel_start(alloc, index);
+
+ unmap_kernel_range(page_addr, PAGE_SIZE);
+ __free_page(page->page_ptr);
+ page->page_ptr = NULL;
+
+ trace_binder_unmap_kernel_end(alloc, index);
+
+ list_lru_isolate(lru, item);
+
+ mutex_unlock(&alloc->mutex);
+ return LRU_REMOVED;
+
+err_down_write_mmap_sem_failed:
+ mmput(mm);
+err_get_task_mm_failed:
+err_page_already_freed:
+ mutex_unlock(&alloc->mutex);
+err_get_alloc_mutex_failed:
+ return LRU_SKIP;
+}
+
+static unsigned long
+binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret = list_lru_count(&binder_alloc_lru);
+ return ret;
+}
+
+static unsigned long
+binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long ret;
+
+ ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, sc->nr_to_scan);
+ return ret;
+}
+
+struct shrinker binder_shrinker = {
+ .count_objects = binder_shrink_count,
+ .scan_objects = binder_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+
+/**
+ * binder_alloc_init() - called by binder_open() for per-proc initialization
+ * @alloc: binder_alloc for this proc
+ *
+ * Called from binder_open() to initialize binder_alloc fields for
+ * new binder proc
+ */
+void binder_alloc_init(struct binder_alloc *alloc)
+{
+ alloc->tsk = current->group_leader;
+ alloc->pid = current->group_leader->pid;
+ mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
+}
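+/*
+ * Sketch of the expected open-time call (binder_open() owns the
+ * binder_proc; proc->alloc is the field name used elsewhere in this patch):
+ *
+ *	binder_alloc_init(&proc->alloc);
+ */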
+
+void binder_alloc_shrinker_init(void)
+{
+ list_lru_init(&binder_alloc_lru);
+ register_shrinker(&binder_shrinker);
+}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
new file mode 100644
index 000000000000..a3a3602c689c
--- /dev/null
+++ b/drivers/android/binder_alloc.h
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_ALLOC_H
+#define _LINUX_BINDER_ALLOC_H
+
+#include <linux/rbtree.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/list_lru.h>
+
+extern struct list_lru binder_alloc_lru;
+struct binder_transaction;
+
+/**
+ * struct binder_buffer - buffer used for binder transactions
+ * @entry: entry in alloc->buffers
+ * @rb_node: node for allocated_buffers/free_buffers rb trees
+ * @free: true if buffer is free
+ * @allow_user_free: true if userspace is allowed to free this buffer
+ * @async_transaction: true if the buffer is in use for an async transaction
+ * @free_in_progress: true if a free of this buffer is in progress
+ * @debug_id: unique id for debugging
+ * @transaction: pointer to the transaction using this buffer
+ * @target_node: binder_node associated with this buffer
+ * @data_size: size of the transaction data
+ * @offsets_size: size of the array of object offsets
+ * @extra_buffers_size: size of space for other objects (e.g. sg lists)
+ * @data: pointer to the base of the buffer space
+ *
+ * Bookkeeping structure for binder transaction buffers
+ */
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned free_in_progress:1;
+ unsigned debug_id:28;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ size_t extra_buffers_size;
+ void *data;
+};
+
+/**
+ * struct binder_lru_page - page object used for binder shrinker
+ * @page_ptr: pointer to physical page in mmap'd space
+ * @lru: entry in binder_alloc_lru
+ * @alloc: binder_alloc for a proc
+ */
+struct binder_lru_page {
+ struct list_head lru;
+ struct page *page_ptr;
+ struct binder_alloc *alloc;
+};
+
+/**
+ * struct binder_alloc - per-binder proc state for binder allocator
+ * @mutex: protects the binder_alloc fields
+ * @vma: vm_area_struct passed to mmap_handler
+ * (invariant after mmap)
+ * @tsk: task_struct for the task that called init for this proc
+ * (invariant after init)
+ * @vma_vm_mm: copy of vma->vm_mm (invariant after mmap)
+ * @buffer: base of per-proc address space mapped via mmap
+ * @user_buffer_offset: offset between user and kernel VAs for buffer
+ * @buffers: list of all buffers for this proc
+ * @free_buffers: rb tree of buffers available for allocation
+ * sorted by size
+ * @allocated_buffers: rb tree of allocated buffers sorted by address
+ * @free_async_space: VA space available for async buffers. This is
+ * initialized at mmap time to 1/2 the full VA space
+ * @pages: array of binder_lru_page
+ * @buffer_size: size of address space specified via mmap
+ * @pid: pid for associated binder_proc (invariant after init)
+ *
+ * Bookkeeping structure for per-proc address space management for binder
+ * buffers. It is normally initialized during binder_init() and binder_mmap()
+ * calls. The address space is used for both user-visible buffers and for
+ * struct binder_buffer objects used to track the user buffers
+ */
+struct binder_alloc {
+ struct mutex mutex;
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *vma_vm_mm;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+ struct binder_lru_page *pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ int pid;
+};
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
+enum lru_status binder_alloc_free_page(struct list_head *item,
+ struct list_lru_one *lru,
+ spinlock_t *lock, void *cb_arg);
+extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
+ size_t data_size,
+ size_t offsets_size,
+ size_t extra_buffers_size,
+ int is_async);
+extern void binder_alloc_init(struct binder_alloc *alloc);
+void binder_alloc_shrinker_init(void);
+extern void binder_alloc_vma_close(struct binder_alloc *alloc);
+extern struct binder_buffer *
+binder_alloc_prepare_to_free(struct binder_alloc *alloc,
+ uintptr_t user_ptr);
+extern void binder_alloc_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffer);
+extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct vm_area_struct *vma);
+extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
+extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
+extern void binder_alloc_print_allocated(struct seq_file *m,
+ struct binder_alloc *alloc);
+void binder_alloc_print_pages(struct seq_file *m,
+ struct binder_alloc *alloc);
+
+/**
+ * binder_alloc_get_free_async_space() - get free space available for async
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the bytes remaining in the address-space for async transactions
+ */
+static inline size_t
+binder_alloc_get_free_async_space(struct binder_alloc *alloc)
+{
+ size_t free_async_space;
+
+ mutex_lock(&alloc->mutex);
+ free_async_space = alloc->free_async_space;
+ mutex_unlock(&alloc->mutex);
+ return free_async_space;
+}
+
+/**
+ * binder_alloc_get_user_buffer_offset() - get offset between kernel/user addrs
+ * @alloc: binder_alloc for this proc
+ *
+ * Return: the offset between kernel and user-space addresses to use for
+ * virtual address conversion
+ */
+static inline ptrdiff_t
+binder_alloc_get_user_buffer_offset(struct binder_alloc *alloc)
+{
+ /*
+ * user_buffer_offset is constant if vma is set and
+ * undefined if vma is not set. It is possible to
+ * get here with !alloc->vma if the target process
+ * is dying while a transaction is being initiated.
+ * Returning the old value is ok in this case and
+ * the transaction will fail.
+ */
+ return alloc->user_buffer_offset;
+}
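+/*
+ * Example of the conversion this offset enables (illustrative variable
+ * names; kptr must point inside the mapped buffer area):
+ *
+ *	void __user *uptr;
+ *	void *kptr = buffer->data;
+ *
+ *	uptr = (void __user *)((uintptr_t)kptr +
+ *		binder_alloc_get_user_buffer_offset(alloc));
+ */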
+
+#endif /* _LINUX_BINDER_ALLOC_H */
+
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 000000000000..8bd7bcef967d
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,310 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to the previous buffer by address.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: When the end of the previous buffer
+ * is not page aligned, the end of this buffer is on the
+ * same page as the end of the previous buffer and is page
+ * aligned. When the previous buffer is page aligned, the
+ * end of this buffer is aligned to the next page boundary.
+ * Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
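+/*
+ * Taken together with the recursion in binder_selftest_alloc_offset(),
+ * these five alignment types give LOOP_END^BUFFER_NUM (5^5 = 3125)
+ * end-offset combinations, and each combination is exercised with all
+ * BUFFER_NUM! (120) free orders for both the front- and back-aligned
+ * size sets.
+ */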
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index].page_ptr ||
+ !list_empty(&alloc->pages[page_index].lru)) {
+ pr_err("expect alloc but is %s at page index %d\n",
+ alloc->pages[page_index].page_ptr ?
+ "lru" : "free", page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq, size_t end)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < end / PAGE_SIZE; i++) {
+ /*
+ * The error message on a free page can be a false positive
+ * if the binder shrinker ran during the binder_alloc_free_buf
+ * calls above.
+ */
+ if (list_empty(&alloc->pages[i].lru)) {
+ pr_err_size_seq(sizes, seq);
+ pr_err("expect lru but is %s at page index %d\n",
+ alloc->pages[i].page_ptr ? "alloc" : "free", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_page(struct binder_alloc *alloc)
+{
+ int i;
+ unsigned long count;
+
+ while ((count = list_lru_count(&binder_alloc_lru))) {
+ list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
+ NULL, count);
+ }
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if (alloc->pages[i].page_ptr) {
+ pr_err("expect free but is %s at page index %d\n",
+ list_empty(&alloc->pages[i].lru) ?
+ "alloc" : "lru", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, size_t end)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+
+ /* Allocate from lru. */
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ if (list_lru_count(&binder_alloc_lru))
+ pr_err("lru list should be empty but is not\n");
+
+ binder_selftest_free_buf(alloc, buffers, sizes, seq, end);
+ binder_selftest_free_page(alloc);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq,
+ int index, size_t end)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq, end);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1, end);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0,
+ end_offset[BUFFER_NUM - 1]);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * correctly allocated, put onto lru when buffers are freed, and
+ * are freed when binder_alloc_free_page is called.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
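The recursion in binder_selftest_free_seq() above walks every possible free order by filling seq[] one slot at a time and skipping values already used. A minimal user-space sketch of the same permutation walk, assuming a reduced BUFFER_NUM of 3 so the output stays small (the real value is defined earlier in this file):

/* Sketch only: mirrors is_dup()/binder_selftest_free_seq() above. */
#include <stdbool.h>
#include <stdio.h>

#define BUFFER_NUM 3

static bool is_dup(int *seq, int index, int val)
{
	int i;

	for (i = 0; i < index; i++)
		if (seq[i] == val)
			return true;
	return false;
}

static void gen_free_seq(int *seq, int index)
{
	int i;

	if (index == BUFFER_NUM) {
		/* One complete free order; the selftest runs
		 * binder_selftest_alloc_free() at this point.
		 */
		printf("[%d][%d][%d]\n", seq[0], seq[1], seq[2]);
		return;
	}
	for (i = 0; i < BUFFER_NUM; i++) {
		if (is_dup(seq, index, i))
			continue;
		seq[index] = i;
		gen_free_seq(seq, index + 1);
	}
}

int main(void)
{
	int seq[BUFFER_NUM] = {0};

	gen_free_seq(seq, 0);	/* prints all 3! = 6 free orders */
	return 0;
}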
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 7f20f3dc8369..76e3b9c8a8a2 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -23,7 +23,8 @@
struct binder_buffer;
struct binder_node;
struct binder_proc;
-struct binder_ref;
+struct binder_alloc;
+struct binder_ref_data;
struct binder_thread;
struct binder_transaction;
@@ -146,8 +147,8 @@ TRACE_EVENT(binder_transaction_received,
TRACE_EVENT(binder_transaction_node_to_ref,
TP_PROTO(struct binder_transaction *t, struct binder_node *node,
- struct binder_ref *ref),
- TP_ARGS(t, node, ref),
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -160,8 +161,8 @@ TRACE_EVENT(binder_transaction_node_to_ref,
__entry->debug_id = t->debug_id;
__entry->node_debug_id = node->debug_id;
__entry->node_ptr = node->ptr;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
),
TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
__entry->debug_id, __entry->node_debug_id,
@@ -170,8 +171,9 @@ TRACE_EVENT(binder_transaction_node_to_ref,
);
TRACE_EVENT(binder_transaction_ref_to_node,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
- TP_ARGS(t, ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *rdata),
+ TP_ARGS(t, node, rdata),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -182,10 +184,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->ref_debug_id = ref->debug_id;
- __entry->ref_desc = ref->desc;
- __entry->node_debug_id = ref->node->debug_id;
- __entry->node_ptr = ref->node->ptr;
+ __entry->ref_debug_id = rdata->debug_id;
+ __entry->ref_desc = rdata->desc;
+ __entry->node_debug_id = node->debug_id;
+ __entry->node_ptr = node->ptr;
),
TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
__entry->debug_id, __entry->node_debug_id,
@@ -194,9 +196,10 @@ TRACE_EVENT(binder_transaction_ref_to_node,
);
TRACE_EVENT(binder_transaction_ref_to_ref,
- TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
- struct binder_ref *dest_ref),
- TP_ARGS(t, src_ref, dest_ref),
+ TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+ struct binder_ref_data *src_ref,
+ struct binder_ref_data *dest_ref),
+ TP_ARGS(t, node, src_ref, dest_ref),
TP_STRUCT__entry(
__field(int, debug_id)
@@ -208,7 +211,7 @@ TRACE_EVENT(binder_transaction_ref_to_ref,
),
TP_fast_assign(
__entry->debug_id = t->debug_id;
- __entry->node_debug_id = src_ref->node->debug_id;
+ __entry->node_debug_id = node->debug_id;
__entry->src_ref_debug_id = src_ref->debug_id;
__entry->src_ref_desc = src_ref->desc;
__entry->dest_ref_debug_id = dest_ref->debug_id;
@@ -268,9 +271,9 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
TP_ARGS(buffer));
TRACE_EVENT(binder_update_page_range,
- TP_PROTO(struct binder_proc *proc, bool allocate,
+ TP_PROTO(struct binder_alloc *alloc, bool allocate,
void *start, void *end),
- TP_ARGS(proc, allocate, start, end),
+ TP_ARGS(alloc, allocate, start, end),
TP_STRUCT__entry(
__field(int, proc)
__field(bool, allocate)
@@ -278,9 +281,9 @@ TRACE_EVENT(binder_update_page_range,
__field(size_t, size)
),
TP_fast_assign(
- __entry->proc = proc->pid;
+ __entry->proc = alloc->pid;
__entry->allocate = allocate;
- __entry->offset = start - proc->buffer;
+ __entry->offset = start - alloc->buffer;
__entry->size = end - start;
),
TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
@@ -288,6 +291,61 @@ TRACE_EVENT(binder_update_page_range,
__entry->offset, __entry->size)
);
+DECLARE_EVENT_CLASS(binder_lru_page_class,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index),
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(size_t, page_index)
+ ),
+ TP_fast_assign(
+ __entry->proc = alloc->pid;
+ __entry->page_index = page_index;
+ ),
+ TP_printk("proc=%d page_index=%zu",
+ __entry->proc, __entry->page_index)
+);
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_free_lru_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_alloc_page_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_user_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_start,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
+DEFINE_EVENT(binder_lru_page_class, binder_unmap_kernel_end,
+ TP_PROTO(const struct binder_alloc *alloc, size_t page_index),
+ TP_ARGS(alloc, page_index));
+
TRACE_EVENT(binder_command,
TP_PROTO(uint32_t cmd),
TP_ARGS(cmd),
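Each DEFINE_EVENT() above generates a trace_<name>() helper with the TP_PROTO of binder_lru_page_class. A hedged sketch of how the allocator side would bracket an lru page reuse with the new start/end events (the real call sites live in binder_alloc.c and may be placed differently):

/* Sketch only; example_reuse_lru_page() is illustrative, not a binder API. */
static void example_reuse_lru_page(struct binder_alloc *alloc, size_t index)
{
	trace_binder_alloc_lru_start(alloc, index);

	/* ... take the page off binder_alloc_lru and map it back in ... */

	trace_binder_alloc_lru_end(alloc, index);
}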
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 363fc5330c21..488c93724220 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -153,6 +153,16 @@ config AHCI_CEVA
If unsure, say N.
+config AHCI_MTK
+ tristate "MediaTek AHCI SATA support"
+ depends on ARCH_MEDIATEK
+ select MFD_SYSCON
+ help
+ This option enables support for the MediaTek SoC's
+ onboard AHCI SATA controller.
+
+ If unsure, say N.
+
config AHCI_MVEBU
tristate "Marvell EBU AHCI SATA support"
depends on ARCH_MVEBU
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index a26ef5a93919..ff9cd2e37458 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_AHCI_CEVA) += ahci_ceva.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DA850) += ahci_da850.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_DM816) += ahci_dm816.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_IMX) += ahci_imx.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_MTK) += ahci_mtk.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_MVEBU) += ahci_mvebu.o libahci.o libahci_platform.o
obj-$(CONFIG_AHCI_OCTEON) += ahci_octeon.o
obj-$(CONFIG_AHCI_SUNXI) += ahci_sunxi.o libahci.o libahci_platform.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5a5fd0b404eb..cb9b0e9090e3 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1469,7 +1469,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
return;
dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
- dev_warn(&pdev->dev, "Switch your BIOS from RAID to AHCI mode to use them.\n");
+ dev_warn(&pdev->dev,
+ "Switch your BIOS from RAID to AHCI mode to use them.\n");
+
+ /*
+ * Don't rely on the msi-x capability in the remap case,
+ * share the legacy interrupt across ahci and remapped devices.
+ */
+ hpriv->flags |= AHCI_HFLAG_NO_MSI;
}
static int ahci_get_irq_vector(struct ata_host *host, int port)
diff --git a/drivers/ata/ahci_da850.c b/drivers/ata/ahci_da850.c
index 1a50cd3b4233..9b34dff64536 100644
--- a/drivers/ata/ahci_da850.c
+++ b/drivers/ata/ahci_da850.c
@@ -216,12 +216,16 @@ static int ahci_da850_probe(struct platform_device *pdev)
return rc;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res)
+ if (!res) {
+ rc = -ENODEV;
goto disable_resources;
+ }
pwrdn_reg = devm_ioremap(dev, res->start, resource_size(res));
- if (!pwrdn_reg)
+ if (!pwrdn_reg) {
+ rc = -ENOMEM;
goto disable_resources;
+ }
da850_sata_init(dev, pwrdn_reg, hpriv->mmio, mpy);
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c
new file mode 100644
index 000000000000..80854f71559a
--- /dev/null
+++ b/drivers/ata/ahci_mtk.c
@@ -0,0 +1,196 @@
+/*
+ * MediaTek AHCI SATA driver
+ *
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Ryder Lee <ryder.lee@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/kernel.h>
+#include <linux/libata.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include "ahci.h"
+
+#define DRV_NAME "ahci"
+
+#define SYS_CFG 0x14
+#define SYS_CFG_SATA_MSK GENMASK(31, 30)
+#define SYS_CFG_SATA_EN BIT(31)
+
+struct mtk_ahci_plat {
+ struct regmap *mode;
+ struct reset_control *axi_rst;
+ struct reset_control *sw_rst;
+ struct reset_control *reg_rst;
+};
+
+static const struct ata_port_info ahci_port_info = {
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_platform_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+ AHCI_SHT(DRV_NAME),
+};
+
+static int mtk_ahci_platform_resets(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct mtk_ahci_plat *plat = hpriv->plat_data;
+ int err;
+
+ /* reset AXI bus and PHY part */
+ plat->axi_rst = devm_reset_control_get_optional_exclusive(dev, "axi");
+ if (PTR_ERR(plat->axi_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->axi_rst);
+
+ plat->sw_rst = devm_reset_control_get_optional_exclusive(dev, "sw");
+ if (PTR_ERR(plat->sw_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->sw_rst);
+
+ plat->reg_rst = devm_reset_control_get_optional_exclusive(dev, "reg");
+ if (PTR_ERR(plat->reg_rst) == -EPROBE_DEFER)
+ return PTR_ERR(plat->reg_rst);
+
+ err = reset_control_assert(plat->axi_rst);
+ if (err) {
+ dev_err(dev, "failed to assert AXI bus\n");
+ return err;
+ }
+
+ err = reset_control_assert(plat->sw_rst);
+ if (err) {
+ dev_err(dev, "failed to assert PHY digital part\n");
+ return err;
+ }
+
+ err = reset_control_assert(plat->reg_rst);
+ if (err) {
+ dev_err(dev, "failed to assert PHY register part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->reg_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert PHY register part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->sw_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert PHY digital part\n");
+ return err;
+ }
+
+ err = reset_control_deassert(plat->axi_rst);
+ if (err) {
+ dev_err(dev, "failed to deassert AXI bus\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int mtk_ahci_parse_property(struct ahci_host_priv *hpriv,
+ struct device *dev)
+{
+ struct mtk_ahci_plat *plat = hpriv->plat_data;
+ struct device_node *np = dev->of_node;
+
+ /* enable SATA function if needed */
+ if (of_find_property(np, "mediatek,phy-mode", NULL)) {
+ plat->mode = syscon_regmap_lookup_by_phandle(
+ np, "mediatek,phy-mode");
+ if (IS_ERR(plat->mode)) {
+ dev_err(dev, "missing phy-mode phandle\n");
+ return PTR_ERR(plat->mode);
+ }
+
+ regmap_update_bits(plat->mode, SYS_CFG, SYS_CFG_SATA_MSK,
+ SYS_CFG_SATA_EN);
+ }
+
+ of_property_read_u32(np, "ports-implemented", &hpriv->force_port_map);
+
+ return 0;
+}
+
+static int mtk_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtk_ahci_plat *plat;
+ struct ahci_host_priv *hpriv;
+ int err;
+
+ plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ hpriv = ahci_platform_get_resources(pdev);
+ if (IS_ERR(hpriv))
+ return PTR_ERR(hpriv);
+
+ hpriv->plat_data = plat;
+
+ err = mtk_ahci_parse_property(hpriv, dev);
+ if (err)
+ return err;
+
+ err = mtk_ahci_platform_resets(hpriv, dev);
+ if (err)
+ return err;
+
+ err = ahci_platform_enable_resources(hpriv);
+ if (err)
+ return err;
+
+ err = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
+ &ahci_platform_sht);
+ if (err)
+ goto disable_resources;
+
+ return 0;
+
+disable_resources:
+ ahci_platform_disable_resources(hpriv);
+ return err;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+ ahci_platform_resume);
+
+static const struct of_device_id ahci_of_match[] = {
+ { .compatible = "mediatek,mtk-ahci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ahci_of_match);
+
+static struct platform_driver mtk_ahci_driver = {
+ .probe = mtk_ahci_probe,
+ .remove = ata_platform_remove_one,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ahci_of_match,
+ .pm = &ahci_pm_ops,
+ },
+};
+module_platform_driver(mtk_ahci_driver);
+
+MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver");
+MODULE_LICENSE("GPL v2");
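mtk_ahci_platform_resets() above only bails out on -EPROBE_DEFER because the _optional_ reset getters return NULL, rather than an error, when the device tree omits a reset line; reset_control_assert()/deassert() on a NULL handle are then no-ops. A minimal sketch of that pattern, using a hypothetical "foo" reset name:

/* Sketch only: optional reset handling; "foo" is a hypothetical reset name. */
static int example_optional_reset(struct device *dev)
{
	struct reset_control *rst;
	int err;

	/* Returns NULL (not an error) when the DT carries no "foo" reset. */
	rst = devm_reset_control_get_optional_exclusive(dev, "foo");
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	err = reset_control_assert(rst);	/* no-op when rst is NULL */
	if (err)
		return err;

	return reset_control_deassert(rst);	/* no-op when rst is NULL */
}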
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 62a04c8fb5c9..99f9a895a459 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -93,6 +93,7 @@ MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
static struct platform_driver ahci_driver = {
.probe = ahci_probe,
.remove = ata_platform_remove_one,
+ .shutdown = ahci_platform_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = ahci_of_match,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index cd2eab6aa92e..a270a1173c8c 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -602,6 +602,40 @@ static void ahci_host_stop(struct ata_host *host)
ahci_platform_disable_resources(hpriv);
}
+/**
+ * ahci_platform_shutdown - Disable interrupts and stop DMA for host ports
+ * @pdev: platform device pointer for the host
+ *
+ * This function is called during system shutdown and performs the minimal
+ * deconfiguration required to ensure that an ahci_platform host cannot
+ * corrupt or otherwise interfere with a new kernel being started with kexec.
+ */
+void ahci_platform_shutdown(struct platform_device *pdev)
+{
+ struct ata_host *host = platform_get_drvdata(pdev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ int i;
+
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ /* Disable port interrupts */
+ if (ap->ops->freeze)
+ ap->ops->freeze(ap);
+
+ /* Stop the port DMA engines */
+ if (ap->ops->port_stop)
+ ap->ops->port_stop(ap);
+ }
+
+ /* Disable and clear host interrupts */
+ writel(readl(mmio + HOST_CTL) & ~HOST_IRQ_EN, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+ writel(GENMASK(host->n_ports, 0), mmio + HOST_IRQ_STAT);
+}
+EXPORT_SYMBOL_GPL(ahci_platform_shutdown);
+
#ifdef CONFIG_PM_SLEEP
/**
* ahci_platform_suspend_host - Suspend an ahci-platform host
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index fa7dd4394c02..1945a8ea2099 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2411,6 +2411,9 @@ static void ata_dev_config_trusted(struct ata_device *dev)
u64 trusted_cap;
unsigned int err;
+ if (!ata_id_has_trusted(dev->id))
+ return;
+
if (!ata_identify_page_supported(dev, ATA_LOG_SECURITY)) {
ata_dev_warn(dev,
"Security Log not supported\n");
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3dbd05532c09..e4effef0c83f 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -645,12 +645,11 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
* completions are honored. A scmd is determined to have
* timed out iff its associated qc is active and not failed.
*/
+ spin_lock_irqsave(ap->lock, flags);
if (ap->ops->error_handler) {
struct scsi_cmnd *scmd, *tmp;
int nr_timedout = 0;
- spin_lock_irqsave(ap->lock, flags);
-
/* This must occur under the ap->lock as we don't want
a polled recovery to race the real interrupt handler
@@ -700,12 +699,11 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
if (nr_timedout)
__ata_port_freeze(ap);
- spin_unlock_irqrestore(ap->lock, flags);
/* initialize eh_tries */
ap->eh_tries = ATA_EH_MAX_TRIES;
- } else
- spin_unlock_wait(ap->lock);
+ }
+ spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL(ata_scsi_cmd_error_handler);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 8a01d09ac4db..23a62e4015d0 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -34,7 +34,7 @@ struct zpodd {
static int eject_tray(struct ata_device *dev)
{
struct ata_taskfile tf;
- const char cdb[] = { GPCMD_START_STOP_UNIT,
+ static const char cdb[] = { GPCMD_START_STOP_UNIT,
0, 0, 0,
0x02, /* LoEj */
0, 0, 0, 0, 0, 0, 0,
@@ -55,7 +55,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc = (void *)(buf + 8);
struct ata_taskfile tf;
- char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959a821c..8706533db57b 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@ static const struct pci_device_id amd[] = {
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a554efbe..dc1255294628 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 1ba03d6df951..d3d851b014a3 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -840,7 +840,6 @@ static int octeon_cf_probe(struct platform_device *pdev)
struct property *reg_prop;
int n_addr, n_size, reg_len;
struct device_node *node;
- const void *prop;
void __iomem *cs0;
void __iomem *cs1 = NULL;
struct ata_host *host;
@@ -850,7 +849,7 @@ static int octeon_cf_probe(struct platform_device *pdev)
void __iomem *base;
struct octeon_cf_port *cf_port;
int rv = -ENOMEM;
-
+ u32 bus_width;
node = pdev->dev.of_node;
if (node == NULL)
@@ -860,11 +859,10 @@ static int octeon_cf_probe(struct platform_device *pdev)
if (!cf_port)
return -ENOMEM;
- cf_port->is_true_ide = (of_find_property(node, "cavium,true-ide", NULL) != NULL);
+ cf_port->is_true_ide = of_property_read_bool(node, "cavium,true-ide");
- prop = of_get_property(node, "cavium,bus-width", NULL);
- if (prop)
- is_16bit = (be32_to_cpup(prop) == 16);
+ if (of_property_read_u32(node, "cavium,bus-width", &bus_width) == 0)
+ is_16bit = (bus_width == 16);
else
is_16bit = false;
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index 8c704523bae7..46950e0267e0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/pinctrl/consumer.h>
#include "sata_gemini.h"
#define DRV_NAME "gemini_sata_bridge"
@@ -43,17 +44,6 @@ struct sata_gemini {
struct clk *sata1_pclk;
};
-/* Global IDE PAD Skew Control Register */
-#define GEMINI_GLOBAL_IDE_SKEW_CTRL 0x18
-#define GEMINI_IDE1_HOST_STROBE_DELAY_SHIFT 28
-#define GEMINI_IDE1_DEVICE_STROBE_DELAY_SHIFT 24
-#define GEMINI_IDE1_OUTPUT_IO_SKEW_SHIFT 20
-#define GEMINI_IDE1_INPUT_IO_SKEW_SHIFT 16
-#define GEMINI_IDE0_HOST_STROBE_DELAY_SHIFT 12
-#define GEMINI_IDE0_DEVICE_STROBE_DELAY_SHIFT 8
-#define GEMINI_IDE0_OUTPUT_IO_SKEW_SHIFT 4
-#define GEMINI_IDE0_INPUT_IO_SKEW_SHIFT 0
-
/* Miscellaneous Control Register */
#define GEMINI_GLOBAL_MISC_CTRL 0x30
/*
@@ -91,8 +81,6 @@ struct sata_gemini {
#define GEMINI_IDE_IOMUX_MODE2 (2 << 24)
#define GEMINI_IDE_IOMUX_MODE3 (3 << 24)
#define GEMINI_IDE_IOMUX_SHIFT (24)
-#define GEMINI_IDE_PADS_ENABLE BIT(4)
-#define GEMINI_PFLASH_PADS_DISABLE BIT(1)
/*
* Registers directly controlling the PATA<->SATA adapters
@@ -274,14 +262,14 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return ret;
}
- sg->sata0_reset = devm_reset_control_get(dev, "sata0");
+ sg->sata0_reset = devm_reset_control_get_exclusive(dev, "sata0");
if (IS_ERR(sg->sata0_reset)) {
dev_err(dev, "no SATA0 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
clk_disable_unprepare(sg->sata0_pclk);
return PTR_ERR(sg->sata0_reset);
}
- sg->sata1_reset = devm_reset_control_get(dev, "sata1");
+ sg->sata1_reset = devm_reset_control_get_exclusive(dev, "sata1");
if (IS_ERR(sg->sata1_reset)) {
dev_err(dev, "no SATA1 reset controller\n");
clk_disable_unprepare(sg->sata1_pclk);
@@ -300,17 +288,39 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
return 0;
}
+static int gemini_setup_ide_pins(struct device *dev)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *ide_state;
+ int ret;
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p))
+ return PTR_ERR(p);
+
+ ide_state = pinctrl_lookup_state(p, "ide");
+ if (IS_ERR(ide_state))
+ return PTR_ERR(ide_state);
+
+ ret = pinctrl_select_state(p, ide_state);
+ if (ret) {
+ dev_err(dev, "could not select IDE state\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int gemini_sata_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct sata_gemini *sg;
- static struct regmap *map;
+ struct regmap *map;
struct resource *res;
enum gemini_muxmode muxmode;
u32 gmode;
u32 gmask;
- u32 val;
int ret;
sg = devm_kzalloc(dev, sizeof(*sg), GFP_KERNEL);
@@ -362,16 +372,6 @@ static int gemini_sata_probe(struct platform_device *pdev)
gmask = GEMINI_IDE_IOMUX_MASK;
gmode = (muxmode << GEMINI_IDE_IOMUX_SHIFT);
- /*
- * If we mux out the IDE, parallel flash must be disabled.
- * SATA0 and SATA1 have dedicated pins and may coexist with
- * parallel flash.
- */
- if (sg->ide_pins)
- gmode |= GEMINI_IDE_PADS_ENABLE | GEMINI_PFLASH_PADS_DISABLE;
- else
- gmask |= GEMINI_IDE_PADS_ENABLE;
-
ret = regmap_update_bits(map, GEMINI_GLOBAL_MISC_CTRL, gmask, gmode);
if (ret) {
dev_err(dev, "unable to set up IDE muxing\n");
@@ -379,14 +379,15 @@ static int gemini_sata_probe(struct platform_device *pdev)
goto out_unprep_clk;
}
- /* FIXME: add more elaborate IDE skew control handling */
+ /*
+ * Route out the IDE pins if desired.
+ * This is done by looking up a special pin control state called
+ * "ide" that will route out the IDE pins.
+ */
if (sg->ide_pins) {
- ret = regmap_read(map, GEMINI_GLOBAL_IDE_SKEW_CTRL, &val);
- if (ret) {
- dev_err(dev, "cannot read IDE skew control register\n");
+ ret = gemini_setup_ide_pins(dev);
+ if (ret)
return ret;
- }
- dev_info(dev, "IDE skew control: %08x\n", val);
}
dev_info(dev, "set up the Gemini IDE/SATA nexus\n");
diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
index 0fd6ac7e57ba..a9d692c6c182 100644
--- a/drivers/ata/sata_svw.c
+++ b/drivers/ata/sata_svw.c
@@ -339,7 +339,7 @@ static int k2_sata_show_info(struct seq_file *m, struct Scsi_Host *shost)
if (!reg)
continue;
if (index == *reg) {
- seq_printf(m, "devspec: %s\n", np->full_name);
+ seq_printf(m, "devspec: %pOF\n", np);
break;
}
}
diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
index 1fd25e872ece..8d98130ecd40 100644
--- a/drivers/atm/adummy.c
+++ b/drivers/atm/adummy.c
@@ -71,7 +71,7 @@ static struct attribute *adummy_attrs[] = {
NULL
};
-static struct attribute_group adummy_group_attrs = {
+static const struct attribute_group adummy_group_attrs = {
.name = NULL, /* We want them in dev's root folder */
.attrs = adummy_attrs
};
@@ -130,7 +130,7 @@ adummy_proc_read(struct atm_dev *dev, loff_t *pos, char *page)
return 0;
}
-static struct atmdev_ops adummy_ops =
+static const struct atmdev_ops adummy_ops =
{
.open = adummy_open,
.close = adummy_close,
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 906705e5f776..acf16c323e38 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2374,7 +2374,7 @@ MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
/********** module entry **********/
-static struct pci_device_id amb_pci_tbl[] = {
+static const struct pci_device_id amb_pci_tbl[] = {
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR), 0 },
{ PCI_VDEVICE(MADGE, PCI_DEVICE_ID_MADGE_AMBASSADOR_BAD), 0 },
{ 0, }
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index 56fa16c85ebf..afebeb1c3e1e 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -342,7 +342,7 @@ static struct atmdev_ops atmtcp_v_dev_ops = {
*/
-static struct atmdev_ops atmtcp_c_dev_ops = {
+static const struct atmdev_ops atmtcp_c_dev_ops = {
.close = atmtcp_c_close,
.send = atmtcp_c_send
};
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index b042ec458544..ce47eb17901d 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -2292,7 +2292,7 @@ err_disable:
}
-static struct pci_device_id eni_pci_tbl[] = {
+static const struct pci_device_id eni_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_FPGA), 0 /* FPGA */ },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_ASIC), 1 /* ASIC */ },
{ 0, }
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index 22dcab952a24..6b6368a56526 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -2030,7 +2030,7 @@ static void firestream_remove_one(struct pci_dev *pdev)
func_exit ();
}
-static struct pci_device_id firestream_pci_tbl[] = {
+static const struct pci_device_id firestream_pci_tbl[] = {
{ PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS50), FS_IS50},
{ PCI_VDEVICE(FUJITSU_ME, PCI_DEVICE_ID_FUJITSU_FS155), FS_IS155},
{ 0, }
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f0433adcd8fc..f8b7e86907cc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2757,7 +2757,7 @@ static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
}
-static struct pci_device_id fore200e_pca_tbl[] = {
+static const struct pci_device_id fore200e_pca_tbl[] = {
{ PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID,
0, 0, (unsigned long) &fore200e_bus[0] },
{ 0, }
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 37ee21c5a5ca..e58538c29377 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -161,7 +161,7 @@ static unsigned int clocktab[] = {
CLK_LOW
};
-static struct atmdev_ops he_ops =
+static const struct atmdev_ops he_ops =
{
.open = he_open,
.close = he_close,
@@ -2851,7 +2851,7 @@ MODULE_PARM_DESC(irq_coalesce, "use interrupt coalescing (default 1)");
module_param(sdh, bool, 0);
MODULE_PARM_DESC(sdh, "use SDH framing (default 0)");
-static struct pci_device_id he_pci_tbl[] = {
+static const struct pci_device_id he_pci_tbl[] = {
{ PCI_VDEVICE(FORE, PCI_DEVICE_ID_FORE_HE), 0 },
{ 0, }
};
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 0f18480b33b5..7e76b35f422c 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -2867,7 +2867,7 @@ MODULE_PARM_DESC(max_tx_size, "maximum size of TX AAL5 frames");
MODULE_PARM_DESC(max_rx_size, "maximum size of RX AAL5 frames");
MODULE_PARM_DESC(pci_lat, "PCI latency in bus cycles");
-static struct pci_device_id hrz_pci_tbl[] = {
+static const struct pci_device_id hrz_pci_tbl[] = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_HORIZON, PCI_ANY_ID, PCI_ANY_ID,
0, 0, 0 },
{ 0, }
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 60bacba03d17..47f3c4ae0594 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -134,7 +134,7 @@ static int idt77252_proc_read(struct atm_dev *dev, loff_t * pos,
static void idt77252_softint(struct work_struct *work);
-static struct atmdev_ops idt77252_ops =
+static const struct atmdev_ops idt77252_ops =
{
.dev_close = idt77252_dev_close,
.open = idt77252_open,
@@ -3725,7 +3725,7 @@ err_out_disable_pdev:
return err;
}
-static struct pci_device_id idt77252_pci_tbl[] =
+static const struct pci_device_id idt77252_pci_tbl[] =
{
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77252), 0 },
{ 0, }
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index a4fa6c82261e..fc72b763fdd7 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -3266,7 +3266,7 @@ static void ia_remove_one(struct pci_dev *pdev)
kfree(iadev);
}
-static struct pci_device_id ia_pci_tbl[] = {
+static const struct pci_device_id ia_pci_tbl[] = {
{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 1a9bc51284b0..2351dad78ff5 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2589,7 +2589,7 @@ static int lanai_init_one(struct pci_dev *pci,
return result;
}
-static struct pci_device_id lanai_pci_tbl[] = {
+static const struct pci_device_id lanai_pci_tbl[] = {
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAI2) },
{ PCI_VDEVICE(EF, PCI_DEVICE_ID_EF_ATM_LANAIHB) },
{ 0, } /* terminal entry */
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index d879f3bca107..a9702836cbae 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -154,7 +154,7 @@ static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
static struct ns_dev *cards[NS_MAX_CARDS];
static unsigned num_cards;
-static struct atmdev_ops atm_ops = {
+static const struct atmdev_ops atm_ops = {
.open = ns_open,
.close = ns_close,
.ioctl = ns_ioctl,
@@ -253,7 +253,7 @@ static void nicstar_remove_one(struct pci_dev *pcidev)
kfree(card);
}
-static struct pci_device_id nicstar_pci_tbl[] = {
+static const struct pci_device_id nicstar_pci_tbl[] = {
{ PCI_VDEVICE(IDT, PCI_DEVICE_ID_IDT_IDT77201), 0 },
{0,} /* terminate list */
};
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c8f2ca6d8b29..0df1a1c80b00 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -611,7 +611,7 @@ static struct attribute *solos_attrs[] = {
NULL
};
-static struct attribute_group solos_attr_group = {
+static const struct attribute_group solos_attr_group = {
.attrs = solos_attrs,
.name = "parameters",
};
@@ -628,7 +628,7 @@ static struct attribute *gpio_attrs[] = {
NULL
};
-static struct attribute_group gpio_attr_group = {
+static const struct attribute_group gpio_attr_group = {
.attrs = gpio_attrs,
.name = "gpio",
};
@@ -1187,7 +1187,7 @@ static int psend(struct atm_vcc *vcc, struct sk_buff *skb)
return 0;
}
-static struct atmdev_ops fpga_ops = {
+static const struct atmdev_ops fpga_ops = {
.open = popen,
.close = pclose,
.ioctl = NULL,
@@ -1476,7 +1476,7 @@ static void fpga_remove(struct pci_dev *dev)
kfree(card);
}
-static struct pci_device_id fpga_pci_tbl[] = {
+static const struct pci_device_id fpga_pci_tbl[] = {
{ 0x10ee, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index 07bdd51b3b9a..1ef67db03c8e 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1642,7 +1642,7 @@ out_free:
MODULE_LICENSE("GPL");
-static struct pci_device_id zatm_pci_tbl[] = {
+static const struct pci_device_id zatm_pci_tbl[] = {
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1221), ZATM_COPPER },
{ PCI_VDEVICE(ZEITNET, PCI_DEVICE_ID_ZEITNET_1225), 0 },
{ 0, }
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 7a8b8fb2f572..df126dcdaf18 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -877,21 +877,21 @@ static void lcd_clear_fast_tilcd(struct charlcd *charlcd)
spin_unlock_irq(&pprt_lock);
}
-static struct charlcd_ops charlcd_serial_ops = {
+static const struct charlcd_ops charlcd_serial_ops = {
.write_cmd = lcd_write_cmd_s,
.write_data = lcd_write_data_s,
.clear_fast = lcd_clear_fast_s,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_parallel_ops = {
+static const struct charlcd_ops charlcd_parallel_ops = {
.write_cmd = lcd_write_cmd_p8,
.write_data = lcd_write_data_p8,
.clear_fast = lcd_clear_fast_p8,
.backlight = lcd_backlight,
};
-static struct charlcd_ops charlcd_tilcd_ops = {
+static const struct charlcd_ops charlcd_tilcd_ops = {
.write_cmd = lcd_write_cmd_tilcd,
.write_data = lcd_write_data_tilcd,
.clear_fast = lcd_clear_fast_tilcd,
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index f046d21de57d..1a5f6a157a57 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -140,13 +140,10 @@ config EXTRA_FIRMWARE
config EXTRA_FIRMWARE_DIR
string "Firmware blobs root directory"
depends on EXTRA_FIRMWARE != ""
- default "firmware"
+ default "/lib/firmware"
help
This option controls the directory in which the kernel build system
looks for the firmware files listed in the EXTRA_FIRMWARE option.
- The default is firmware/ in the kernel source tree, but by changing
- this option you can point it elsewhere, such as /lib/firmware/ or
- some other directory containing the firmware files.
config FW_LOADER_USER_HELPER
bool
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index d1c33a85059e..41be9ff7d70a 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -41,8 +41,7 @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n",
- topology_get_cpu_scale(NULL, cpu->dev.id));
+ return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
}
static ssize_t cpu_capacity_store(struct device *dev,
@@ -96,14 +95,21 @@ subsys_initcall(register_cpu_capacity_sysctl);
static u32 capacity_scale;
static u32 *raw_capacity;
-static bool cap_parsing_failed;
+
+static int __init free_raw_capacity(void)
+{
+ kfree(raw_capacity);
+ raw_capacity = NULL;
+
+ return 0;
+}
void topology_normalize_cpu_scale(void)
{
u64 capacity;
int cpu;
- if (!raw_capacity || cap_parsing_failed)
+ if (!raw_capacity)
return;
pr_debug("cpu_capacity: capacity_scale=%u\n", capacity_scale);
@@ -120,16 +126,16 @@ void topology_normalize_cpu_scale(void)
mutex_unlock(&cpu_scale_mutex);
}
-int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
+bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
{
- int ret = 1;
+ static bool cap_parsing_failed;
+ int ret;
u32 cpu_capacity;
if (cap_parsing_failed)
- return !ret;
+ return false;
- ret = of_property_read_u32(cpu_node,
- "capacity-dmips-mhz",
+ ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz",
&cpu_capacity);
if (!ret) {
if (!raw_capacity) {
@@ -139,21 +145,21 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
if (!raw_capacity) {
pr_err("cpu_capacity: failed to allocate memory for raw capacities\n");
cap_parsing_failed = true;
- return 0;
+ return false;
}
}
capacity_scale = max(cpu_capacity, capacity_scale);
raw_capacity[cpu] = cpu_capacity;
- pr_debug("cpu_capacity: %s cpu_capacity=%u (raw)\n",
- cpu_node->full_name, raw_capacity[cpu]);
+ pr_debug("cpu_capacity: %pOF cpu_capacity=%u (raw)\n",
+ cpu_node, raw_capacity[cpu]);
} else {
if (raw_capacity) {
- pr_err("cpu_capacity: missing %s raw capacity\n",
- cpu_node->full_name);
+ pr_err("cpu_capacity: missing %pOF raw capacity\n",
+ cpu_node);
pr_err("cpu_capacity: partial information: fallback to 1024 for all CPUs\n");
}
cap_parsing_failed = true;
- kfree(raw_capacity);
+ free_raw_capacity();
}
return !ret;
@@ -161,7 +167,6 @@ int __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
#ifdef CONFIG_CPU_FREQ
static cpumask_var_t cpus_to_visit;
-static bool cap_parsing_done;
static void parsing_done_workfn(struct work_struct *work);
static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
@@ -173,30 +178,31 @@ init_cpu_capacity_callback(struct notifier_block *nb,
struct cpufreq_policy *policy = data;
int cpu;
- if (cap_parsing_failed || cap_parsing_done)
+ if (!raw_capacity)
return 0;
- switch (val) {
- case CPUFREQ_NOTIFY:
- pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
- cpumask_pr_args(policy->related_cpus),
- cpumask_pr_args(cpus_to_visit));
- cpumask_andnot(cpus_to_visit,
- cpus_to_visit,
- policy->related_cpus);
- for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
- policy->cpuinfo.max_freq / 1000UL;
- capacity_scale = max(raw_capacity[cpu], capacity_scale);
- }
- if (cpumask_empty(cpus_to_visit)) {
- topology_normalize_cpu_scale();
- kfree(raw_capacity);
- pr_debug("cpu_capacity: parsing done\n");
- cap_parsing_done = true;
- schedule_work(&parsing_done_work);
- }
+ if (val != CPUFREQ_NOTIFY)
+ return 0;
+
+ pr_debug("cpu_capacity: init cpu capacity for CPUs [%*pbl] (to_visit=%*pbl)\n",
+ cpumask_pr_args(policy->related_cpus),
+ cpumask_pr_args(cpus_to_visit));
+
+ cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
+
+ for_each_cpu(cpu, policy->related_cpus) {
+ raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+ policy->cpuinfo.max_freq / 1000UL;
+ capacity_scale = max(raw_capacity[cpu], capacity_scale);
+ }
+
+ if (cpumask_empty(cpus_to_visit)) {
+ topology_normalize_cpu_scale();
+ free_raw_capacity();
+ pr_debug("cpu_capacity: parsing done\n");
+ schedule_work(&parsing_done_work);
}
+
return 0;
}
@@ -233,11 +239,5 @@ static void parsing_done_workfn(struct work_struct *work)
}
#else
-static int __init free_raw_capacity(void)
-{
- kfree(raw_capacity);
-
- return 0;
-}
core_initcall(free_raw_capacity);
#endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index e19b1008e5fb..539432a14b5c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -126,11 +126,6 @@ extern int driver_add_groups(struct device_driver *drv,
extern void driver_remove_groups(struct device_driver *drv,
const struct attribute_group **groups);
-extern int device_add_groups(struct device *dev,
- const struct attribute_group **groups);
-extern void device_remove_groups(struct device *dev,
- const struct attribute_group **groups);
-
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index e162c9a789ba..22a64fd3309b 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -698,7 +698,7 @@ int bus_add_driver(struct device_driver *drv)
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 755451f684bc..12ebd055724c 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1023,12 +1023,144 @@ int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
return sysfs_create_groups(&dev->kobj, groups);
}
+EXPORT_SYMBOL_GPL(device_add_groups);
void device_remove_groups(struct device *dev,
const struct attribute_group **groups)
{
sysfs_remove_groups(&dev->kobj, groups);
}
+EXPORT_SYMBOL_GPL(device_remove_groups);
+
+union device_attr_group_devres {
+ const struct attribute_group *group;
+ const struct attribute_group **groups;
+};
+
+static int devm_attr_group_match(struct device *dev, void *res, void *data)
+{
+ return ((union device_attr_group_devres *)res)->group == data;
+}
+
+static void devm_attr_group_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group *group = devres->group;
+
+ dev_dbg(dev, "%s: removing group %p\n", __func__, group);
+ sysfs_remove_group(&dev->kobj, group);
+}
+
+static void devm_attr_groups_remove(struct device *dev, void *res)
+{
+ union device_attr_group_devres *devres = res;
+ const struct attribute_group **groups = devres->groups;
+
+ dev_dbg(dev, "%s: removing groups %p\n", __func__, groups);
+ sysfs_remove_groups(&dev->kobj, groups);
+}
+
+/**
+ * devm_device_add_group - given a device, create a managed attribute group
+ * @dev: The device to create the group for
+ * @grp: The attribute group to create
+ *
+ * This function creates a group for the first time. It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error code on failure.
+ */
+int devm_device_add_group(struct device *dev, const struct attribute_group *grp)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_group_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_group(&dev->kobj, grp);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->group = grp;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_group);
+
+/**
+ * devm_device_remove_group - remove a managed group from a device
+ * @dev: device to remove the group from
+ * @grp: group to remove
+ *
+ * This function removes a group of attributes from a device. The group must
+ * previously have been created via devm_device_add_group(), otherwise the
+ * removal fails.
+ */
+void devm_device_remove_group(struct device *dev,
+ const struct attribute_group *grp)
+{
+ WARN_ON(devres_release(dev, devm_attr_group_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)grp));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_group);
+
+/**
+ * devm_device_add_groups - create a bunch of managed attribute groups
+ * @dev: The device to create the group for
+ * @groups: The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of managed attribute groups. If an error
+ * occurs when creating a group, all previously created groups will be
+ * removed, unwinding everything back to the original state when this
+ * function was called. It will explicitly warn and error if any of the
+ * attribute files being created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on failure.
+ */
+int devm_device_add_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ union device_attr_group_devres *devres;
+ int error;
+
+ devres = devres_alloc(devm_attr_groups_remove,
+ sizeof(*devres), GFP_KERNEL);
+ if (!devres)
+ return -ENOMEM;
+
+ error = sysfs_create_groups(&dev->kobj, groups);
+ if (error) {
+ devres_free(devres);
+ return error;
+ }
+
+ devres->groups = groups;
+ devres_add(dev, devres);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_device_add_groups);
+
+/**
+ * devm_device_remove_groups - remove a list of managed groups
+ *
+ * @dev: The device for the groups to be removed from
+ * @groups: NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the device.
+ */
+void devm_device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
+{
+ WARN_ON(devres_release(dev, devm_attr_groups_remove,
+ devm_attr_group_match,
+ /* cast away const */ (void *)groups));
+}
+EXPORT_SYMBOL_GPL(devm_device_remove_groups);
static int device_add_attrs(struct device *dev)
{
@@ -2664,11 +2796,12 @@ void device_shutdown(void)
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->class && dev->class->shutdown) {
+ if (dev->class && dev->class->shutdown_pre) {
if (initcall_debug)
- dev_info(dev, "shutdown\n");
- dev->class->shutdown(dev);
- } else if (dev->bus && dev->bus->shutdown) {
+ dev_info(dev, "shutdown_pre\n");
+ dev->class->shutdown_pre(dev);
+ }
+ if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
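The devm_device_add_group()/devm_device_add_groups() helpers added above tie the lifetime of a sysfs group to the device through devres, so drivers no longer need matching remove calls in their error and teardown paths. A hedged usage sketch, with a hypothetical driver and attribute group:

/* Sketch only: foo_attr_group is a hypothetical driver-defined group. */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = devm_device_add_group(&pdev->dev, &foo_attr_group);
	if (ret)
		return ret;

	/* No explicit removal needed: devres drops the group when the
	 * device is unbound or a later probe step fails.
	 */
	return 0;
}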
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 2c3b359b3536..321cd7b4d817 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -256,9 +256,9 @@ static ssize_t print_cpus_offline(struct device *dev,
buf[n++] = ',';
if (nr_cpu_ids == total_cpus-1)
- n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
+ n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
else
- n += snprintf(&buf[n], len - n, "%d-%d",
+ n += snprintf(&buf[n], len - n, "%u-%d",
nr_cpu_ids, total_cpus-1);
}
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 4882f06d12df..ad44b40fe284 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -20,6 +20,7 @@
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
@@ -53,6 +54,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
+static bool initcalls_done;
/*
* In some cases, like suspend to RAM or hibernation, It might be reasonable
@@ -62,6 +64,26 @@ static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
static bool defer_all_probes;
/*
+ * For initcall_debug, show the deferred probes executed in late_initcall
+ * processing.
+ */
+static void deferred_probe_debug(struct device *dev)
+{
+ ktime_t calltime, delta, rettime;
+ unsigned long long duration;
+
+ printk(KERN_DEBUG "deferred probe %s @ %i\n", dev_name(dev),
+ task_pid_nr(current));
+ calltime = ktime_get();
+ bus_probe_device(dev);
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ duration = (unsigned long long) ktime_to_ns(delta) >> 10;
+ printk(KERN_DEBUG "deferred probe %s returned after %lld usecs\n",
+ dev_name(dev), duration);
+}
+
+/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
@@ -106,7 +128,10 @@ static void deferred_probe_work_func(struct work_struct *work)
device_pm_unlock();
dev_dbg(dev, "Retrying from deferred list\n");
- bus_probe_device(dev);
+ if (initcall_debug && !initcalls_done)
+ deferred_probe_debug(dev);
+ else
+ bus_probe_device(dev);
mutex_lock(&deferred_probe_mutex);
@@ -215,6 +240,7 @@ static int deferred_probe_initcall(void)
driver_deferred_probe_trigger();
/* Sort as many dependencies as possible before exiting initcalls */
flush_work(&deferred_probe_work);
+ initcalls_done = true;
return 0;
}
late_initcall(deferred_probe_initcall);
@@ -259,6 +285,8 @@ static void driver_bound(struct device *dev)
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_BOUND_DRIVER, dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_BIND);
}
static int driver_sysfs_add(struct device *dev)
@@ -848,6 +876,8 @@ static void __device_release_driver(struct device *dev, struct device *parent)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_UNBOUND_DRIVER,
dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_UNBIND);
}
}
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 1c152aed6b82..744f64f43454 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -37,7 +37,7 @@ static inline dma_addr_t dma_get_device_base(struct device *dev,
return mem->device_base;
}
-static bool dma_init_coherent_memory(
+static int dma_init_coherent_memory(
phys_addr_t phys_addr, dma_addr_t device_addr, size_t size, int flags,
struct dma_coherent_mem **mem)
{
@@ -45,25 +45,28 @@ static bool dma_init_coherent_memory(
void __iomem *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+ int ret;
- if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
- goto out;
- if (!size)
+ if (!size) {
+ ret = -EINVAL;
goto out;
+ }
- if (flags & DMA_MEMORY_MAP)
- mem_base = memremap(phys_addr, size, MEMREMAP_WC);
- else
- mem_base = ioremap(phys_addr, size);
- if (!mem_base)
+ mem_base = memremap(phys_addr, size, MEMREMAP_WC);
+ if (!mem_base) {
+ ret = -EINVAL;
goto out;
-
+ }
dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dma_mem)
+ if (!dma_mem) {
+ ret = -ENOMEM;
goto out;
+ }
dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dma_mem->bitmap)
+ if (!dma_mem->bitmap) {
+ ret = -ENOMEM;
goto out;
+ }
dma_mem->virt_base = mem_base;
dma_mem->device_base = device_addr;
@@ -73,17 +76,13 @@ static bool dma_init_coherent_memory(
spin_lock_init(&dma_mem->spinlock);
*mem = dma_mem;
- return true;
+ return 0;
out:
kfree(dma_mem);
- if (mem_base) {
- if (flags & DMA_MEMORY_MAP)
- memunmap(mem_base);
- else
- iounmap(mem_base);
- }
- return false;
+ if (mem_base)
+ memunmap(mem_base);
+ return ret;
}
static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
@@ -91,10 +90,7 @@ static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
if (!mem)
return;
- if (mem->flags & DMA_MEMORY_MAP)
- memunmap(mem->virt_base);
- else
- iounmap(mem->virt_base);
+ memunmap(mem->virt_base);
kfree(mem->bitmap);
kfree(mem);
}
@@ -109,8 +105,6 @@ static int dma_assign_coherent_memory(struct device *dev,
return -EBUSY;
dev->dma_mem = mem;
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
-
return 0;
}
@@ -118,16 +112,16 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
{
struct dma_coherent_mem *mem;
+ int ret;
- if (!dma_init_coherent_memory(phys_addr, device_addr, size, flags,
- &mem))
- return 0;
-
- if (dma_assign_coherent_memory(dev, mem) == 0)
- return flags & DMA_MEMORY_MAP ? DMA_MEMORY_MAP : DMA_MEMORY_IO;
+ ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags, &mem);
+ if (ret)
+ return ret;
- dma_release_coherent_memory(mem);
- return 0;
+ ret = dma_assign_coherent_memory(dev, mem);
+ if (ret)
+ dma_release_coherent_memory(mem);
+ return ret;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
@@ -171,7 +165,6 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
int order = get_order(size);
unsigned long flags;
int pageno;
- int dma_memory_map;
void *ret;
spin_lock_irqsave(&mem->spinlock, flags);
@@ -188,15 +181,9 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
*/
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
ret = mem->virt_base + (pageno << PAGE_SHIFT);
- dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
spin_unlock_irqrestore(&mem->spinlock, flags);
- if (dma_memory_map)
- memset(ret, 0, size);
- else
- memset_io(ret, 0, size);
-
+ memset(ret, 0, size);
return ret;
-
err:
spin_unlock_irqrestore(&mem->spinlock, flags);
return NULL;
@@ -359,14 +346,17 @@ static struct reserved_mem *dma_reserved_default_memory __initdata;
static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
{
struct dma_coherent_mem *mem = rmem->priv;
+ int ret;
- if (!mem &&
- !dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
- DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
- &mem)) {
- pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
- &rmem->base, (unsigned long)rmem->size / SZ_1M);
- return -ENODEV;
+ if (!mem) {
+ ret = dma_init_coherent_memory(rmem->base, rmem->base,
+ rmem->size,
+ DMA_MEMORY_EXCLUSIVE, &mem);
+ if (ret) {
+ pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return ret;
+ }
}
mem->use_dev_dma_pfn_offset = true;
rmem->priv = mem;
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index b555ff9dd8fc..e584eddef0a7 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -176,13 +176,10 @@ int dmam_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
rc = dma_declare_coherent_memory(dev, phys_addr, device_addr, size,
flags);
- if (rc) {
+ if (!rc)
devres_add(dev, res);
- rc = 0;
- } else {
+ else
devres_free(res);
- rc = -ENOMEM;
- }
return rc;
}
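With the changes above, dma_declare_coherent_memory() and the managed dmam_ variant return 0 on success or a negative errno instead of the old DMA_MEMORY_MAP/DMA_MEMORY_IO flag values. A hedged sketch of the updated calling convention, with hypothetical addresses and size:

/* Sketch only: the base address, size and flags are illustrative. */
static int foo_declare_coherent(struct device *dev)
{
	int ret;

	ret = dma_declare_coherent_memory(dev, 0x10000000, 0x10000000,
					  SZ_1M, DMA_MEMORY_EXCLUSIVE);
	if (ret)	/* now 0 or -errno, not a DMA_MEMORY_* flag */
		return ret;

	return 0;
}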
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bfbe1e154128..4b57cf5bc81d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -7,6 +7,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -256,38 +258,6 @@ static int fw_cache_piggyback_on_request(const char *name);
* guarding for corner cases a global lock should be OK */
static DEFINE_MUTEX(fw_lock);
-static bool __enable_firmware = false;
-
-static void enable_firmware(void)
-{
- mutex_lock(&fw_lock);
- __enable_firmware = true;
- mutex_unlock(&fw_lock);
-}
-
-static void disable_firmware(void)
-{
- mutex_lock(&fw_lock);
- __enable_firmware = false;
- mutex_unlock(&fw_lock);
-}
-
-/*
- * When disabled only the built-in firmware and the firmware cache will be
- * used to look for firmware.
- */
-static bool firmware_enabled(void)
-{
- bool enabled = false;
-
- mutex_lock(&fw_lock);
- if (__enable_firmware)
- enabled = true;
- mutex_unlock(&fw_lock);
-
- return enabled;
-}
-
static struct firmware_cache fw_cache;
static struct firmware_buf *__allocate_fw_buf(const char *fw_name,
@@ -331,6 +301,7 @@ static struct firmware_buf *__fw_lookup_buf(const char *fw_name)
return NULL;
}
+/* Returns 1 for batching firmware requests with the same name */
static int fw_lookup_and_allocate_buf(const char *fw_name,
struct firmware_cache *fwc,
struct firmware_buf **buf, void *dbuf,
@@ -344,6 +315,7 @@ static int fw_lookup_and_allocate_buf(const char *fw_name,
kref_get(&tmp->ref);
spin_unlock(&fwc->lock);
*buf = tmp;
+ pr_debug("batched request - sharing the same struct firmware_buf and lookup for multiple requests\n");
return 1;
}
tmp = __allocate_fw_buf(fw_name, fwc, dbuf, size);
@@ -1085,9 +1057,12 @@ static int _request_firmware_load(struct firmware_priv *fw_priv,
mutex_unlock(&fw_lock);
}
- if (fw_state_is_aborted(&buf->fw_st))
- retval = -EAGAIN;
- else if (buf->is_paged_buf && !buf->data)
+ if (fw_state_is_aborted(&buf->fw_st)) {
+ if (retval == -ERESTARTSYS)
+ retval = -EINTR;
+ else
+ retval = -EAGAIN;
+ } else if (buf->is_paged_buf && !buf->data)
retval = -ENOMEM;
device_del(f_dev);
@@ -1239,12 +1214,6 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (ret <= 0) /* error or already assigned */
goto out;
- if (!firmware_enabled()) {
- WARN(1, "firmware request while host is not available\n");
- ret = -EHOSTDOWN;
- goto out;
- }
-
ret = fw_get_filesystem_firmware(device, fw->priv);
if (ret) {
if (!(opt_flags & FW_OPT_NO_WARN))
@@ -1755,62 +1724,6 @@ static void device_uncache_fw_images_delay(unsigned long delay)
msecs_to_jiffies(delay));
}
-/**
- * fw_pm_notify - notifier for suspend/resume
- * @notify_block: unused
- * @mode: mode we are switching to
- * @unused: unused
- *
- * Used to modify the firmware_class state as we move in between states.
- * The firmware_class implements a firmware cache to enable device driver
- * to fetch firmware upon resume before the root filesystem is ready. We
- * disable API calls which do not use the built-in firmware or the firmware
- * cache when we know these calls will not work.
- *
- * The inner logic behind all this is a bit complex so it is worth summarizing
- * the kernel's own suspend/resume process with context and focus on how this
- * can impact the firmware API.
- *
- * First a review on how we go to suspend::
- *
- * pm_suspend() --> enter_state() -->
- * sys_sync()
- * suspend_prepare() -->
- * __pm_notifier_call_chain(PM_SUSPEND_PREPARE, ...);
- * suspend_freeze_processes() -->
- * freeze_processes() -->
- * __usermodehelper_set_disable_depth(UMH_DISABLED);
- * freeze all tasks ...
- * freeze_kernel_threads()
- * suspend_devices_and_enter() -->
- * dpm_suspend_start() -->
- * dpm_prepare()
- * dpm_suspend()
- * suspend_enter() -->
- * platform_suspend_prepare()
- * dpm_suspend_late()
- * freeze_enter()
- * syscore_suspend()
- *
- * When we resume we bail out of a loop from suspend_devices_and_enter() and
- * unwind back out to the caller enter_state() where we were before as follows::
- *
- * enter_state() -->
- * suspend_devices_and_enter() --> (bail from loop)
- * dpm_resume_end() -->
- * dpm_resume()
- * dpm_complete()
- * suspend_finish() -->
- * suspend_thaw_processes() -->
- * thaw_processes() -->
- * __usermodehelper_set_disable_depth(UMH_FREEZING);
- * thaw_workqueues();
- * thaw all processes ...
- * usermodehelper_enable();
- * pm_notifier_call_chain(PM_POST_SUSPEND);
- *
- * fw_pm_notify() works through pm_notifier_call_chain().
- */
static int fw_pm_notify(struct notifier_block *notify_block,
unsigned long mode, void *unused)
{
@@ -1824,7 +1737,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
*/
kill_pending_fw_fallback_reqs(true);
device_cache_fw_images();
- disable_firmware();
break;
case PM_POST_SUSPEND:
@@ -1837,7 +1749,6 @@ static int fw_pm_notify(struct notifier_block *notify_block,
mutex_lock(&fw_lock);
fw_cache.state = FW_LOADER_NO_CACHE;
mutex_unlock(&fw_lock);
- enable_firmware();
device_uncache_fw_images_delay(10 * MSEC_PER_SEC);
break;
@@ -1886,7 +1797,6 @@ static void __init fw_cache_init(void)
static int fw_shutdown_notify(struct notifier_block *unused1,
unsigned long unused2, void *unused3)
{
- disable_firmware();
/*
* Kill all pending fallback requests to avoid both stalling shutdown,
* and avoid a deadlock with the usermode_lock.
@@ -1902,7 +1812,6 @@ static struct notifier_block fw_shutdown_nb = {
static int __init firmware_class_init(void)
{
- enable_firmware();
fw_cache_init();
register_reboot_notifier(&fw_shutdown_nb);
#ifdef CONFIG_FW_LOADER_USER_HELPER
@@ -1914,7 +1823,6 @@ static int __init firmware_class_init(void)
static void __exit firmware_class_exit(void)
{
- disable_firmware();
#ifdef CONFIG_PM_SLEEP
unregister_syscore_ops(&fw_syscore_ops);
unregister_pm_notifier(&fw_cache.pm_notify);
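The firmware loader hunks remove the global enable/disable gate (the PM and shutdown notifiers now only kill pending fallback requests and manage the cache) and make an aborted user-mode-helper wait report -EINTR when the waiter was interrupted by a signal and -EAGAIN otherwise. A hedged sketch of how a caller might tell the two apart; the device and firmware name are invented for illustration.

/* Hedged sketch: hypothetical request_firmware() caller. */
static int example_load_firmware(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "vendor/example.bin", dev);
	if (ret == -EINTR)
		return ret;		/* wait interrupted by a signal */
	if (ret == -EAGAIN)
		return ret;		/* request aborted, may be retried later */
	if (ret)
		return ret;		/* not found or other error */

	/* ... hand fw->data / fw->size to the hardware ... */
	release_firmware(fw);
	return 0;
}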
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index c7c4e0325cdb..4e3b61cda520 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -388,6 +388,19 @@ static ssize_t show_phys_device(struct device *dev,
}
#ifdef CONFIG_MEMORY_HOTREMOVE
+static void print_allowed_zone(char *buf, int nid, unsigned long start_pfn,
+ unsigned long nr_pages, int online_type,
+ struct zone *default_zone)
+{
+ struct zone *zone;
+
+ zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
+ if (zone != default_zone) {
+ strcat(buf, " ");
+ strcat(buf, zone->name);
+ }
+}
+
static ssize_t show_valid_zones(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -395,7 +408,7 @@ static ssize_t show_valid_zones(struct device *dev,
unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
unsigned long valid_start_pfn, valid_end_pfn;
- bool append = false;
+ struct zone *default_zone;
int nid;
/*
@@ -418,16 +431,13 @@ static ssize_t show_valid_zones(struct device *dev,
}
nid = pfn_to_nid(start_pfn);
- if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL)) {
- strcat(buf, default_zone_for_pfn(nid, start_pfn, nr_pages)->name);
- append = true;
- }
+ default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+ strcat(buf, default_zone->name);
- if (allow_online_pfn_range(nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE)) {
- if (append)
- strcat(buf, " ");
- strcat(buf, NODE_DATA(nid)->node_zones[ZONE_MOVABLE].name);
- }
+ print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
+ default_zone);
+ print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_MOVABLE,
+ default_zone);
out:
strcat(buf, "\n");
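After this change show_valid_zones() prints the default onlining zone (zone_for_pfn_range(MMOP_ONLINE_KEEP, ...)) first and appends the kernel and movable alternatives only when they differ from it, so a block that defaults to ZONE_NORMAL but may also be onlined movable reads, for example, "Normal Movable".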
diff --git a/drivers/base/node.c b/drivers/base/node.c
index d8dc83017d8d..3855902f2c5b 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -160,12 +160,12 @@ static ssize_t node_read_numastat(struct device *dev,
"interleave_hit %lu\n"
"local_node %lu\n"
"other_node %lu\n",
- sum_zone_node_page_state(dev->id, NUMA_HIT),
- sum_zone_node_page_state(dev->id, NUMA_MISS),
- sum_zone_node_page_state(dev->id, NUMA_FOREIGN),
- sum_zone_node_page_state(dev->id, NUMA_INTERLEAVE_HIT),
- sum_zone_node_page_state(dev->id, NUMA_LOCAL),
- sum_zone_node_page_state(dev->id, NUMA_OTHER));
+ sum_zone_numa_state(dev->id, NUMA_HIT),
+ sum_zone_numa_state(dev->id, NUMA_MISS),
+ sum_zone_numa_state(dev->id, NUMA_FOREIGN),
+ sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
+ sum_zone_numa_state(dev->id, NUMA_LOCAL),
+ sum_zone_numa_state(dev->id, NUMA_OTHER));
}
static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
@@ -181,9 +181,17 @@ static ssize_t node_read_vmstat(struct device *dev,
n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
sum_zone_node_page_state(nid, i));
- for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+#ifdef CONFIG_NUMA
+ for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
n += sprintf(buf+n, "%s %lu\n",
vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
+ sum_zone_numa_state(nid, i));
+#endif
+
+ for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
+ n += sprintf(buf+n, "%s %lu\n",
+ vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
+ NR_VM_NUMA_STAT_ITEMS],
node_page_state(pgdat, i));
return n;
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 60303aa28587..e8ca5e2cf1e5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
+#ifdef CONFIG_DEBUG_FS
+static void genpd_update_accounting(struct generic_pm_domain *genpd)
+{
+ ktime_t delta, now;
+
+ now = ktime_get();
+ delta = ktime_sub(now, genpd->accounting_time);
+
+ /*
+ * If genpd->status is active, it means we are just
+ * out of off and so update the idle time and vice
+ * versa.
+ */
+ if (genpd->status == GPD_STATE_ACTIVE) {
+ int state_idx = genpd->state_idx;
+
+ genpd->states[state_idx].idle_time =
+ ktime_add(genpd->states[state_idx].idle_time, delta);
+ } else {
+ genpd->on_time = ktime_add(genpd->on_time, delta);
+ }
+
+ genpd->accounting_time = now;
+}
+#else
+static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {}
+#endif
+
static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
unsigned int state_idx = genpd->state_idx;
@@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on,
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd_update_accounting(genpd);
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth)
goto err;
genpd->status = GPD_STATE_ACTIVE;
+ genpd_update_accounting(genpd);
+
return 0;
err:
@@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd,
genpd->max_off_time_changed = true;
genpd->provider = NULL;
genpd->has_provider = false;
+ genpd->accounting_time = ktime_get();
genpd->domain.ops.runtime_suspend = genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = genpd_runtime_resume;
genpd->domain.ops.prepare = pm_genpd_prepare;
@@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
mutex_lock(&of_genpd_mutex);
list_add(&cp->link, &of_genpd_providers);
mutex_unlock(&of_genpd_mutex);
- pr_debug("Added domain provider from %s\n", np->full_name);
+ pr_debug("Added domain provider from %pOF\n", np);
return 0;
}
@@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
err = of_property_read_u32(state_node, "entry-latency-us",
&entry_latency);
if (err) {
- pr_debug(" * %s missing entry-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing entry-latency-us property\n",
+ state_node);
return -EINVAL;
}
err = of_property_read_u32(state_node, "exit-latency-us",
&exit_latency);
if (err) {
- pr_debug(" * %s missing exit-latency-us property\n",
- state_node->full_name);
+ pr_debug(" * %pOF missing exit-latency-us property\n",
+ state_node);
return -EINVAL;
}
@@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn,
ret = genpd_parse_state(&st[i++], np);
if (ret) {
pr_err
- ("Parsing idle state node %s failed with err %d\n",
- np->full_name, ret);
+ ("Parsing idle state node %pOF failed with err %d\n",
+ np, ret);
of_node_put(np);
kfree(st);
return ret;
@@ -2327,7 +2359,7 @@ exit:
return 0;
}
-static int pm_genpd_summary_show(struct seq_file *s, void *data)
+static int genpd_summary_show(struct seq_file *s, void *data)
{
struct generic_pm_domain *genpd;
int ret = 0;
@@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data)
return ret;
}
-static int pm_genpd_summary_open(struct inode *inode, struct file *file)
+static int genpd_status_show(struct seq_file *s, void *data)
{
- return single_open(file, pm_genpd_summary_show, NULL);
+ static const char * const status_lookup[] = {
+ [GPD_STATE_ACTIVE] = "on",
+ [GPD_STATE_POWER_OFF] = "off"
+ };
+
+ struct generic_pm_domain *genpd = s->private;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+
+ if (genpd->status == GPD_STATE_POWER_OFF)
+ seq_printf(s, "%s-%u\n", status_lookup[genpd->status],
+ genpd->state_idx);
+ else
+ seq_printf(s, "%s\n", status_lookup[genpd->status]);
+exit:
+ genpd_unlock(genpd);
+ return ret;
}
-static const struct file_operations pm_genpd_summary_fops = {
- .open = pm_genpd_summary_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
+static int genpd_sub_domains_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct gpd_link *link;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(link, &genpd->master_links, master_node)
+ seq_printf(s, "%s\n", link->slave->name);
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_idle_states_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ seq_puts(s, "State Time Spent(ms)\n");
+
+ for (i = 0; i < genpd->state_count; i++) {
+ ktime_t delta = 0;
+ s64 msecs;
+
+ if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ msecs = ktime_to_ms(
+ ktime_add(genpd->states[i].idle_time, delta));
+ seq_printf(s, "S%-13i %lld\n", i, msecs);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_active_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (genpd->status == GPD_STATE_ACTIVE)
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(
+ ktime_add(genpd->on_time, delta)));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+static int genpd_total_idle_time_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ ktime_t delta = 0, total = 0;
+ unsigned int i;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ for (i = 0; i < genpd->state_count; i++) {
+
+ if ((genpd->status == GPD_STATE_POWER_OFF) &&
+ (genpd->state_idx == i))
+ delta = ktime_sub(ktime_get(), genpd->accounting_time);
+
+ total = ktime_add(total, genpd->states[i].idle_time);
+ }
+ total = ktime_add(total, delta);
+
+ seq_printf(s, "%lld ms\n", ktime_to_ms(total));
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+
+static int genpd_devices_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *genpd = s->private;
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ int ret = 0;
+
+ ret = genpd_lock_interruptible(genpd);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(pm_data, &genpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj,
+ genpd_is_irq_safe(genpd) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "%s\n", kobj_path);
+ kfree(kobj_path);
+ }
+
+ genpd_unlock(genpd);
+ return ret;
+}
+
+#define define_genpd_open_function(name) \
+static int genpd_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, genpd_##name##_show, inode->i_private); \
+}
+
+define_genpd_open_function(summary);
+define_genpd_open_function(status);
+define_genpd_open_function(sub_domains);
+define_genpd_open_function(idle_states);
+define_genpd_open_function(active_time);
+define_genpd_open_function(total_idle_time);
+define_genpd_open_function(devices);
+
+#define define_genpd_debugfs_fops(name) \
+static const struct file_operations genpd_##name##_fops = { \
+ .open = genpd_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+define_genpd_debugfs_fops(summary);
+define_genpd_debugfs_fops(status);
+define_genpd_debugfs_fops(sub_domains);
+define_genpd_debugfs_fops(idle_states);
+define_genpd_debugfs_fops(active_time);
+define_genpd_debugfs_fops(total_idle_time);
+define_genpd_debugfs_fops(devices);
static int __init pm_genpd_debug_init(void)
{
struct dentry *d;
+ struct generic_pm_domain *genpd;
pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
@@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void)
return -ENOMEM;
d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
- pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
+ pm_genpd_debugfs_dir, NULL, &genpd_summary_fops);
if (!d)
return -ENOMEM;
+ list_for_each_entry(genpd, &gpd_list, gpd_list_node) {
+ d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir);
+ if (!d)
+ return -ENOMEM;
+
+ debugfs_create_file("current_state", 0444,
+ d, genpd, &genpd_status_fops);
+ debugfs_create_file("sub_domains", 0444,
+ d, genpd, &genpd_sub_domains_fops);
+ debugfs_create_file("idle_states", 0444,
+ d, genpd, &genpd_idle_states_fops);
+ debugfs_create_file("active_time", 0444,
+ d, genpd, &genpd_active_time_fops);
+ debugfs_create_file("total_idle_time", 0444,
+ d, genpd, &genpd_total_idle_time_fops);
+ debugfs_create_file("devices", 0444,
+ d, genpd, &genpd_devices_fops);
+ }
+
return 0;
}
late_initcall(pm_genpd_debug_init);
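The genpd hunks add on/idle time accounting and expose it per domain through debugfs. Assuming debugfs is mounted in the usual place, the resulting layout looks roughly like the following (the domain name is whatever the platform registered):

/sys/kernel/debug/pm_genpd/
	pm_genpd_summary
	<domain-name>/
		current_state
		sub_domains
		idle_states
		active_time
		total_idle_time
		devices

All per-domain files are read-only (0444) and back onto the seq_file show routines added above.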
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c99f8730de82..ea1732ed7a9d 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -418,8 +418,7 @@ static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
dev_name(dev), pm_verb(state.event), info, error);
}
-#ifdef CONFIG_PM_DEBUG
-static void dpm_show_time(ktime_t starttime, pm_message_t state,
+static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
const char *info)
{
ktime_t calltime;
@@ -432,14 +431,12 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state,
usecs = usecs64;
if (usecs == 0)
usecs = 1;
- pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
- info ?: "", info ? " " : "", pm_verb(state.event),
- usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+
+ pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
+ info ?: "", info ? " " : "", pm_verb(state.event),
+ error ? "aborted" : "complete",
+ usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
-#else
-static inline void dpm_show_time(ktime_t starttime, pm_message_t state,
- const char *info) {}
-#endif /* CONFIG_PM_DEBUG */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
pm_message_t state, const char *info)
@@ -602,14 +599,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
put_device(dev);
}
-/**
- * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
- * enable device drivers to receive interrupts.
- */
-void dpm_resume_noirq(pm_message_t state)
+void dpm_noirq_resume_devices(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -654,11 +644,28 @@ void dpm_resume_noirq(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, "noirq");
+ dpm_show_time(starttime, state, 0, "noirq");
+ trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+void dpm_noirq_end(void)
+{
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
cpuidle_resume();
- trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
+}
+
+/**
+ * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
+ * allow device drivers' interrupt handlers to be called.
+ */
+void dpm_resume_noirq(pm_message_t state)
+{
+ dpm_noirq_resume_devices(state);
+ dpm_noirq_end();
}
/**
@@ -776,7 +783,7 @@ void dpm_resume_early(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, "early");
+ dpm_show_time(starttime, state, 0, "early");
trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
@@ -948,7 +955,7 @@ void dpm_resume(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
- dpm_show_time(starttime, state, NULL);
+ dpm_show_time(starttime, state, 0, NULL);
cpufreq_resume();
trace_suspend_resume(TPS("dpm_resume"), state.event, false);
@@ -1098,6 +1105,11 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
if (async_error)
goto Complete;
+ if (pm_wakeup_pending()) {
+ async_error = -EBUSY;
+ goto Complete;
+ }
+
if (dev->power.syscore || dev->power.direct_complete)
goto Complete;
@@ -1158,22 +1170,19 @@ static int device_suspend_noirq(struct device *dev)
return __device_suspend_noirq(dev, pm_transition, false);
}
-/**
- * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
- * @state: PM transition of the system being carried out.
- *
- * Prevent device drivers from receiving interrupts and call the "noirq" suspend
- * handlers for all non-sysdev devices.
- */
-int dpm_suspend_noirq(pm_message_t state)
+void dpm_noirq_begin(void)
+{
+ cpuidle_pause();
+ device_wakeup_arm_wake_irqs();
+ suspend_device_irqs();
+}
+
+int dpm_noirq_suspend_devices(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- cpuidle_pause();
- device_wakeup_arm_wake_irqs();
- suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
pm_transition = state;
async_error = 0;
@@ -1208,15 +1217,32 @@ int dpm_suspend_noirq(pm_message_t state)
if (error) {
suspend_stats.failed_suspend_noirq++;
dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
- dpm_resume_noirq(resume_event(state));
- } else {
- dpm_show_time(starttime, state, "noirq");
}
+ dpm_show_time(starttime, state, error, "noirq");
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
return error;
}
/**
+ * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
+ * @state: PM transition of the system being carried out.
+ *
+ * Prevent device drivers' interrupt handlers from being called and invoke
+ * "noirq" suspend callbacks for all non-sysdev devices.
+ */
+int dpm_suspend_noirq(pm_message_t state)
+{
+ int ret;
+
+ dpm_noirq_begin();
+ ret = dpm_noirq_suspend_devices(state);
+ if (ret)
+ dpm_resume_noirq(resume_event(state));
+
+ return ret;
+}
+
+/**
* device_suspend_late - Execute a "late suspend" callback for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -1350,9 +1376,8 @@ int dpm_suspend_late(pm_message_t state)
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
dpm_resume_early(resume_event(state));
- } else {
- dpm_show_time(starttime, state, "late");
}
+ dpm_show_time(starttime, state, error, "late");
trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
return error;
}
@@ -1618,8 +1643,8 @@ int dpm_suspend(pm_message_t state)
if (error) {
suspend_stats.failed_suspend++;
dpm_save_failed_step(SUSPEND_SUSPEND);
- } else
- dpm_show_time(starttime, state, NULL);
+ }
+ dpm_show_time(starttime, state, error, NULL);
trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
return error;
}
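dpm_suspend_noirq() and dpm_resume_noirq() are now thin wrappers around the new dpm_noirq_begin()/dpm_noirq_suspend_devices() and dpm_noirq_resume_devices()/dpm_noirq_end() pairs, so a caller that needs extra work done while device interrupts are already disabled can open-code the sequence. A hedged sketch of such a caller; the intermediate step is a placeholder and the error handling mirrors the wrapper above.

/* Hedged sketch: open-coding the noirq phase around an extra step. */
static int example_noirq_phase(pm_message_t state)
{
	int error;

	dpm_noirq_begin();	/* pause cpuidle, arm wake irqs, disable device irqs */

	/* ... platform-specific work that must run with device irqs suspended ... */

	error = dpm_noirq_suspend_devices(state);
	if (error) {
		dpm_noirq_resume_devices(resume_event(state));
		dpm_noirq_end();
	}
	/* on success the matching resume path runs
	 * dpm_noirq_resume_devices() followed by dpm_noirq_end() */
	return error;
}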
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
index 57eec1ca0569..0b718886479b 100644
--- a/drivers/base/power/opp/of.c
+++ b/drivers/base/power/opp/of.c
@@ -248,15 +248,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-/* Returns opp descriptor node for a device, caller must do of_node_put() */
-struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+/* Returns opp descriptor node for a device node, caller must
+ * do of_node_put() */
+static struct device_node *_opp_of_get_opp_desc_node(struct device_node *np)
{
/*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
- return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
+ return of_parse_phandle(np, "operating-points-v2", 0);
+}
+
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev)
+{
+ return _opp_of_get_opp_desc_node(dev->of_node);
}
EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node);
@@ -539,8 +546,12 @@ int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
ret = dev_pm_opp_of_add_table(cpu_dev);
if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
+ /*
+ * OPP may get registered dynamically, don't print error
+ * message here.
+ */
+ pr_debug("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
/* Free all other OPPs */
dev_pm_opp_of_cpumask_remove_table(cpumask);
@@ -572,8 +583,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
struct cpumask *cpumask)
{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
+ struct device_node *np, *tmp_np, *cpu_np;
int cpu, ret = 0;
/* Get OPP descriptor node */
@@ -593,19 +603,18 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
if (cpu == cpu_dev->id)
continue;
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ cpu_np = of_get_cpu_node(cpu, NULL);
+ if (!cpu_np) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d node\n",
__func__, cpu);
- ret = -ENODEV;
+ ret = -ENOENT;
goto put_cpu_node;
}
/* Get OPP descriptor node */
- tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev);
+ tmp_np = _opp_of_get_opp_desc_node(cpu_np);
if (!tmp_np) {
- dev_err(tcpu_dev, "%s: Couldn't find opp node.\n",
- __func__);
+ pr_err("%pOF: Couldn't find opp node\n", cpu_np);
ret = -ENOENT;
goto put_cpu_node;
}
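dev_pm_opp_of_get_sharing_cpus() now resolves the other CPUs through of_get_cpu_node() and the new _opp_of_get_opp_desc_node() helper instead of requiring a registered struct device per CPU, so it also works before those devices exist; a missing CPU node is reported as -ENOENT rather than -ENODEV. In the same file, dev_pm_opp_of_cpumask_add_table() demotes its "couldn't find opp table" message to pr_debug() because OPPs may be registered dynamically.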
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 144e6d8fafc8..cdd6f256da59 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable)
if (!!dev->power.can_wakeup == !!capable)
return;
+ dev->power.can_wakeup = capable;
if (device_is_registered(dev) && !list_empty(&dev->power.entry)) {
if (capable) {
- if (wakeup_sysfs_add(dev))
- return;
+ int ret = wakeup_sysfs_add(dev);
+
+ if (ret)
+ dev_info(dev, "Wakeup sysfs attributes not added\n");
} else {
wakeup_sysfs_remove(dev);
}
}
- dev->power.can_wakeup = capable;
}
EXPORT_SYMBOL_GPL(device_set_wakeup_capable);
@@ -863,7 +865,7 @@ bool pm_wakeup_pending(void)
void pm_system_wakeup(void)
{
atomic_inc(&pm_abort_suspend);
- freeze_wake();
+ s2idle_wake();
}
EXPORT_SYMBOL_GPL(pm_system_wakeup);
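device_set_wakeup_capable() now records power.can_wakeup before touching sysfs and merely logs a dev_info() if the wakeup attributes cannot be added, instead of silently leaving the flag clear; pm_system_wakeup() calls s2idle_wake(), following the suspend-to-idle (s2idle) renaming of the freeze_*() helpers.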
diff --git a/drivers/base/property.c b/drivers/base/property.c
index edf02c1b5845..d0b65bbe7e15 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -25,19 +25,25 @@ struct property_set {
const struct property_entry *properties;
};
-static inline bool is_pset_node(struct fwnode_handle *fwnode)
-{
- return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
-}
+static const struct fwnode_operations pset_fwnode_ops;
-static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
+static inline bool is_pset_node(const struct fwnode_handle *fwnode)
{
- return is_pset_node(fwnode) ?
- container_of(fwnode, struct property_set, fwnode) : NULL;
+ return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &pset_fwnode_ops;
}
-static const struct property_entry *pset_prop_get(struct property_set *pset,
- const char *name)
+#define to_pset_node(__fwnode) \
+ ({ \
+ typeof(__fwnode) __to_pset_node_fwnode = __fwnode; \
+ \
+ is_pset_node(__to_pset_node_fwnode) ? \
+ container_of(__to_pset_node_fwnode, \
+ struct property_set, fwnode) : \
+ NULL; \
+ })
+
+static const struct property_entry *
+pset_prop_get(const struct property_set *pset, const char *name)
{
const struct property_entry *prop;
@@ -51,7 +57,7 @@ static const struct property_entry *pset_prop_get(struct property_set *pset,
return NULL;
}
-static const void *pset_prop_find(struct property_set *pset,
+static const void *pset_prop_find(const struct property_set *pset,
const char *propname, size_t length)
{
const struct property_entry *prop;
@@ -71,7 +77,7 @@ static const void *pset_prop_find(struct property_set *pset,
return pointer;
}
-static int pset_prop_read_u8_array(struct property_set *pset,
+static int pset_prop_read_u8_array(const struct property_set *pset,
const char *propname,
u8 *values, size_t nval)
{
@@ -86,7 +92,7 @@ static int pset_prop_read_u8_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u16_array(struct property_set *pset,
+static int pset_prop_read_u16_array(const struct property_set *pset,
const char *propname,
u16 *values, size_t nval)
{
@@ -101,7 +107,7 @@ static int pset_prop_read_u16_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u32_array(struct property_set *pset,
+static int pset_prop_read_u32_array(const struct property_set *pset,
const char *propname,
u32 *values, size_t nval)
{
@@ -116,7 +122,7 @@ static int pset_prop_read_u32_array(struct property_set *pset,
return 0;
}
-static int pset_prop_read_u64_array(struct property_set *pset,
+static int pset_prop_read_u64_array(const struct property_set *pset,
const char *propname,
u64 *values, size_t nval)
{
@@ -131,7 +137,7 @@ static int pset_prop_read_u64_array(struct property_set *pset,
return 0;
}
-static int pset_prop_count_elems_of_size(struct property_set *pset,
+static int pset_prop_count_elems_of_size(const struct property_set *pset,
const char *propname, size_t length)
{
const struct property_entry *prop;
@@ -143,7 +149,7 @@ static int pset_prop_count_elems_of_size(struct property_set *pset,
return prop->length / length;
}
-static int pset_prop_read_string_array(struct property_set *pset,
+static int pset_prop_read_string_array(const struct property_set *pset,
const char *propname,
const char **strings, size_t nval)
{
@@ -187,18 +193,18 @@ struct fwnode_handle *dev_fwnode(struct device *dev)
}
EXPORT_SYMBOL_GPL(dev_fwnode);
-static bool pset_fwnode_property_present(struct fwnode_handle *fwnode,
+static bool pset_fwnode_property_present(const struct fwnode_handle *fwnode,
const char *propname)
{
return !!pset_prop_get(to_pset_node(fwnode), propname);
}
-static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode,
+static int pset_fwnode_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
{
- struct property_set *node = to_pset_node(fwnode);
+ const struct property_set *node = to_pset_node(fwnode);
if (!val)
return pset_prop_count_elems_of_size(node, propname, elem_size);
@@ -217,9 +223,10 @@ static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode,
return -ENXIO;
}
-static int pset_fwnode_property_read_string_array(struct fwnode_handle *fwnode,
- const char *propname,
- const char **val, size_t nval)
+static int
+pset_fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
+ const char *propname,
+ const char **val, size_t nval)
{
return pset_prop_read_string_array(to_pset_node(fwnode), propname,
val, nval);
@@ -249,7 +256,8 @@ EXPORT_SYMBOL_GPL(device_property_present);
* @fwnode: Firmware node whose property to check
* @propname: Name of the property
*/
-bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
+bool fwnode_property_present(const struct fwnode_handle *fwnode,
+ const char *propname)
{
bool ret;
@@ -431,7 +439,7 @@ int device_property_match_string(struct device *dev, const char *propname,
}
EXPORT_SYMBOL_GPL(device_property_match_string);
-static int fwnode_property_read_int_array(struct fwnode_handle *fwnode,
+static int fwnode_property_read_int_array(const struct fwnode_handle *fwnode,
const char *propname,
unsigned int elem_size, void *val,
size_t nval)
@@ -467,7 +475,7 @@ static int fwnode_property_read_int_array(struct fwnode_handle *fwnode,
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u8_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u8_array(const struct fwnode_handle *fwnode,
const char *propname, u8 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u8),
@@ -493,7 +501,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u16_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u16_array(const struct fwnode_handle *fwnode,
const char *propname, u16 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u16),
@@ -519,7 +527,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u32_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u32_array(const struct fwnode_handle *fwnode,
const char *propname, u32 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u32),
@@ -545,7 +553,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_u64_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_u64_array(const struct fwnode_handle *fwnode,
const char *propname, u64 *val, size_t nval)
{
return fwnode_property_read_int_array(fwnode, propname, sizeof(u64),
@@ -571,7 +579,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array);
* %-EOVERFLOW if the size of the property is not as expected,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
+int fwnode_property_read_string_array(const struct fwnode_handle *fwnode,
const char *propname, const char **val,
size_t nval)
{
@@ -603,7 +611,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string_array);
* %-EPROTO or %-EILSEQ if the property is not a string,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_read_string(struct fwnode_handle *fwnode,
+int fwnode_property_read_string(const struct fwnode_handle *fwnode,
const char *propname, const char **val)
{
int ret = fwnode_property_read_string_array(fwnode, propname, val, 1);
@@ -627,7 +635,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string);
* %-EPROTO if the property is not an array of strings,
* %-ENXIO if no suitable firmware interface is present.
*/
-int fwnode_property_match_string(struct fwnode_handle *fwnode,
+int fwnode_property_match_string(const struct fwnode_handle *fwnode,
const char *propname, const char *string)
{
const char **values;
@@ -657,6 +665,34 @@ out:
}
EXPORT_SYMBOL_GPL(fwnode_property_match_string);
+/**
+ * fwnode_property_get_reference_args() - Find a reference with arguments
+ * @fwnode: Firmware node where to look for the reference
+ * @prop: The name of the property
+ * @nargs_prop: The name of the property telling the number of
+ * arguments in the referred node. NULL if @nargs is known,
+ * otherwise @nargs is ignored. Only relevant on OF.
+ * @nargs: Number of arguments. Ignored if @nargs_prop is non-NULL.
+ * @index: Index of the reference, from zero onwards.
+ * @args: Result structure with reference and integer arguments.
+ *
+ * Obtain a reference based on a named property in an fwnode, with
+ * integer arguments.
+ *
+ * Caller is responsible to call fwnode_handle_put() on the returned
+ * args->fwnode pointer.
+ *
+ */
+int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
+ const char *prop, const char *nargs_prop,
+ unsigned int nargs, unsigned int index,
+ struct fwnode_reference_args *args)
+{
+ return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+}
+EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
+
static int property_copy_string_array(struct property_entry *dst,
const struct property_entry *src)
{
@@ -900,7 +936,6 @@ int device_add_properties(struct device *dev,
if (IS_ERR(p))
return PTR_ERR(p);
- p->fwnode.type = FWNODE_PDATA;
p->fwnode.ops = &pset_fwnode_ops;
set_secondary_fwnode(dev, &p->fwnode);
return 0;
@@ -935,7 +970,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
* Return parent firmware node of the given node if possible or %NULL if no
* parent was available.
*/
-struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode)
+struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, get_parent);
}
@@ -946,8 +981,9 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent);
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
*/
-struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
- struct fwnode_handle *child)
+struct fwnode_handle *
+fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+ struct fwnode_handle *child)
{
return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
}
@@ -978,8 +1014,9 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node);
* @fwnode: Firmware node to find the named child node for.
* @childname: String to match child node name against.
*/
-struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
- const char *childname)
+struct fwnode_handle *
+fwnode_get_named_child_node(const struct fwnode_handle *fwnode,
+ const char *childname)
{
return fwnode_call_ptr_op(fwnode, get_named_child_node, childname);
}
@@ -1025,7 +1062,7 @@ EXPORT_SYMBOL_GPL(fwnode_handle_put);
* fwnode_device_is_available - check if a device is available for use
* @fwnode: Pointer to the fwnode of the device.
*/
-bool fwnode_device_is_available(struct fwnode_handle *fwnode)
+bool fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
return fwnode_call_bool_op(fwnode, device_is_available);
}
@@ -1163,7 +1200,7 @@ EXPORT_SYMBOL(device_get_mac_address);
* are available.
*/
struct fwnode_handle *
-fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_handle *prev)
{
return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev);
@@ -1177,7 +1214,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
* Return: the firmware node of the device the @endpoint belongs to.
*/
struct fwnode_handle *
-fwnode_graph_get_port_parent(struct fwnode_handle *endpoint)
+fwnode_graph_get_port_parent(const struct fwnode_handle *endpoint)
{
struct fwnode_handle *port, *parent;
@@ -1197,7 +1234,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent);
* Extracts firmware node of a remote device the @fwnode points to.
*/
struct fwnode_handle *
-fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode)
+fwnode_graph_get_remote_port_parent(const struct fwnode_handle *fwnode)
{
struct fwnode_handle *endpoint, *parent;
@@ -1216,7 +1253,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent);
*
* Extracts firmware node of a remote port the @fwnode points to.
*/
-struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode)
+struct fwnode_handle *
+fwnode_graph_get_remote_port(const struct fwnode_handle *fwnode)
{
return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode));
}
@@ -1229,7 +1267,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port);
* Extracts firmware node of a remote endpoint the @fwnode points to.
*/
struct fwnode_handle *
-fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode)
+fwnode_graph_get_remote_endpoint(const struct fwnode_handle *fwnode)
{
return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint);
}
@@ -1244,8 +1282,9 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint);
* Return: Remote fwnode handle associated with remote endpoint node linked
* to @node. Use fwnode_node_put() on it when done.
*/
-struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode,
- u32 port_id, u32 endpoint_id)
+struct fwnode_handle *
+fwnode_graph_get_remote_node(const struct fwnode_handle *fwnode, u32 port_id,
+ u32 endpoint_id)
{
struct fwnode_handle *endpoint = NULL;
@@ -1281,7 +1320,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node);
* information in @endpoint. The caller must hold a reference to
* @fwnode.
*/
-int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint)
{
memset(endpoint, 0, sizeof(*endpoint));
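Besides constifying the fwnode property accessors, this hunk set introduces fwnode_property_get_reference_args(). A hedged consumer sketch follows; the property name and the argument count are invented for illustration, and per the kernel-doc above the caller drops the reference on args.fwnode when done.

/* Hedged sketch: resolving a reference-with-arguments property. */
static int example_get_reference(struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = fwnode_property_get_reference_args(fwnode, "example-ctrl",
						 NULL, 2, 0, &args);
	if (ret)
		return ret;

	/* ... use args.fwnode and args.args[0], args.args[1] ... */

	fwnode_handle_put(args.fwnode);	/* caller owns the reference */
	return 0;
}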
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index d6ec1c546f5b..d936fcf9f1fb 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -105,7 +105,7 @@ static struct attribute *default_attrs[] = {
NULL
};
-static struct attribute_group topology_attr_group = {
+static const struct attribute_group topology_attr_group = {
.attrs = default_attrs,
.name = "topology"
};
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index b5c48a8d485f..54f81c554815 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -3,11 +3,8 @@ config BCMA_POSSIBLE
depends on HAS_IOMEM && HAS_DMA
default y
-menu "Broadcom specific AMBA"
- depends on BCMA_POSSIBLE
-
-config BCMA
- tristate "BCMA support"
+menuconfig BCMA
+ tristate "Broadcom specific AMBA"
depends on BCMA_POSSIBLE
help
Bus driver for Broadcom specific Advanced Microcontroller Bus
@@ -117,5 +114,3 @@ config BCMA_DEBUG
This turns on additional debugging messages.
If unsure, say N
-
-endmenu
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
index 7bde8d7a2816..982d5781d3ce 100644
--- a/drivers/bcma/driver_gpio.c
+++ b/drivers/bcma/driver_gpio.c
@@ -191,6 +191,7 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
case BCMA_CHIP_ID_BCM4707:
case BCMA_CHIP_ID_BCM5357:
case BCMA_CHIP_ID_BCM53572:
+ case BCMA_CHIP_ID_BCM53573:
case BCMA_CHIP_ID_BCM47094:
chip->ngpio = 32;
break;
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 245a879b036e..255591ab3716 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1678,9 +1678,12 @@ static bool DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
Enquiry2->FirmwareID.FirmwareType = '0';
Enquiry2->FirmwareID.TurnID = 0;
}
- sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d",
- Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion,
- Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID);
+ snprintf(Controller->FirmwareVersion, sizeof(Controller->FirmwareVersion),
+ "%d.%02d-%c-%02d",
+ Enquiry2->FirmwareID.MajorVersion,
+ Enquiry2->FirmwareID.MinorVersion,
+ Enquiry2->FirmwareID.FirmwareType,
+ Enquiry2->FirmwareID.TurnID);
if (!((Controller->FirmwareVersion[0] == '5' &&
strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
(Controller->FirmwareVersion[0] == '4' &&
@@ -6588,7 +6591,8 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
&dac960_proc_fops);
}
- sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+ snprintf(Controller->ControllerName, sizeof(Controller->ControllerName),
+ "c%d", Controller->ControllerNumber);
ControllerProcEntry = proc_mkdir(Controller->ControllerName,
DAC960_ProcDirectoryEntry);
proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
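Both DAC960 hunks replace sprintf() with snprintf() bounded by the size of the destination field, so an oversized firmware version string or controller number is truncated instead of overrunning the fixed-size FirmwareVersion and ControllerName buffers.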
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 8ddc98279c8f..4a438b8abe27 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,6 +17,7 @@ if BLK_DEV
config BLK_DEV_NULL_BLK
tristate "Null test block driver"
+ depends on CONFIGFS_FS
config BLK_DEV_FD
tristate "Normal floppy disk support"
@@ -111,33 +112,6 @@ source "drivers/block/mtip32xx/Kconfig"
source "drivers/block/zram/Kconfig"
-config BLK_CPQ_CISS_DA
- tristate "Compaq Smart Array 5xxx support"
- depends on PCI
- select CHECK_SIGNATURE
- select BLK_SCSI_REQUEST
- help
- This is the driver for Compaq Smart Array 5xxx controllers.
- Everyone using these boards should say Y here.
- See <file:Documentation/blockdev/cciss.txt> for the current list of
- boards supported by this driver, and for further information
- on the use of this driver.
-
-config CISS_SCSI_TAPE
- bool "SCSI tape drive support for Smart Array 5xxx"
- depends on BLK_CPQ_CISS_DA && PROC_FS
- depends on SCSI=y || SCSI=BLK_CPQ_CISS_DA
- help
- When enabled (Y), this option allows SCSI tape drives and SCSI medium
- changers (tape robots) to be accessed via a Compaq 5xxx array
- controller. (See <file:Documentation/blockdev/cciss.txt> for more details.)
-
- "SCSI support" and "SCSI tape support" must also be enabled for this
- option to work.
-
- When this option is disabled (N), the SCSI portion of the driver
- is not compiled.
-
config BLK_DEV_DAC960
tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
depends on PCI
@@ -470,7 +444,7 @@ config VIRTIO_BLK
depends on VIRTIO
---help---
This is the virtual block driver for virtio. It can be used with
- lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
+ QEMU based VMMs (like KVM or Xen). Say Y or M.
config VIRTIO_BLK_SCSI
bool "SCSI passthrough request for the Virtio block driver"
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index ec8c36897b75..1f456d86a190 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -15,7 +15,6 @@ obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o
obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o
obj-$(CONFIG_BLK_DEV_RAM) += brd.o
obj-$(CONFIG_BLK_DEV_LOOP) += loop.o
-obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 104b71c0490d..bbd0d186cfc0 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -294,14 +294,13 @@ out:
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{
- struct block_device *bdev = bio->bi_bdev;
- struct brd_device *brd = bdev->bd_disk->private_data;
+ struct brd_device *brd = bio->bi_disk->private_data;
struct bio_vec bvec;
sector_t sector;
struct bvec_iter iter;
sector = bio->bi_iter.bi_sector;
- if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
+ if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error;
bio_for_each_segment(bvec, bio, iter) {
@@ -326,7 +325,11 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
+ int err;
+
+ if (PageTransHuge(page))
+ return -ENOTSUPP;
+ err = brd_do_bvec(brd, page, PAGE_SIZE, 0, is_write, sector);
page_endio(page, is_write, err);
return err;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
deleted file mode 100644
index 678af946be30..000000000000
--- a/drivers/block/cciss.c
+++ /dev/null
@@ -1,5415 +0,0 @@
-/*
- * Disk Array driver for HP Smart Array controllers.
- * (C) Copyright 2000, 2007 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
- * 02111-1307, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/major.h>
-#include <linux/fs.h>
-#include <linux/bio.h>
-#include <linux/blkpg.h>
-#include <linux/timer.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/init.h>
-#include <linux/jiffies.h>
-#include <linux/hdreg.h>
-#include <linux/spinlock.h>
-#include <linux/compat.h>
-#include <linux/mutex.h>
-#include <linux/bitmap.h>
-#include <linux/io.h>
-#include <linux/uaccess.h>
-
-#include <linux/dma-mapping.h>
-#include <linux/blkdev.h>
-#include <linux/genhd.h>
-#include <linux/completion.h>
-#include <scsi/scsi.h>
-#include <scsi/sg.h>
-#include <scsi/scsi_ioctl.h>
-#include <scsi/scsi_request.h>
-#include <linux/cdrom.h>
-#include <linux/scatterlist.h>
-#include <linux/kthread.h>
-
-#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
-#define DRIVER_NAME "HP CISS Driver (v 3.6.26)"
-#define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26)
-
-/* Embedded module documentation macros - see modules.h */
-MODULE_AUTHOR("Hewlett-Packard Company");
-MODULE_DESCRIPTION("Driver for HP Smart Array Controllers");
-MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
-MODULE_VERSION("3.6.26");
-MODULE_LICENSE("GPL");
-static int cciss_tape_cmds = 6;
-module_param(cciss_tape_cmds, int, 0644);
-MODULE_PARM_DESC(cciss_tape_cmds,
- "number of commands to allocate for tape devices (default: 6)");
-static int cciss_simple_mode;
-module_param(cciss_simple_mode, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_simple_mode,
- "Use 'simple mode' rather than 'performant mode'");
-
-static int cciss_allow_hpsa;
-module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(cciss_allow_hpsa,
- "Prevent cciss driver from accessing hardware known to be "
- " supported by the hpsa driver");
-
-static DEFINE_MUTEX(cciss_mutex);
-static struct proc_dir_entry *proc_cciss;
-
-#include "cciss_cmd.h"
-#include "cciss.h"
-#include <linux/cciss_ioctl.h>
-
-/* define the PCI info for the cards we can control */
-static const struct pci_device_id cciss_pci_device_id[] = {
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS, 0x0E11, 0x4070},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4080},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4082},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB, 0x0E11, 0x4083},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x4091},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409A},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409B},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409C},
- {PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC, 0x0E11, 0x409D},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA, 0x103C, 0x3225},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3235},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3211},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3212},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3213},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3214},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSD, 0x103C, 0x3215},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3237},
- {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D},
- {0,}
-};
-
-MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
-
-/* board_id = Subsystem Device ID & Vendor ID
- * product = Marketing Name for the board
- * access = Address of the struct of function pointers
- */
-static struct board_type products[] = {
- {0x40700E11, "Smart Array 5300", &SA5_access},
- {0x40800E11, "Smart Array 5i", &SA5B_access},
- {0x40820E11, "Smart Array 532", &SA5B_access},
- {0x40830E11, "Smart Array 5312", &SA5B_access},
- {0x409A0E11, "Smart Array 641", &SA5_access},
- {0x409B0E11, "Smart Array 642", &SA5_access},
- {0x409C0E11, "Smart Array 6400", &SA5_access},
- {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
- {0x40910E11, "Smart Array 6i", &SA5_access},
- {0x3225103C, "Smart Array P600", &SA5_access},
- {0x3223103C, "Smart Array P800", &SA5_access},
- {0x3234103C, "Smart Array P400", &SA5_access},
- {0x3235103C, "Smart Array P400i", &SA5_access},
- {0x3211103C, "Smart Array E200i", &SA5_access},
- {0x3212103C, "Smart Array E200", &SA5_access},
- {0x3213103C, "Smart Array E200i", &SA5_access},
- {0x3214103C, "Smart Array E200i", &SA5_access},
- {0x3215103C, "Smart Array E200i", &SA5_access},
- {0x3237103C, "Smart Array E500", &SA5_access},
- {0x323D103C, "Smart Array P700m", &SA5_access},
-};
-
-/* How long to wait (in milliseconds) for board to go into simple mode */
-#define MAX_CONFIG_WAIT 30000
-#define MAX_IOCTL_CONFIG_WAIT 1000
-
-/*define how many times we will try a command because of bus resets */
-#define MAX_CMD_RETRIES 3
-
-#define MAX_CTLR 32
-
-/* Originally cciss driver only supports 8 major numbers */
-#define MAX_CTLR_ORIG 8
-
-static ctlr_info_t *hba[MAX_CTLR];
-
-static struct task_struct *cciss_scan_thread;
-static DEFINE_MUTEX(scan_mutex);
-static LIST_HEAD(scan_q);
-
-static void do_cciss_request(struct request_queue *q);
-static irqreturn_t do_cciss_intx(int irq, void *dev_id);
-static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id);
-static int cciss_open(struct block_device *bdev, fmode_t mode);
-static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode);
-static void cciss_release(struct gendisk *disk, fmode_t mode);
-static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
-static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
-
-static int cciss_revalidate(struct gendisk *disk);
-static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
-static int deregister_disk(ctlr_info_t *h, int drv_index,
- int clear_all, int via_ioctl);
-
-static void cciss_read_capacity(ctlr_info_t *h, int logvol,
- sector_t *total_size, unsigned int *block_size);
-static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
- sector_t *total_size, unsigned int *block_size);
-static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
- sector_t total_size,
- unsigned int block_size, InquiryData_struct *inq_buff,
- drive_info_struct *drv);
-static void cciss_interrupt_mode(ctlr_info_t *);
-static int cciss_enter_simple_mode(struct ctlr_info *h);
-static void start_io(ctlr_info_t *h);
-static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
- __u8 page_code, unsigned char scsi3addr[],
- int cmd_type);
-static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
- int attempt_retry);
-static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
-
-static int add_to_scan_list(struct ctlr_info *h);
-static int scan_thread(void *data);
-static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
-static void cciss_hba_release(struct device *dev);
-static void cciss_device_release(struct device *dev);
-static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
-static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
-static inline u32 next_command(ctlr_info_t *h);
-static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
- u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset);
-static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar);
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag);
-static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable);
-
-/* performant mode helper functions */
-static void calc_bucket_map(int *bucket, int num_buckets, int nsgs,
- int *bucket_map);
-static void cciss_put_controller_into_performant_mode(ctlr_info_t *h);
-
-#ifdef CONFIG_PROC_FS
-static void cciss_procinit(ctlr_info_t *h);
-#else
-static void cciss_procinit(ctlr_info_t *h)
-{
-}
-#endif /* CONFIG_PROC_FS */
-
-#ifdef CONFIG_COMPAT
-static int cciss_compat_ioctl(struct block_device *, fmode_t,
- unsigned, unsigned long);
-#endif
-
-static const struct block_device_operations cciss_fops = {
- .owner = THIS_MODULE,
- .open = cciss_unlocked_open,
- .release = cciss_release,
- .ioctl = cciss_ioctl,
- .getgeo = cciss_getgeo,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = cciss_compat_ioctl,
-#endif
- .revalidate_disk = cciss_revalidate,
-};
-
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c)
-{
- if (likely(h->transMethod & CFGTBL_Trans_Performant))
- c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
-
-/*
- * Enqueuing and dequeuing functions for cmdlists.
- */
-static inline void addQ(struct list_head *list, CommandList_struct *c)
-{
- list_add_tail(&c->list, list);
-}
-
-static inline void removeQ(CommandList_struct *c)
-{
- /*
- * After kexec/dump some commands might still
- * be in flight, which the firmware will try
- * to complete. Resetting the firmware doesn't work
- * with old fw revisions, so we have to mark
- * them off as 'stale' to prevent the driver from
- * falling over.
- */
- if (WARN_ON(list_empty(&c->list))) {
- c->cmd_type = CMD_MSG_STALE;
- return;
- }
-
- list_del_init(&c->list);
-}
-
-static void enqueue_cmd_and_start_io(ctlr_info_t *h,
- CommandList_struct *c)
-{
- unsigned long flags;
- set_performant_mode(h, c);
- spin_lock_irqsave(&h->lock, flags);
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
- start_io(h);
- spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list,
- int nr_cmds)
-{
- int i;
-
- if (!cmd_sg_list)
- return;
- for (i = 0; i < nr_cmds; i++) {
- kfree(cmd_sg_list[i]);
- cmd_sg_list[i] = NULL;
- }
- kfree(cmd_sg_list);
-}
-
-static SGDescriptor_struct **cciss_allocate_sg_chain_blocks(
- ctlr_info_t *h, int chainsize, int nr_cmds)
-{
- int j;
- SGDescriptor_struct **cmd_sg_list;
-
- if (chainsize <= 0)
- return NULL;
-
- cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL);
- if (!cmd_sg_list)
- return NULL;
-
- /* Build up chain blocks for each command */
- for (j = 0; j < nr_cmds; j++) {
- /* Need a block of chainsized s/g elements. */
- cmd_sg_list[j] = kmalloc((chainsize *
- sizeof(*cmd_sg_list[j])), GFP_KERNEL);
- if (!cmd_sg_list[j]) {
- dev_err(&h->pdev->dev, "Cannot get memory "
- "for s/g chains.\n");
- goto clean;
- }
- }
- return cmd_sg_list;
-clean:
- cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds);
- return NULL;
-}
-
-static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c)
-{
- SGDescriptor_struct *chain_sg;
- u64bit temp64;
-
- if (c->Header.SGTotal <= h->max_cmd_sgentries)
- return;
-
- chain_sg = &c->SG[h->max_cmd_sgentries - 1];
- temp64.val32.lower = chain_sg->Addr.lower;
- temp64.val32.upper = chain_sg->Addr.upper;
- pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
-}
-
-static int cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c,
- SGDescriptor_struct *chain_block, int len)
-{
- SGDescriptor_struct *chain_sg;
- u64bit temp64;
-
- chain_sg = &c->SG[h->max_cmd_sgentries - 1];
- chain_sg->Ext = CCISS_SG_CHAIN;
- chain_sg->Len = len;
- temp64.val = pci_map_single(h->pdev, chain_block, len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
- dev_warn(&h->pdev->dev,
- "%s: error mapping chain block for DMA\n",
- __func__);
- return -1;
- }
- chain_sg->Addr.lower = temp64.val32.lower;
- chain_sg->Addr.upper = temp64.val32.upper;
-
- return 0;
-}
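cciss_map_sg_chain_block() above records the DMA address of the external s/g block as two 32-bit halves (Addr.lower/Addr.upper) via the u64bit union. A trivial sketch of that split, assuming a hypothetical 64-bit mapped address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma = 0x0000001234567890ULL;	/* hypothetical pci_map_single() result */
	uint32_t lower = (uint32_t)dma;		/* what lands in chain_sg->Addr.lower */
	uint32_t upper = (uint32_t)(dma >> 32);	/* what lands in chain_sg->Addr.upper */

	printf("lower=0x%08x upper=0x%08x\n", lower, upper);
	return 0;
}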
-
-#include "cciss_scsi.c" /* For SCSI tape support */
-
-static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
- "UNKNOWN"
-};
-#define RAID_UNKNOWN (ARRAY_SIZE(raid_label)-1)
-
-#ifdef CONFIG_PROC_FS
-
-/*
- * Report information about this controller.
- */
-#define ENG_GIG 1000000000
-#define ENG_GIG_FACTOR (ENG_GIG/512)
-#define ENGAGE_SCSI "engage scsi"
-
-static void cciss_seq_show_header(struct seq_file *seq)
-{
- ctlr_info_t *h = seq->private;
-
- seq_printf(seq, "%s: HP %s Controller\n"
- "Board ID: 0x%08lx\n"
- "Firmware Version: %c%c%c%c\n"
- "IRQ: %d\n"
- "Logical drives: %d\n"
- "Current Q depth: %d\n"
- "Current # commands on controller: %d\n"
- "Max Q depth since init: %d\n"
- "Max # commands on controller since init: %d\n"
- "Max SG entries since init: %d\n",
- h->devname,
- h->product_name,
- (unsigned long)h->board_id,
- h->firm_ver[0], h->firm_ver[1], h->firm_ver[2],
- h->firm_ver[3], (unsigned int)h->intr[h->intr_mode],
- h->num_luns,
- h->Qdepth, h->commands_outstanding,
- h->maxQsinceinit, h->max_outstanding, h->maxSG);
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_seq_tape_report(seq, h);
-#endif /* CONFIG_CISS_SCSI_TAPE */
-}
-
-static void *cciss_seq_start(struct seq_file *seq, loff_t *pos)
-{
- ctlr_info_t *h = seq->private;
- unsigned long flags;
-
- /* prevent displaying bogus info during configuration
- * or deconfiguration of a logical volume
- */
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return ERR_PTR(-EBUSY);
- }
- h->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (*pos == 0)
- cciss_seq_show_header(seq);
-
- return pos;
-}
-
-static int cciss_seq_show(struct seq_file *seq, void *v)
-{
- sector_t vol_sz, vol_sz_frac;
- ctlr_info_t *h = seq->private;
- unsigned ctlr = h->ctlr;
- loff_t *pos = v;
- drive_info_struct *drv = h->drv[*pos];
-
- if (*pos > h->highest_lun)
- return 0;
-
- if (drv == NULL) /* it's possible for h->drv[] to have holes. */
- return 0;
-
- if (drv->heads == 0)
- return 0;
-
- vol_sz = drv->nr_blocks;
- vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
- vol_sz_frac *= 100;
- sector_div(vol_sz_frac, ENG_GIG_FACTOR);
-
- if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
- drv->raid_level = RAID_UNKNOWN;
- seq_printf(seq, "cciss/c%dd%d:"
- "\t%4u.%02uGB\tRAID %s\n",
- ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac,
- raid_label[drv->raid_level]);
- return 0;
-}
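The size printed by cciss_seq_show() is derived from the 512-byte block count: dividing by ENG_GIG_FACTOR (10^9 / 512 = 1953125) gives whole decimal gigabytes, and the remainder, scaled by 100 and divided again, gives the two fractional digits. A small sketch of that arithmetic with a made-up block count:

#include <stdint.h>
#include <stdio.h>

#define ENG_GIG_FACTOR (1000000000ULL / 512)	/* 512-byte blocks per decimal gigabyte */

int main(void)
{
	uint64_t nr_blocks = 143305920ULL;	/* hypothetical drv->nr_blocks */
	uint64_t gig  = nr_blocks / ENG_GIG_FACTOR;
	uint64_t frac = (nr_blocks % ENG_GIG_FACTOR) * 100 / ENG_GIG_FACTOR;

	printf("%4llu.%02lluGB\n", (unsigned long long)gig, (unsigned long long)frac);	/* "  73.37GB" */
	return 0;
}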
-
-static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos)
-{
- ctlr_info_t *h = seq->private;
-
- if (*pos > h->highest_lun)
- return NULL;
- *pos += 1;
-
- return pos;
-}
-
-static void cciss_seq_stop(struct seq_file *seq, void *v)
-{
- ctlr_info_t *h = seq->private;
-
- /* Only reset h->busy_configuring if we succeeded in setting
- * it during cciss_seq_start. */
- if (v == ERR_PTR(-EBUSY))
- return;
-
- h->busy_configuring = 0;
-}
-
-static const struct seq_operations cciss_seq_ops = {
- .start = cciss_seq_start,
- .show = cciss_seq_show,
- .next = cciss_seq_next,
- .stop = cciss_seq_stop,
-};
-
-static int cciss_seq_open(struct inode *inode, struct file *file)
-{
- int ret = seq_open(file, &cciss_seq_ops);
- struct seq_file *seq = file->private_data;
-
- if (!ret)
- seq->private = PDE_DATA(inode);
-
- return ret;
-}
-
-static ssize_t
-cciss_proc_write(struct file *file, const char __user *buf,
- size_t length, loff_t *ppos)
-{
- int err;
- char *buffer;
-
-#ifndef CONFIG_CISS_SCSI_TAPE
- return -EINVAL;
-#endif
-
- if (!buf || length > PAGE_SIZE - 1)
- return -EINVAL;
-
- buffer = memdup_user_nul(buf, length);
- if (IS_ERR(buffer))
- return PTR_ERR(buffer);
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) {
- struct seq_file *seq = file->private_data;
- ctlr_info_t *h = seq->private;
-
- err = cciss_engage_scsi(h);
- if (err == 0)
- err = length;
- } else
-#endif /* CONFIG_CISS_SCSI_TAPE */
- err = -EINVAL;
- /* might be nice to have "disengage" too, but it's not
- safely possible. (only 1 module use count, lock issues.) */
-
- kfree(buffer);
- return err;
-}
-
-static const struct file_operations cciss_proc_fops = {
- .owner = THIS_MODULE,
- .open = cciss_seq_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
- .write = cciss_proc_write,
-};
-
-static void cciss_procinit(ctlr_info_t *h)
-{
- struct proc_dir_entry *pde;
-
- if (proc_cciss == NULL)
- proc_cciss = proc_mkdir("driver/cciss", NULL);
- if (!proc_cciss)
- return;
- pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP |
- S_IROTH, proc_cciss,
- &cciss_proc_fops, h);
-}
-#endif /* CONFIG_PROC_FS */
-
-#define MAX_PRODUCT_NAME_LEN 19
-
-#define to_hba(n) container_of(n, struct ctlr_info, dev)
-#define to_drv(n) container_of(n, drive_info_struct, dev)
-
-/* List of controllers which cannot be hard reset on kexec with reset_devices */
-static u32 unresettable_controller[] = {
- 0x3223103C, /* Smart Array P800 */
- 0x3234103C, /* Smart Array P400 */
- 0x3235103C, /* Smart Array P400i */
- 0x3211103C, /* Smart Array E200i */
- 0x3212103C, /* Smart Array E200 */
- 0x3213103C, /* Smart Array E200i */
- 0x3214103C, /* Smart Array E200i */
- 0x3215103C, /* Smart Array E200i */
- 0x3237103C, /* Smart Array E500 */
- 0x323D103C, /* Smart Array P700m */
- 0x40800E11, /* Smart Array 5i */
- 0x409C0E11, /* Smart Array 6400 */
- 0x409D0E11, /* Smart Array 6400 EM */
- 0x40700E11, /* Smart Array 5300 */
- 0x40820E11, /* Smart Array 532 */
- 0x40830E11, /* Smart Array 5312 */
- 0x409A0E11, /* Smart Array 641 */
- 0x409B0E11, /* Smart Array 642 */
- 0x40910E11, /* Smart Array 6i */
-};
-
-/* List of controllers which cannot even be soft reset */
-static u32 soft_unresettable_controller[] = {
- 0x40800E11, /* Smart Array 5i */
- 0x40700E11, /* Smart Array 5300 */
- 0x40820E11, /* Smart Array 532 */
- 0x40830E11, /* Smart Array 5312 */
- 0x409A0E11, /* Smart Array 641 */
- 0x409B0E11, /* Smart Array 642 */
- 0x40910E11, /* Smart Array 6i */
- /* Exclude 640x boards. These are two pci devices in one slot
- * which share a battery backed cache module. One controls the
- * cache, the other accesses the cache through the one that controls
- * it. If we reset the one controlling the cache, the other will
- * likely not be happy. Just forbid resetting this conjoined mess.
- */
- 0x409C0E11, /* Smart Array 6400 */
- 0x409D0E11, /* Smart Array 6400 EM */
-};
-
-static int ctlr_is_hard_resettable(u32 board_id)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
- if (unresettable_controller[i] == board_id)
- return 0;
- return 1;
-}
-
-static int ctlr_is_soft_resettable(u32 board_id)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
- if (soft_unresettable_controller[i] == board_id)
- return 0;
- return 1;
-}
-
-static int ctlr_is_resettable(u32 board_id)
-{
- return ctlr_is_hard_resettable(board_id) ||
- ctlr_is_soft_resettable(board_id);
-}
-
-static ssize_t host_show_resettable(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ctlr_info *h = to_hba(dev);
-
- return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
-}
-static DEVICE_ATTR(resettable, S_IRUGO, host_show_resettable, NULL);
-
-static ssize_t host_store_rescan(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ctlr_info *h = to_hba(dev);
-
- add_to_scan_list(h);
- wake_up_process(cciss_scan_thread);
- wait_for_completion_interruptible(&h->scan_wait);
-
- return count;
-}
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-
-static ssize_t host_show_transport_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct ctlr_info *h = to_hba(dev);
-
- return snprintf(buf, 20, "%s\n",
- h->transMethod & CFGTBL_Trans_Performant ?
- "performant" : "simple");
-}
-static DEVICE_ATTR(transport_mode, S_IRUGO, host_show_transport_mode, NULL);
-
-static ssize_t dev_show_unique_id(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- __u8 sn[16];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(sn, drv->serial_no, sizeof(sn));
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, 16 * 2 + 2,
- "%02X%02X%02X%02X%02X%02X%02X%02X"
- "%02X%02X%02X%02X%02X%02X%02X%02X\n",
- sn[0], sn[1], sn[2], sn[3],
- sn[4], sn[5], sn[6], sn[7],
- sn[8], sn[9], sn[10], sn[11],
- sn[12], sn[13], sn[14], sn[15]);
-}
-static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
-
-static ssize_t dev_show_vendor(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- char vendor[VENDOR_LEN + 1];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(vendor, drv->vendor, VENDOR_LEN + 1);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
-}
-static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
-
-static ssize_t dev_show_model(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- char model[MODEL_LEN + 1];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(model, drv->model, MODEL_LEN + 1);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
-}
-static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
-
-static ssize_t dev_show_rev(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- char rev[REV_LEN + 1];
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring)
- ret = -EBUSY;
- else
- memcpy(rev, drv->rev, REV_LEN + 1);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (ret)
- return ret;
- else
- return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
-}
-static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
-
-static ssize_t cciss_show_lunid(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- unsigned long flags;
- unsigned char lunid[8];
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- if (!drv->heads) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -ENOTTY;
- }
- memcpy(lunid, drv->LunID, sizeof(lunid));
- spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- lunid[0], lunid[1], lunid[2], lunid[3],
- lunid[4], lunid[5], lunid[6], lunid[7]);
-}
-static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
-
-static ssize_t cciss_show_raid_level(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- int raid;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- raid = drv->raid_level;
- spin_unlock_irqrestore(&h->lock, flags);
- if (raid < 0 || raid > RAID_UNKNOWN)
- raid = RAID_UNKNOWN;
-
- return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
- raid_label[raid]);
-}
-static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
-
-static ssize_t cciss_show_usage_count(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- drive_info_struct *drv = to_drv(dev);
- struct ctlr_info *h = to_hba(drv->dev.parent);
- unsigned long flags;
- int count;
-
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- count = drv->usage_count;
- spin_unlock_irqrestore(&h->lock, flags);
- return snprintf(buf, 20, "%d\n", count);
-}
-static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
-
-static struct attribute *cciss_host_attrs[] = {
- &dev_attr_rescan.attr,
- &dev_attr_resettable.attr,
- &dev_attr_transport_mode.attr,
- NULL
-};
-
-static struct attribute_group cciss_host_attr_group = {
- .attrs = cciss_host_attrs,
-};
-
-static const struct attribute_group *cciss_host_attr_groups[] = {
- &cciss_host_attr_group,
- NULL
-};
-
-static struct device_type cciss_host_type = {
- .name = "cciss_host",
- .groups = cciss_host_attr_groups,
- .release = cciss_hba_release,
-};
-
-static struct attribute *cciss_dev_attrs[] = {
- &dev_attr_unique_id.attr,
- &dev_attr_model.attr,
- &dev_attr_vendor.attr,
- &dev_attr_rev.attr,
- &dev_attr_lunid.attr,
- &dev_attr_raid_level.attr,
- &dev_attr_usage_count.attr,
- NULL
-};
-
-static struct attribute_group cciss_dev_attr_group = {
- .attrs = cciss_dev_attrs,
-};
-
-static const struct attribute_group *cciss_dev_attr_groups[] = {
- &cciss_dev_attr_group,
- NULL
-};
-
-static struct device_type cciss_dev_type = {
- .name = "cciss_device",
- .groups = cciss_dev_attr_groups,
- .release = cciss_device_release,
-};
-
-static struct bus_type cciss_bus_type = {
- .name = "cciss",
-};
-
-/*
- * cciss_hba_release is called when the reference count
- * of h->dev goes to zero.
- */
-static void cciss_hba_release(struct device *dev)
-{
- /*
- * nothing to do, but need this to avoid a warning
- * about not having a release handler from lib/kref.c.
- */
-}
-
-/*
- * Initialize sysfs entry for each controller. This sets up and registers
- * the 'cciss#' directory for each individual controller under
- * /sys/bus/pci/devices/<dev>/.
- */
-static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
-{
- device_initialize(&h->dev);
- h->dev.type = &cciss_host_type;
- h->dev.bus = &cciss_bus_type;
- dev_set_name(&h->dev, "%s", h->devname);
- h->dev.parent = &h->pdev->dev;
-
- return device_add(&h->dev);
-}
-
-/*
- * Remove sysfs entries for an hba.
- */
-static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
-{
- device_del(&h->dev);
- put_device(&h->dev); /* final put. */
-}
-
-/* cciss_device_release is called when the reference count
- * of h->drv[x]->dev goes to zero.
- */
-static void cciss_device_release(struct device *dev)
-{
- drive_info_struct *drv = to_drv(dev);
- kfree(drv);
-}
-
-/*
- * Initialize sysfs for each logical drive. This sets up and registers
- * the 'c#d#' directory for each individual logical drive under
- * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from
- * /sys/block/cciss!c#d# to this entry.
- */
-static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
- int drv_index)
-{
- struct device *dev;
-
- if (h->drv[drv_index]->device_initialized)
- return 0;
-
- dev = &h->drv[drv_index]->dev;
- device_initialize(dev);
- dev->type = &cciss_dev_type;
- dev->bus = &cciss_bus_type;
- dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
- dev->parent = &h->dev;
- h->drv[drv_index]->device_initialized = 1;
- return device_add(dev);
-}
-
-/*
- * Remove sysfs entries for a logical drive.
- */
-static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
- int ctlr_exiting)
-{
- struct device *dev = &h->drv[drv_index]->dev;
-
- /* special case for c*d0, we only destroy it on controller exit */
- if (drv_index == 0 && !ctlr_exiting)
- return;
-
- device_del(dev);
- put_device(dev); /* the "final" put. */
- h->drv[drv_index] = NULL;
-}
-
-/*
- * For operations that cannot sleep, a command block is allocated at init,
- * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
- * which ones are free or in use.
- */
-static CommandList_struct *cmd_alloc(ctlr_info_t *h)
-{
- CommandList_struct *c;
- int i;
- u64bit temp64;
- dma_addr_t cmd_dma_handle, err_dma_handle;
-
- do {
- i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
- if (i == h->nr_cmds)
- return NULL;
- } while (test_and_set_bit(i, h->cmd_pool_bits) != 0);
- c = h->cmd_pool + i;
- memset(c, 0, sizeof(CommandList_struct));
- cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct);
- c->err_info = h->errinfo_pool + i;
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- err_dma_handle = h->errinfo_pool_dhandle
- + i * sizeof(ErrorInfo_struct);
- h->nr_allocs++;
-
- c->cmdindex = i;
-
- INIT_LIST_HEAD(&c->list);
- c->busaddr = (__u32) cmd_dma_handle;
- temp64.val = (__u64) err_dma_handle;
- c->ErrDesc.Addr.lower = temp64.val32.lower;
- c->ErrDesc.Addr.upper = temp64.val32.upper;
- c->ErrDesc.Len = sizeof(ErrorInfo_struct);
-
- c->ctlr = h->ctlr;
- return c;
-}
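cmd_alloc() above claims a slot from the pre-allocated command pool by finding the first clear bit and setting it atomically; the do/while loop retries if another CPU wins the race for the same bit. A minimal user-space sketch of the same allocate-from-bitmap pattern, with a hypothetical 64-slot pool and plain (non-atomic) bit operations standing in for find_first_zero_bit()/test_and_set_bit():

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 64
static uint64_t pool_bits;			/* one bit per command slot, 1 = in use */

static int slot_alloc(void)			/* returns a free slot index, or -1 */
{
	int i;

	for (i = 0; i < POOL_SIZE; i++) {
		if (!(pool_bits & (1ULL << i))) {
			pool_bits |= 1ULL << i;	/* mark slot busy */
			return i;
		}
	}
	return -1;
}

static void slot_free(int i)
{
	pool_bits &= ~(1ULL << i);		/* mirror of cmd_free()'s clear_bit() */
}

int main(void)
{
	int a = slot_alloc();
	int b = slot_alloc();

	slot_free(a);
	printf("%d %d %d\n", a, b, slot_alloc());	/* prints "0 1 0" */
	return 0;
}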
-
-/* allocate a command using pci_zalloc_consistent, used for ioctls,
- * etc., not for the main i/o path.
- */
-static CommandList_struct *cmd_special_alloc(ctlr_info_t *h)
-{
- CommandList_struct *c;
- u64bit temp64;
- dma_addr_t cmd_dma_handle, err_dma_handle;
-
- c = pci_zalloc_consistent(h->pdev, sizeof(CommandList_struct),
- &cmd_dma_handle);
- if (c == NULL)
- return NULL;
-
- c->cmdindex = -1;
-
- c->err_info = pci_zalloc_consistent(h->pdev, sizeof(ErrorInfo_struct),
- &err_dma_handle);
-
- if (c->err_info == NULL) {
- pci_free_consistent(h->pdev,
- sizeof(CommandList_struct), c, cmd_dma_handle);
- return NULL;
- }
-
- INIT_LIST_HEAD(&c->list);
- c->busaddr = (__u32) cmd_dma_handle;
- temp64.val = (__u64) err_dma_handle;
- c->ErrDesc.Addr.lower = temp64.val32.lower;
- c->ErrDesc.Addr.upper = temp64.val32.upper;
- c->ErrDesc.Len = sizeof(ErrorInfo_struct);
-
- c->ctlr = h->ctlr;
- return c;
-}
-
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c)
-{
- int i;
-
- i = c - h->cmd_pool;
- clear_bit(i, h->cmd_pool_bits);
- h->nr_frees++;
-}
-
-static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c)
-{
- u64bit temp64;
-
- temp64.val32.lower = c->ErrDesc.Addr.lower;
- temp64.val32.upper = c->ErrDesc.Addr.upper;
- pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct),
- c->err_info, (dma_addr_t) temp64.val);
- pci_free_consistent(h->pdev, sizeof(CommandList_struct), c,
- (dma_addr_t) cciss_tag_discard_error_bits(h, (u32) c->busaddr));
-}
-
-static inline ctlr_info_t *get_host(struct gendisk *disk)
-{
- return disk->queue->queuedata;
-}
-
-static inline drive_info_struct *get_drv(struct gendisk *disk)
-{
- return disk->private_data;
-}
-
-/*
- * Open. Make sure the device is really there.
- */
-static int cciss_open(struct block_device *bdev, fmode_t mode)
-{
- ctlr_info_t *h = get_host(bdev->bd_disk);
- drive_info_struct *drv = get_drv(bdev->bd_disk);
-
- dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name);
- if (drv->busy_configuring)
- return -EBUSY;
- /*
- * Root is allowed to open raw volume zero even if it's not configured
- * so array config can still work. Root is also allowed to open any
- * volume that has a LUN ID, so it can issue an IOCTL to reread the
- * disk information. I don't think I really like this,
- * but I'm already using way too many device nodes to claim another one
- * for "raw controller".
- */
- if (drv->heads == 0) {
- if (MINOR(bdev->bd_dev) != 0) { /* not node 0? */
- /* if not node 0 make sure it is a partition = 0 */
- if (MINOR(bdev->bd_dev) & 0x0f) {
- return -ENXIO;
- /* if it is, make sure we have a LUN ID */
- } else if (memcmp(drv->LunID, CTLR_LUNID,
- sizeof(drv->LunID))) {
- return -ENXIO;
- }
- }
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- }
- drv->usage_count++;
- h->usage_count++;
- return 0;
-}
-
-static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode)
-{
- int ret;
-
- mutex_lock(&cciss_mutex);
- ret = cciss_open(bdev, mode);
- mutex_unlock(&cciss_mutex);
-
- return ret;
-}
-
-/*
- * Close. Sync first.
- */
-static void cciss_release(struct gendisk *disk, fmode_t mode)
-{
- ctlr_info_t *h;
- drive_info_struct *drv;
-
- mutex_lock(&cciss_mutex);
- h = get_host(disk);
- drv = get_drv(disk);
- dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name);
- drv->usage_count--;
- h->usage_count--;
- mutex_unlock(&cciss_mutex);
-}
-
-#ifdef CONFIG_COMPAT
-
-static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg);
-static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg);
-
-static int cciss_compat_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- switch (cmd) {
- case CCISS_GETPCIINFO:
- case CCISS_GETINTINFO:
- case CCISS_SETINTINFO:
- case CCISS_GETNODENAME:
- case CCISS_SETNODENAME:
- case CCISS_GETHEARTBEAT:
- case CCISS_GETBUSTYPES:
- case CCISS_GETFIRMVER:
- case CCISS_GETDRIVVER:
- case CCISS_REVALIDVOLS:
- case CCISS_DEREGDISK:
- case CCISS_REGNEWDISK:
- case CCISS_REGNEWD:
- case CCISS_RESCANDISK:
- case CCISS_GETLUNINFO:
- return cciss_ioctl(bdev, mode, cmd, arg);
-
- case CCISS_PASSTHRU32:
- return cciss_ioctl32_passthru(bdev, mode, cmd, arg);
- case CCISS_BIG_PASSTHRU32:
- return cciss_ioctl32_big_passthru(bdev, mode, cmd, arg);
-
- default:
- return -ENOIOCTLCMD;
- }
-}
-
-static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- IOCTL32_Command_struct __user *arg32 =
- (IOCTL32_Command_struct __user *) arg;
- IOCTL_Command_struct arg64;
- IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
- int err;
- u32 cp;
-
- memset(&arg64, 0, sizeof(arg64));
- err = 0;
- err |=
- copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
- sizeof(arg64.LUN_info));
- err |=
- copy_from_user(&arg64.Request, &arg32->Request,
- sizeof(arg64.Request));
- err |=
- copy_from_user(&arg64.error_info, &arg32->error_info,
- sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
-
- if (err)
- return -EFAULT;
-
- err = cciss_ioctl(bdev, mode, CCISS_PASSTHRU, (unsigned long)p);
- if (err)
- return err;
- err |=
- copy_in_user(&arg32->error_info, &p->error_info,
- sizeof(arg32->error_info));
- if (err)
- return -EFAULT;
- return err;
-}
-
-static int cciss_ioctl32_big_passthru(struct block_device *bdev, fmode_t mode,
- unsigned cmd, unsigned long arg)
-{
- BIG_IOCTL32_Command_struct __user *arg32 =
- (BIG_IOCTL32_Command_struct __user *) arg;
- BIG_IOCTL_Command_struct arg64;
- BIG_IOCTL_Command_struct __user *p =
- compat_alloc_user_space(sizeof(arg64));
- int err;
- u32 cp;
-
- memset(&arg64, 0, sizeof(arg64));
- err = 0;
- err |=
- copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
- sizeof(arg64.LUN_info));
- err |=
- copy_from_user(&arg64.Request, &arg32->Request,
- sizeof(arg64.Request));
- err |=
- copy_from_user(&arg64.error_info, &arg32->error_info,
- sizeof(arg64.error_info));
- err |= get_user(arg64.buf_size, &arg32->buf_size);
- err |= get_user(arg64.malloc_size, &arg32->malloc_size);
- err |= get_user(cp, &arg32->buf);
- arg64.buf = compat_ptr(cp);
- err |= copy_to_user(p, &arg64, sizeof(arg64));
-
- if (err)
- return -EFAULT;
-
- err = cciss_ioctl(bdev, mode, CCISS_BIG_PASSTHRU, (unsigned long)p);
- if (err)
- return err;
- err |=
- copy_in_user(&arg32->error_info, &p->error_info,
- sizeof(arg32->error_info));
- if (err)
- return -EFAULT;
- return err;
-}
-#endif
-
-static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- drive_info_struct *drv = get_drv(bdev->bd_disk);
-
- if (!drv->cylinders)
- return -ENXIO;
-
- geo->heads = drv->heads;
- geo->sectors = drv->sectors;
- geo->cylinders = drv->cylinders;
- return 0;
-}
-
-static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c)
-{
- if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
- c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
- (void)check_for_unit_attention(h, c);
-}
-
-static int cciss_getpciinfo(ctlr_info_t *h, void __user *argp)
-{
- cciss_pci_info_struct pciinfo;
-
- if (!argp)
- return -EINVAL;
- pciinfo.domain = pci_domain_nr(h->pdev->bus);
- pciinfo.bus = h->pdev->bus->number;
- pciinfo.dev_fn = h->pdev->devfn;
- pciinfo.board_id = h->board_id;
- if (copy_to_user(argp, &pciinfo, sizeof(cciss_pci_info_struct)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getintinfo(ctlr_info_t *h, void __user *argp)
-{
- cciss_coalint_struct intinfo;
- unsigned long flags;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- intinfo.delay = readl(&h->cfgtable->HostWrite.CoalIntDelay);
- intinfo.count = readl(&h->cfgtable->HostWrite.CoalIntCount);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user
- (argp, &intinfo, sizeof(cciss_coalint_struct)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_setintinfo(ctlr_info_t *h, void __user *argp)
-{
- cciss_coalint_struct intinfo;
- unsigned long flags;
- int i;
-
- if (!argp)
- return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user(&intinfo, argp, sizeof(intinfo)))
- return -EFAULT;
- if ((intinfo.delay == 0) && (intinfo.count == 0))
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- /* Update the field, and then ring the doorbell */
- writel(intinfo.delay, &(h->cfgtable->HostWrite.CoalIntDelay));
- writel(intinfo.count, &(h->cfgtable->HostWrite.CoalIntCount));
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
-
- for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- udelay(1000); /* delay and try again */
- }
- spin_unlock_irqrestore(&h->lock, flags);
- if (i >= MAX_IOCTL_CONFIG_WAIT)
- return -EAGAIN;
- return 0;
-}
-
-static int cciss_getnodename(ctlr_info_t *h, void __user *argp)
-{
- NodeName_type NodeName;
- unsigned long flags;
- int i;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- for (i = 0; i < 16; i++)
- NodeName[i] = readb(&h->cfgtable->ServerName[i]);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user(argp, NodeName, sizeof(NodeName_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_setnodename(ctlr_info_t *h, void __user *argp)
-{
- NodeName_type NodeName;
- unsigned long flags;
- int i;
-
- if (!argp)
- return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (copy_from_user(NodeName, argp, sizeof(NodeName_type)))
- return -EFAULT;
- spin_lock_irqsave(&h->lock, flags);
- /* Update the field, and then ring the doorbell */
- for (i = 0; i < 16; i++)
- writeb(NodeName[i], &h->cfgtable->ServerName[i]);
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- udelay(1000); /* delay and try again */
- }
- spin_unlock_irqrestore(&h->lock, flags);
- if (i >= MAX_IOCTL_CONFIG_WAIT)
- return -EAGAIN;
- return 0;
-}
-
-static int cciss_getheartbeat(ctlr_info_t *h, void __user *argp)
-{
- Heartbeat_type heartbeat;
- unsigned long flags;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- heartbeat = readl(&h->cfgtable->HeartBeat);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user(argp, &heartbeat, sizeof(Heartbeat_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getbustypes(ctlr_info_t *h, void __user *argp)
-{
- BusTypes_type BusTypes;
- unsigned long flags;
-
- if (!argp)
- return -EINVAL;
- spin_lock_irqsave(&h->lock, flags);
- BusTypes = readl(&h->cfgtable->BusTypes);
- spin_unlock_irqrestore(&h->lock, flags);
- if (copy_to_user(argp, &BusTypes, sizeof(BusTypes_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getfirmver(ctlr_info_t *h, void __user *argp)
-{
- FirmwareVer_type firmware;
-
- if (!argp)
- return -EINVAL;
- memcpy(firmware, h->firm_ver, 4);
-
- if (copy_to_user
- (argp, firmware, sizeof(FirmwareVer_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getdrivver(ctlr_info_t *h, void __user *argp)
-{
- DriverVer_type DriverVer = DRIVER_VERSION;
-
- if (!argp)
- return -EINVAL;
- if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_getluninfo(ctlr_info_t *h,
- struct gendisk *disk, void __user *argp)
-{
- LogvolInfo_struct luninfo;
- drive_info_struct *drv = get_drv(disk);
-
- if (!argp)
- return -EINVAL;
- memcpy(&luninfo.LunID, drv->LunID, sizeof(luninfo.LunID));
- luninfo.num_opens = drv->usage_count;
- luninfo.num_parts = 0;
- if (copy_to_user(argp, &luninfo, sizeof(LogvolInfo_struct)))
- return -EFAULT;
- return 0;
-}
-
-static int cciss_passthru(ctlr_info_t *h, void __user *argp)
-{
- IOCTL_Command_struct iocommand;
- CommandList_struct *c;
- char *buff = NULL;
- u64bit temp64;
- DECLARE_COMPLETION_ONSTACK(wait);
-
- if (!argp)
- return -EINVAL;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- if (copy_from_user
- (&iocommand, argp, sizeof(IOCTL_Command_struct)))
- return -EFAULT;
- if ((iocommand.buf_size < 1) &&
- (iocommand.Request.Type.Direction != XFER_NONE)) {
- return -EINVAL;
- }
- if (iocommand.buf_size > 0) {
- buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
- if (buff == NULL)
- return -EFAULT;
- }
- if (iocommand.Request.Type.Direction == XFER_WRITE) {
- /* Copy the data into the buffer we created */
- if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
- }
- } else {
- memset(buff, 0, iocommand.buf_size);
- }
- c = cmd_special_alloc(h);
- if (!c) {
- kfree(buff);
- return -ENOMEM;
- }
- /* Fill in the command type */
- c->cmd_type = CMD_IOCTL_PEND;
- /* Fill in Command Header */
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- if (iocommand.buf_size > 0) { /* buffer to fill */
- c->Header.SGList = 1;
- c->Header.SGTotal = 1;
- } else { /* no buffers to fill */
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
- c->Header.LUN = iocommand.LUN_info;
-	/* use the kernel address of the cmd block for the tag */
- c->Header.Tag.lower = c->busaddr;
-
- /* Fill in Request block */
- c->Request = iocommand.Request;
-
- /* Fill in the scatter gather information */
- if (iocommand.buf_size > 0) {
- temp64.val = pci_map_single(h->pdev, buff,
- iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
- c->SG[0].Addr.lower = temp64.val32.lower;
- c->SG[0].Addr.upper = temp64.val32.upper;
- c->SG[0].Len = iocommand.buf_size;
- c->SG[0].Ext = 0; /* we are not chaining */
- }
- c->waiting = &wait;
-
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
-
- /* unlock the buffers from DMA */
- temp64.val32.lower = c->SG[0].Addr.lower;
- temp64.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, iocommand.buf_size,
- PCI_DMA_BIDIRECTIONAL);
- check_ioctl_unit_attention(h, c);
-
- /* Copy the error information out */
- iocommand.error_info = *(c->err_info);
- if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
- }
-
- if (iocommand.Request.Type.Direction == XFER_READ) {
- /* Copy the data out of the buffer we created */
- if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
- }
- }
- kfree(buff);
- cmd_special_free(h, c);
- return 0;
-}
-
-static int cciss_bigpassthru(ctlr_info_t *h, void __user *argp)
-{
- BIG_IOCTL_Command_struct *ioc;
- CommandList_struct *c;
- unsigned char **buff = NULL;
- int *buff_size = NULL;
- u64bit temp64;
- BYTE sg_used = 0;
- int status = 0;
- int i;
- DECLARE_COMPLETION_ONSTACK(wait);
- __u32 left;
- __u32 sz;
- BYTE __user *data_ptr;
-
- if (!argp)
- return -EINVAL;
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
- ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
- if (!ioc) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (copy_from_user(ioc, argp, sizeof(*ioc))) {
- status = -EFAULT;
- goto cleanup1;
- }
- if ((ioc->buf_size < 1) &&
- (ioc->Request.Type.Direction != XFER_NONE)) {
- status = -EINVAL;
- goto cleanup1;
- }
- /* Check kmalloc limits using all SGs */
- if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
- status = -EINVAL;
- goto cleanup1;
- }
- if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
- status = -EINVAL;
- goto cleanup1;
- }
- buff = kzalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
- if (!buff) {
- status = -ENOMEM;
- goto cleanup1;
- }
- buff_size = kmalloc(MAXSGENTRIES * sizeof(int), GFP_KERNEL);
- if (!buff_size) {
- status = -ENOMEM;
- goto cleanup1;
- }
- left = ioc->buf_size;
- data_ptr = ioc->buf;
- while (left) {
- sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
- buff_size[sg_used] = sz;
- buff[sg_used] = kmalloc(sz, GFP_KERNEL);
- if (buff[sg_used] == NULL) {
- status = -ENOMEM;
- goto cleanup1;
- }
- if (ioc->Request.Type.Direction == XFER_WRITE) {
- if (copy_from_user(buff[sg_used], data_ptr, sz)) {
- status = -EFAULT;
- goto cleanup1;
- }
- } else {
- memset(buff[sg_used], 0, sz);
- }
- left -= sz;
- data_ptr += sz;
- sg_used++;
- }
- c = cmd_special_alloc(h);
- if (!c) {
- status = -ENOMEM;
- goto cleanup1;
- }
- c->cmd_type = CMD_IOCTL_PEND;
- c->Header.ReplyQueue = 0;
- c->Header.SGList = sg_used;
- c->Header.SGTotal = sg_used;
- c->Header.LUN = ioc->LUN_info;
- c->Header.Tag.lower = c->busaddr;
-
- c->Request = ioc->Request;
- for (i = 0; i < sg_used; i++) {
- temp64.val = pci_map_single(h->pdev, buff[i], buff_size[i],
- PCI_DMA_BIDIRECTIONAL);
- c->SG[i].Addr.lower = temp64.val32.lower;
- c->SG[i].Addr.upper = temp64.val32.upper;
- c->SG[i].Len = buff_size[i];
- c->SG[i].Ext = 0; /* we are not chaining */
- }
- c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
- /* unlock the buffers from DMA */
- for (i = 0; i < sg_used; i++) {
- temp64.val32.lower = c->SG[i].Addr.lower;
- temp64.val32.upper = c->SG[i].Addr.upper;
- pci_unmap_single(h->pdev,
- (dma_addr_t) temp64.val, buff_size[i],
- PCI_DMA_BIDIRECTIONAL);
- }
- check_ioctl_unit_attention(h, c);
- /* Copy the error information out */
- ioc->error_info = *(c->err_info);
- if (copy_to_user(argp, ioc, sizeof(*ioc))) {
- cmd_special_free(h, c);
- status = -EFAULT;
- goto cleanup1;
- }
- if (ioc->Request.Type.Direction == XFER_READ) {
- /* Copy the data out of the buffer we created */
- BYTE __user *ptr = ioc->buf;
- for (i = 0; i < sg_used; i++) {
- if (copy_to_user(ptr, buff[i], buff_size[i])) {
- cmd_special_free(h, c);
- status = -EFAULT;
- goto cleanup1;
- }
- ptr += buff_size[i];
- }
- }
- cmd_special_free(h, c);
- status = 0;
-cleanup1:
- if (buff) {
- for (i = 0; i < sg_used; i++)
- kfree(buff[i]);
- kfree(buff);
- }
- kfree(buff_size);
- kfree(ioc);
- return status;
-}
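The while loop in cciss_bigpassthru() splits the user buffer into at most MAXSGENTRIES chunks of malloc_size bytes each, which is why buf_size is rejected earlier when it exceeds malloc_size * MAXSGENTRIES. A short sketch of that chunking arithmetic with hypothetical sizes:

#include <stdio.h>

int main(void)
{
	unsigned int buf_size = 70000;		/* hypothetical ioc->buf_size */
	unsigned int malloc_size = 32768;	/* hypothetical ioc->malloc_size */
	unsigned int left = buf_size;
	unsigned int sg_used = 0;

	while (left) {
		unsigned int sz = left > malloc_size ? malloc_size : left;

		printf("SG %u: %u bytes\n", sg_used, sz);	/* 32768, 32768, 4464 */
		left -= sz;
		sg_used++;
	}
	return 0;
}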
-
-static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- struct gendisk *disk = bdev->bd_disk;
- ctlr_info_t *h = get_host(disk);
- void __user *argp = (void __user *)arg;
-
- dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n",
- cmd, arg);
- switch (cmd) {
- case CCISS_GETPCIINFO:
- return cciss_getpciinfo(h, argp);
- case CCISS_GETINTINFO:
- return cciss_getintinfo(h, argp);
- case CCISS_SETINTINFO:
- return cciss_setintinfo(h, argp);
- case CCISS_GETNODENAME:
- return cciss_getnodename(h, argp);
- case CCISS_SETNODENAME:
- return cciss_setnodename(h, argp);
- case CCISS_GETHEARTBEAT:
- return cciss_getheartbeat(h, argp);
- case CCISS_GETBUSTYPES:
- return cciss_getbustypes(h, argp);
- case CCISS_GETFIRMVER:
- return cciss_getfirmver(h, argp);
- case CCISS_GETDRIVVER:
- return cciss_getdrivver(h, argp);
- case CCISS_DEREGDISK:
- case CCISS_REGNEWD:
- case CCISS_REVALIDVOLS:
- return rebuild_lun_table(h, 0, 1);
- case CCISS_GETLUNINFO:
- return cciss_getluninfo(h, disk, argp);
- case CCISS_PASSTHRU:
- return cciss_passthru(h, argp);
- case CCISS_BIG_PASSTHRU:
- return cciss_bigpassthru(h, argp);
-
- /* scsi_cmd_blk_ioctl handles these, below, though some are not */
- /* very meaningful for cciss. SG_IO is the main one people want. */
-
- case SG_GET_VERSION_NUM:
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_GET_RESERVED_SIZE:
- case SG_SET_RESERVED_SIZE:
- case SG_EMULATED_HOST:
- case SG_IO:
- case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
-
- /* scsi_cmd_blk_ioctl would normally handle these, below, but */
- /* they aren't a good fit for cciss, as CD-ROMs are */
- /* not supported, and we don't have any bus/target/lun */
- /* which we present to the kernel. */
-
- case CDROM_SEND_PACKET:
- case CDROMCLOSETRAY:
- case CDROMEJECT:
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- default:
- return -ENOTTY;
- }
-}
-
-static void cciss_check_queues(ctlr_info_t *h)
-{
- int start_queue = h->next_to_run;
- int i;
-
- /* check to see if we have maxed out the number of commands that can
- * be placed on the queue. If so then exit. We do this check here
- * in case the interrupt we serviced was from an ioctl and did not
- * free any new commands.
- */
- if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds)
- return;
-
- /* We have room on the queue for more commands. Now we need to queue
- * them up. We will also keep track of the next queue to run so
- * that every queue gets a chance to be started first.
- */
- for (i = 0; i < h->highest_lun + 1; i++) {
- int curr_queue = (start_queue + i) % (h->highest_lun + 1);
- /* make sure the disk has been added and the drive is real
- * because this can be called from the middle of init_one.
- */
- if (!h->drv[curr_queue])
- continue;
- if (!(h->drv[curr_queue]->queue) ||
- !(h->drv[curr_queue]->heads))
- continue;
- blk_start_queue(h->gendisk[curr_queue]->queue);
-
- /* check to see if we have maxed out the number of commands
- * that can be placed on the queue.
- */
- if ((find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds)) == h->nr_cmds) {
- if (curr_queue == start_queue) {
- h->next_to_run =
- (start_queue + 1) % (h->highest_lun + 1);
- break;
- } else {
- h->next_to_run = curr_queue;
- break;
- }
- }
- }
-}
-
-static void cciss_softirq_done(struct request *rq)
-{
- CommandList_struct *c = rq->completion_data;
- ctlr_info_t *h = hba[c->ctlr];
- SGDescriptor_struct *curr_sg = c->SG;
- u64bit temp64;
- unsigned long flags;
- int i, ddir;
- int sg_index = 0;
-
- if (c->Request.Type.Direction == XFER_READ)
- ddir = PCI_DMA_FROMDEVICE;
- else
- ddir = PCI_DMA_TODEVICE;
-
- /* command did not need to be retried */
- /* unmap the DMA mapping for all the scatter gather elements */
- for (i = 0; i < c->Header.SGList; i++) {
- if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
- cciss_unmap_sg_chain_block(h, c);
- /* Point to the next block */
- curr_sg = h->cmd_sg_list[c->cmdindex];
- sg_index = 0;
- }
- temp64.val32.lower = curr_sg[sg_index].Addr.lower;
- temp64.val32.upper = curr_sg[sg_index].Addr.upper;
- pci_unmap_page(h->pdev, temp64.val, curr_sg[sg_index].Len,
- ddir);
- ++sg_index;
- }
-
- dev_dbg(&h->pdev->dev, "Done with %p\n", rq);
-
- /* set the residual count for pc requests */
- if (blk_rq_is_passthrough(rq))
- scsi_req(rq)->resid_len = c->err_info->ResidualCnt;
- blk_end_request_all(rq, scsi_req(rq)->result ?
- BLK_STS_IOERR : BLK_STS_OK);
-
- spin_lock_irqsave(&h->lock, flags);
- cmd_free(h, c);
- cciss_check_queues(h);
- spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
- unsigned char scsi3addr[], uint32_t log_unit)
-{
- memcpy(scsi3addr, h->drv[log_unit]->LunID,
- sizeof(h->drv[log_unit]->LunID));
-}
-
-/* This function gets the SCSI vendor, model, and revision of a logical drive
- * via the inquiry page 0. Model, vendor, and rev are set to empty strings if
- * they cannot be read.
- */
-static void cciss_get_device_descr(ctlr_info_t *h, int logvol,
- char *vendor, char *model, char *rev)
-{
- int rc;
- InquiryData_struct *inq_buf;
- unsigned char scsi3addr[8];
-
- *vendor = '\0';
- *model = '\0';
- *rev = '\0';
-
- inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (!inq_buf)
- return;
-
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0,
- scsi3addr, TYPE_CMD);
- if (rc == IO_OK) {
- memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN);
- vendor[VENDOR_LEN] = '\0';
- memcpy(model, &inq_buf->data_byte[16], MODEL_LEN);
- model[MODEL_LEN] = '\0';
- memcpy(rev, &inq_buf->data_byte[32], REV_LEN);
- rev[REV_LEN] = '\0';
- }
-
- kfree(inq_buf);
- return;
-}
-
-/* This function gets the serial number of a logical drive via
- * inquiry page 0x83. Serial no. is 16 bytes. If the serial
- * number cannot be had, for whatever reason, 16 bytes of 0xff
- * are returned instead.
- */
-static void cciss_get_serial_no(ctlr_info_t *h, int logvol,
- unsigned char *serial_no, int buflen)
-{
-#define PAGE_83_INQ_BYTES 64
- int rc;
- unsigned char *buf;
- unsigned char scsi3addr[8];
-
- if (buflen > 16)
- buflen = 16;
- memset(serial_no, 0xff, buflen);
- buf = kzalloc(PAGE_83_INQ_BYTES, GFP_KERNEL);
- if (!buf)
- return;
- memset(serial_no, 0, buflen);
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- rc = sendcmd_withirq(h, CISS_INQUIRY, buf,
- PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD);
- if (rc == IO_OK)
- memcpy(serial_no, &buf[8], buflen);
- kfree(buf);
- return;
-}
-
-static void cciss_initialize_rq(struct request *rq)
-{
- struct scsi_request *sreq = blk_mq_rq_to_pdu(rq);
-
- scsi_req_init(sreq);
-}
-
-/*
- * cciss_add_disk sets up the block device queue for a logical drive
- */
-static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
- int drv_index)
-{
- disk->queue = blk_alloc_queue(GFP_KERNEL);
- if (!disk->queue)
- goto init_queue_failure;
-
- disk->queue->cmd_size = sizeof(struct scsi_request);
- disk->queue->request_fn = do_cciss_request;
- disk->queue->initialize_rq_fn = cciss_initialize_rq;
- disk->queue->queue_lock = &h->lock;
- queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue);
- if (blk_init_allocated_queue(disk->queue) < 0)
- goto cleanup_queue;
-
- sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
- disk->major = h->major;
- disk->first_minor = drv_index << NWD_SHIFT;
- disk->fops = &cciss_fops;
- if (cciss_create_ld_sysfs_entry(h, drv_index))
- goto cleanup_queue;
- disk->private_data = h->drv[drv_index];
-
- /* Set up queue information */
- blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
-
- /* This is a hardware imposed limit. */
- blk_queue_max_segments(disk->queue, h->maxsgentries);
-
- blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors);
-
- blk_queue_softirq_done(disk->queue, cciss_softirq_done);
-
- disk->queue->queuedata = h;
-
- blk_queue_logical_block_size(disk->queue,
- h->drv[drv_index]->block_size);
-
- /* Make sure all queue data is written out before */
- /* setting h->drv[drv_index]->queue, as setting this */
- /* allows the interrupt handler to start the queue */
- wmb();
- h->drv[drv_index]->queue = disk->queue;
- device_add_disk(&h->drv[drv_index]->dev, disk);
- return 0;
-
-cleanup_queue:
- blk_cleanup_queue(disk->queue);
- disk->queue = NULL;
-init_queue_failure:
- return -1;
-}
-
-/* This function will check the usage_count of the drive to be updated/added.
- * If the usage_count is zero and it is a heretofore unknown drive, or if
- * the drive's capacity, geometry, or serial number has changed,
- * then the drive information will be updated and the disk will be
- * re-registered with the kernel. If these conditions don't hold,
- * then it will be left alone for the next reboot. The exception to this
- * is disk 0 which will always be left registered with the kernel since it
- * is also the controller node. Any changes to disk 0 will show up on
- * the next reboot.
- */
-static void cciss_update_drive_info(ctlr_info_t *h, int drv_index,
- int first_time, int via_ioctl)
-{
- struct gendisk *disk;
- InquiryData_struct *inq_buff = NULL;
- unsigned int block_size;
- sector_t total_size;
- unsigned long flags = 0;
- int ret = 0;
- drive_info_struct *drvinfo;
-
- /* Get information about the disk and modify the driver structure */
- inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
- if (inq_buff == NULL || drvinfo == NULL)
- goto mem_msg;
-
- /* testing to see if 16-byte CDBs are already being used */
- if (h->cciss_read == CCISS_READ_16) {
- cciss_read_capacity_16(h, drv_index,
- &total_size, &block_size);
-
- } else {
- cciss_read_capacity(h, drv_index, &total_size, &block_size);
- /* if read_capacity returns all F's this volume is >2TB */
- /* in size so we switch to 16-byte CDB's for all */
- /* read/write ops */
- if (total_size == 0xFFFFFFFFULL) {
- cciss_read_capacity_16(h, drv_index,
- &total_size, &block_size);
- h->cciss_read = CCISS_READ_16;
- h->cciss_write = CCISS_WRITE_16;
- } else {
- h->cciss_read = CCISS_READ_10;
- h->cciss_write = CCISS_WRITE_10;
- }
- }
-
- cciss_geometry_inquiry(h, drv_index, total_size, block_size,
- inq_buff, drvinfo);
- drvinfo->block_size = block_size;
- drvinfo->nr_blocks = total_size + 1;
-
- cciss_get_device_descr(h, drv_index, drvinfo->vendor,
- drvinfo->model, drvinfo->rev);
- cciss_get_serial_no(h, drv_index, drvinfo->serial_no,
- sizeof(drvinfo->serial_no));
- /* Save the lunid in case we deregister the disk, below. */
- memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
- sizeof(drvinfo->LunID));
-
- /* Is it the same disk we already know, and nothing's changed? */
- if (h->drv[drv_index]->raid_level != -1 &&
- ((memcmp(drvinfo->serial_no,
- h->drv[drv_index]->serial_no, 16) == 0) &&
- drvinfo->block_size == h->drv[drv_index]->block_size &&
- drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
- drvinfo->heads == h->drv[drv_index]->heads &&
- drvinfo->sectors == h->drv[drv_index]->sectors &&
- drvinfo->cylinders == h->drv[drv_index]->cylinders))
- /* The disk is unchanged, nothing to update */
- goto freeret;
-
- /* If we get here it's not the same disk, or something's changed,
-	 * so we need to deregister it, and re-register it, if it's not
- * in use.
- * If the disk already exists then deregister it before proceeding
-	 * (unless it's the first disk, for the controller node).
- */
- if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
- dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index);
- spin_lock_irqsave(&h->lock, flags);
- h->drv[drv_index]->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
-
- /* deregister_disk sets h->drv[drv_index]->queue = NULL
- * which keeps the interrupt handler from starting
- * the queue.
- */
- ret = deregister_disk(h, drv_index, 0, via_ioctl);
- }
-
- /* If the disk is in use return */
- if (ret)
- goto freeret;
-
- /* Save the new information from cciss_geometry_inquiry
- * and serial number inquiry. If the disk was deregistered
- * above, then h->drv[drv_index] will be NULL.
- */
- if (h->drv[drv_index] == NULL) {
- drvinfo->device_initialized = 0;
- h->drv[drv_index] = drvinfo;
- drvinfo = NULL; /* so it won't be freed below. */
- } else {
- /* special case for cxd0 */
- h->drv[drv_index]->block_size = drvinfo->block_size;
- h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
- h->drv[drv_index]->heads = drvinfo->heads;
- h->drv[drv_index]->sectors = drvinfo->sectors;
- h->drv[drv_index]->cylinders = drvinfo->cylinders;
- h->drv[drv_index]->raid_level = drvinfo->raid_level;
- memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
- memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
- VENDOR_LEN + 1);
- memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
- memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
- }
-
- ++h->num_luns;
- disk = h->gendisk[drv_index];
- set_capacity(disk, h->drv[drv_index]->nr_blocks);
-
- /* If it's not disk 0 (drv_index != 0)
- * or if it was disk 0, but there was previously
- * no actual corresponding configured logical drive
-	 * (raid_level == -1) then we want to update the
- * logical drive's information.
- */
- if (drv_index || first_time) {
- if (cciss_add_disk(h, disk, drv_index) != 0) {
- cciss_free_gendisk(h, drv_index);
- cciss_free_drive_info(h, drv_index);
- dev_warn(&h->pdev->dev, "could not update disk %d\n",
- drv_index);
- --h->num_luns;
- }
- }
-
-freeret:
- kfree(inq_buff);
- kfree(drvinfo);
- return;
-mem_msg:
- dev_err(&h->pdev->dev, "out of memory\n");
- goto freeret;
-}
-
-/* This function will find the first index of the controller's drive array
- * that has a null drv pointer, allocate the drive info struct, and
- * return that index. This is where new drives will be added.
- * If the index to be returned is greater than the highest_lun index for
- * the controller, then highest_lun is set to this new index.
- * If there are no available indexes, or if the allocation fails, then -1
- * is returned. "controller_node" is used to know if this is a real
- * logical drive, or just the controller node, which determines whether it
- * counts towards highest_lun.
- */
-static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
-{
- int i;
- drive_info_struct *drv;
-
- /* Search for an empty slot for our drive info */
- for (i = 0; i < CISS_MAX_LUN; i++) {
-
- /* if not cxd0 case, and it's occupied, skip it. */
- if (h->drv[i] && i != 0)
- continue;
- /*
- * If it's cxd0 case, and drv is alloc'ed already, and a
- * disk is configured there, skip it.
- */
- if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
- continue;
-
- /*
- * We've found an empty slot. Update highest_lun
- * provided this isn't just the fake cxd0 controller node.
- */
- if (i > h->highest_lun && !controller_node)
- h->highest_lun = i;
-
- /* If adding a real disk at cxd0, and it's already alloc'ed */
- if (i == 0 && h->drv[i] != NULL)
- return i;
-
- /*
- * Found an empty slot, not already alloc'ed. Allocate it.
- * Mark it with raid_level == -1, so we know it's new later on.
- */
- drv = kzalloc(sizeof(*drv), GFP_KERNEL);
- if (!drv)
- return -1;
- drv->raid_level = -1; /* so we know it's new */
- h->drv[i] = drv;
- return i;
- }
- return -1;
-}
-
-static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
-{
- kfree(h->drv[drv_index]);
- h->drv[drv_index] = NULL;
-}
-
-static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
-{
- put_disk(h->gendisk[drv_index]);
- h->gendisk[drv_index] = NULL;
-}
-
-/* cciss_add_gendisk finds a free hba[]->drv structure
- * and allocates a gendisk if needed, and sets the lunid
- * in the drvinfo structure. It returns the index into
- * the ->drv[] array, or -1 if none are free.
- * controller_node indicates whether highest_lun should
- * count this disk, or if it's only being added to provide
- * a means to talk to the controller in case no logical
- * drives have yet been configured.
- */
-static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
- int controller_node)
-{
- int drv_index;
-
- drv_index = cciss_alloc_drive_info(h, controller_node);
- if (drv_index == -1)
- return -1;
-
-	/* Check if the gendisk needs to be allocated */
- if (!h->gendisk[drv_index]) {
- h->gendisk[drv_index] =
- alloc_disk(1 << NWD_SHIFT);
- if (!h->gendisk[drv_index]) {
- dev_err(&h->pdev->dev,
- "could not allocate a new disk %d\n",
- drv_index);
- goto err_free_drive_info;
- }
- }
- memcpy(h->drv[drv_index]->LunID, lunid,
- sizeof(h->drv[drv_index]->LunID));
- if (cciss_create_ld_sysfs_entry(h, drv_index))
- goto err_free_disk;
- /* Don't need to mark this busy because nobody */
- /* else knows about this disk yet to contend */
- /* for access to it. */
- h->drv[drv_index]->busy_configuring = 0;
- wmb();
- return drv_index;
-
-err_free_disk:
- cciss_free_gendisk(h, drv_index);
-err_free_drive_info:
- cciss_free_drive_info(h, drv_index);
- return -1;
-}
-
-/* This is for the special case of a controller which
- * has no logical drives. In this case, we still need
- * to register a disk so the controller can be accessed
- * by the Array Config Utility.
- */
-static void cciss_add_controller_node(ctlr_info_t *h)
-{
- struct gendisk *disk;
- int drv_index;
-
- if (h->gendisk[0] != NULL) /* already did this? Then bail. */
- return;
-
- drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
- if (drv_index == -1)
- goto error;
- h->drv[drv_index]->block_size = 512;
- h->drv[drv_index]->nr_blocks = 0;
- h->drv[drv_index]->heads = 0;
- h->drv[drv_index]->sectors = 0;
- h->drv[drv_index]->cylinders = 0;
- h->drv[drv_index]->raid_level = -1;
- memset(h->drv[drv_index]->serial_no, 0, 16);
- disk = h->gendisk[drv_index];
- if (cciss_add_disk(h, disk, drv_index) == 0)
- return;
- cciss_free_gendisk(h, drv_index);
- cciss_free_drive_info(h, drv_index);
-error:
- dev_warn(&h->pdev->dev, "could not add disk 0.\n");
- return;
-}
-
-/* This function will add and remove logical drives from the logical
- * drive array of the controller and maintain persistence of ordering
- * so that mount points are preserved until the next reboot. This allows
- * for the removal of logical drives in the middle of the drive array
- * without a re-ordering of those drives.
- * INPUT
- * h = The controller to perform the operations on
- */
-static int rebuild_lun_table(ctlr_info_t *h, int first_time,
- int via_ioctl)
-{
- int num_luns;
- ReportLunData_struct *ld_buff = NULL;
- int return_code;
- int listlength = 0;
- int i;
- int drv_found;
- int drv_index = 0;
- unsigned char lunid[8] = CTLR_LUNID;
- unsigned long flags;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /* Set busy_configuring flag for this operation */
- spin_lock_irqsave(&h->lock, flags);
- if (h->busy_configuring) {
- spin_unlock_irqrestore(&h->lock, flags);
- return -EBUSY;
- }
- h->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
-
- ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
- if (ld_buff == NULL)
- goto mem_msg;
-
- return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff,
- sizeof(ReportLunData_struct),
- 0, CTLR_LUNID, TYPE_CMD);
-
- if (return_code == IO_OK)
- listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength);
- else { /* reading number of logical volumes failed */
- dev_warn(&h->pdev->dev,
- "report logical volume command failed\n");
- listlength = 0;
- goto freeret;
- }
-
- num_luns = listlength / 8; /* 8 bytes per entry */
- if (num_luns > CISS_MAX_LUN) {
- num_luns = CISS_MAX_LUN;
- dev_warn(&h->pdev->dev, "more luns configured"
- " on controller than can be handled by"
- " this driver.\n");
- }
-
- if (num_luns == 0)
- cciss_add_controller_node(h);
-
- /* Compare controller drive array to driver's drive array
- * to see if any drives are missing on the controller due
- * to action of Array Config Utility (user deletes drive)
- * and deregister logical drives which have disappeared.
- */
- for (i = 0; i <= h->highest_lun; i++) {
- int j;
- drv_found = 0;
-
- /* skip holes in the array from already deleted drives */
- if (h->drv[i] == NULL)
- continue;
-
- for (j = 0; j < num_luns; j++) {
- memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
- if (memcmp(h->drv[i]->LunID, lunid,
- sizeof(lunid)) == 0) {
- drv_found = 1;
- break;
- }
- }
- if (!drv_found) {
- /* Deregister it from the OS, it's gone. */
- spin_lock_irqsave(&h->lock, flags);
- h->drv[i]->busy_configuring = 1;
- spin_unlock_irqrestore(&h->lock, flags);
- return_code = deregister_disk(h, i, 1, via_ioctl);
- if (h->drv[i] != NULL)
- h->drv[i]->busy_configuring = 0;
- }
- }
-
- /* Compare controller drive array to driver's drive array.
- * Check for updates in the drive information and any new drives
- * on the controller due to ACU adding logical drives, or changing
- * a logical drive's size, etc. Reregister any new/changed drives
- */
- for (i = 0; i < num_luns; i++) {
- int j;
-
- drv_found = 0;
-
- memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
- /* Find if the LUN is already in the drive array
- * of the driver. If so then update its info
- * if not in use. If it does not exist then find
- * the first free index and add it.
- */
- for (j = 0; j <= h->highest_lun; j++) {
- if (h->drv[j] != NULL &&
- memcmp(h->drv[j]->LunID, lunid,
- sizeof(h->drv[j]->LunID)) == 0) {
- drv_index = j;
- drv_found = 1;
- break;
- }
- }
-
- /* check if the drive was found already in the array */
- if (!drv_found) {
- drv_index = cciss_add_gendisk(h, lunid, 0);
- if (drv_index == -1)
- goto freeret;
- }
- cciss_update_drive_info(h, drv_index, first_time, via_ioctl);
- } /* end for */
-
-freeret:
- kfree(ld_buff);
- h->busy_configuring = 0;
- /* We return -1 here to tell the ACU that we have registered/updated
- * all of the drives that we can and to keep it from calling us
- * additional times.
- */
- return -1;
-mem_msg:
- dev_err(&h->pdev->dev, "out of memory\n");
- h->busy_configuring = 0;
- goto freeret;
-}
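-
-/* Illustrative note (not part of the original cciss code):
- * rebuild_lun_table() is a two-pass reconcile.  Pass 1 walks the driver's
- * h->drv[0..highest_lun] and deregisters any entry whose LunID no longer
- * appears in the controller's CISS_REPORT_LOG response.  Pass 2 walks the
- * reported LUN list and, for each LUN, either updates the matching
- * h->drv[] slot or allocates a new one via cciss_add_gendisk().  Holes
- * left by pass 1 are deliberately kept so that surviving drives keep
- * their existing /dev/cciss/cXdY names.
- */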
-
-static void cciss_clear_drive_info(drive_info_struct *drive_info)
-{
- /* zero out the disk size info */
- drive_info->nr_blocks = 0;
- drive_info->block_size = 0;
- drive_info->heads = 0;
- drive_info->sectors = 0;
- drive_info->cylinders = 0;
- drive_info->raid_level = -1;
- memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
- memset(drive_info->model, 0, sizeof(drive_info->model));
- memset(drive_info->rev, 0, sizeof(drive_info->rev));
- memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
- /*
- * don't clear the LUNID though, we need to remember which
- * one this one is.
- */
-}
-
-/* This function will deregister the disk and its queue from the
- * kernel.  It must be called with the controller lock held and the
- * drv structure's busy_configuring flag set.  Its parameters are:
- *
- * h = The controller the disk belongs to.
- * drv_index = Index into h->drv[] and h->gendisk[] identifying the
- *	disk to be deregistered and its associated drive_info_struct.
- * clear_all = This flag determines whether or not the disk information
- *	is going to be completely cleared out and the highest_lun
- *	reset.  Sometimes we want to clear out information about
- *	the disk in preparation for re-adding it.  In this case
- *	the highest_lun should be left unchanged and the LunID
- *	should not be cleared.
- * via_ioctl = This indicates whether we've reached this path via ioctl.
- *	This affects the maximum usage count allowed before c0d0 may be
- *	messed with.  If this path is reached via ioctl(), then the
- *	max_usage_count will be 1, as the process calling ioctl() has
- *	got to have the device open.  If we get here via sysfs, then
- *	the max usage count will be zero.
- */
-static int deregister_disk(ctlr_info_t *h, int drv_index,
- int clear_all, int via_ioctl)
-{
- int i;
- struct gendisk *disk;
- drive_info_struct *drv;
- int recalculate_highest_lun;
-
- if (!capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- drv = h->drv[drv_index];
- disk = h->gendisk[drv_index];
-
-	/* make sure logical volume is NOT in use */
- if (clear_all || (h->gendisk[0] == disk)) {
- if (drv->usage_count > via_ioctl)
- return -EBUSY;
- } else if (drv->usage_count > 0)
- return -EBUSY;
-
- recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
-
- /* invalidate the devices and deregister the disk. If it is disk
-	 * zero do not deregister it but just zero out its values. This
- * allows us to delete disk zero but keep the controller registered.
- */
- if (h->gendisk[0] != disk) {
- struct request_queue *q = disk->queue;
- if (disk->flags & GENHD_FL_UP) {
- cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
- del_gendisk(disk);
- }
- if (q)
- blk_cleanup_queue(q);
- /* If clear_all is set then we are deleting the logical
- * drive, not just refreshing its info. For drives
- * other than disk 0 we will call put_disk. We do not
- * do this for disk 0 as we need it to be able to
- * configure the controller.
- */
-		if (clear_all) {
-			/* This isn't pretty, but we need to find the
-			 * disk in our array and NULL out the pointer.
-			 * This is so that we will call alloc_disk if
-			 * this index is used again later.
-			 */
-			for (i = 0; i < CISS_MAX_LUN; i++) {
- if (h->gendisk[i] == disk) {
- h->gendisk[i] = NULL;
- break;
- }
- }
- put_disk(disk);
- }
- } else {
- set_capacity(disk, 0);
- cciss_clear_drive_info(drv);
- }
-
- --h->num_luns;
-
-	/* if it was the last disk, find the new highest lun */
- if (clear_all && recalculate_highest_lun) {
- int newhighest = -1;
- for (i = 0; i <= h->highest_lun; i++) {
- /* if the disk has size > 0, it is available */
- if (h->drv[i] && h->drv[i]->heads)
- newhighest = i;
- }
- h->highest_lun = newhighest;
- }
- return 0;
-}
-
-static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
- size_t size, __u8 page_code, unsigned char *scsi3addr,
- int cmd_type)
-{
- u64bit buff_dma_handle;
- int status = IO_OK;
-
- c->cmd_type = CMD_IOCTL_PEND;
- c->Header.ReplyQueue = 0;
- if (buff != NULL) {
- c->Header.SGList = 1;
- c->Header.SGTotal = 1;
- } else {
- c->Header.SGList = 0;
- c->Header.SGTotal = 0;
- }
- c->Header.Tag.lower = c->busaddr;
- memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
-
- c->Request.Type.Type = cmd_type;
- if (cmd_type == TYPE_CMD) {
- switch (cmd) {
- case CISS_INQUIRY:
- /* are we trying to read a vital product page */
- if (page_code != 0) {
- c->Request.CDB[1] = 0x01;
- c->Request.CDB[2] = page_code;
- }
- c->Request.CDBLen = 6;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = CISS_INQUIRY;
- c->Request.CDB[4] = size & 0xFF;
- break;
- case CISS_REPORT_LOG:
- case CISS_REPORT_PHYS:
-			/* Talking to the controller, so it's a physical command:
-			   mode = 00, target = 0.  Nothing to write.
-			 */
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
- c->Request.CDB[7] = (size >> 16) & 0xFF;
- c->Request.CDB[8] = (size >> 8) & 0xFF;
- c->Request.CDB[9] = size & 0xFF;
- break;
-
- case CCISS_READ_CAPACITY:
- c->Request.CDBLen = 10;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
- case CCISS_READ_CAPACITY_16:
- c->Request.CDBLen = 16;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_READ;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- c->Request.CDB[1] = 0x10;
- c->Request.CDB[10] = (size >> 24) & 0xFF;
- c->Request.CDB[11] = (size >> 16) & 0xFF;
- c->Request.CDB[12] = (size >> 8) & 0xFF;
- c->Request.CDB[13] = size & 0xFF;
- break;
- case CCISS_CACHE_FLUSH:
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = BMIC_WRITE;
- c->Request.CDB[6] = BMIC_CACHE_FLUSH;
- c->Request.CDB[7] = (size >> 8) & 0xFF;
- c->Request.CDB[8] = size & 0xFF;
- break;
- case TEST_UNIT_READY:
- c->Request.CDBLen = 6;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_NONE;
- c->Request.Timeout = 0;
- break;
- default:
-			dev_warn(&h->pdev->dev, "Unknown Command 0x%02x\n", cmd);
- return IO_ERROR;
- }
- } else if (cmd_type == TYPE_MSG) {
- switch (cmd) {
- case CCISS_ABORT_MSG:
- c->Request.CDBLen = 12;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd; /* abort */
- c->Request.CDB[1] = 0; /* abort a command */
- /* buff contains the tag of the command to abort */
- memcpy(&c->Request.CDB[4], buff, 8);
- break;
- case CCISS_RESET_MSG:
- c->Request.CDBLen = 16;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_NONE;
- c->Request.Timeout = 0;
- memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
- c->Request.CDB[0] = cmd; /* reset */
- c->Request.CDB[1] = CCISS_RESET_TYPE_TARGET;
- break;
- case CCISS_NOOP_MSG:
- c->Request.CDBLen = 1;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = XFER_WRITE;
- c->Request.Timeout = 0;
- c->Request.CDB[0] = cmd;
- break;
- default:
- dev_warn(&h->pdev->dev,
- "unknown message type %d\n", cmd);
- return IO_ERROR;
- }
- } else {
- dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
- return IO_ERROR;
- }
- /* Fill in the scatter gather information */
- if (size > 0) {
- buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
- buff, size,
- PCI_DMA_BIDIRECTIONAL);
- c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
- c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
- c->SG[0].Len = size;
- c->SG[0].Ext = 0; /* we are not chaining */
- }
- return status;
-}
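-
-/* Illustrative note (not part of the original cciss code): for a TYPE_CMD
- * fill, fill_cmd() builds the CDB and a single SG entry for the supplied
- * buffer.  E.g. a CISS_REPORT_LOG with a hypothetical 0x400-byte buffer
- * ends up with CDBLen = 12, CDB[0] = CISS_REPORT_LOG and the allocation
- * length big-endian in CDB[6..9] = 00 00 04 00, while buff is mapped
- * PCI_DMA_BIDIRECTIONAL into SG[0].  Callers such as rebuild_lun_table()
- * rely on exactly this encoding when passing sizeof(ReportLunData_struct)
- * as the size.
- */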
-
-static int cciss_send_reset(ctlr_info_t *h, unsigned char *scsi3addr,
- u8 reset_type)
-{
- CommandList_struct *c;
- int return_status;
-
- c = cmd_alloc(h);
- if (!c)
- return -ENOMEM;
- return_status = fill_cmd(h, c, CCISS_RESET_MSG, NULL, 0, 0,
- CTLR_LUNID, TYPE_MSG);
- c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
- if (return_status != IO_OK) {
- cmd_special_free(h, c);
- return return_status;
- }
- c->waiting = NULL;
- enqueue_cmd_and_start_io(h, c);
- /* Don't wait for completion, the reset won't complete. Don't free
- * the command either. This is the last command we will send before
- * re-initializing everything, so it doesn't matter and won't leak.
- */
- return 0;
-}
-
-static int check_target_status(ctlr_info_t *h, CommandList_struct *c)
-{
- switch (c->err_info->ScsiStatus) {
- case SAM_STAT_GOOD:
- return IO_OK;
- case SAM_STAT_CHECK_CONDITION:
- switch (0xf & c->err_info->SenseInfo[2]) {
- case 0: return IO_OK; /* no sense */
- case 1: return IO_OK; /* recovered error */
- default:
- if (check_for_unit_attention(h, c))
- return IO_NEEDS_RETRY;
- dev_warn(&h->pdev->dev, "cmd 0x%02x "
- "check condition, sense key = 0x%02x\n",
- c->Request.CDB[0], c->err_info->SenseInfo[2]);
- }
- break;
- default:
-		dev_warn(&h->pdev->dev, "cmd 0x%02x "
-			"scsi status = 0x%02x\n",
- c->Request.CDB[0], c->err_info->ScsiStatus);
- break;
- }
- return IO_ERROR;
-}
-
-static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c)
-{
- int return_status = IO_OK;
-
- if (c->err_info->CommandStatus == CMD_SUCCESS)
- return IO_OK;
-
- switch (c->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- return_status = check_target_status(h, c);
- break;
- case CMD_DATA_UNDERRUN:
- case CMD_DATA_OVERRUN:
- /* expected for inquiry and report lun commands */
- break;
- case CMD_INVALID:
- dev_warn(&h->pdev->dev, "cmd 0x%02x is "
- "reported invalid\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_PROTOCOL_ERR:
- dev_warn(&h->pdev->dev, "cmd 0x%02x has "
- "protocol error\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_HARDWARE_ERR:
- dev_warn(&h->pdev->dev, "cmd 0x%02x had "
- " hardware error\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_CONNECTION_LOST:
- dev_warn(&h->pdev->dev, "cmd 0x%02x had "
- "connection lost\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_ABORTED:
- dev_warn(&h->pdev->dev, "cmd 0x%02x was "
- "aborted\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_ABORT_FAILED:
- dev_warn(&h->pdev->dev, "cmd 0x%02x reports "
- "abort failed\n", c->Request.CDB[0]);
- return_status = IO_ERROR;
- break;
- case CMD_UNSOLICITED_ABORT:
- dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n",
- c->Request.CDB[0]);
- return_status = IO_NEEDS_RETRY;
- break;
- case CMD_UNABORTABLE:
- dev_warn(&h->pdev->dev, "cmd unabortable\n");
- return_status = IO_ERROR;
- break;
- default:
- dev_warn(&h->pdev->dev, "cmd 0x%02x returned "
- "unknown status %x\n", c->Request.CDB[0],
- c->err_info->CommandStatus);
- return_status = IO_ERROR;
- }
- return return_status;
-}
-
-static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
- int attempt_retry)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- u64bit buff_dma_handle;
- int return_status = IO_OK;
-
-resend_cmd2:
- c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
-
- wait_for_completion(&wait);
-
- if (c->err_info->CommandStatus == 0 || !attempt_retry)
- goto command_done;
-
- return_status = process_sendcmd_error(h, c);
-
- if (return_status == IO_NEEDS_RETRY &&
- c->retry_count < MAX_CMD_RETRIES) {
- dev_warn(&h->pdev->dev, "retrying 0x%02x\n",
- c->Request.CDB[0]);
- c->retry_count++;
- /* erase the old error information */
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
- return_status = IO_OK;
- reinit_completion(&wait);
- goto resend_cmd2;
- }
-
-command_done:
- /* unlock the buffers from DMA */
- buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
- buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
- c->SG[0].Len, PCI_DMA_BIDIRECTIONAL);
- return return_status;
-}
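-
-/* Illustrative note (not part of the original cciss code): the resend_cmd2
- * loop above retries only errors classified IO_NEEDS_RETRY by
- * process_sendcmd_error() (in practice, unit attentions), up to
- * MAX_CMD_RETRIES times.  Each retry zeroes the stale err_info and
- * re-initializes the on-stack completion before re-queueing the same
- * CommandList_struct, so the DMA mapping set up by fill_cmd() is reused
- * and only unmapped once, at command_done.
- */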
-
-static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size,
- __u8 page_code, unsigned char scsi3addr[],
- int cmd_type)
-{
- CommandList_struct *c;
- int return_status;
-
- c = cmd_special_alloc(h);
- if (!c)
- return -ENOMEM;
- return_status = fill_cmd(h, c, cmd, buff, size, page_code,
- scsi3addr, cmd_type);
- if (return_status == IO_OK)
- return_status = sendcmd_withirq_core(h, c, 1);
-
- cmd_special_free(h, c);
- return return_status;
-}
-
-static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol,
- sector_t total_size,
- unsigned int block_size,
- InquiryData_struct *inq_buff,
- drive_info_struct *drv)
-{
- int return_code;
- unsigned long t;
- unsigned char scsi3addr[8];
-
- memset(inq_buff, 0, sizeof(InquiryData_struct));
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
- sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD);
- if (return_code == IO_OK) {
- if (inq_buff->data_byte[8] == 0xFF) {
- dev_warn(&h->pdev->dev,
- "reading geometry failed, volume "
- "does not support reading geometry\n");
- drv->heads = 255;
- drv->sectors = 32; /* Sectors per track */
- drv->cylinders = total_size + 1;
- drv->raid_level = RAID_UNKNOWN;
- } else {
- drv->heads = inq_buff->data_byte[6];
- drv->sectors = inq_buff->data_byte[7];
- drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
- drv->cylinders += inq_buff->data_byte[5];
- drv->raid_level = inq_buff->data_byte[8];
- }
- drv->block_size = block_size;
- drv->nr_blocks = total_size + 1;
- t = drv->heads * drv->sectors;
- if (t > 1) {
- sector_t real_size = total_size + 1;
- unsigned long rem = sector_div(real_size, t);
- if (rem)
- real_size++;
- drv->cylinders = real_size;
- }
- } else { /* Get geometry failed */
- dev_warn(&h->pdev->dev, "reading geometry failed\n");
- }
-}
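-
-/* Illustrative note (not part of the original cciss code): when
- * heads * sectors > 1, the cylinder count above is effectively
- * ceil(nr_blocks / (heads * sectors)).  For example, with the fallback
- * geometry heads = 255, sectors = 32 and total_size + 1 = 1,000,000
- * blocks, t = 8160; sector_div() gives 122 with a remainder, so
- * drv->cylinders = 123.
- */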
-
-static void
-cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size,
- unsigned int *block_size)
-{
- ReadCapdata_struct *buf;
- int return_code;
- unsigned char scsi3addr[8];
-
- buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
- if (!buf) {
- dev_warn(&h->pdev->dev, "out of memory\n");
- return;
- }
-
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf,
- sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD);
- if (return_code == IO_OK) {
- *total_size = be32_to_cpu(*(__be32 *) buf->total_size);
- *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
- } else { /* read capacity command failed */
- dev_warn(&h->pdev->dev, "read capacity failed\n");
- *total_size = 0;
- *block_size = BLOCK_SIZE;
- }
- kfree(buf);
-}
-
-static void cciss_read_capacity_16(ctlr_info_t *h, int logvol,
- sector_t *total_size, unsigned int *block_size)
-{
- ReadCapdata_struct_16 *buf;
- int return_code;
- unsigned char scsi3addr[8];
-
- buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL);
- if (!buf) {
- dev_warn(&h->pdev->dev, "out of memory\n");
- return;
- }
-
- log_unit_to_scsi3addr(h, scsi3addr, logvol);
- return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16,
- buf, sizeof(ReadCapdata_struct_16),
- 0, scsi3addr, TYPE_CMD);
- if (return_code == IO_OK) {
- *total_size = be64_to_cpu(*(__be64 *) buf->total_size);
- *block_size = be32_to_cpu(*(__be32 *) buf->block_size);
- } else { /* read capacity command failed */
- dev_warn(&h->pdev->dev, "read capacity failed\n");
- *total_size = 0;
- *block_size = BLOCK_SIZE;
- }
- dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n",
- (unsigned long long)*total_size+1, *block_size);
- kfree(buf);
-}
-
-static int cciss_revalidate(struct gendisk *disk)
-{
- ctlr_info_t *h = get_host(disk);
- drive_info_struct *drv = get_drv(disk);
- int logvol;
- int FOUND = 0;
- unsigned int block_size;
- sector_t total_size;
- InquiryData_struct *inq_buff = NULL;
-
- for (logvol = 0; logvol <= h->highest_lun; logvol++) {
- if (!h->drv[logvol])
- continue;
- if (memcmp(h->drv[logvol]->LunID, drv->LunID,
- sizeof(drv->LunID)) == 0) {
- FOUND = 1;
- break;
- }
- }
-
- if (!FOUND)
- return 1;
-
- inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (inq_buff == NULL) {
- dev_warn(&h->pdev->dev, "out of memory\n");
- return 1;
- }
- if (h->cciss_read == CCISS_READ_10) {
- cciss_read_capacity(h, logvol,
- &total_size, &block_size);
- } else {
- cciss_read_capacity_16(h, logvol,
- &total_size, &block_size);
- }
- cciss_geometry_inquiry(h, logvol, total_size, block_size,
- inq_buff, drv);
-
- blk_queue_logical_block_size(drv->queue, drv->block_size);
- set_capacity(disk, drv->nr_blocks);
-
- kfree(inq_buff);
- return 0;
-}
-
-/*
- * Map (physical) PCI mem into (virtual) kernel space
- */
-static void __iomem *remap_pci_mem(ulong base, ulong size)
-{
- ulong page_base = ((ulong) base) & PAGE_MASK;
- ulong page_offs = ((ulong) base) - page_base;
- void __iomem *page_remapped = ioremap(page_base, page_offs + size);
-
- return page_remapped ? (page_remapped + page_offs) : NULL;
-}
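-
-/* Illustrative note (not part of the original cciss code): the BAR address
- * handed to remap_pci_mem() need not be page aligned, so the mapping is
- * done on the containing page.  With 4K pages and a hypothetical base of
- * 0xfd5f0c50 and size 0x250, page_base is 0xfd5f0000, page_offs is 0xc50,
- * ioremap() covers 0xc50 + 0x250 bytes, and the caller gets back
- * page_remapped + 0xc50.
- */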
-
-/*
- * Takes jobs off the request Q and sends them to the hardware, then
- * puts them on the completion Q to wait for completion.
- */
-static void start_io(ctlr_info_t *h)
-{
- CommandList_struct *c;
-
- while (!list_empty(&h->reqQ)) {
- c = list_entry(h->reqQ.next, CommandList_struct, list);
- /* can't do anything if fifo is full */
- if ((h->access.fifo_full(h))) {
- dev_warn(&h->pdev->dev, "fifo full\n");
- break;
- }
-
- /* Get the first entry from the Request Q */
- removeQ(c);
- h->Qdepth--;
-
-		/* Tell the controller to execute the command */
- h->access.submit_command(h, c);
-
- /* Put job onto the completed Q */
- addQ(&h->cmpQ, c);
- }
-}
-
-/* Assumes that h->lock is held. */
-/* Zeros out the error record and then resends the command back */
-/* to the controller */
-static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
-{
- /* erase the old error information */
- memset(c->err_info, 0, sizeof(ErrorInfo_struct));
-
- /* add it to software queue and then send it to the controller */
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- start_io(h);
-}
-
-static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
- unsigned int msg_byte, unsigned int host_byte,
- unsigned int driver_byte)
-{
- /* inverse of macros in scsi.h */
- return (scsi_status_byte & 0xff) |
- ((msg_byte & 0xff) << 8) |
- ((host_byte & 0xff) << 16) |
- ((driver_byte & 0xff) << 24);
-}
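-
-/* Illustrative note (not part of the original cciss code):
- * make_status_bytes() packs the SCSI result word as
- * status | (msg << 8) | (host << 16) | (driver << 24), the inverse of the
- * scsi.h accessor macros.  Assuming the usual scsi.h values
- * (SAM_STAT_CHECK_CONDITION = 0x02, DID_ERROR = 0x07, DRIVER_OK = 0x00),
- * make_status_bytes(SAM_STAT_CHECK_CONDITION, 0, DID_ERROR, DRIVER_OK)
- * evaluates to 0x00070002.
- */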
-
-static inline int evaluate_target_status(ctlr_info_t *h,
- CommandList_struct *cmd, int *retry_cmd)
-{
- unsigned char sense_key;
- unsigned char status_byte, msg_byte, host_byte, driver_byte;
- int error_value;
-
- *retry_cmd = 0;
- /* If we get in here, it means we got "target status", that is, scsi status */
- status_byte = cmd->err_info->ScsiStatus;
- driver_byte = DRIVER_OK;
- msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */
-
- if (blk_rq_is_passthrough(cmd->rq))
- host_byte = DID_PASSTHROUGH;
- else
- host_byte = DID_OK;
-
- error_value = make_status_bytes(status_byte, msg_byte,
- host_byte, driver_byte);
-
- if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) {
- if (!blk_rq_is_passthrough(cmd->rq))
- dev_warn(&h->pdev->dev, "cmd %p "
- "has SCSI Status 0x%x\n",
- cmd, cmd->err_info->ScsiStatus);
- return error_value;
- }
-
- /* check the sense key */
- sense_key = 0xf & cmd->err_info->SenseInfo[2];
- /* no status or recovered error */
- if (((sense_key == 0x0) || (sense_key == 0x1)) &&
- !blk_rq_is_passthrough(cmd->rq))
- error_value = 0;
-
- if (check_for_unit_attention(h, cmd)) {
- *retry_cmd = !blk_rq_is_passthrough(cmd->rq);
- return 0;
- }
-
- /* Not SG_IO or similar? */
- if (!blk_rq_is_passthrough(cmd->rq)) {
- if (error_value != 0)
- dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION"
- " sense key = 0x%x\n", cmd, sense_key);
- return error_value;
- }
-
- scsi_req(cmd->rq)->sense_len = cmd->err_info->SenseLen;
- return error_value;
-}
-
-/* checks the status of the job and calls complete buffers to mark all
- * buffers for the completed job. Note that this function does not need
- * to hold the hba/queue lock.
- */
-static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
- int timeout)
-{
- int retry_cmd = 0;
- struct request *rq = cmd->rq;
- struct scsi_request *sreq = scsi_req(rq);
-
- sreq->result = 0;
-
- if (timeout)
- sreq->result = make_status_bytes(0, 0, 0, DRIVER_TIMEOUT);
-
- if (cmd->err_info->CommandStatus == 0) /* no error has occurred */
- goto after_error_processing;
-
- switch (cmd->err_info->CommandStatus) {
- case CMD_TARGET_STATUS:
- sreq->result = evaluate_target_status(h, cmd, &retry_cmd);
- break;
- case CMD_DATA_UNDERRUN:
- if (!blk_rq_is_passthrough(cmd->rq)) {
- dev_warn(&h->pdev->dev, "cmd %p has"
- " completed with data underrun "
- "reported\n", cmd);
- }
- break;
- case CMD_DATA_OVERRUN:
- if (!blk_rq_is_passthrough(cmd->rq))
- dev_warn(&h->pdev->dev, "cciss: cmd %p has"
- " completed with data overrun "
- "reported\n", cmd);
- break;
- case CMD_INVALID:
- dev_warn(&h->pdev->dev, "cciss: cmd %p is "
- "reported invalid\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_PROTOCOL_ERR:
- dev_warn(&h->pdev->dev, "cciss: cmd %p has "
- "protocol error\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_HARDWARE_ERR:
- dev_warn(&h->pdev->dev, "cciss: cmd %p had "
- " hardware error\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_CONNECTION_LOST:
- dev_warn(&h->pdev->dev, "cciss: cmd %p had "
- "connection lost\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_ABORTED:
- dev_warn(&h->pdev->dev, "cciss: cmd %p was "
- "aborted\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ABORT);
- break;
- case CMD_ABORT_FAILED:
- dev_warn(&h->pdev->dev, "cciss: cmd %p reports "
- "abort failed\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_UNSOLICITED_ABORT:
- dev_warn(&h->pdev->dev, "cciss%d: unsolicited "
- "abort %p\n", h->ctlr, cmd);
- if (cmd->retry_count < MAX_CMD_RETRIES) {
- retry_cmd = 1;
- dev_warn(&h->pdev->dev, "retrying %p\n", cmd);
- cmd->retry_count++;
- } else
- dev_warn(&h->pdev->dev,
- "%p retried too many times\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ABORT);
- break;
- case CMD_TIMEOUT:
-		dev_warn(&h->pdev->dev, "cmd %p timed out\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- case CMD_UNABORTABLE:
- dev_warn(&h->pdev->dev, "cmd %p unabortable\n", cmd);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- break;
- default:
- dev_warn(&h->pdev->dev, "cmd %p returned "
- "unknown status %x\n", cmd,
- cmd->err_info->CommandStatus);
- sreq->result = make_status_bytes(SAM_STAT_GOOD,
- cmd->err_info->CommandStatus, DRIVER_OK,
- blk_rq_is_passthrough(cmd->rq) ?
- DID_PASSTHROUGH : DID_ERROR);
- }
-
-after_error_processing:
-
- /* We need to return this command */
- if (retry_cmd) {
- resend_cciss_cmd(h, cmd);
- return;
- }
- cmd->rq->completion_data = cmd;
- blk_complete_request(cmd->rq);
-}
-
-static inline u32 cciss_tag_contains_index(u32 tag)
-{
-#define DIRECT_LOOKUP_BIT 0x10
- return tag & DIRECT_LOOKUP_BIT;
-}
-
-static inline u32 cciss_tag_to_index(u32 tag)
-{
-#define DIRECT_LOOKUP_SHIFT 5
- return tag >> DIRECT_LOOKUP_SHIFT;
-}
-
-static inline u32 cciss_tag_discard_error_bits(ctlr_info_t *h, u32 tag)
-{
-#define CCISS_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
-#define CCISS_SIMPLE_ERROR_BITS 0x03
- if (likely(h->transMethod & CFGTBL_Trans_Performant))
- return tag & ~CCISS_PERF_ERROR_BITS;
- return tag & ~CCISS_SIMPLE_ERROR_BITS;
-}
-
-static inline void cciss_mark_tag_indexed(u32 *tag)
-{
- *tag |= DIRECT_LOOKUP_BIT;
-}
-
-static inline void cciss_set_tag_index(u32 *tag, u32 index)
-{
- *tag |= (index << DIRECT_LOOKUP_SHIFT);
-}
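-
-/* Illustrative note (not part of the original cciss code): tag layout as
- * these helpers read it.  Bit 4 (DIRECT_LOOKUP_BIT) marks a tag whose
- * command-pool index sits in bits 5 and up; the bits below are left to the
- * controller for error reporting (5 bits in performant mode, 2 in simple
- * mode).  For cmdindex 7, do_cciss_request() builds
- * tag = (7 << 5) | 0x10 = 0xf0, and cciss_tag_to_index(0xf0) recovers 7.
- */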
-
-/*
- * Get a request and submit it to the controller.
- */
-static void do_cciss_request(struct request_queue *q)
-{
- ctlr_info_t *h = q->queuedata;
- CommandList_struct *c;
- sector_t start_blk;
- int seg;
- struct request *creq;
- u64bit temp64;
- struct scatterlist *tmp_sg;
- SGDescriptor_struct *curr_sg;
- drive_info_struct *drv;
- int i, dir;
- int sg_index = 0;
- int chained = 0;
-
- queue:
- creq = blk_peek_request(q);
- if (!creq)
- goto startio;
-
- BUG_ON(creq->nr_phys_segments > h->maxsgentries);
-
- c = cmd_alloc(h);
- if (!c)
- goto full;
-
- blk_start_request(creq);
-
- tmp_sg = h->scatter_list[c->cmdindex];
- spin_unlock_irq(q->queue_lock);
-
- c->cmd_type = CMD_RWREQ;
- c->rq = creq;
-
- /* fill in the request */
- drv = creq->rq_disk->private_data;
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- /* got command from pool, so use the command block index instead */
- /* for direct lookups. */
- /* The first 2 bits are reserved for controller error reporting. */
- cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex);
- cciss_mark_tag_indexed(&c->Header.Tag.lower);
- memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
- c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */
- c->Request.Type.Type = TYPE_CMD; /* It is a command. */
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction =
- (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE;
- c->Request.Timeout = 0; /* Don't time out */
- c->Request.CDB[0] =
- (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
- start_blk = blk_rq_pos(creq);
- dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n",
- (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
- sg_init_table(tmp_sg, h->maxsgentries);
- seg = blk_rq_map_sg(q, creq, tmp_sg);
-
- /* get the DMA records for the setup */
- if (c->Request.Type.Direction == XFER_READ)
- dir = PCI_DMA_FROMDEVICE;
- else
- dir = PCI_DMA_TODEVICE;
-
- curr_sg = c->SG;
- sg_index = 0;
- chained = 0;
-
- for (i = 0; i < seg; i++) {
- if (((sg_index+1) == (h->max_cmd_sgentries)) &&
- !chained && ((seg - i) > 1)) {
- /* Point to next chain block. */
- curr_sg = h->cmd_sg_list[c->cmdindex];
- sg_index = 0;
- chained = 1;
- }
- curr_sg[sg_index].Len = tmp_sg[i].length;
- temp64.val = (__u64) pci_map_page(h->pdev, sg_page(&tmp_sg[i]),
- tmp_sg[i].offset,
- tmp_sg[i].length, dir);
- if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
- dev_warn(&h->pdev->dev,
- "%s: error mapping page for DMA\n", __func__);
- scsi_req(creq)->result =
- make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK,
- DID_SOFT_ERROR);
- cmd_free(h, c);
- return;
- }
- curr_sg[sg_index].Addr.lower = temp64.val32.lower;
- curr_sg[sg_index].Addr.upper = temp64.val32.upper;
- curr_sg[sg_index].Ext = 0; /* we are not chaining */
- ++sg_index;
- }
- if (chained) {
- if (cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
- (seg - (h->max_cmd_sgentries - 1)) *
- sizeof(SGDescriptor_struct))) {
- scsi_req(creq)->result =
- make_status_bytes(SAM_STAT_GOOD, 0, DRIVER_OK,
- DID_SOFT_ERROR);
- cmd_free(h, c);
- return;
- }
- }
-
- /* track how many SG entries we are using */
- if (seg > h->maxSG)
- h->maxSG = seg;
-
- dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments "
- "chained[%d]\n",
- blk_rq_sectors(creq), seg, chained);
-
- c->Header.SGTotal = seg + chained;
- if (seg <= h->max_cmd_sgentries)
- c->Header.SGList = c->Header.SGTotal;
- else
- c->Header.SGList = h->max_cmd_sgentries;
- set_performant_mode(h, c);
-
- switch (req_op(creq)) {
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- if(h->cciss_read == CCISS_READ_10) {
- c->Request.CDB[1] = 0;
- c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */
- c->Request.CDB[3] = (start_blk >> 16) & 0xff;
- c->Request.CDB[4] = (start_blk >> 8) & 0xff;
- c->Request.CDB[5] = start_blk & 0xff;
- c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */
- c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
- c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
- c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
- } else {
- u32 upper32 = upper_32_bits(start_blk);
-
- c->Request.CDBLen = 16;
- c->Request.CDB[1]= 0;
- c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */
- c->Request.CDB[3]= (upper32 >> 16) & 0xff;
- c->Request.CDB[4]= (upper32 >> 8) & 0xff;
- c->Request.CDB[5]= upper32 & 0xff;
- c->Request.CDB[6]= (start_blk >> 24) & 0xff;
- c->Request.CDB[7]= (start_blk >> 16) & 0xff;
- c->Request.CDB[8]= (start_blk >> 8) & 0xff;
- c->Request.CDB[9]= start_blk & 0xff;
- c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
- c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
- c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
- c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
- c->Request.CDB[14] = c->Request.CDB[15] = 0;
- }
- break;
- case REQ_OP_SCSI_IN:
- case REQ_OP_SCSI_OUT:
- c->Request.CDBLen = scsi_req(creq)->cmd_len;
- memcpy(c->Request.CDB, scsi_req(creq)->cmd, BLK_MAX_CDB);
- scsi_req(creq)->sense = c->err_info->SenseInfo;
- break;
- default:
-		dev_warn(&h->pdev->dev, "bad request type %d\n",
-			req_op(creq));
- BUG();
- }
-
- spin_lock_irq(q->queue_lock);
-
- addQ(&h->reqQ, c);
- h->Qdepth++;
- if (h->Qdepth > h->maxQsinceinit)
- h->maxQsinceinit = h->Qdepth;
-
- goto queue;
-full:
- blk_stop_queue(q);
-startio:
-	/* We will already have the driver lock here so no need
-	 * to lock it.
-	 */
- start_io(h);
-}
-
-static inline unsigned long get_next_completion(ctlr_info_t *h)
-{
- return h->access.command_completed(h);
-}
-
-static inline int interrupt_pending(ctlr_info_t *h)
-{
- return h->access.intr_pending(h);
-}
-
-static inline long interrupt_not_for_us(ctlr_info_t *h)
-{
- return ((h->access.intr_pending(h) == 0) ||
- (h->interrupts_enabled == 0));
-}
-
-static inline int bad_tag(ctlr_info_t *h, u32 tag_index,
- u32 raw_tag)
-{
- if (unlikely(tag_index >= h->nr_cmds)) {
- dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
- return 1;
- }
- return 0;
-}
-
-static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c,
- u32 raw_tag)
-{
- removeQ(c);
- if (likely(c->cmd_type == CMD_RWREQ))
- complete_command(h, c, 0);
- else if (c->cmd_type == CMD_IOCTL_PEND)
- complete(c->waiting);
-#ifdef CONFIG_CISS_SCSI_TAPE
- else if (c->cmd_type == CMD_SCSI)
- complete_scsi_command(c, 0, raw_tag);
-#endif
-}
-
-static inline u32 next_command(ctlr_info_t *h)
-{
- u32 a;
-
- if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
- return h->access.command_completed(h);
-
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- a = *(h->reply_pool_head); /* Next cmd in ring buffer */
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- a = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
- return a;
-}
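-
-/* Illustrative note (not part of the original cciss code): in performant
- * mode the reply ring is consumed with a parity trick.  An entry at
- * reply_pool_head is only taken when its low bit equals
- * reply_pool_wraparound; the pool starts zeroed with the flag at 1, so
- * never-written slots read as FIFO_EMPTY, and the flag toggles each time
- * the head wraps so entries left over from the previous pass are likewise
- * ignored.
- */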
-
-/* process completion of an indexed ("direct lookup") command */
-static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag)
-{
- u32 tag_index;
- CommandList_struct *c;
-
- tag_index = cciss_tag_to_index(raw_tag);
- if (bad_tag(h, tag_index, raw_tag))
- return next_command(h);
- c = h->cmd_pool + tag_index;
- finish_cmd(h, c, raw_tag);
- return next_command(h);
-}
-
-/* process completion of a non-indexed command */
-static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag)
-{
- CommandList_struct *c = NULL;
- __u32 busaddr_masked, tag_masked;
-
- tag_masked = cciss_tag_discard_error_bits(h, raw_tag);
- list_for_each_entry(c, &h->cmpQ, list) {
- busaddr_masked = cciss_tag_discard_error_bits(h, c->busaddr);
- if (busaddr_masked == tag_masked) {
- finish_cmd(h, c, raw_tag);
- return next_command(h);
- }
- }
- bad_tag(h, h->nr_cmds + 1, raw_tag);
- return next_command(h);
-}
-
-/* Some controllers, like p400, will give us one interrupt
- * after a soft reset, even if we turned interrupts off.
- * Only need to check for this in the cciss_xxx_discard_completions
- * functions.
- */
-static int ignore_bogus_interrupt(ctlr_info_t *h)
-{
- if (likely(!reset_devices))
- return 0;
-
- if (likely(h->interrupts_enabled))
- return 0;
-
- dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
- "(known firmware bug.) Ignoring.\n");
-
- return 1;
-}
-
-static irqreturn_t cciss_intx_discard_completions(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- if (ignore_bogus_interrupt(h))
- return IRQ_NONE;
-
- if (interrupt_not_for_us(h))
- return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
- while (interrupt_pending(h)) {
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY)
- raw_tag = next_command(h);
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t cciss_msix_discard_completions(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- if (ignore_bogus_interrupt(h))
- return IRQ_NONE;
-
- spin_lock_irqsave(&h->lock, flags);
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY)
- raw_tag = next_command(h);
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t do_cciss_intx(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- if (interrupt_not_for_us(h))
- return IRQ_NONE;
- spin_lock_irqsave(&h->lock, flags);
- while (interrupt_pending(h)) {
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY) {
- if (cciss_tag_contains_index(raw_tag))
- raw_tag = process_indexed_cmd(h, raw_tag);
- else
- raw_tag = process_nonindexed_cmd(h, raw_tag);
- }
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-/* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never
- * check the interrupt pending register because it is not set.
- */
-static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id)
-{
- ctlr_info_t *h = dev_id;
- unsigned long flags;
- u32 raw_tag;
-
- spin_lock_irqsave(&h->lock, flags);
- raw_tag = get_next_completion(h);
- while (raw_tag != FIFO_EMPTY) {
- if (cciss_tag_contains_index(raw_tag))
- raw_tag = process_indexed_cmd(h, raw_tag);
- else
- raw_tag = process_nonindexed_cmd(h, raw_tag);
- }
- spin_unlock_irqrestore(&h->lock, flags);
- return IRQ_HANDLED;
-}
-
-/**
- * add_to_scan_list() - add controller to rescan queue
- * @h: Pointer to the controller.
- *
- * Adds the controller to the rescan queue if not already on the queue.
- *
- * returns 1 if added to the queue, 0 if skipped (could be on the
- * queue already, or the controller could be initializing or shutting
- * down).
- **/
-static int add_to_scan_list(struct ctlr_info *h)
-{
- struct ctlr_info *test_h;
- int found = 0;
- int ret = 0;
-
- if (h->busy_initializing)
- return 0;
-
- if (!mutex_trylock(&h->busy_shutting_down))
- return 0;
-
- mutex_lock(&scan_mutex);
- list_for_each_entry(test_h, &scan_q, scan_list) {
- if (test_h == h) {
- found = 1;
- break;
- }
- }
- if (!found && !h->busy_scanning) {
- reinit_completion(&h->scan_wait);
- list_add_tail(&h->scan_list, &scan_q);
- ret = 1;
- }
- mutex_unlock(&scan_mutex);
- mutex_unlock(&h->busy_shutting_down);
-
- return ret;
-}
-
-/**
- * remove_from_scan_list() - remove controller from rescan queue
- * @h: Pointer to the controller.
- *
- * Removes the controller from the rescan queue if present. Blocks if
- * the controller is currently conducting a rescan. The controller
- * can be in one of three states:
- * 1. Doesn't need a scan
- * 2. On the scan list, but not scanning yet (we remove it)
- * 3. Busy scanning (and not on the list). In this case we want to wait for
- * the scan to complete to make sure the scanning thread for this
- * controller is completely idle.
- **/
-static void remove_from_scan_list(struct ctlr_info *h)
-{
- struct ctlr_info *test_h, *tmp_h;
-
- mutex_lock(&scan_mutex);
- list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
- if (test_h == h) { /* state 2. */
- list_del(&h->scan_list);
- complete_all(&h->scan_wait);
- mutex_unlock(&scan_mutex);
- return;
- }
- }
- if (h->busy_scanning) { /* state 3. */
- mutex_unlock(&scan_mutex);
- wait_for_completion(&h->scan_wait);
- } else { /* state 1, nothing to do. */
- mutex_unlock(&scan_mutex);
- }
-}
-
-/**
- * scan_thread() - kernel thread used to rescan controllers
- * @data: Ignored.
- *
- * A kernel thread used to scan for drive topology changes on
- * controllers. The thread processes only one controller at a time
- * using a queue. Controllers are added to the queue using
- * add_to_scan_list() and removed from the queue either after done
- * processing or using remove_from_scan_list().
- *
- * returns 0.
- **/
-static int scan_thread(void *data)
-{
- struct ctlr_info *h;
-
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- schedule();
- if (kthread_should_stop())
- break;
-
- while (1) {
- mutex_lock(&scan_mutex);
- if (list_empty(&scan_q)) {
- mutex_unlock(&scan_mutex);
- break;
- }
-
- h = list_entry(scan_q.next,
- struct ctlr_info,
- scan_list);
- list_del(&h->scan_list);
- h->busy_scanning = 1;
- mutex_unlock(&scan_mutex);
-
- rebuild_lun_table(h, 0, 0);
- complete_all(&h->scan_wait);
- mutex_lock(&scan_mutex);
- h->busy_scanning = 0;
- mutex_unlock(&scan_mutex);
- }
- }
-
- return 0;
-}
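-
-/* Illustrative note (not part of the original cciss code): rescans are
- * funneled through a single queue.  add_to_scan_list() puts a controller
- * on scan_q (skipping it while initializing, shutting down, or already
- * queued/scanning); the thread is woken elsewhere in the driver (not shown
- * in this hunk), pops one controller at a time, marks it busy_scanning,
- * runs rebuild_lun_table(h, 0, 0) and then completes scan_wait so that
- * remove_from_scan_list() callers stop blocking.
- */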
-
-static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
-{
- if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
- return 0;
-
- switch (c->err_info->SenseInfo[12]) {
- case STATE_CHANGED:
- dev_warn(&h->pdev->dev, "a state change "
- "detected, command retried\n");
- return 1;
- break;
- case LUN_FAILED:
- dev_warn(&h->pdev->dev, "LUN failure "
- "detected, action required\n");
- return 1;
- break;
- case REPORT_LUNS_CHANGED:
- dev_warn(&h->pdev->dev, "report LUN data changed\n");
- /*
- * Here, we could call add_to_scan_list and wake up the scan thread,
- * except that it's quite likely that we will get more than one
- * REPORT_LUNS_CHANGED condition in quick succession, which means
- * that those which occur after the first one will likely happen
- * *during* the scan_thread's rescan. And the rescan code is not
- * robust enough to restart in the middle, undoing what it has already
- * done, and it's not clear that it's even possible to do this, since
- * part of what it does is notify the block layer, which starts
-		 * doing its own i/o to read partition tables and so on, and the
-		 * driver doesn't have visibility to know what might need undoing.
-		 * In any event, even if it is possible, it is horribly complicated
-		 * to get right, so we just don't do it for now.
- *
- * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
- */
- return 1;
- break;
- case POWER_OR_RESET:
- dev_warn(&h->pdev->dev,
- "a power on or device reset detected\n");
- return 1;
- break;
- case UNIT_ATTENTION_CLEARED:
- dev_warn(&h->pdev->dev,
- "unit attention cleared by another initiator\n");
- return 1;
- break;
- default:
- dev_warn(&h->pdev->dev, "unknown unit attention detected\n");
- return 1;
- }
-}
-
-/*
- * We cannot read the structure directly; for portability we must use
- * the io accessor functions (readl/readb).
- * This is for debug only.
- */
-static void print_cfg_table(ctlr_info_t *h)
-{
- int i;
- char temp_name[17];
- CfgTable_struct *tb = h->cfgtable;
-
- dev_dbg(&h->pdev->dev, "Controller Configuration information\n");
- dev_dbg(&h->pdev->dev, "------------------------------------\n");
- for (i = 0; i < 4; i++)
- temp_name[i] = readb(&(tb->Signature[i]));
- temp_name[4] = '\0';
- dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name);
- dev_dbg(&h->pdev->dev, " Spec Number = %d\n",
- readl(&(tb->SpecValence)));
- dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n",
- readl(&(tb->TransportSupport)));
- dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n",
- readl(&(tb->TransportActive)));
- dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n",
- readl(&(tb->HostWrite.TransportRequest)));
- dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n",
- readl(&(tb->HostWrite.CoalIntDelay)));
- dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n",
- readl(&(tb->HostWrite.CoalIntCount)));
- dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%x\n",
- readl(&(tb->CmdsOutMax)));
- dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n",
- readl(&(tb->BusTypes)));
- for (i = 0; i < 16; i++)
- temp_name[i] = readb(&(tb->ServerName[i]));
- temp_name[16] = '\0';
- dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name);
- dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n",
- readl(&(tb->HeartBeat)));
-}
-
-static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
-{
- int i, offset, mem_type, bar_type;
- if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
- return 0;
- offset = 0;
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
- if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
- offset += 4;
- else {
- mem_type = pci_resource_flags(pdev, i) &
- PCI_BASE_ADDRESS_MEM_TYPE_MASK;
- switch (mem_type) {
- case PCI_BASE_ADDRESS_MEM_TYPE_32:
- case PCI_BASE_ADDRESS_MEM_TYPE_1M:
- offset += 4; /* 32 bit */
- break;
- case PCI_BASE_ADDRESS_MEM_TYPE_64:
- offset += 8;
- break;
- default: /* reserved in PCI 2.2 */
- dev_warn(&pdev->dev,
- "Base address is invalid\n");
- return -1;
- break;
- }
- }
- if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
- return i + 1;
- }
- return -1;
-}
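-
-/* Illustrative note (not part of the original cciss code):
- * find_PCI_BAR_index() converts a config-space BAR offset back into a
- * resource index by walking the BARs, adding 4 bytes for each I/O or
- * 32-bit memory BAR and 8 bytes for each 64-bit memory BAR.  E.g. if
- * resource 0 is a 64-bit memory BAR, the register at
- * PCI_BASE_ADDRESS_0 + 8 maps to resource index 1, which is what the
- * "return i + 1" path yields after the first iteration.
- */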
-
-/* Fill in bucket_map[], given nsgs (the max number of
- * scatter gather elements supported) and bucket[],
- * which is an array of 8 integers. The bucket[] array
- * contains 8 different DMA transfer sizes (in 16
- * byte increments) which the controller uses to fetch
- * commands. This function fills in bucket_map[], which
- * maps a given number of scatter gather elements to one of
- * the 8 DMA transfer sizes. The point of it is to allow the
- * controller to only do as much DMA as needed to fetch the
- * command, with the DMA transfer size encoded in the lower
- * bits of the command address.
- */
-static void calc_bucket_map(int bucket[], int num_buckets,
- int nsgs, int *bucket_map)
-{
- int i, j, b, size;
-
- /* even a command with 0 SGs requires 4 blocks */
-#define MINIMUM_TRANSFER_BLOCKS 4
-#define NUM_BUCKETS 8
- /* Note, bucket_map must have nsgs+1 entries. */
- for (i = 0; i <= nsgs; i++) {
- /* Compute size of a command with i SG entries */
- size = i + MINIMUM_TRANSFER_BLOCKS;
- b = num_buckets; /* Assume the biggest bucket */
- /* Find the bucket that is just big enough */
- for (j = 0; j < 8; j++) {
- if (bucket[j] >= size) {
- b = j;
- break;
- }
- }
- /* for a command with i SG entries, use bucket b. */
- bucket_map[i] = b;
- }
-}
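-
-/* Illustrative note (not part of the original cciss code): a worked example
- * with the bft[] table used below ({5, 6, 8, 10, 12, 20, 28,
- * MAXSGENTRIES + 4}) and MINIMUM_TRANSFER_BLOCKS = 4.  A command with
- * 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks; the first bucket that
- * fits is bft[2] = 8, so bucket_map[3] = 2.  A command with 0 SG entries
- * (size 4) lands in bucket 0, and one with 17 entries (size 21) in
- * bft[6] = 28, bucket 6.
- */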
-
-static void cciss_wait_for_mode_change_ack(ctlr_info_t *h)
-{
- int i;
-
-	/* Under certain very rare conditions, this can take a while.
-	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
-	 * as we enter this code.) */
- for (i = 0; i < MAX_CONFIG_WAIT; i++) {
- if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
- break;
- usleep_range(10000, 20000);
- }
-}
-
-static void cciss_enter_performant_mode(ctlr_info_t *h, u32 use_short_tags)
-{
- /* This is a bit complicated. There are 8 registers on
-	 * the controller which we write to, to tell it 8 different
- * sizes of commands which there may be. It's a way of
- * reducing the DMA done to fetch each command. Encoded into
- * each command's tag are 3 bits which communicate to the controller
- * which of the eight sizes that command fits within. The size of
- * each command depends on how many scatter gather entries there are.
- * Each SG entry requires 16 bytes. The eight registers are programmed
- * with the number of 16-byte blocks a command of that size requires.
- * The smallest command possible requires 5 such 16 byte blocks.
-	 * The largest command possible requires MAXSGENTRIES + 4 16-byte
- * blocks. Note, this only extends to the SG entries contained
- * within the command block, and does not extend to chained blocks
- * of SG elements. bft[] contains the eight values we write to
- * the registers. They are not evenly distributed, but have more
- * sizes for small commands, and fewer sizes for larger commands.
- */
- __u32 trans_offset;
- int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4};
- /*
- * 5 = 1 s/g entry or 4k
- * 6 = 2 s/g entry or 8k
- * 8 = 4 s/g entry or 16k
- * 10 = 6 s/g entry or 24k
- */
- unsigned long register_value;
- BUILD_BUG_ON(28 > MAXSGENTRIES + 4);
-
- h->reply_pool_wraparound = 1; /* spec: init to 1 */
-
- /* Controller spec: zero out this buffer. */
- memset(h->reply_pool, 0, h->max_commands * sizeof(__u64));
- h->reply_pool_head = h->reply_pool;
-
- trans_offset = readl(&(h->cfgtable->TransMethodOffset));
- calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries,
- h->blockFetchTable);
- writel(bft[0], &h->transtable->BlockFetch0);
- writel(bft[1], &h->transtable->BlockFetch1);
- writel(bft[2], &h->transtable->BlockFetch2);
- writel(bft[3], &h->transtable->BlockFetch3);
- writel(bft[4], &h->transtable->BlockFetch4);
- writel(bft[5], &h->transtable->BlockFetch5);
- writel(bft[6], &h->transtable->BlockFetch6);
- writel(bft[7], &h->transtable->BlockFetch7);
-
- /* size of controller ring buffer */
- writel(h->max_commands, &h->transtable->RepQSize);
- writel(1, &h->transtable->RepQCount);
- writel(0, &h->transtable->RepQCtrAddrLow32);
- writel(0, &h->transtable->RepQCtrAddrHigh32);
- writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
- writel(0, &h->transtable->RepQAddr0High32);
- writel(CFGTBL_Trans_Performant | use_short_tags,
- &(h->cfgtable->HostWrite.TransportRequest));
-
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- cciss_wait_for_mode_change_ack(h);
- register_value = readl(&(h->cfgtable->TransportActive));
- if (!(register_value & CFGTBL_Trans_Performant))
- dev_warn(&h->pdev->dev, "cciss: unable to get board into"
- " performant mode\n");
-}
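-
-/* Illustrative note (not part of the original cciss code): the
- * blockFetchTable filled in above is what turns "number of SG entries"
- * into the 3-bit fetch-size code described at the top of this function.
- * That code is stamped into each command's tag per request, presumably by
- * set_performant_mode() (called from do_cciss_request() but not shown in
- * this hunk), so the controller only needs to DMA that many 16-byte
- * blocks when fetching the command.
- */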
-
-static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
-{
- __u32 trans_support;
-
- if (cciss_simple_mode)
- return;
-
- dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n");
- /* Attempt to put controller into performant mode if supported */
- /* Does board support performant mode? */
- trans_support = readl(&(h->cfgtable->TransportSupport));
- if (!(trans_support & PERFORMANT_MODE))
- return;
-
- dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n");
-	/* Performant mode demands commands on a 32-byte boundary.
-	 * pci_alloc_consistent aligns on page boundaries already,
-	 * so we just need to check that the command size is divisible by 32.
-	 */
- if ((sizeof(CommandList_struct) % 32) != 0) {
-		dev_warn(&h->pdev->dev,
-			"command size[%d] not divisible by 32, "
-			"no performant mode\n",
-			(int)sizeof(CommandList_struct));
- return;
- }
-
- /* Performant mode ring buffer and supporting data structures */
- h->reply_pool = (__u64 *)pci_alloc_consistent(
- h->pdev, h->max_commands * sizeof(__u64),
- &(h->reply_pool_dhandle));
-
- /* Need a block fetch table for performant mode */
- h->blockFetchTable = kmalloc(((h->maxsgentries+1) *
- sizeof(__u32)), GFP_KERNEL);
-
- if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL))
- goto clean_up;
-
- cciss_enter_performant_mode(h,
- trans_support & CFGTBL_Trans_use_short_tags);
-
- /* Change the access methods to the performant access methods */
- h->access = SA5_performant_access;
- h->transMethod = CFGTBL_Trans_Performant;
-
- return;
-clean_up:
- kfree(h->blockFetchTable);
- if (h->reply_pool)
- pci_free_consistent(h->pdev,
- h->max_commands * sizeof(__u64),
- h->reply_pool,
- h->reply_pool_dhandle);
- return;
-
-} /* cciss_put_controller_into_performant_mode */
-
-/* If MSI/MSI-X is supported by the kernel we will try to enable it on
- * controllers that are capable. If not, we use IO-APIC mode.
- */
-
-static void cciss_interrupt_mode(ctlr_info_t *h)
-{
- int ret;
-
- /* Some boards advertise MSI but don't really support it */
- if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
- (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
- goto default_int_mode;
-
- ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX);
- if (ret >= 0) {
- h->intr[0] = pci_irq_vector(h->pdev, 0);
- h->intr[1] = pci_irq_vector(h->pdev, 1);
- h->intr[2] = pci_irq_vector(h->pdev, 2);
- h->intr[3] = pci_irq_vector(h->pdev, 3);
- return;
- }
-
- ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI);
-
-default_int_mode:
- /* if we get here we're going to use the default interrupt mode */
- h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0);
- return;
-}
-
-static int cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
-{
- int i;
- u32 subsystem_vendor_id, subsystem_device_id;
-
- subsystem_vendor_id = pdev->subsystem_vendor;
- subsystem_device_id = pdev->subsystem_device;
- *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
- subsystem_vendor_id;
-
- for (i = 0; i < ARRAY_SIZE(products); i++) {
- /* Stand aside for hpsa driver on request */
- if (cciss_allow_hpsa)
- return -ENODEV;
- if (*board_id == products[i].board_id)
- return i;
- }
- dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
- *board_id);
- return -ENODEV;
-}
-
-static inline bool cciss_board_disabled(ctlr_info_t *h)
-{
- u16 command;
-
- (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command);
- return ((command & PCI_COMMAND_MEMORY) == 0);
-}
-
-static int cciss_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar)
-{
- int i;
-
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
- if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
- /* addressing mode bits already removed */
- *memory_bar = pci_resource_start(pdev, i);
- dev_dbg(&pdev->dev, "memory BAR = %lx\n",
- *memory_bar);
- return 0;
- }
- dev_warn(&pdev->dev, "no memory BAR found\n");
- return -ENODEV;
-}
-
-static int cciss_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready)
-#define BOARD_READY 1
-#define BOARD_NOT_READY 0
-{
- int i, iterations;
- u32 scratchpad;
-
- if (wait_for_ready)
- iterations = CCISS_BOARD_READY_ITERATIONS;
- else
- iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
-
- for (i = 0; i < iterations; i++) {
- scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
- if (wait_for_ready) {
- if (scratchpad == CCISS_FIRMWARE_READY)
- return 0;
- } else {
- if (scratchpad != CCISS_FIRMWARE_READY)
- return 0;
- }
- msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
- }
- dev_warn(&pdev->dev, "board not ready, timed out.\n");
- return -ENODEV;
-}
-
-static int cciss_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
- u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset)
-{
- *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
- *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
- *cfg_base_addr &= (u32) 0x0000ffff;
- *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
- if (*cfg_base_addr_index == -1) {
- dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, "
- "*cfg_base_addr = 0x%08x\n", *cfg_base_addr);
- return -ENODEV;
- }
- return 0;
-}
-
-static int cciss_find_cfgtables(ctlr_info_t *h)
-{
- u64 cfg_offset;
- u32 cfg_base_addr;
- u64 cfg_base_addr_index;
- u32 trans_offset;
- int rc;
-
- rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
- if (rc)
- return rc;
- h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
- cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
- if (!h->cfgtable)
- return -ENOMEM;
- rc = write_driver_ver_to_cfgtable(h->cfgtable);
- if (rc)
- return rc;
- /* Find performant mode table. */
- trans_offset = readl(&h->cfgtable->TransMethodOffset);
- h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
- cfg_base_addr_index)+cfg_offset+trans_offset,
- sizeof(*h->transtable));
- if (!h->transtable)
- return -ENOMEM;
- return 0;
-}
-
-static void cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
-{
- h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
-
- /* Limit commands in memory limited kdump scenario. */
- if (reset_devices && h->max_commands > 32)
- h->max_commands = 32;
-
- if (h->max_commands < 16) {
- dev_warn(&h->pdev->dev, "Controller reports "
- "max supported commands of %d, an obvious lie. "
- "Using 16. Ensure that firmware is up to date.\n",
- h->max_commands);
- h->max_commands = 16;
- }
-}
-
-/* Interrogate the hardware for some limits:
- * max commands, max SG elements without chaining, and with chaining,
- * SG chain block size, etc.
- */
-static void cciss_find_board_params(ctlr_info_t *h)
-{
- cciss_get_max_perf_mode_cmds(h);
- h->nr_cmds = h->max_commands - 4 - cciss_tape_cmds;
- h->maxsgentries = readl(&(h->cfgtable->MaxSGElements));
- /*
-	 * The P600 may exhibit poor performance under some workloads
- * if we use the value in the configuration table. Limit this
- * controller to MAXSGENTRIES (32) instead.
- */
- if (h->board_id == 0x3225103C)
- h->maxsgentries = MAXSGENTRIES;
- /*
-	 * Limit in-command s/g elements to 32 to save dma'able memory.
-	 * However, the spec says if 0, use 31
- */
- h->max_cmd_sgentries = 31;
- if (h->maxsgentries > 512) {
- h->max_cmd_sgentries = 32;
- h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1;
- h->maxsgentries--; /* save one for chain pointer */
- } else {
- h->maxsgentries = 31; /* default to traditional values */
- h->chainsize = 0;
- }
-}
-
-static inline bool CISS_signature_present(ctlr_info_t *h)
-{
- if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
- dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
- return false;
- }
- return true;
-}
-
-/* Need to enable prefetch in the SCSI core for 6400 in x86 */
-static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h)
-{
-#ifdef CONFIG_X86
- u32 prefetch;
-
- prefetch = readl(&(h->cfgtable->SCSI_Prefetch));
- prefetch |= 0x100;
- writel(prefetch, &(h->cfgtable->SCSI_Prefetch));
-#endif
-}
-
-/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
- * in a prefetch beyond physical memory.
- */
-static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h)
-{
- u32 dma_prefetch;
- __u32 dma_refetch;
-
- if (h->board_id != 0x3225103C)
- return;
- dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
- dma_prefetch |= 0x8000;
- writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
- pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch);
- dma_refetch |= 0x1;
- pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch);
-}
-
-static int cciss_pci_init(ctlr_info_t *h)
-{
- int prod_index, err;
-
- prod_index = cciss_lookup_board_id(h->pdev, &h->board_id);
- if (prod_index < 0)
- return -ENODEV;
- h->product_name = products[prod_index].product_name;
- h->access = *(products[prod_index].access);
-
- if (cciss_board_disabled(h)) {
- dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
- return -ENODEV;
- }
-
- pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
- PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
-
- err = pci_enable_device(h->pdev);
- if (err) {
- dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n");
- return err;
- }
-
- err = pci_request_regions(h->pdev, "cciss");
- if (err) {
- dev_warn(&h->pdev->dev,
- "Cannot obtain PCI resources, aborting\n");
- return err;
- }
-
- dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq);
- dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id);
-
-/* If the kernel supports MSI/MSI-X we will try to enable that functionality,
- * else we use the IO-APIC interrupt assigned to us by system ROM.
- */
- cciss_interrupt_mode(h);
- err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr);
- if (err)
- goto err_out_free_res;
- h->vaddr = remap_pci_mem(h->paddr, 0x250);
- if (!h->vaddr) {
- err = -ENOMEM;
- goto err_out_free_res;
- }
- err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
- if (err)
- goto err_out_free_res;
- err = cciss_find_cfgtables(h);
- if (err)
- goto err_out_free_res;
- print_cfg_table(h);
- cciss_find_board_params(h);
-
- if (!CISS_signature_present(h)) {
- err = -ENODEV;
- goto err_out_free_res;
- }
- cciss_enable_scsi_prefetch(h);
- cciss_p600_dma_prefetch_quirk(h);
- err = cciss_enter_simple_mode(h);
- if (err)
- goto err_out_free_res;
- cciss_put_controller_into_performant_mode(h);
- return 0;
-
-err_out_free_res:
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
- if (h->transtable)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
- if (h->vaddr)
- iounmap(h->vaddr);
- pci_release_regions(h->pdev);
- return err;
-}
-
-/* Function to find the first free pointer into our hba[] array
- * Returns -1 if no free entries are left.
- */
-static int alloc_cciss_hba(struct pci_dev *pdev)
-{
- int i;
-
- for (i = 0; i < MAX_CTLR; i++) {
- if (!hba[i]) {
- ctlr_info_t *h;
-
- h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL);
- if (!h)
- goto Enomem;
- hba[i] = h;
- return i;
- }
- }
- dev_warn(&pdev->dev, "This driver supports a maximum"
- " of %d controllers.\n", MAX_CTLR);
- return -1;
-Enomem:
- dev_warn(&pdev->dev, "out of memory.\n");
- return -1;
-}
-
-static void free_hba(ctlr_info_t *h)
-{
- int i;
-
- hba[h->ctlr] = NULL;
- for (i = 0; i < h->highest_lun + 1; i++)
- if (h->gendisk[i] != NULL)
- put_disk(h->gendisk[i]);
- kfree(h);
-}
-
-/* Send a message CDB to the firmware. */
-static int cciss_message(struct pci_dev *pdev, unsigned char opcode,
- unsigned char type)
-{
- typedef struct {
- CommandListHeader_struct CommandHeader;
- RequestBlock_struct Request;
- ErrDescriptor_struct ErrorDescriptor;
- } Command;
- static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct);
- Command *cmd;
- dma_addr_t paddr64;
- uint32_t paddr32, tag;
- void __iomem *vaddr;
- int i, err;
-
- vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
- if (vaddr == NULL)
- return -ENOMEM;
-
- /* The Inbound Post Queue only accepts 32-bit physical addresses for the
- CCISS commands, so they must be allocated from the lower 4GiB of
- memory. */
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- iounmap(vaddr);
- return -ENOMEM;
- }
-
- cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
- if (cmd == NULL) {
- iounmap(vaddr);
- return -ENOMEM;
- }
-
- /* This must fit, because of the 32-bit consistent DMA mask. Also,
- although there's no guarantee, we assume that the address is at
- least 4-byte aligned (most likely, it's page-aligned). */
- paddr32 = paddr64;
-
- cmd->CommandHeader.ReplyQueue = 0;
- cmd->CommandHeader.SGList = 0;
- cmd->CommandHeader.SGTotal = 0;
- cmd->CommandHeader.Tag.lower = paddr32;
- cmd->CommandHeader.Tag.upper = 0;
- memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
-
- cmd->Request.CDBLen = 16;
- cmd->Request.Type.Type = TYPE_MSG;
- cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
- cmd->Request.Type.Direction = XFER_NONE;
- cmd->Request.Timeout = 0; /* Don't time out */
- cmd->Request.CDB[0] = opcode;
- cmd->Request.CDB[1] = type;
- memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */
-
- cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
- cmd->ErrorDescriptor.Addr.upper = 0;
- cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);
-
- writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
-
- for (i = 0; i < 10; i++) {
- tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
- if ((tag & ~3) == paddr32)
- break;
- msleep(CCISS_POST_RESET_NOOP_TIMEOUT_MSECS);
- }
-
- iounmap(vaddr);
-
- /* we leak the DMA buffer here ... no choice since the controller could
- still complete the command. */
- if (i == 10) {
- dev_err(&pdev->dev,
- "controller message %02x:%02x timed out\n",
- opcode, type);
- return -ETIMEDOUT;
- }
-
- pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
-
- if (tag & 2) {
- dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
- opcode, type);
- return -EIO;
- }
-
- dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
- opcode, type);
- return 0;
-}
-
-#define cciss_noop(p) cciss_message(p, 3, 0)
-
-static int cciss_controller_hard_reset(struct pci_dev *pdev,
- void * __iomem vaddr, u32 use_doorbell)
-{
- u16 pmcsr;
- int pos;
-
- if (use_doorbell) {
- /* For everything after the P600, the PCI power state method
- * of resetting the controller doesn't work, so we have this
- * other way using the doorbell register.
- */
- dev_info(&pdev->dev, "using doorbell to reset controller\n");
- writel(use_doorbell, vaddr + SA5_DOORBELL);
- } else { /* Try to do it the PCI power state way */
-
- /* Quoting from the Open CISS Specification: "The Power
- * Management Control/Status Register (CSR) controls the power
- * state of the device. The normal operating state is D0,
- * CSR=00h. The software off state is D3, CSR=03h. To reset
- * the controller, place the interface device in D3 then to D0,
- * this causes a secondary PCI reset which will reset the
- * controller." */
-
- pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (pos == 0) {
- dev_err(&pdev->dev,
- "cciss_controller_hard_reset: "
- "PCI PM not supported\n");
- return -ENODEV;
- }
- dev_info(&pdev->dev, "using PCI PM to reset controller\n");
- /* enter the D3hot power management state */
- pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D3hot;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
- msleep(500);
-
- /* enter the D0 power management state */
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
- pmcsr |= PCI_D0;
- pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
- /*
- * The P600 requires a small delay when changing states.
- * Otherwise we may think the board did not reset and we bail.
-	 * This is for kdump only and is particular to the P600.
- */
- msleep(500);
- }
- return 0;
-}
-
-static void init_driver_version(char *driver_version, int len)
-{
- memset(driver_version, 0, len);
- strncpy(driver_version, "cciss " DRIVER_NAME, len - 1);
-}
-
-static int write_driver_ver_to_cfgtable(CfgTable_struct __iomem *cfgtable)
-{
- char *driver_version;
- int i, size = sizeof(cfgtable->driver_version);
-
- driver_version = kmalloc(size, GFP_KERNEL);
- if (!driver_version)
- return -ENOMEM;
-
- init_driver_version(driver_version, size);
- for (i = 0; i < size; i++)
- writeb(driver_version[i], &cfgtable->driver_version[i]);
- kfree(driver_version);
- return 0;
-}
-
-static void read_driver_ver_from_cfgtable(CfgTable_struct __iomem *cfgtable,
- unsigned char *driver_ver)
-{
- int i;
-
- for (i = 0; i < sizeof(cfgtable->driver_version); i++)
- driver_ver[i] = readb(&cfgtable->driver_version[i]);
-}
-
-static int controller_reset_failed(CfgTable_struct __iomem *cfgtable)
-{
-
- char *driver_ver, *old_driver_ver;
- int rc, size = sizeof(cfgtable->driver_version);
-
- old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
- if (!old_driver_ver)
- return -ENOMEM;
- driver_ver = old_driver_ver + size;
-
- /* After a reset, the 32 bytes of "driver version" in the cfgtable
- * should have been changed, otherwise we know the reset failed.
- */
- init_driver_version(old_driver_ver, size);
- read_driver_ver_from_cfgtable(cfgtable, driver_ver);
- rc = !memcmp(driver_ver, old_driver_ver, size);
- kfree(old_driver_ver);
- return rc;
-}
-
-/* This does a hard reset of the controller using PCI power management
- * states or using the doorbell register. */
-static int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
-{
- u64 cfg_offset;
- u32 cfg_base_addr;
- u64 cfg_base_addr_index;
- void __iomem *vaddr;
- unsigned long paddr;
- u32 misc_fw_support;
- int rc;
- CfgTable_struct __iomem *cfgtable;
- u32 use_doorbell;
- u32 board_id;
- u16 command_register;
-
-	/* For controllers as old as the P600, this is very nearly
- * the same thing as
- *
- * pci_save_state(pci_dev);
- * pci_set_power_state(pci_dev, PCI_D3hot);
- * pci_set_power_state(pci_dev, PCI_D0);
- * pci_restore_state(pci_dev);
- *
- * For controllers newer than the P600, the pci power state
- * method of resetting doesn't work so we have another way
- * using the doorbell register.
- */
-
- /* Exclude 640x boards. These are two pci devices in one slot
- * which share a battery backed cache module. One controls the
- * cache, the other accesses the cache through the one that controls
- * it. If we reset the one controlling the cache, the other will
- * likely not be happy. Just forbid resetting this conjoined mess.
- */
- cciss_lookup_board_id(pdev, &board_id);
- if (!ctlr_is_resettable(board_id)) {
- dev_warn(&pdev->dev, "Controller not resettable\n");
- return -ENODEV;
- }
-
- /* if controller is soft- but not hard resettable... */
- if (!ctlr_is_hard_resettable(board_id))
- return -ENOTSUPP; /* try soft reset later. */
-
- /* Save the PCI command register */
- pci_read_config_word(pdev, 4, &command_register);
- /* Turn the board off. This is so that later pci_restore_state()
- * won't turn the board on before the rest of config space is ready.
- */
- pci_disable_device(pdev);
- pci_save_state(pdev);
-
- /* find the first memory BAR, so we can find the cfg table */
- rc = cciss_pci_find_memory_BAR(pdev, &paddr);
- if (rc)
- return rc;
- vaddr = remap_pci_mem(paddr, 0x250);
- if (!vaddr)
- return -ENOMEM;
-
- /* find cfgtable in order to check if reset via doorbell is supported */
- rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
- &cfg_base_addr_index, &cfg_offset);
- if (rc)
- goto unmap_vaddr;
- cfgtable = remap_pci_mem(pci_resource_start(pdev,
- cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
- if (!cfgtable) {
- rc = -ENOMEM;
- goto unmap_vaddr;
- }
- rc = write_driver_ver_to_cfgtable(cfgtable);
- if (rc)
- goto unmap_vaddr;
-
- /* If reset via doorbell register is supported, use that.
- * There are two such methods. Favor the newest method.
- */
- misc_fw_support = readl(&cfgtable->misc_fw_support);
- use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
- if (use_doorbell) {
- use_doorbell = DOORBELL_CTLR_RESET2;
- } else {
- use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
- if (use_doorbell) {
- dev_warn(&pdev->dev, "Controller claims that "
- "'Bit 2 doorbell reset' is "
- "supported, but not 'bit 5 doorbell reset'. "
- "Firmware update is recommended.\n");
- rc = -ENOTSUPP; /* use the soft reset */
- goto unmap_cfgtable;
- }
- }
-
- rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
- if (rc)
- goto unmap_cfgtable;
- pci_restore_state(pdev);
- rc = pci_enable_device(pdev);
- if (rc) {
- dev_warn(&pdev->dev, "failed to enable device.\n");
- goto unmap_cfgtable;
- }
- pci_write_config_word(pdev, 4, command_register);
-
- /* Some devices (notably the HP Smart Array 5i Controller)
- need a little pause here */
- msleep(CCISS_POST_RESET_PAUSE_MSECS);
-
- /* Wait for board to become not ready, then ready. */
- dev_info(&pdev->dev, "Waiting for board to reset.\n");
- rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
- if (rc) {
- dev_warn(&pdev->dev, "Failed waiting for board to hard reset."
- " Will try soft reset.\n");
- rc = -ENOTSUPP; /* Not expected, but try soft reset later */
- goto unmap_cfgtable;
- }
- rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
- if (rc) {
- dev_warn(&pdev->dev,
- "failed waiting for board to become ready "
- "after hard reset\n");
- goto unmap_cfgtable;
- }
-
- rc = controller_reset_failed(vaddr);
- if (rc < 0)
- goto unmap_cfgtable;
- if (rc) {
- dev_warn(&pdev->dev, "Unable to successfully hard reset "
- "controller. Will try soft reset.\n");
- rc = -ENOTSUPP; /* Not expected, but try soft reset later */
- } else {
- dev_info(&pdev->dev, "Board ready after hard reset.\n");
- }
-
-unmap_cfgtable:
- iounmap(cfgtable);
-
-unmap_vaddr:
- iounmap(vaddr);
- return rc;
-}
-
-static int cciss_init_reset_devices(struct pci_dev *pdev)
-{
- int rc, i;
-
- if (!reset_devices)
- return 0;
-
- /* Reset the controller with a PCI power-cycle or via doorbell */
- rc = cciss_kdump_hard_reset_controller(pdev);
-
- /* -ENOTSUPP here means we cannot reset the controller
- * but it's already (and still) up and running in
- * "performant mode". Or, it might be 640x, which can't reset
- * due to concerns about shared bbwc between 6402/6404 pair.
- */
- if (rc == -ENOTSUPP)
- return rc; /* just try to do the kdump anyhow. */
- if (rc)
- return -ENODEV;
-
- /* Now try to get the controller to respond to a no-op */
- dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
- for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
- if (cciss_noop(pdev) == 0)
- break;
- else
- dev_warn(&pdev->dev, "no-op failed%s\n",
- (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ?
- "; re-trying" : ""));
- msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS);
- }
- return 0;
-}
-
-static int cciss_allocate_cmd_pool(ctlr_info_t *h)
-{
- h->cmd_pool_bits = kmalloc(BITS_TO_LONGS(h->nr_cmds) *
- sizeof(unsigned long), GFP_KERNEL);
- h->cmd_pool = pci_alloc_consistent(h->pdev,
- h->nr_cmds * sizeof(CommandList_struct),
- &(h->cmd_pool_dhandle));
- h->errinfo_pool = pci_alloc_consistent(h->pdev,
- h->nr_cmds * sizeof(ErrorInfo_struct),
- &(h->errinfo_pool_dhandle));
- if ((h->cmd_pool_bits == NULL)
- || (h->cmd_pool == NULL)
- || (h->errinfo_pool == NULL)) {
- dev_err(&h->pdev->dev, "out of memory");
- return -ENOMEM;
- }
- return 0;
-}
-
-static int cciss_allocate_scatterlists(ctlr_info_t *h)
-{
- int i;
-
- /* zero it, so that on free we need not know how many were alloc'ed */
- h->scatter_list = kzalloc(h->max_commands *
- sizeof(struct scatterlist *), GFP_KERNEL);
- if (!h->scatter_list)
- return -ENOMEM;
-
- for (i = 0; i < h->nr_cmds; i++) {
- h->scatter_list[i] = kmalloc(sizeof(struct scatterlist) *
- h->maxsgentries, GFP_KERNEL);
- if (h->scatter_list[i] == NULL) {
- dev_err(&h->pdev->dev, "could not allocate "
- "s/g lists\n");
- return -ENOMEM;
- }
- }
- return 0;
-}
-
-static void cciss_free_scatterlists(ctlr_info_t *h)
-{
- int i;
-
- if (h->scatter_list) {
- for (i = 0; i < h->nr_cmds; i++)
- kfree(h->scatter_list[i]);
- kfree(h->scatter_list);
- }
-}
-
-static void cciss_free_cmd_pool(ctlr_info_t *h)
-{
- kfree(h->cmd_pool_bits);
- if (h->cmd_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(CommandList_struct),
- h->cmd_pool, h->cmd_pool_dhandle);
- if (h->errinfo_pool)
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(ErrorInfo_struct),
- h->errinfo_pool, h->errinfo_pool_dhandle);
-}
-
-static int cciss_request_irq(ctlr_info_t *h,
- irqreturn_t (*msixhandler)(int, void *),
- irqreturn_t (*intxhandler)(int, void *))
-{
- if (h->pdev->msi_enabled || h->pdev->msix_enabled) {
- if (!request_irq(h->intr[h->intr_mode], msixhandler,
- 0, h->devname, h))
- return 0;
- dev_err(&h->pdev->dev, "Unable to get msi irq %d"
- " for %s\n", h->intr[h->intr_mode],
- h->devname);
- return -1;
- }
-
- if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_SHARED, h->devname, h))
- return 0;
- dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
- h->intr[h->intr_mode], h->devname);
- return -1;
-}
-
-static int cciss_kdump_soft_reset(ctlr_info_t *h)
-{
- if (cciss_send_reset(h, CTLR_LUNID, CCISS_RESET_TYPE_CONTROLLER)) {
- dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
- return -EIO;
- }
-
- dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
- if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
- dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
- return -1;
- }
-
- dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
- if (cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
- dev_warn(&h->pdev->dev, "Board failed to become ready "
- "after soft reset.\n");
- return -1;
- }
-
- return 0;
-}
-
-static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
-{
- int ctlr = h->ctlr;
-
- free_irq(h->intr[h->intr_mode], h);
- pci_free_irq_vectors(h->pdev);
- cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- cciss_free_scatterlists(h);
- cciss_free_cmd_pool(h);
- kfree(h->blockFetchTable);
- if (h->reply_pool)
- pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
- h->reply_pool, h->reply_pool_dhandle);
- if (h->transtable)
- iounmap(h->transtable);
- if (h->cfgtable)
- iounmap(h->cfgtable);
- if (h->vaddr)
- iounmap(h->vaddr);
- unregister_blkdev(h->major, h->devname);
- cciss_destroy_hba_sysfs_entry(h);
- pci_release_regions(h->pdev);
- kfree(h);
- hba[ctlr] = NULL;
-}
-
-/*
- * This is it. Find all the controllers and register them. I really hate
- * stealing all these major device numbers.
- * Returns the number of block devices registered.
- */
-static int cciss_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int i;
- int j = 0;
- int rc;
- int try_soft_reset = 0;
- int dac, return_code;
- InquiryData_struct *inq_buff;
- ctlr_info_t *h;
- unsigned long flags;
-
- /*
- * By default the cciss driver is used for all older HP Smart Array
-	 * controllers. There are module parameters that allow a user to
-	 * override this behavior and instead use the hpsa SCSI driver. If
-	 * this is the case, cciss may be loaded first from the kdump initrd
-	 * image and cause a kernel panic. So if reset_devices is true and
-	 * cciss_allow_hpsa is set, just bail.
- */
- if ((reset_devices) && (cciss_allow_hpsa == 1))
- return -ENODEV;
- rc = cciss_init_reset_devices(pdev);
- if (rc) {
- if (rc != -ENOTSUPP)
- return rc;
- /* If the reset fails in a particular way (it has no way to do
- * a proper hard reset, so returns -ENOTSUPP) we can try to do
- * a soft reset once we get the controller configured up to the
- * point that it can accept a command.
- */
- try_soft_reset = 1;
- rc = 0;
- }
-
-reinit_after_soft_reset:
-
- i = alloc_cciss_hba(pdev);
- if (i < 0)
- return -ENOMEM;
-
- h = hba[i];
- h->pdev = pdev;
- h->busy_initializing = 1;
- h->intr_mode = cciss_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
- INIT_LIST_HEAD(&h->cmpQ);
- INIT_LIST_HEAD(&h->reqQ);
- mutex_init(&h->busy_shutting_down);
-
- if (cciss_pci_init(h) != 0)
- goto clean_no_release_regions;
-
- sprintf(h->devname, "cciss%d", i);
- h->ctlr = i;
-
- if (cciss_tape_cmds < 2)
- cciss_tape_cmds = 2;
- if (cciss_tape_cmds > 16)
- cciss_tape_cmds = 16;
-
- init_completion(&h->scan_wait);
-
- if (cciss_create_hba_sysfs_entry(h))
- goto clean0;
-
- /* configure PCI DMA stuff */
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- dac = 1;
- else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
- dac = 0;
- else {
- dev_err(&h->pdev->dev, "no suitable DMA available\n");
- goto clean1;
- }
-
- /*
- * register with the major number, or get a dynamic major number
-	 * by passing 0 as argument. This is done to support more than
-	 * 8 controllers.
- */
- if (i < MAX_CTLR_ORIG)
- h->major = COMPAQ_CISS_MAJOR + i;
- rc = register_blkdev(h->major, h->devname);
- if (rc == -EBUSY || rc == -EINVAL) {
- dev_err(&h->pdev->dev,
- "Unable to get major number %d for %s "
- "on hba %d\n", h->major, h->devname, i);
- goto clean1;
- } else {
- if (i >= MAX_CTLR_ORIG)
- h->major = rc;
- }
-
- /* make sure the board interrupts are off */
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
- rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
- if (rc)
- goto clean2;
-
- dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n",
- h->devname, pdev->device, pci_name(pdev),
- h->intr[h->intr_mode], dac ? "" : " not");
-
- if (cciss_allocate_cmd_pool(h))
- goto clean4;
-
- if (cciss_allocate_scatterlists(h))
- goto clean4;
-
- h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
- h->chainsize, h->nr_cmds);
- if (!h->cmd_sg_list && h->chainsize > 0)
- goto clean4;
-
- spin_lock_init(&h->lock);
-
- /* Initialize the pdev driver private data.
- have it point to h. */
- pci_set_drvdata(pdev, h);
- /* command and error info recs zeroed out before
- they are used */
- bitmap_zero(h->cmd_pool_bits, h->nr_cmds);
-
- h->num_luns = 0;
- h->highest_lun = -1;
- for (j = 0; j < CISS_MAX_LUN; j++) {
- h->drv[j] = NULL;
- h->gendisk[j] = NULL;
- }
-
- /* At this point, the controller is ready to take commands.
- * Now, if reset_devices and the hard reset didn't work, try
- * the soft reset and see if that works.
- */
- if (try_soft_reset) {
-
- /* This is kind of gross. We may or may not get a completion
- * from the soft reset command, and if we do, then the value
- * from the fifo may or may not be valid. So, we wait 10 secs
- * after the reset throwing away any completions we get during
- * that time. Unregister the interrupt handler and register
- * fake ones to scoop up any residual completions.
- */
- spin_lock_irqsave(&h->lock, flags);
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
- spin_unlock_irqrestore(&h->lock, flags);
- free_irq(h->intr[h->intr_mode], h);
- rc = cciss_request_irq(h, cciss_msix_discard_completions,
- cciss_intx_discard_completions);
- if (rc) {
- dev_warn(&h->pdev->dev, "Failed to request_irq after "
- "soft reset.\n");
- goto clean4;
- }
-
- rc = cciss_kdump_soft_reset(h);
- if (rc) {
- dev_warn(&h->pdev->dev, "Soft reset failed.\n");
- goto clean4;
- }
-
- dev_info(&h->pdev->dev, "Board READY.\n");
- dev_info(&h->pdev->dev,
- "Waiting for stale completions to drain.\n");
- h->access.set_intr_mask(h, CCISS_INTR_ON);
- msleep(10000);
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
-
- rc = controller_reset_failed(h->cfgtable);
- if (rc)
- dev_info(&h->pdev->dev,
- "Soft reset appears to have failed.\n");
-
- /* since the controller's reset, we have to go back and re-init
- * everything. Easiest to just forget what we've done and do it
- * all over again.
- */
- cciss_undo_allocations_after_kdump_soft_reset(h);
- try_soft_reset = 0;
- if (rc)
- /* don't go to clean4, we already unallocated */
- return -ENODEV;
-
- goto reinit_after_soft_reset;
- }
-
- cciss_scsi_setup(h);
-
- /* Turn the interrupts on so we can service requests */
- h->access.set_intr_mask(h, CCISS_INTR_ON);
-
- /* Get the firmware version */
- inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
- if (inq_buff == NULL) {
- dev_err(&h->pdev->dev, "out of memory\n");
- goto clean4;
- }
-
- return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff,
- sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD);
- if (return_code == IO_OK) {
- h->firm_ver[0] = inq_buff->data_byte[32];
- h->firm_ver[1] = inq_buff->data_byte[33];
- h->firm_ver[2] = inq_buff->data_byte[34];
- h->firm_ver[3] = inq_buff->data_byte[35];
- } else { /* send command failed */
- dev_warn(&h->pdev->dev, "unable to determine firmware"
- " version of controller\n");
- }
- kfree(inq_buff);
-
- cciss_procinit(h);
-
- h->cciss_max_sectors = 8192;
-
- rebuild_lun_table(h, 1, 0);
- cciss_engage_scsi(h);
- h->busy_initializing = 0;
- return 0;
-
-clean4:
- cciss_free_cmd_pool(h);
- cciss_free_scatterlists(h);
- cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- free_irq(h->intr[h->intr_mode], h);
-clean2:
- unregister_blkdev(h->major, h->devname);
-clean1:
- cciss_destroy_hba_sysfs_entry(h);
-clean0:
- pci_release_regions(pdev);
-clean_no_release_regions:
- h->busy_initializing = 0;
-
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
- pci_set_drvdata(pdev, NULL);
- free_hba(h);
- return -ENODEV;
-}
-
-static void cciss_shutdown(struct pci_dev *pdev)
-{
- ctlr_info_t *h;
- char *flush_buf;
- int return_code;
-
- h = pci_get_drvdata(pdev);
- flush_buf = kzalloc(4, GFP_KERNEL);
- if (!flush_buf) {
- dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n");
- return;
- }
- /* write all data in the battery backed cache to disk */
- return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
- 4, 0, CTLR_LUNID, TYPE_CMD);
- kfree(flush_buf);
- if (return_code != IO_OK)
- dev_warn(&h->pdev->dev, "Error flushing cache\n");
- h->access.set_intr_mask(h, CCISS_INTR_OFF);
- free_irq(h->intr[h->intr_mode], h);
-}
-
-static int cciss_enter_simple_mode(struct ctlr_info *h)
-{
- u32 trans_support;
-
- trans_support = readl(&(h->cfgtable->TransportSupport));
- if (!(trans_support & SIMPLE_MODE))
- return -ENOTSUPP;
-
- h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
- writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
- writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
- cciss_wait_for_mode_change_ack(h);
- print_cfg_table(h);
- if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) {
- dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
- return -ENODEV;
- }
- h->transMethod = CFGTBL_Trans_Simple;
- return 0;
-}
-
-
-static void cciss_remove_one(struct pci_dev *pdev)
-{
- ctlr_info_t *h;
- int i, j;
-
- if (pci_get_drvdata(pdev) == NULL) {
- dev_err(&pdev->dev, "Unable to remove device\n");
- return;
- }
-
- h = pci_get_drvdata(pdev);
- i = h->ctlr;
- if (hba[i] == NULL) {
- dev_err(&pdev->dev, "device appears to already be removed\n");
- return;
- }
-
- mutex_lock(&h->busy_shutting_down);
-
- remove_from_scan_list(h);
- remove_proc_entry(h->devname, proc_cciss);
- unregister_blkdev(h->major, h->devname);
-
- /* remove it from the disk list */
- for (j = 0; j < CISS_MAX_LUN; j++) {
- struct gendisk *disk = h->gendisk[j];
- if (disk) {
- struct request_queue *q = disk->queue;
-
- if (disk->flags & GENHD_FL_UP) {
- cciss_destroy_ld_sysfs_entry(h, j, 1);
- del_gendisk(disk);
- }
- if (q)
- blk_cleanup_queue(q);
- }
- }
-
-#ifdef CONFIG_CISS_SCSI_TAPE
- cciss_unregister_scsi(h); /* unhook from SCSI subsystem */
-#endif
-
- cciss_shutdown(pdev);
-
- pci_free_irq_vectors(h->pdev);
-
- iounmap(h->transtable);
- iounmap(h->cfgtable);
- iounmap(h->vaddr);
-
- cciss_free_cmd_pool(h);
- /* Free up sg elements */
- for (j = 0; j < h->nr_cmds; j++)
- kfree(h->scatter_list[j]);
- kfree(h->scatter_list);
- cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
- kfree(h->blockFetchTable);
- if (h->reply_pool)
- pci_free_consistent(h->pdev, h->max_commands * sizeof(__u64),
- h->reply_pool, h->reply_pool_dhandle);
- /*
- * Deliberately omit pci_disable_device(): it does something nasty to
- * Smart Array controllers that pci_enable_device does not undo
- */
- pci_release_regions(pdev);
- pci_set_drvdata(pdev, NULL);
- cciss_destroy_hba_sysfs_entry(h);
- mutex_unlock(&h->busy_shutting_down);
- free_hba(h);
-}
-
-static struct pci_driver cciss_pci_driver = {
- .name = "cciss",
- .probe = cciss_init_one,
- .remove = cciss_remove_one,
- .id_table = cciss_pci_device_id, /* id_table */
- .shutdown = cciss_shutdown,
-};
-
-/*
- * This is it. Register the PCI driver information for the cards we control;
- * the OS will call our registered routines when it finds one of our cards.
- */
-static int __init cciss_init(void)
-{
- int err;
-
- /*
- * The hardware requires that commands are aligned on a 64-bit
- * boundary. Given that we use pci_alloc_consistent() to allocate an
- * array of them, the size must be a multiple of 8 bytes.
- */
- BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
- printk(KERN_INFO DRIVER_NAME "\n");
-
- err = bus_register(&cciss_bus_type);
- if (err)
- return err;
-
- /* Start the scan thread */
- cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
- if (IS_ERR(cciss_scan_thread)) {
- err = PTR_ERR(cciss_scan_thread);
- goto err_bus_unregister;
- }
-
- /* Register for our PCI devices */
- err = pci_register_driver(&cciss_pci_driver);
- if (err)
- goto err_thread_stop;
-
- return err;
-
-err_thread_stop:
- kthread_stop(cciss_scan_thread);
-err_bus_unregister:
- bus_unregister(&cciss_bus_type);
-
- return err;
-}
-
-static void __exit cciss_cleanup(void)
-{
- int i;
-
- pci_unregister_driver(&cciss_pci_driver);
-	/* double check that all controller entries have been removed */
- for (i = 0; i < MAX_CTLR; i++) {
- if (hba[i] != NULL) {
- dev_warn(&hba[i]->pdev->dev,
- "had to remove controller\n");
- cciss_remove_one(hba[i]->pdev);
- }
- }
- kthread_stop(cciss_scan_thread);
- if (proc_cciss)
- remove_proc_entry("driver/cciss", NULL);
- bus_unregister(&cciss_bus_type);
-}
-
-module_init(cciss_init);
-module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
deleted file mode 100644
index 24b5fd75501a..000000000000
--- a/drivers/block/cciss.h
+++ /dev/null
@@ -1,433 +0,0 @@
-#ifndef CCISS_H
-#define CCISS_H
-
-#include <linux/genhd.h>
-#include <linux/mutex.h>
-
-#include "cciss_cmd.h"
-
-
-#define NWD_SHIFT 4
-#define MAX_PART (1 << NWD_SHIFT)
-
-#define IO_OK 0
-#define IO_ERROR 1
-#define IO_NEEDS_RETRY 3
-
-#define VENDOR_LEN 8
-#define MODEL_LEN 16
-#define REV_LEN 4
-
-struct ctlr_info;
-typedef struct ctlr_info ctlr_info_t;
-
-struct access_method {
- void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
- void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
- unsigned long (*fifo_full)(ctlr_info_t *h);
- bool (*intr_pending)(ctlr_info_t *h);
- unsigned long (*command_completed)(ctlr_info_t *h);
-};
-typedef struct _drive_info_struct
-{
- unsigned char LunID[8];
- int usage_count;
- struct request_queue *queue;
- sector_t nr_blocks;
- int block_size;
- int heads;
- int sectors;
- int cylinders;
- int raid_level; /* set to -1 to indicate that
- * the drive is not in use/configured
- */
- int busy_configuring; /* This is set when a drive is being removed
-				 * to prevent it from being opened or its
- * queue from being started.
- */
- struct device dev;
- __u8 serial_no[16]; /* from inquiry page 0x83,
-				 * not necessarily null terminated.
- */
- char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
- char model[MODEL_LEN + 1]; /* SCSI model string */
- char rev[REV_LEN + 1]; /* SCSI revision string */
- char device_initialized; /* indicates whether dev is initialized */
-} drive_info_struct;
-
-struct ctlr_info
-{
- int ctlr;
- char devname[8];
- char *product_name;
- char firm_ver[4]; /* Firmware version */
- struct pci_dev *pdev;
- __u32 board_id;
- void __iomem *vaddr;
- unsigned long paddr;
- int nr_cmds; /* Number of commands allowed on this controller */
- CfgTable_struct __iomem *cfgtable;
- int interrupts_enabled;
- int major;
- int max_commands;
- int commands_outstanding;
- int max_outstanding; /* Debug */
- int num_luns;
- int highest_lun;
-	int	usage_count;  /* number of opens on all minor devices */
- /* Need space for temp sg list
- * number of scatter/gathers supported
- * number of scatter/gathers in chained block
- */
- struct scatterlist **scatter_list;
- int maxsgentries;
- int chainsize;
- int max_cmd_sgentries;
- SGDescriptor_struct **cmd_sg_list;
-
-# define PERF_MODE_INT 0
-# define DOORBELL_INT 1
-# define SIMPLE_MODE_INT 2
-# define MEMQ_MODE_INT 3
- unsigned int intr[4];
- int intr_mode;
- int cciss_max_sectors;
- BYTE cciss_read;
- BYTE cciss_write;
- BYTE cciss_read_capacity;
-
- /* information about each logical volume */
- drive_info_struct *drv[CISS_MAX_LUN];
-
- struct access_method access;
-
- /* queue and queue Info */
- struct list_head reqQ;
- struct list_head cmpQ;
- unsigned int Qdepth;
- unsigned int maxQsinceinit;
- unsigned int maxSG;
- spinlock_t lock;
-
- /* pointers to command and error info pool */
- CommandList_struct *cmd_pool;
- dma_addr_t cmd_pool_dhandle;
- ErrorInfo_struct *errinfo_pool;
- dma_addr_t errinfo_pool_dhandle;
- unsigned long *cmd_pool_bits;
- int nr_allocs;
- int nr_frees;
- int busy_configuring;
- int busy_initializing;
- int busy_scanning;
- struct mutex busy_shutting_down;
-
- /* This element holds the zero based queue number of the last
- * queue to be started. It is used for fairness.
- */
- int next_to_run;
-
- /* Disk structures we need to pass back */
- struct gendisk *gendisk[CISS_MAX_LUN];
-#ifdef CONFIG_CISS_SCSI_TAPE
- struct cciss_scsi_adapter_data_t *scsi_ctlr;
-#endif
- unsigned char alive;
- struct list_head scan_list;
- struct completion scan_wait;
- struct device dev;
- /*
- * Performant mode tables.
- */
- u32 trans_support;
- u32 trans_offset;
- struct TransTable_struct *transtable;
- unsigned long transMethod;
-
- /*
- * Performant mode completion buffer
- */
- u64 *reply_pool;
- dma_addr_t reply_pool_dhandle;
- u64 *reply_pool_head;
- size_t reply_pool_size;
- unsigned char reply_pool_wraparound;
- u32 *blockFetchTable;
-};
-
-/* Defining the different access_methods
- *
- * Memory mapped FIFO interface (SMART 53xx cards)
- */
-#define SA5_DOORBELL 0x20
-#define SA5_REQUEST_PORT_OFFSET 0x40
-#define SA5_REPLY_INTR_MASK_OFFSET 0x34
-#define SA5_REPLY_PORT_OFFSET 0x44
-#define SA5_INTR_STATUS 0x30
-#define SA5_SCRATCHPAD_OFFSET 0xB0
-
-#define SA5_CTCFG_OFFSET 0xB4
-#define SA5_CTMEM_OFFSET 0xB8
-
-#define SA5_INTR_OFF 0x08
-#define SA5B_INTR_OFF 0x04
-#define SA5_INTR_PENDING 0x08
-#define SA5B_INTR_PENDING 0x04
-#define FIFO_EMPTY 0xffffffff
-#define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */
-/* Perf. mode flags */
-#define SA5_PERF_INTR_PENDING 0x04
-#define SA5_PERF_INTR_OFF 0x05
-#define SA5_OUTDB_STATUS_PERF_BIT 0x01
-#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
-#define SA5_OUTDB_CLEAR 0xA0
-#define SA5_OUTDB_CLEAR_PERF_BIT 0x01
-#define SA5_OUTDB_STATUS 0x9C
-
-
-#define CISS_ERROR_BIT 0x02
-
-#define CCISS_INTR_ON 1
-#define CCISS_INTR_OFF 0
-
-
-/* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board
- * to become ready, in seconds, before giving up on it.
- * CCISS_BOARD_READY_POLL_INTERVAL_MSECS is how long to wait
- * between polling the board to see if it is ready, in
- * milliseconds. CCISS_BOARD_READY_ITERATIONS is derived
- * from the above.
- */
-#define CCISS_BOARD_READY_WAIT_SECS (120)
-#define CCISS_BOARD_NOT_READY_WAIT_SECS (100)
-#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
-#define CCISS_BOARD_READY_ITERATIONS \
- ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
- CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
-#define CCISS_BOARD_NOT_READY_ITERATIONS \
- ((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
- CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
-#define CCISS_POST_RESET_PAUSE_MSECS (3000)
-#define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (4000)
-#define CCISS_POST_RESET_NOOP_RETRIES (12)
-#define CCISS_POST_RESET_NOOP_TIMEOUT_MSECS (10000)
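
The CCISS_BOARD_READY_ITERATIONS / CCISS_BOARD_NOT_READY_ITERATIONS macros above simply divide a total wait budget by the poll interval. A standalone re-statement of that arithmetic, with hypothetical names, for readers who want to check the resulting poll counts:

	#include <stdio.h>

	/* Hypothetical restatement of the wait budgets defined above. */
	#define READY_WAIT_SECS		120	/* CCISS_BOARD_READY_WAIT_SECS */
	#define NOT_READY_WAIT_SECS	100	/* CCISS_BOARD_NOT_READY_WAIT_SECS */
	#define POLL_INTERVAL_MSECS	100	/* CCISS_BOARD_READY_POLL_INTERVAL_MSECS */

	int main(void)
	{
		printf("ready iterations:     %d\n",
		       READY_WAIT_SECS * 1000 / POLL_INTERVAL_MSECS);	/* 1200 */
		printf("not-ready iterations: %d\n",
		       NOT_READY_WAIT_SECS * 1000 / POLL_INTERVAL_MSECS);	/* 1000 */
		return 0;
	}
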
-
-/*
- Send the command to the hardware
-*/
-static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c)
-{
-#ifdef CCISS_DEBUG
- printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n",
- h->ctlr, c->busaddr);
-#endif /* CCISS_DEBUG */
- writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
- readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
- h->commands_outstanding++;
- if ( h->commands_outstanding > h->max_outstanding)
- h->max_outstanding = h->commands_outstanding;
-}
-
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x08 turns them off...
- */
-static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- h->interrupts_enabled = 1;
- writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- h->interrupts_enabled = 0;
- writel( SA5_INTR_OFF,
- h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- }
-}
-/*
- * This card is the opposite of the other cards.
- * 0 turns interrupts on...
- * 0x04 turns them off...
- */
-static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val)
- { /* Turn interrupts on */
- h->interrupts_enabled = 1;
- writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- } else /* Turn them off */
- {
- h->interrupts_enabled = 0;
- writel( SA5B_INTR_OFF,
- h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/* Performant mode intr_mask */
-static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val)
-{
- if (val) { /* turn on interrupts */
- h->interrupts_enabled = 1;
- writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- } else {
- h->interrupts_enabled = 0;
- writel(SA5_PERF_INTR_OFF,
- h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- (void) readl(h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
- }
-}
-
-/*
- * Returns true if fifo is full.
- *
- */
-static unsigned long SA5_fifo_full(ctlr_info_t *h)
-{
- if( h->commands_outstanding >= h->max_commands)
- return(1);
- else
- return(0);
-
-}
-/*
- * returns value read from hardware.
- * returns FIFO_EMPTY if there is nothing to read
- */
-static unsigned long SA5_completed(ctlr_info_t *h)
-{
- unsigned long register_value
- = readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
- if(register_value != FIFO_EMPTY)
- {
- h->commands_outstanding--;
-#ifdef CCISS_DEBUG
- printk("cciss: Read %lx back from board\n", register_value);
-#endif /* CCISS_DEBUG */
- }
-#ifdef CCISS_DEBUG
- else
- {
- printk("cciss: FIFO Empty read\n");
- }
-#endif
- return ( register_value);
-
-}
-
-/* Performant mode command completed */
-static unsigned long SA5_performant_completed(ctlr_info_t *h)
-{
- unsigned long register_value = FIFO_EMPTY;
-
- /* flush the controller write of the reply queue by reading
- * outbound doorbell status register.
- */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
- /* msi auto clears the interrupt pending bit. */
- if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) {
- writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
- /* Do a read in order to flush the write to the controller
- * (as per spec.)
- */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
- }
-
- if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
- register_value = *(h->reply_pool_head);
- (h->reply_pool_head)++;
- h->commands_outstanding--;
- } else {
- register_value = FIFO_EMPTY;
- }
- /* Check for wraparound */
- if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
- h->reply_pool_head = h->reply_pool;
- h->reply_pool_wraparound ^= 1;
- }
-
- return register_value;
-}
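
The wraparound handling in SA5_performant_completed() above relies on a phase bit: a reply-queue entry is only consumed when its low bit matches the driver's current wraparound flag, and the flag is toggled each time the head pointer wraps, so entries left over from the previous lap read as empty. A small self-contained sketch of that pattern (hypothetical names and a userspace harness, not the driver's code):

	#include <stdint.h>
	#include <stdio.h>

	#define RING_EMPTY ((uint64_t)-1)

	struct reply_ring {
		uint64_t *entries;	/* written by the device; low bit is the phase */
		unsigned int head;	/* next slot the consumer will look at */
		unsigned int size;	/* number of slots */
		unsigned int phase;	/* flips on every wrap of the head pointer */
	};

	/* Pop one completed tag, or RING_EMPTY if the device has not filled the slot. */
	uint64_t ring_pop(struct reply_ring *r)
	{
		uint64_t v = r->entries[r->head];

		if ((unsigned int)(v & 1) != r->phase)
			return RING_EMPTY;	/* slot still holds a stale entry */

		if (++r->head == r->size) {	/* wrap: last lap's entries keep the */
			r->head = 0;		/* old phase bit, so flip what counts */
			r->phase ^= 1;		/* as "valid" next time around */
		}
		return v;
	}

	int main(void)
	{
		uint64_t slots[4] = { 0 };
		struct reply_ring r = { .entries = slots, .head = 0, .size = 4, .phase = 1 };

		slots[0] = (42u << 1) | 1;	/* device posts tag 42 with phase bit 1 */
		printf("%llu\n", (unsigned long long)(ring_pop(&r) >> 1));	/* 42 */
		printf("%d\n", ring_pop(&r) == RING_EMPTY);			/* 1 */
		return 0;
	}
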
-/*
- * Returns true if an interrupt is pending.
- */
-static bool SA5_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef CCISS_DEBUG
- printk("cciss: intr_pending %lx\n", register_value);
-#endif /* CCISS_DEBUG */
- if( register_value & SA5_INTR_PENDING)
- return 1;
- return 0 ;
-}
-
-/*
- * Returns true if an interrupt is pending.
- */
-static bool SA5B_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value =
- readl(h->vaddr + SA5_INTR_STATUS);
-#ifdef CCISS_DEBUG
- printk("cciss: intr_pending %lx\n", register_value);
-#endif /* CCISS_DEBUG */
- if( register_value & SA5B_INTR_PENDING)
- return 1;
- return 0 ;
-}
-
-static bool SA5_performant_intr_pending(ctlr_info_t *h)
-{
- unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS);
-
- if (!register_value)
- return false;
-
- if (h->pdev->msi_enabled || h->pdev->msix_enabled)
- return true;
-
- /* Read outbound doorbell to flush */
- register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
- return register_value & SA5_OUTDB_STATUS_PERF_BIT;
-}
-
-static struct access_method SA5_access = {
- .submit_command = SA5_submit_command,
- .set_intr_mask = SA5_intr_mask,
- .fifo_full = SA5_fifo_full,
- .intr_pending = SA5_intr_pending,
- .command_completed = SA5_completed,
-};
-
-static struct access_method SA5B_access = {
- .submit_command = SA5_submit_command,
- .set_intr_mask = SA5B_intr_mask,
- .fifo_full = SA5_fifo_full,
- .intr_pending = SA5B_intr_pending,
- .command_completed = SA5_completed,
-};
-
-static struct access_method SA5_performant_access = {
- .submit_command = SA5_submit_command,
- .set_intr_mask = SA5_performant_intr_mask,
- .fifo_full = SA5_fifo_full,
- .intr_pending = SA5_performant_intr_pending,
- .command_completed = SA5_performant_completed,
-};
-
-struct board_type {
- __u32 board_id;
- char *product_name;
- struct access_method *access;
- int nr_cmds; /* Max cmds this kind of ctlr can handle. */
-};
-
-#endif /* CCISS_H */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
deleted file mode 100644
index d9be6b4d49a6..000000000000
--- a/drivers/block/cciss_cmd.h
+++ /dev/null
@@ -1,269 +0,0 @@
-#ifndef CCISS_CMD_H
-#define CCISS_CMD_H
-
-#include <linux/cciss_defs.h>
-
-/* DEFINES */
-#define CISS_VERSION "1.00"
-
-/* general boundary definitions */
-#define MAXSGENTRIES 32
-#define CCISS_SG_CHAIN 0x80000000
-#define MAXREPLYQS 256
-
-/* Unit Attentions ASC's as defined for the MSA2012sa */
-#define POWER_OR_RESET 0x29
-#define STATE_CHANGED 0x2a
-#define UNIT_ATTENTION_CLEARED 0x2f
-#define LUN_FAILED 0x3e
-#define REPORT_LUNS_CHANGED 0x3f
-
-/* Unit Attentions ASCQ's as defined for the MSA2012sa */
-
- /* These ASCQ's defined for ASC = POWER_OR_RESET */
-#define POWER_ON_RESET 0x00
-#define POWER_ON_REBOOT 0x01
-#define SCSI_BUS_RESET 0x02
-#define MSA_TARGET_RESET 0x03
-#define CONTROLLER_FAILOVER 0x04
-#define TRANSCEIVER_SE 0x05
-#define TRANSCEIVER_LVD 0x06
-
- /* These ASCQ's defined for ASC = STATE_CHANGED */
-#define RESERVATION_PREEMPTED 0x03
-#define ASYM_ACCESS_CHANGED 0x06
-#define LUN_CAPACITY_CHANGED 0x09
-
-/* config space register offsets */
-#define CFG_VENDORID 0x00
-#define CFG_DEVICEID 0x02
-#define CFG_I2OBAR 0x10
-#define CFG_MEM1BAR 0x14
-
-/* i2o space register offsets */
-#define I2O_IBDB_SET 0x20
-#define I2O_IBDB_CLEAR 0x70
-#define I2O_INT_STATUS 0x30
-#define I2O_INT_MASK 0x34
-#define I2O_IBPOST_Q 0x40
-#define I2O_OBPOST_Q 0x44
-#define I2O_DMA1_CFG 0x214
-
-/* Configuration Table */
-#define CFGTBL_ChangeReq 0x00000001l
-#define CFGTBL_AccCmds 0x00000001l
-#define DOORBELL_CTLR_RESET 0x00000004l
-#define DOORBELL_CTLR_RESET2 0x00000020l
-
-#define CFGTBL_Trans_Simple 0x00000002l
-#define CFGTBL_Trans_Performant 0x00000004l
-#define CFGTBL_Trans_use_short_tags 0x20000000l
-
-#define CFGTBL_BusType_Ultra2 0x00000001l
-#define CFGTBL_BusType_Ultra3 0x00000002l
-#define CFGTBL_BusType_Fibre1G 0x00000100l
-#define CFGTBL_BusType_Fibre2G 0x00000200l
-typedef struct _vals32
-{
- __u32 lower;
- __u32 upper;
-} vals32;
-
-typedef union _u64bit
-{
- vals32 val32;
- __u64 val;
-} u64bit;
-
-/* Type defs used in the following structs */
-#define QWORD vals32
-
-/* STRUCTURES */
-#define CISS_MAX_PHYS_LUN 1024
-/* SCSI-3 Commands */
-
-#pragma pack(1)
-
-#define CISS_INQUIRY 0x12
-/* Data returned */
-typedef struct _InquiryData_struct
-{
- BYTE data_byte[36];
-} InquiryData_struct;
-
-#define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */
-#define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */
-/* Data returned */
-typedef struct _ReportLUNdata_struct
-{
- BYTE LUNListLength[4];
- DWORD reserved;
- BYTE LUN[CISS_MAX_LUN][8];
-} ReportLunData_struct;
-
-#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */
-typedef struct _ReadCapdata_struct
-{
- BYTE total_size[4]; /* Total size in blocks */
- BYTE block_size[4]; /* Size of blocks in bytes */
-} ReadCapdata_struct;
-
-#define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */
-
-/* service action to differentiate a 16 byte read capacity from
- other commands that use the 0x9e SCSI op code */
-
-#define CCISS_READ_CAPACITY_16_SERVICE_ACT 0x10
-
-typedef struct _ReadCapdata_struct_16
-{
- BYTE total_size[8]; /* Total size in blocks */
- BYTE block_size[4]; /* Size of blocks in bytes */
- BYTE prot_en:1; /* protection enable bit */
- BYTE rto_en:1; /* reference tag own enable bit */
- BYTE reserved:6; /* reserved bits */
- BYTE reserved2[18]; /* reserved bytes per spec */
-} ReadCapdata_struct_16;
-
-/* Define the supported read/write commands for cciss based controllers */
-
-#define CCISS_READ_10 0x28 /* Read(10) */
-#define CCISS_WRITE_10 0x2a /* Write(10) */
-#define CCISS_READ_16 0x88 /* Read(16) */
-#define CCISS_WRITE_16 0x8a /* Write(16) */
-
-/* Define the CDB lengths supported by cciss based controllers */
-
-#define CDB_LEN10 10
-#define CDB_LEN16 16
-
-/* BMIC commands */
-#define BMIC_READ 0x26
-#define BMIC_WRITE 0x27
-#define BMIC_CACHE_FLUSH 0xc2
-#define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */
-
-#define CCISS_ABORT_MSG 0x00
-#define CCISS_RESET_MSG 0x01
-#define CCISS_RESET_TYPE_CONTROLLER 0x00
-#define CCISS_RESET_TYPE_BUS 0x01
-#define CCISS_RESET_TYPE_TARGET 0x03
-#define CCISS_RESET_TYPE_LUN 0x04
-#define CCISS_NOOP_MSG 0x03
-
-/* Command List Structure */
-#define CTLR_LUNID "\0\0\0\0\0\0\0\0"
-
-typedef struct _CommandListHeader_struct {
- BYTE ReplyQueue;
- BYTE SGList;
- HWORD SGTotal;
- QWORD Tag;
- LUNAddr_struct LUN;
-} CommandListHeader_struct;
-typedef struct _ErrDescriptor_struct {
- QWORD Addr;
- DWORD Len;
-} ErrDescriptor_struct;
-typedef struct _SGDescriptor_struct {
- QWORD Addr;
- DWORD Len;
- DWORD Ext;
-} SGDescriptor_struct;
-
-/* Command types */
-#define CMD_RWREQ 0x00
-#define CMD_IOCTL_PEND 0x01
-#define CMD_SCSI 0x03
-#define CMD_MSG_DONE 0x04
-#define CMD_MSG_TIMEOUT 0x05
-#define CMD_MSG_STALE 0xff
-
-/* This structure needs to be divisible by COMMANDLIST_ALIGNMENT
- * because the low bits of the address are used to indicate
- * whether the tag contains an index or an address. PAD_32 and
- * PAD_64 can be adjusted independently as needed for 32-bit
- * and 64-bit systems.
- */
-#define COMMANDLIST_ALIGNMENT (32)
-#define IS_64_BIT ((sizeof(long) - 4)/4)
-#define IS_32_BIT (!IS_64_BIT)
-#define PAD_32 (0)
-#define PAD_64 (4)
-#define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
-#define DIRECT_LOOKUP_BIT 0x10
-#define DIRECT_LOOKUP_SHIFT 5
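
The comment above says the low bits of a command's (aligned) bus address double as flags; the DIRECT_LOOKUP_* macros suggest one such scheme, where a set DIRECT_LOOKUP_BIT marks the tag as carrying a command index in the bits at and above DIRECT_LOOKUP_SHIFT rather than an address. The following encode/decode sketch is an illustration of that interpretation (hypothetical helper names, not the driver's exact code):

	#include <stdint.h>
	#include <assert.h>

	#define DIRECT_LOOKUP_BIT   0x10
	#define DIRECT_LOOKUP_SHIFT 5

	/* Pack a command index into a tag, marking it as an index, not an address. */
	static uint32_t tag_from_index(uint32_t cmdindex)
	{
		return (cmdindex << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
	}

	/* Recover the index; only meaningful if DIRECT_LOOKUP_BIT is set. */
	static uint32_t index_from_tag(uint32_t tag)
	{
		return tag >> DIRECT_LOOKUP_SHIFT;
	}

	int main(void)
	{
		uint32_t tag = tag_from_index(7);

		assert(tag & DIRECT_LOOKUP_BIT);	/* marked as an index */
		assert(index_from_tag(tag) == 7);	/* round-trips cleanly */
		return 0;
	}
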
-
-typedef struct _CommandList_struct {
- CommandListHeader_struct Header;
- RequestBlock_struct Request;
- ErrDescriptor_struct ErrDesc;
- SGDescriptor_struct SG[MAXSGENTRIES];
- /* information associated with the command */
- __u32 busaddr; /* physical address of this record */
- ErrorInfo_struct * err_info; /* pointer to the allocated mem */
- int ctlr;
- int cmd_type;
- long cmdindex;
- struct list_head list;
- struct request * rq;
- struct completion *waiting;
- int retry_count;
- void * scsi_cmd;
- char pad[PADSIZE];
-} CommandList_struct;
-
-/* Configuration Table Structure */
-typedef struct _HostWrite_struct {
- DWORD TransportRequest;
- DWORD Reserved;
- DWORD CoalIntDelay;
- DWORD CoalIntCount;
-} HostWrite_struct;
-
-typedef struct _CfgTable_struct {
- BYTE Signature[4];
- DWORD SpecValence;
-#define SIMPLE_MODE 0x02
-#define PERFORMANT_MODE 0x04
-#define MEMQ_MODE 0x08
- DWORD TransportSupport;
- DWORD TransportActive;
- HostWrite_struct HostWrite;
- DWORD CmdsOutMax;
- DWORD BusTypes;
- DWORD TransMethodOffset;
- BYTE ServerName[16];
- DWORD HeartBeat;
- DWORD SCSI_Prefetch;
- DWORD MaxSGElements;
- DWORD MaxLogicalUnits;
- DWORD MaxPhysicalDrives;
- DWORD MaxPhysicalDrivesPerLogicalUnit;
- DWORD MaxPerformantModeCommands;
- u8 reserved[0x78 - 0x58];
- u32 misc_fw_support; /* offset 0x78 */
-#define MISC_FW_DOORBELL_RESET (0x02)
-#define MISC_FW_DOORBELL_RESET2 (0x10)
- u8 driver_version[32];
-} CfgTable_struct;
-
-struct TransTable_struct {
- u32 BlockFetch0;
- u32 BlockFetch1;
- u32 BlockFetch2;
- u32 BlockFetch3;
- u32 BlockFetch4;
- u32 BlockFetch5;
- u32 BlockFetch6;
- u32 BlockFetch7;
- u32 RepQSize;
- u32 RepQCount;
- u32 RepQCtrAddrLow32;
- u32 RepQCtrAddrHigh32;
- u32 RepQAddr0Low32;
- u32 RepQAddr0High32;
-};
-
-#pragma pack()
-#endif /* CCISS_CMD_H */
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
deleted file mode 100644
index 01a1f7e24978..000000000000
--- a/drivers/block/cciss_scsi.c
+++ /dev/null
@@ -1,1653 +0,0 @@
-/*
- * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
- * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
- * 02111-1307, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- * Author: Stephen M. Cameron
- */
-#ifdef CONFIG_CISS_SCSI_TAPE
-
-/* Here we have code to present the driver as a scsi driver
- as it is simultaneously presented as a block driver. The
- reason for doing this is to allow access to SCSI tape drives
- through the array controller. Note in particular, neither
- physical nor logical disks are presented through the scsi layer. */
-
-#include <linux/timer.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <linux/atomic.h>
-
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_device.h>
-#include <scsi/scsi_host.h>
-
-#include "cciss_scsi.h"
-
-#define CCISS_ABORT_MSG 0x00
-#define CCISS_RESET_MSG 0x01
-
-static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
- size_t size,
- __u8 page_code, unsigned char *scsi3addr,
- int cmd_type);
-
-static CommandList_struct *cmd_alloc(ctlr_info_t *h);
-static CommandList_struct *cmd_special_alloc(ctlr_info_t *h);
-static void cmd_free(ctlr_info_t *h, CommandList_struct *c);
-static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c);
-
-static int cciss_scsi_write_info(struct Scsi_Host *sh,
- char *buffer, /* data buffer */
- int length); /* length of data in buffer */
-static int cciss_scsi_show_info(struct seq_file *m,
- struct Scsi_Host *sh);
-
-static int cciss_scsi_queue_command (struct Scsi_Host *h,
- struct scsi_cmnd *cmd);
-static int cciss_eh_device_reset_handler(struct scsi_cmnd *);
-static int cciss_eh_abort_handler(struct scsi_cmnd *);
-
-static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
- { .name = "cciss0", .ndevices = 0 },
- { .name = "cciss1", .ndevices = 0 },
- { .name = "cciss2", .ndevices = 0 },
- { .name = "cciss3", .ndevices = 0 },
- { .name = "cciss4", .ndevices = 0 },
- { .name = "cciss5", .ndevices = 0 },
- { .name = "cciss6", .ndevices = 0 },
- { .name = "cciss7", .ndevices = 0 },
-};
-
-static struct scsi_host_template cciss_driver_template = {
- .module = THIS_MODULE,
- .name = "cciss",
- .proc_name = "cciss",
- .write_info = cciss_scsi_write_info,
- .show_info = cciss_scsi_show_info,
- .queuecommand = cciss_scsi_queue_command,
- .this_id = 7,
- .use_clustering = DISABLE_CLUSTERING,
- /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */
- .eh_device_reset_handler= cciss_eh_device_reset_handler,
- .eh_abort_handler = cciss_eh_abort_handler,
-};
-
-#pragma pack(1)
-
-#define SCSI_PAD_32 8
-#define SCSI_PAD_64 8
-
-struct cciss_scsi_cmd_stack_elem_t {
- CommandList_struct cmd;
- ErrorInfo_struct Err;
- __u32 busaddr;
- int cmdindex;
- u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64];
-};
-
-#pragma pack()
-
-#pragma pack(1)
-struct cciss_scsi_cmd_stack_t {
- struct cciss_scsi_cmd_stack_elem_t *pool;
- struct cciss_scsi_cmd_stack_elem_t **elem;
- dma_addr_t cmd_pool_handle;
- int top;
- int nelems;
-};
-#pragma pack()
-
-struct cciss_scsi_adapter_data_t {
- struct Scsi_Host *scsi_host;
- struct cciss_scsi_cmd_stack_t cmd_stack;
- SGDescriptor_struct **cmd_sg_list;
- int registered;
- spinlock_t lock; // to protect ccissscsi[ctlr];
-};
-
-#define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \
- &h->scsi_ctlr->lock, flags);
-#define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \
- &h->scsi_ctlr->lock, flags);
-
-static CommandList_struct *
-scsi_cmd_alloc(ctlr_info_t *h)
-{
- /* assume only one process in here at a time, locking done by caller. */
- /* use h->lock */
- /* might be better to rewrite how we allocate scsi commands in a way that */
- /* needs no locking at all. */
-
- /* take the top memory chunk off the stack and return it, if any. */
- struct cciss_scsi_cmd_stack_elem_t *c;
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- u64bit temp64;
-
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-
- if (stk->top < 0)
- return NULL;
- c = stk->elem[stk->top];
- /* memset(c, 0, sizeof(*c)); */
- memset(&c->cmd, 0, sizeof(c->cmd));
- memset(&c->Err, 0, sizeof(c->Err));
- /* set physical addr of cmd and addr of scsi parameters */
- c->cmd.busaddr = c->busaddr;
- c->cmd.cmdindex = c->cmdindex;
- /* (__u32) (stk->cmd_pool_handle +
- (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
-
- temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
- /* (__u64) (stk->cmd_pool_handle +
- (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
- sizeof(CommandList_struct)); */
- stk->top--;
- c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
- c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
- c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
-
- c->cmd.ctlr = h->ctlr;
- c->cmd.err_info = &c->Err;
-
- return (CommandList_struct *) c;
-}
-
-static void
-scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c)
-{
- /* assume only one process in here at a time, locking done by caller. */
- /* use h->lock */
- /* drop the free memory chunk on top of the stack. */
-
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
-
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
- stk->top++;
- if (stk->top >= stk->nelems) {
- dev_err(&h->pdev->dev,
- "scsi_cmd_free called too many times.\n");
- BUG();
- }
- stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c;
-}
-
-static int
-scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa)
-{
- int i;
- struct cciss_scsi_cmd_stack_t *stk;
- size_t size;
-
- stk = &sa->cmd_stack;
- stk->nelems = cciss_tape_cmds + 2;
- sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h,
- h->chainsize, stk->nelems);
- if (!sa->cmd_sg_list && h->chainsize > 0)
- return -ENOMEM;
-
- size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
-
- /* Check alignment, see cciss_cmd.h near CommandList_struct def. */
- BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0);
- /* pci_alloc_consistent guarantees 32-bit DMA address will be used */
- stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
- pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle);
-
- if (stk->pool == NULL) {
- cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
- sa->cmd_sg_list = NULL;
- return -ENOMEM;
- }
- stk->elem = kmalloc(sizeof(stk->elem[0]) * stk->nelems, GFP_KERNEL);
- if (!stk->elem) {
- pci_free_consistent(h->pdev, size, stk->pool,
- stk->cmd_pool_handle);
- return -1;
- }
- for (i = 0; i < stk->nelems; i++) {
- stk->elem[i] = &stk->pool[i];
- stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle +
- (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
- stk->elem[i]->cmdindex = i;
- }
- stk->top = stk->nelems-1;
- return 0;
-}
-
-static void
-scsi_cmd_stack_free(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- size_t size;
-
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
- if (stk->top != stk->nelems-1) {
- dev_warn(&h->pdev->dev,
- "bug: %d scsi commands are still outstanding.\n",
- stk->nelems - stk->top);
- }
- size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * stk->nelems;
-
- pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle);
- stk->pool = NULL;
- cciss_free_sg_chain_blocks(sa->cmd_sg_list, stk->nelems);
- kfree(stk->elem);
- stk->elem = NULL;
-}
-
-#if 0
-static void
-print_cmd(CommandList_struct *cp)
-{
- printk("queue:%d\n", cp->Header.ReplyQueue);
- printk("sglist:%d\n", cp->Header.SGList);
- printk("sgtot:%d\n", cp->Header.SGTotal);
- printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper,
- cp->Header.Tag.lower);
- printk("LUN:0x%8phN\n", cp->Header.LUN.LunAddrBytes);
- printk("CDBLen:%d\n", cp->Request.CDBLen);
- printk("Type:%d\n",cp->Request.Type.Type);
- printk("Attr:%d\n",cp->Request.Type.Attribute);
- printk(" Dir:%d\n",cp->Request.Type.Direction);
- printk("Timeout:%d\n",cp->Request.Timeout);
- printk("CDB: %16ph\n", cp->Request.CDB);
- printk("edesc.Addr: 0x%08x/0%08x, Len = %d\n",
- cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
- cp->ErrDesc.Len);
- printk("sgs..........Errorinfo:\n");
- printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
- printk("senselen:%d\n", cp->err_info->SenseLen);
- printk("cmd status:%d\n", cp->err_info->CommandStatus);
- printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
- printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
- printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
- printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
-}
-#endif
-
-static int
-find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun)
-{
- /* finds an unused bus, target, lun for a new device */
- /* assumes h->scsi_ctlr->lock is held */
- int i, found=0;
- unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
-
- memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
-
- target_taken[SELF_SCSI_ID] = 1;
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++)
- target_taken[ccissscsi[h->ctlr].dev[i].target] = 1;
-
- for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) {
- if (!target_taken[i]) {
- *bus = 0; *target=i; *lun = 0; found=1;
- break;
- }
- }
- return (!found);
-}
-struct scsi2map {
- char scsi3addr[8];
- int bus, target, lun;
-};
-
-static int
-cciss_scsi_add_entry(ctlr_info_t *h, int hostno,
- struct cciss_scsi_dev_t *device,
- struct scsi2map *added, int *nadded)
-{
- /* assumes h->scsi_ctlr->lock is held */
- int n = ccissscsi[h->ctlr].ndevices;
- struct cciss_scsi_dev_t *sd;
- int i, bus, target, lun;
- unsigned char addr1[8], addr2[8];
-
- if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
- dev_warn(&h->pdev->dev, "Too many devices, "
- "some will be inaccessible.\n");
- return -1;
- }
-
- bus = target = -1;
- lun = 0;
- /* Is this device a non-zero lun of a multi-lun device */
- /* byte 4 of the 8-byte LUN addr will contain the logical unit no. */
- if (device->scsi3addr[4] != 0) {
- /* Search through our list and find the device which */
- /* has the same 8 byte LUN address, excepting byte 4. */
- /* Assign the same bus and target for this new LUN. */
- /* Use the logical unit number from the firmware. */
- memcpy(addr1, device->scsi3addr, 8);
- addr1[4] = 0;
- for (i = 0; i < n; i++) {
- sd = &ccissscsi[h->ctlr].dev[i];
- memcpy(addr2, sd->scsi3addr, 8);
- addr2[4] = 0;
- /* differ only in byte 4? */
- if (memcmp(addr1, addr2, 8) == 0) {
- bus = sd->bus;
- target = sd->target;
- lun = device->scsi3addr[4];
- break;
- }
- }
- }
-
- sd = &ccissscsi[h->ctlr].dev[n];
- if (lun == 0) {
- if (find_bus_target_lun(h,
- &sd->bus, &sd->target, &sd->lun) != 0)
- return -1;
- } else {
- sd->bus = bus;
- sd->target = target;
- sd->lun = lun;
- }
- added[*nadded].bus = sd->bus;
- added[*nadded].target = sd->target;
- added[*nadded].lun = sd->lun;
- (*nadded)++;
-
- memcpy(sd->scsi3addr, device->scsi3addr, 8);
- memcpy(sd->vendor, device->vendor, sizeof(sd->vendor));
- memcpy(sd->revision, device->revision, sizeof(sd->revision));
- memcpy(sd->device_id, device->device_id, sizeof(sd->device_id));
- sd->devtype = device->devtype;
-
- ccissscsi[h->ctlr].ndevices++;
-
- /* initially, (before registering with scsi layer) we don't
- know our hostno and we don't want to print anything first
- time anyway (the scsi layer's inquiries will show that info) */
- if (hostno != -1)
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
- scsi_device_type(sd->devtype), hostno,
- sd->bus, sd->target, sd->lun);
- return 0;
-}
-
-static void
-cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry,
- struct scsi2map *removed, int *nremoved)
-{
-	/* assumes h->scsi_ctlr->lock is held */
- int i;
- struct cciss_scsi_dev_t sd;
-
- if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
- sd = ccissscsi[h->ctlr].dev[entry];
- removed[*nremoved].bus = sd.bus;
- removed[*nremoved].target = sd.target;
- removed[*nremoved].lun = sd.lun;
- (*nremoved)++;
- for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++)
- ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1];
- ccissscsi[h->ctlr].ndevices--;
- dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
- scsi_device_type(sd.devtype), hostno,
- sd.bus, sd.target, sd.lun);
-}
-
-
-#define SCSI3ADDR_EQ(a,b) ( \
- (a)[7] == (b)[7] && \
- (a)[6] == (b)[6] && \
- (a)[5] == (b)[5] && \
- (a)[4] == (b)[4] && \
- (a)[3] == (b)[3] && \
- (a)[2] == (b)[2] && \
- (a)[1] == (b)[1] && \
- (a)[0] == (b)[0])
-
-static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr)
-{
- /* called when scsi_add_device fails in order to re-adjust */
- /* ccissscsi[] to match the mid layer's view. */
- unsigned long flags;
- int i, j;
- CPQ_TAPE_LOCK(h, flags);
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- if (memcmp(scsi3addr,
- ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) {
- for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++)
- ccissscsi[h->ctlr].dev[j] =
- ccissscsi[h->ctlr].dev[j+1];
- ccissscsi[h->ctlr].ndevices--;
- break;
- }
- }
- CPQ_TAPE_UNLOCK(h, flags);
-}
-
-static int device_is_the_same(struct cciss_scsi_dev_t *dev1,
- struct cciss_scsi_dev_t *dev2)
-{
- return dev1->devtype == dev2->devtype &&
- memcmp(dev1->scsi3addr, dev2->scsi3addr,
- sizeof(dev1->scsi3addr)) == 0 &&
- memcmp(dev1->device_id, dev2->device_id,
- sizeof(dev1->device_id)) == 0 &&
- memcmp(dev1->vendor, dev2->vendor,
- sizeof(dev1->vendor)) == 0 &&
- memcmp(dev1->model, dev2->model,
- sizeof(dev1->model)) == 0 &&
- memcmp(dev1->revision, dev2->revision,
- sizeof(dev1->revision)) == 0;
-}
-
-static int
-adjust_cciss_scsi_table(ctlr_info_t *h, int hostno,
- struct cciss_scsi_dev_t sd[], int nsds)
-{
- /* sd contains scsi3 addresses and devtypes, but
- bus, target, and lun are not filled in. This function
- takes what's in sd to be the current and adjusts
- ccissscsi[] to be in line with what's in sd. */
-
- int i,j, found, changes=0;
- struct cciss_scsi_dev_t *csd;
- unsigned long flags;
- struct scsi2map *added, *removed;
- int nadded, nremoved;
- struct Scsi_Host *sh = NULL;
-
- added = kzalloc(sizeof(*added) * CCISS_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
- removed = kzalloc(sizeof(*removed) * CCISS_MAX_SCSI_DEVS_PER_HBA,
- GFP_KERNEL);
-
- if (!added || !removed) {
- dev_warn(&h->pdev->dev,
- "Out of memory in adjust_cciss_scsi_table\n");
- goto free_and_out;
- }
-
- CPQ_TAPE_LOCK(h, flags);
-
- if (hostno != -1) /* if it's not the first time... */
- sh = h->scsi_ctlr->scsi_host;
-
- /* find any devices in ccissscsi[] that are not in
- sd[] and remove them from ccissscsi[] */
-
- i = 0;
- nremoved = 0;
- nadded = 0;
- while (i < ccissscsi[h->ctlr].ndevices) {
- csd = &ccissscsi[h->ctlr].dev[i];
- found=0;
- for (j=0;j<nsds;j++) {
- if (SCSI3ADDR_EQ(sd[j].scsi3addr,
- csd->scsi3addr)) {
- if (device_is_the_same(&sd[j], csd))
- found=2;
- else
- found=1;
- break;
- }
- }
-
- if (found == 0) { /* device no longer present. */
- changes++;
- cciss_scsi_remove_entry(h, hostno, i,
- removed, &nremoved);
- /* remove ^^^, hence i not incremented */
- } else if (found == 1) { /* device is different in some way */
- changes++;
- dev_info(&h->pdev->dev,
- "device c%db%dt%dl%d has changed.\n",
- hostno, csd->bus, csd->target, csd->lun);
- cciss_scsi_remove_entry(h, hostno, i,
- removed, &nremoved);
- /* remove ^^^, hence i not incremented */
- if (cciss_scsi_add_entry(h, hostno, &sd[j],
- added, &nadded) != 0)
- /* we just removed one, so add can't fail. */
- BUG();
- csd->devtype = sd[j].devtype;
- memcpy(csd->device_id, sd[j].device_id,
- sizeof(csd->device_id));
- memcpy(csd->vendor, sd[j].vendor,
- sizeof(csd->vendor));
- memcpy(csd->model, sd[j].model,
- sizeof(csd->model));
- memcpy(csd->revision, sd[j].revision,
- sizeof(csd->revision));
- } else /* device is same as it ever was, */
- i++; /* so just move along. */
- }
-
- /* Now, make sure every device listed in sd[] is also
- listed in ccissscsi[], adding them if they aren't found */
-
- for (i=0;i<nsds;i++) {
- found=0;
- for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) {
- csd = &ccissscsi[h->ctlr].dev[j];
- if (SCSI3ADDR_EQ(sd[i].scsi3addr,
- csd->scsi3addr)) {
- if (device_is_the_same(&sd[i], csd))
- found=2; /* found device */
- else
- found=1; /* found a bug. */
- break;
- }
- }
- if (!found) {
- changes++;
- if (cciss_scsi_add_entry(h, hostno, &sd[i],
- added, &nadded) != 0)
- break;
- } else if (found == 1) {
- /* should never happen... */
- changes++;
- dev_warn(&h->pdev->dev,
- "device unexpectedly changed\n");
- /* but if it does happen, we just ignore that device */
- }
- }
- CPQ_TAPE_UNLOCK(h, flags);
-
- /* Don't notify scsi mid layer of any changes the first time through */
- /* (or if there are no changes) scsi_scan_host will do it later the */
- /* first time through. */
- if (hostno == -1 || !changes)
- goto free_and_out;
-
- /* Notify scsi mid layer of any removed devices */
- for (i = 0; i < nremoved; i++) {
- struct scsi_device *sdev =
- scsi_device_lookup(sh, removed[i].bus,
- removed[i].target, removed[i].lun);
- if (sdev != NULL) {
- scsi_remove_device(sdev);
- scsi_device_put(sdev);
- } else {
- /* We don't expect to get here. */
- /* future cmds to this device will get selection */
- /* timeout as if the device was gone. */
- dev_warn(&h->pdev->dev, "didn't find "
- "c%db%dt%dl%d for removal.\n",
- hostno, removed[i].bus,
- removed[i].target, removed[i].lun);
- }
- }
-
- /* Notify scsi mid layer of any added devices */
- for (i = 0; i < nadded; i++) {
- int rc;
- rc = scsi_add_device(sh, added[i].bus,
- added[i].target, added[i].lun);
- if (rc == 0)
- continue;
- dev_warn(&h->pdev->dev, "scsi_add_device "
- "c%db%dt%dl%d failed, device not added.\n",
- hostno, added[i].bus, added[i].target, added[i].lun);
- /* now we have to remove it from ccissscsi, */
- /* since it didn't get added to scsi mid layer */
- fixup_botched_add(h, added[i].scsi3addr);
- }
-
-free_and_out:
- kfree(added);
- kfree(removed);
- return 0;
-}
-
-static int
-lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr)
-{
- int i;
- struct cciss_scsi_dev_t *sd;
- unsigned long flags;
-
- CPQ_TAPE_LOCK(h, flags);
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- sd = &ccissscsi[h->ctlr].dev[i];
- if (sd->bus == bus &&
- sd->target == target &&
- sd->lun == lun) {
- memcpy(scsi3addr, &sd->scsi3addr[0], 8);
- CPQ_TAPE_UNLOCK(h, flags);
- return 0;
- }
- }
- CPQ_TAPE_UNLOCK(h, flags);
- return -1;
-}
-
-static void
-cciss_scsi_setup(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t * shba;
-
- ccissscsi[h->ctlr].ndevices = 0;
- shba = kmalloc(sizeof(*shba), GFP_KERNEL);
- if (shba == NULL)
- return;
- shba->scsi_host = NULL;
- spin_lock_init(&shba->lock);
- shba->registered = 0;
- if (scsi_cmd_stack_setup(h, shba) != 0) {
- kfree(shba);
- shba = NULL;
- }
- h->scsi_ctlr = shba;
- return;
-}
-
-static void complete_scsi_command(CommandList_struct *c, int timeout,
- __u32 tag)
-{
- struct scsi_cmnd *cmd;
- ctlr_info_t *h;
- ErrorInfo_struct *ei;
-
- ei = c->err_info;
-
- /* First, see if it was a message rather than a command */
- if (c->Request.Type.Type == TYPE_MSG) {
- c->cmd_type = CMD_MSG_DONE;
- return;
- }
-
- cmd = (struct scsi_cmnd *) c->scsi_cmd;
- h = hba[c->ctlr];
-
- scsi_dma_unmap(cmd);
- if (c->Header.SGTotal > h->max_cmd_sgentries)
- cciss_unmap_sg_chain_block(h, c);
-
- cmd->result = (DID_OK << 16); /* host byte */
- cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
- /* cmd->result |= (GOOD < 1); */ /* status byte */
-
- cmd->result |= (ei->ScsiStatus);
- /* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus); */
-
- /* copy the sense data whether we need to or not. */
-
- memcpy(cmd->sense_buffer, ei->SenseInfo,
- ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
- SCSI_SENSE_BUFFERSIZE :
- ei->SenseLen);
- scsi_set_resid(cmd, ei->ResidualCnt);
-
- if (ei->CommandStatus != 0) { /* an error has occurred */
- switch (ei->CommandStatus) {
- case CMD_TARGET_STATUS:
- /* Pass it up to the upper layers... */
- if (!ei->ScsiStatus) {
-
- /* Ordinarily, this case should never happen, but there is a bug
- in some released firmware revisions that allows it to happen
- if, for example, a 4100 backplane loses power and the tape
- drive is in it. We assume that it's a fatal error of some
- kind because we can't show that it wasn't. We will make it
- look like selection timeout since that is the most common
- reason for this to occur, and it's severe enough. */
-
- cmd->result = DID_NO_CONNECT << 16;
- }
- break;
- case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- break;
- case CMD_DATA_OVERRUN:
- dev_warn(&h->pdev->dev, "%p has"
- " completed with data overrun "
- "reported\n", c);
- break;
- case CMD_INVALID: {
- /*
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
- print_cmd(c);
- */
- /* We get CMD_INVALID if you address a non-existent tape drive instead
- of a selection timeout (no response). You will see this if you yank
- out a tape drive, then try to access it. This is kind of a shame
- because it means that any other CMD_INVALID (e.g. driver bug) will
- get interpreted as a missing target. */
- cmd->result = DID_NO_CONNECT << 16;
- }
- break;
- case CMD_PROTOCOL_ERR:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p has protocol error\n", c);
- break;
- case CMD_HARDWARE_ERR:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p had hardware error\n", c);
- break;
- case CMD_CONNECTION_LOST:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p had connection lost\n", c);
- break;
- case CMD_ABORTED:
- cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "%p was aborted\n", c);
- break;
- case CMD_ABORT_FAILED:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p reports abort failed\n", c);
- break;
- case CMD_UNSOLICITED_ABORT:
- cmd->result = DID_ABORT << 16;
- dev_warn(&h->pdev->dev, "%p aborted due to an "
- "unsolicited abort\n", c);
- break;
- case CMD_TIMEOUT:
- cmd->result = DID_TIME_OUT << 16;
- dev_warn(&h->pdev->dev, "%p timed out\n", c);
- break;
- case CMD_UNABORTABLE:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev, "c %p command "
- "unabortable\n", c);
- break;
- default:
- cmd->result = DID_ERROR << 16;
- dev_warn(&h->pdev->dev,
- "%p returned unknown status %x\n", c,
- ei->CommandStatus);
- }
- }
- cmd->scsi_done(cmd);
- scsi_cmd_free(h, c);
-}
-
-static int
-cciss_scsi_detect(ctlr_info_t *h)
-{
- struct Scsi_Host *sh;
- int error;
-
- sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
- if (sh == NULL)
- goto fail;
- sh->io_port = 0; // good enough? FIXME,
- sh->n_io_port = 0; // I don't think we use these two...
- sh->this_id = SELF_SCSI_ID;
- sh->can_queue = cciss_tape_cmds;
- sh->sg_tablesize = h->maxsgentries;
- sh->max_cmd_len = MAX_COMMAND_SIZE;
- sh->max_sectors = h->cciss_max_sectors;
-
- ((struct cciss_scsi_adapter_data_t *)
- h->scsi_ctlr)->scsi_host = sh;
- sh->hostdata[0] = (unsigned long) h;
- sh->irq = h->intr[SIMPLE_MODE_INT];
- sh->unique_id = sh->irq;
- error = scsi_add_host(sh, &h->pdev->dev);
- if (error)
- goto fail_host_put;
- scsi_scan_host(sh);
- return 1;
-
- fail_host_put:
- scsi_host_put(sh);
- fail:
- return 0;
-}
-
-static void
-cciss_unmap_one(struct pci_dev *pdev,
- CommandList_struct *c,
- size_t buflen,
- int data_direction)
-{
- u64bit addr64;
-
- addr64.val32.lower = c->SG[0].Addr.lower;
- addr64.val32.upper = c->SG[0].Addr.upper;
- pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
-}
-
-static void
-cciss_map_one(struct pci_dev *pdev,
- CommandList_struct *c,
- unsigned char *buf,
- size_t buflen,
- int data_direction)
-{
- __u64 addr64;
-
- addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
- c->SG[0].Addr.lower =
- (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
- c->SG[0].Addr.upper =
- (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
- c->SG[0].Len = buflen;
- c->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */
- c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
-}
-
-static int
-cciss_scsi_do_simple_cmd(ctlr_info_t *h,
- CommandList_struct *c,
- unsigned char *scsi3addr,
- unsigned char *cdb,
- unsigned char cdblen,
- unsigned char *buf, int bufsize,
- int direction)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
-
- c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */
- c->scsi_cmd = NULL;
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN));
- c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
- // Fill in the request block...
-
- /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n",
- scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
- scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
-
- memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
- memcpy(c->Request.CDB, cdb, cdblen);
- c->Request.Timeout = 0;
- c->Request.CDBLen = cdblen;
- c->Request.Type.Type = TYPE_CMD;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- c->Request.Type.Direction = direction;
-
- /* Fill in the SG list and do dma mapping */
- cciss_map_one(h->pdev, c, (unsigned char *) buf,
- bufsize, DMA_FROM_DEVICE);
-
- c->waiting = &wait;
- enqueue_cmd_and_start_io(h, c);
- wait_for_completion(&wait);
-
- /* undo the dma mapping */
- cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE);
- return(0);
-}
-
-static void
-cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c)
-{
- ErrorInfo_struct *ei;
-
- ei = c->err_info;
- switch (ei->CommandStatus) {
- case CMD_TARGET_STATUS:
- dev_warn(&h->pdev->dev,
- "cmd %p has completed with errors\n", c);
- dev_warn(&h->pdev->dev,
- "cmd %p has SCSI Status = %x\n",
- c, ei->ScsiStatus);
- if (ei->ScsiStatus == 0)
- dev_warn(&h->pdev->dev,
- "SCSI status is abnormally zero. "
- "(probably indicates selection timeout "
- "reported incorrectly due to a known "
- "firmware bug, circa July, 2001.)\n");
- break;
- case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
- dev_info(&h->pdev->dev, "UNDERRUN\n");
- break;
- case CMD_DATA_OVERRUN:
- dev_warn(&h->pdev->dev, "%p has"
- " completed with data overrun "
- "reported\n", c);
- break;
- case CMD_INVALID: {
- /* controller unfortunately reports SCSI passthru's */
- /* to non-existent targets as invalid commands. */
- dev_warn(&h->pdev->dev,
- "%p is reported invalid (probably means "
- "target device no longer present)\n", c);
- /*
- print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1, c, sizeof(*c), false);
- print_cmd(c);
- */
- }
- break;
- case CMD_PROTOCOL_ERR:
- dev_warn(&h->pdev->dev, "%p has protocol error\n", c);
- break;
- case CMD_HARDWARE_ERR:
- /* cmd->result = DID_ERROR << 16; */
- dev_warn(&h->pdev->dev, "%p had hardware error\n", c);
- break;
- case CMD_CONNECTION_LOST:
- dev_warn(&h->pdev->dev, "%p had connection lost\n", c);
- break;
- case CMD_ABORTED:
- dev_warn(&h->pdev->dev, "%p was aborted\n", c);
- break;
- case CMD_ABORT_FAILED:
- dev_warn(&h->pdev->dev,
- "%p reports abort failed\n", c);
- break;
- case CMD_UNSOLICITED_ABORT:
- dev_warn(&h->pdev->dev,
- "%p aborted due to an unsolicited abort\n", c);
- break;
- case CMD_TIMEOUT:
- dev_warn(&h->pdev->dev, "%p timed out\n", c);
- break;
- case CMD_UNABORTABLE:
- dev_warn(&h->pdev->dev,
- "%p unabortable\n", c);
- break;
- default:
- dev_warn(&h->pdev->dev,
- "%p returned unknown status %x\n",
- c, ei->CommandStatus);
- }
-}
-
-static int
-cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr,
- unsigned char page, unsigned char *buf,
- unsigned char bufsize)
-{
- int rc;
- CommandList_struct *c;
- char cdb[6];
- ErrorInfo_struct *ei;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- c = scsi_cmd_alloc(h);
- spin_unlock_irqrestore(&h->lock, flags);
-
- if (c == NULL) { /* trouble... */
- printk("cmd_alloc returned NULL!\n");
- return -1;
- }
-
- ei = c->err_info;
-
- cdb[0] = CISS_INQUIRY;
- cdb[1] = (page != 0);
- cdb[2] = page;
- cdb[3] = 0;
- cdb[4] = bufsize;
- cdb[5] = 0;
- rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb,
- 6, buf, bufsize, XFER_READ);
-
- if (rc != 0) return rc; /* something went wrong */
-
- if (ei->CommandStatus != 0 &&
- ei->CommandStatus != CMD_DATA_UNDERRUN) {
- cciss_scsi_interpret_error(h, c);
- rc = -1;
- }
- spin_lock_irqsave(&h->lock, flags);
- scsi_cmd_free(h, c);
- spin_unlock_irqrestore(&h->lock, flags);
- return rc;
-}
-
-/* Get the device id from inquiry page 0x83 */
-static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr,
- unsigned char *device_id, int buflen)
-{
- int rc;
- unsigned char *buf;
-
- if (buflen > 16)
- buflen = 16;
- buf = kzalloc(64, GFP_KERNEL);
- if (!buf)
- return -1;
- rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
- if (rc == 0)
- memcpy(device_id, &buf[8], buflen);
- kfree(buf);
- return rc != 0;
-}
-
-static int
-cciss_scsi_do_report_phys_luns(ctlr_info_t *h,
- ReportLunData_struct *buf, int bufsize)
-{
- int rc;
- CommandList_struct *c;
- unsigned char cdb[12];
- unsigned char scsi3addr[8];
- ErrorInfo_struct *ei;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- c = scsi_cmd_alloc(h);
- spin_unlock_irqrestore(&h->lock, flags);
- if (c == NULL) { /* trouble... */
- printk("cmd_alloc returned NULL!\n");
- return -1;
- }
-
- memset(&scsi3addr[0], 0, 8); /* address the controller */
- cdb[0] = CISS_REPORT_PHYS;
- cdb[1] = 0;
- cdb[2] = 0;
- cdb[3] = 0;
- cdb[4] = 0;
- cdb[5] = 0;
- cdb[6] = (bufsize >> 24) & 0xFF; //MSB
- cdb[7] = (bufsize >> 16) & 0xFF;
- cdb[8] = (bufsize >> 8) & 0xFF;
- cdb[9] = bufsize & 0xFF;
- cdb[10] = 0;
- cdb[11] = 0;
-
- rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr,
- cdb, 12,
- (unsigned char *) buf,
- bufsize, XFER_READ);
-
- if (rc != 0) return rc; /* something went wrong */
-
- ei = c->err_info;
- if (ei->CommandStatus != 0 &&
- ei->CommandStatus != CMD_DATA_UNDERRUN) {
- cciss_scsi_interpret_error(h, c);
- rc = -1;
- }
- spin_lock_irqsave(&h->lock, flags);
- scsi_cmd_free(h, c);
- spin_unlock_irqrestore(&h->lock, flags);
- return rc;
-}
-
-static void
-cciss_update_non_disk_devices(ctlr_info_t *h, int hostno)
-{
- /* the idea here is we could get notified from /proc
- that some devices have changed, so we do a report
- physical luns cmd, and adjust our list of devices
- accordingly. (We can't rely on the scsi-mid layer just
- doing inquiries, because the "busses" that the scsi
- mid-layer probes are totally fabricated by this driver,
- so new devices wouldn't show up.)
-
- the scsi3addr's of devices won't change so long as the
- adapter is not reset. That means we can rescan and
- tell which devices we already know about, vs. new
- devices, vs. disappearing devices.
-
- Also, if you yank out a tape drive, then put in a disk
- in its place (say, a configured volume from another
- array controller, for instance), _don't_ poke this driver
- (so it thinks it's still a tape), but _do_ poke the scsi
- mid layer, so it does an inquiry... the scsi mid layer
- will see the physical disk. This would be bad. Need to
- think about how to prevent that. One idea would be to
- snoop all scsi responses and if an inquiry response comes
- back that reports a disk, chuck it and return selection
- timeout instead and adjust our table... Not sure I like
- that though.
-
- */
-#define OBDR_TAPE_INQ_SIZE 49
-#define OBDR_TAPE_SIG "$DR-10"
- ReportLunData_struct *ld_buff;
- unsigned char *inq_buff;
- unsigned char scsi3addr[8];
- __u32 num_luns=0;
- unsigned char *ch;
- struct cciss_scsi_dev_t *currentsd, *this_device;
- int ncurrent=0;
- int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
- int i;
-
- ld_buff = kzalloc(reportlunsize, GFP_KERNEL);
- inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
- currentsd = kzalloc(sizeof(*currentsd) *
- (CCISS_MAX_SCSI_DEVS_PER_HBA+1), GFP_KERNEL);
- if (ld_buff == NULL || inq_buff == NULL || currentsd == NULL) {
- printk(KERN_ERR "cciss: out of memory\n");
- goto out;
- }
- this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
- if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) {
- ch = &ld_buff->LUNListLength[0];
- num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
- if (num_luns > CISS_MAX_PHYS_LUN) {
- printk(KERN_WARNING
- "cciss: Maximum physical LUNs (%d) exceeded. "
- "%d LUNs ignored.\n", CISS_MAX_PHYS_LUN,
- num_luns - CISS_MAX_PHYS_LUN);
- num_luns = CISS_MAX_PHYS_LUN;
- }
- }
- else {
- printk(KERN_ERR "cciss: Report physical LUNs failed.\n");
- goto out;
- }
-
-
- /* adjust our table of devices */
- for (i = 0; i < num_luns; i++) {
- /* for each physical lun, do an inquiry */
- if (ld_buff->LUN[i][3] & 0xC0) continue;
- memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE);
- memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
-
- if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
- (unsigned char) OBDR_TAPE_INQ_SIZE) != 0)
- /* Inquiry failed (msg printed already) */
- continue; /* so we will skip this device. */
-
- this_device->devtype = (inq_buff[0] & 0x1f);
- this_device->bus = -1;
- this_device->target = -1;
- this_device->lun = -1;
- memcpy(this_device->scsi3addr, scsi3addr, 8);
- memcpy(this_device->vendor, &inq_buff[8],
- sizeof(this_device->vendor));
- memcpy(this_device->model, &inq_buff[16],
- sizeof(this_device->model));
- memcpy(this_device->revision, &inq_buff[32],
- sizeof(this_device->revision));
- memset(this_device->device_id, 0,
- sizeof(this_device->device_id));
- cciss_scsi_get_device_id(h, scsi3addr,
- this_device->device_id, sizeof(this_device->device_id));
-
- switch (this_device->devtype) {
- case 0x05: /* CD-ROM */ {
-
- /* We don't *really* support actual CD-ROM devices,
- * just this "One Button Disaster Recovery" tape drive
- * which temporarily pretends to be a CD-ROM drive.
- * So we check that the device is really an OBDR tape
- * device by checking for "$DR-10" in bytes 43-48 of
- * the inquiry data.
- */
- char obdr_sig[7];
-
- strncpy(obdr_sig, &inq_buff[43], 6);
- obdr_sig[6] = '\0';
- if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0)
- /* Not OBDR device, ignore it. */
- break;
- }
- /* fall through . . . */
- case 0x01: /* sequential access, (tape) */
- case 0x08: /* medium changer */
- if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
- printk(KERN_INFO "cciss%d: %s ignored, "
- "too many devices.\n", h->ctlr,
- scsi_device_type(this_device->devtype));
- break;
- }
- currentsd[ncurrent] = *this_device;
- ncurrent++;
- break;
- default:
- break;
- }
- }
-
- adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent);
-out:
- kfree(inq_buff);
- kfree(ld_buff);
- kfree(currentsd);
- return;
-}
-
-static int
-is_keyword(char *ptr, int len, char *verb) // Thanks to ncr53c8xx.c
-{
- int verb_len = strlen(verb);
- if (len >= verb_len && !memcmp(verb,ptr,verb_len))
- return verb_len;
- else
- return 0;
-}
-
-static int
-cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length)
-{
- int arg_len;
-
- if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
- cciss_update_non_disk_devices(h, hostno);
- else
- return -EINVAL;
- return length;
-}
-
-static int
-cciss_scsi_write_info(struct Scsi_Host *sh,
- char *buffer, /* data buffer */
- int length) /* length of data in buffer */
-{
- ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
- if (h == NULL) /* This really shouldn't ever happen. */
- return -EINVAL;
-
- return cciss_scsi_user_command(h, sh->host_no,
- buffer, length);
-}
-
-static int
-cciss_scsi_show_info(struct seq_file *m, struct Scsi_Host *sh)
-{
-
- ctlr_info_t *h = (ctlr_info_t *) sh->hostdata[0];
- int i;
-
- if (h == NULL) /* This really shouldn't ever happen. */
- return -EINVAL;
-
- seq_printf(m, "cciss%d: SCSI host: %d\n",
- h->ctlr, sh->host_no);
-
- /* this information is needed by apps to know which cciss
- device corresponds to which scsi host number without
- having to open a scsi target device node. The device
- information is not a duplicate of /proc/scsi/scsi because
- the two may be out of sync due to scsi hotplug, rather
- this info is for an app to be able to use to know how to
- get them back in sync. */
-
- for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) {
- struct cciss_scsi_dev_t *sd =
- &ccissscsi[h->ctlr].dev[i];
- seq_printf(m, "c%db%dt%dl%d %02d "
- "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- sh->host_no, sd->bus, sd->target, sd->lun,
- sd->devtype,
- sd->scsi3addr[0], sd->scsi3addr[1],
- sd->scsi3addr[2], sd->scsi3addr[3],
- sd->scsi3addr[4], sd->scsi3addr[5],
- sd->scsi3addr[6], sd->scsi3addr[7]);
- }
- return 0;
-}
-
-/* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
- dma mapping and fills in the scatter gather entries of the
- cciss command, c. */
-
-static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
- struct scsi_cmnd *cmd)
-{
- unsigned int len;
- struct scatterlist *sg;
- __u64 addr64;
- int request_nsgs, i, chained, sg_index;
- struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr;
- SGDescriptor_struct *curr_sg;
-
- BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
-
- chained = 0;
- sg_index = 0;
- curr_sg = c->SG;
- request_nsgs = scsi_dma_map(cmd);
- if (request_nsgs) {
- scsi_for_each_sg(cmd, sg, request_nsgs, i) {
- if (sg_index + 1 == h->max_cmd_sgentries &&
- !chained && request_nsgs - i > 1) {
- chained = 1;
- sg_index = 0;
- curr_sg = sa->cmd_sg_list[c->cmdindex];
- }
- addr64 = (__u64) sg_dma_address(sg);
- len = sg_dma_len(sg);
- curr_sg[sg_index].Addr.lower =
- (__u32) (addr64 & 0x0FFFFFFFFULL);
- curr_sg[sg_index].Addr.upper =
- (__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
- curr_sg[sg_index].Len = len;
- curr_sg[sg_index].Ext = 0;
- ++sg_index;
- }
- if (chained)
- cciss_map_sg_chain_block(h, c,
- sa->cmd_sg_list[c->cmdindex],
- (request_nsgs - (h->max_cmd_sgentries - 1)) *
- sizeof(SGDescriptor_struct));
- }
- /* track how many SG entries we are using */
- if (request_nsgs > h->maxSG)
- h->maxSG = request_nsgs;
- c->Header.SGTotal = (u16) request_nsgs + chained;
- if (request_nsgs > h->max_cmd_sgentries)
- c->Header.SGList = h->max_cmd_sgentries;
- else
- c->Header.SGList = c->Header.SGTotal;
- return;
-}
-
-
-static int
-cciss_scsi_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
-{
- ctlr_info_t *h;
- int rc;
- unsigned char scsi3addr[8];
- CommandList_struct *c;
- unsigned long flags;
-
- // Get the ptr to our adapter structure (hba[i]) out of cmd->host.
- // We violate cmd->host privacy here. (Is there another way?)
- h = (ctlr_info_t *) cmd->device->host->hostdata[0];
-
- rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id,
- cmd->device->lun, scsi3addr);
- if (rc != 0) {
- /* the scsi nexus does not match any that we presented... */
- /* pretend to mid layer that we got selection timeout */
- cmd->result = DID_NO_CONNECT << 16;
- done(cmd);
- /* we might want to think about registering controller itself
- as a processor device on the bus so sg binds to it. */
- return 0;
- }
-
- /* Ok, we have a reasonable scsi nexus, so send the cmd down, and
- see what the device thinks of it. */
-
- spin_lock_irqsave(&h->lock, flags);
- c = scsi_cmd_alloc(h);
- spin_unlock_irqrestore(&h->lock, flags);
- if (c == NULL) { /* trouble... */
- dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n");
- /* FIXME: next 3 lines are -> BAD! <- */
- cmd->result = DID_NO_CONNECT << 16;
- done(cmd);
- return 0;
- }
-
- // Fill in the command list header
-
- cmd->scsi_done = done; // save this for use by completion code
-
- /* save c in case we have to abort it */
- cmd->host_scribble = (unsigned char *) c;
-
- c->cmd_type = CMD_SCSI;
- c->scsi_cmd = cmd;
- c->Header.ReplyQueue = 0; /* unused in simple mode */
- memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
- c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */
-
- // Fill in the request block...
-
- c->Request.Timeout = 0;
- memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
- BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
- c->Request.CDBLen = cmd->cmd_len;
- memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
- c->Request.Type.Type = TYPE_CMD;
- c->Request.Type.Attribute = ATTR_SIMPLE;
- switch (cmd->sc_data_direction) {
- case DMA_TO_DEVICE:
- c->Request.Type.Direction = XFER_WRITE;
- break;
- case DMA_FROM_DEVICE:
- c->Request.Type.Direction = XFER_READ;
- break;
- case DMA_NONE:
- c->Request.Type.Direction = XFER_NONE;
- break;
- case DMA_BIDIRECTIONAL:
- // This can happen if a buggy application does a scsi passthru
- // and sets both inlen and outlen to non-zero. ( see
- // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
-
- c->Request.Type.Direction = XFER_RSVD;
- // This is technically wrong, and cciss controllers should
- // reject it with CMD_INVALID, which is the most correct
- // response, but non-fibre backends appear to let it
- // slide by, and give the same results as if this field
- // were set correctly. Either way is acceptable for
- // our purposes here.
-
- break;
-
- default:
- dev_warn(&h->pdev->dev, "unknown data direction: %d\n",
- cmd->sc_data_direction);
- BUG();
- break;
- }
- cciss_scatter_gather(h, c, cmd);
- enqueue_cmd_and_start_io(h, c);
- /* the cmd'll come back via intr handler in complete_scsi_command() */
- return 0;
-}
-
-static DEF_SCSI_QCMD(cciss_scsi_queue_command)
-
-static void cciss_unregister_scsi(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- unsigned long flags;
-
- /* we are being forcibly unloaded, and may not refuse. */
-
- spin_lock_irqsave(&h->lock, flags);
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-
- /* if we weren't ever actually registered, don't unregister */
- if (sa->registered) {
- spin_unlock_irqrestore(&h->lock, flags);
- scsi_remove_host(sa->scsi_host);
- scsi_host_put(sa->scsi_host);
- spin_lock_irqsave(&h->lock, flags);
- }
-
- /* set scsi_host to NULL so our detect routine will
- find us on register */
- sa->scsi_host = NULL;
- spin_unlock_irqrestore(&h->lock, flags);
- scsi_cmd_stack_free(h);
- kfree(sa);
-}
-
-static int cciss_engage_scsi(ctlr_info_t *h)
-{
- struct cciss_scsi_adapter_data_t *sa;
- struct cciss_scsi_cmd_stack_t *stk;
- unsigned long flags;
-
- spin_lock_irqsave(&h->lock, flags);
- sa = h->scsi_ctlr;
- stk = &sa->cmd_stack;
-
- if (sa->registered) {
- dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n");
- spin_unlock_irqrestore(&h->lock, flags);
- return -ENXIO;
- }
- sa->registered = 1;
- spin_unlock_irqrestore(&h->lock, flags);
- cciss_update_non_disk_devices(h, -1);
- cciss_scsi_detect(h);
- return 0;
-}
-
-static void
-cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h)
-{
- unsigned long flags;
-
- CPQ_TAPE_LOCK(h, flags);
- seq_printf(seq,
- "Sequential access devices: %d\n\n",
- ccissscsi[h->ctlr].ndevices);
- CPQ_TAPE_UNLOCK(h, flags);
-}
-
-static int wait_for_device_to_become_ready(ctlr_info_t *h,
- unsigned char lunaddr[])
-{
- int rc;
- int count = 0;
- int waittime = HZ;
- CommandList_struct *c;
-
- c = cmd_alloc(h);
- if (!c) {
- dev_warn(&h->pdev->dev, "out of memory in "
- "wait_for_device_to_become_ready.\n");
- return IO_ERROR;
- }
-
- /* Send test unit ready until device ready, or give up. */
- while (count < 20) {
-
- /* Wait for a bit. do this first, because if we send
- * the TUR right away, the reset will just abort it.
- */
- schedule_timeout_uninterruptible(waittime);
- count++;
-
- /* Increase wait time with each try, up to a point. */
- if (waittime < (HZ * 30))
- waittime = waittime * 2;
-
- /* Send the Test Unit Ready */
- rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0,
- lunaddr, TYPE_CMD);
- if (rc == 0)
- rc = sendcmd_withirq_core(h, c, 0);
-
- (void) process_sendcmd_error(h, c);
-
- if (rc != 0)
- goto retry_tur;
-
- if (c->err_info->CommandStatus == CMD_SUCCESS)
- break;
-
- if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
- c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
- if (c->err_info->SenseInfo[2] == NO_SENSE)
- break;
- if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) {
- unsigned char asc;
- asc = c->err_info->SenseInfo[12];
- check_for_unit_attention(h, c);
- if (asc == POWER_OR_RESET)
- break;
- }
- }
-retry_tur:
- dev_warn(&h->pdev->dev, "Waiting %d secs "
- "for device to become ready.\n",
- waittime / HZ);
- rc = 1; /* device not ready. */
- }
-
- if (rc)
- dev_warn(&h->pdev->dev, "giving up on device.\n");
- else
- dev_warn(&h->pdev->dev, "device is ready.\n");
-
- cmd_free(h, c);
- return rc;
-}
-
-/* Need at least one of these error handlers to keep ../scsi/hosts.c from
- * complaining. Doing a host- or bus-reset can't do anything good here.
- * Despite what it might say in scsi_error.c, there may well be commands
- * on the controller, as the cciss driver registers twice, once as a block
- * device for the logical drives, and once as a scsi device, for any tape
- * drives. So we know there are no commands out on the tape drives, but we
- * don't know there are no commands on the controller, and it is likely
- * that there probably are, as the cciss block device is most commonly used
- * as a boot device (embedded controller on HP/Compaq systems.)
-*/
-
-static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
-{
- int rc;
- CommandList_struct *cmd_in_trouble;
- unsigned char lunaddr[8];
- ctlr_info_t *h;
-
- /* find the controller to which the command to be aborted was sent */
- h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
- if (h == NULL) /* paranoia */
- return FAILED;
- dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n");
- /* find the command that's giving us trouble */
- cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_in_trouble == NULL) /* paranoia */
- return FAILED;
- memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8);
- /* send a reset to the SCSI LUN which the command was sent to */
- rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr,
- TYPE_MSG);
- if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0)
- return SUCCESS;
- dev_warn(&h->pdev->dev, "resetting device failed.\n");
- return FAILED;
-}
-
-static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd)
-{
- int rc;
- CommandList_struct *cmd_to_abort;
- unsigned char lunaddr[8];
- ctlr_info_t *h;
-
- /* find the controller to which the command to be aborted was sent */
- h = (ctlr_info_t *) scsicmd->device->host->hostdata[0];
- if (h == NULL) /* paranoia */
- return FAILED;
- dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n");
-
- /* find the command to be aborted */
- cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble;
- if (cmd_to_abort == NULL) /* paranoia */
- return FAILED;
- memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8);
- rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag,
- 0, 0, lunaddr, TYPE_MSG);
- if (rc == 0)
- return SUCCESS;
- return FAILED;
-
-}
-
-#else /* no CONFIG_CISS_SCSI_TAPE */
-
-/* If no tape support, then these become defined out of existence */
-
-#define cciss_scsi_setup(cntl_num)
-#define cciss_engage_scsi(h)
-
-#endif /* CONFIG_CISS_SCSI_TAPE */
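The deleted scsi_cmd_alloc()/scsi_cmd_free()/scsi_cmd_stack_setup() trio above manages a pre-allocated, DMA-coherent pool of commands as a simple LIFO free list, with locking left to the caller. Below is a minimal, self-contained C sketch of that pattern; all names (struct cmd_stack, cmd_alloc, cmd_free) are illustrative stand-ins, not part of the driver.

/* Minimal sketch of the LIFO command-pool idea (hypothetical names). */
#include <stdlib.h>

struct cmd {                    /* stand-in for CommandList_struct + error info */
	int index;
	char payload[64];
};

struct cmd_stack {
	struct cmd *pool;       /* one contiguous allocation, like the DMA pool */
	struct cmd **elem;      /* stack of pointers to free elements */
	int top;                /* index of topmost free element, -1 when empty */
	int nelems;
};

static int cmd_stack_setup(struct cmd_stack *stk, int nelems)
{
	int i;

	stk->pool = calloc(nelems, sizeof(*stk->pool));
	stk->elem = calloc(nelems, sizeof(*stk->elem));
	if (!stk->pool || !stk->elem)
		return -1;
	for (i = 0; i < nelems; i++) {
		stk->pool[i].index = i;
		stk->elem[i] = &stk->pool[i];
	}
	stk->nelems = nelems;
	stk->top = nelems - 1;          /* everything starts out free */
	return 0;
}

static struct cmd *cmd_alloc(struct cmd_stack *stk)
{
	if (stk->top < 0)
		return NULL;            /* pool exhausted */
	return stk->elem[stk->top--];   /* pop; caller is assumed to hold the lock */
}

static void cmd_free(struct cmd_stack *stk, struct cmd *c)
{
	stk->elem[++stk->top] = c;      /* push back; caller is assumed to hold the lock */
}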
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
deleted file mode 100644
index e71d986727ca..000000000000
--- a/drivers/block/cciss_scsi.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Disk Array driver for HP Smart Array controllers, SCSI Tape module.
- * (C) Copyright 2001, 2007 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 300, Boston, MA
- * 02111-1307, USA.
- *
- * Questions/Comments/Bugfixes to iss_storagedev@hp.com
- *
- */
-#ifdef CONFIG_CISS_SCSI_TAPE
-#ifndef _CCISS_SCSI_H_
-#define _CCISS_SCSI_H_
-
-#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
-
- /* the scsi id of the adapter... */
-#define SELF_SCSI_ID 15
- /* 15 is somewhat arbitrary, since the scsi-2 bus
- that's presented by the driver to the OS is
- fabricated. The "real" scsi-3 bus the
- hardware presents is fabricated too.
- The actual, honest-to-goodness physical
- bus that the devices are attached to is not
- addressable natively, and may in fact turn
- out to be not scsi at all. */
-
-
-/*
-
-If the upper scsi layer tries to track how many commands we have
-outstanding, it will be operating under the misapprehension that it is
-the only one sending us requests. We also have the block interface,
-which is where most requests must surely come from, so the upper layer's
-notion of how many requests we have outstanding will be wrong most or
-all of the time.
-
-Note, the normal SCSI mid-layer error handling doesn't work well
-for this driver because 1) it takes the io_request_lock before
-calling error handlers and uses a local variable to store flags,
-so the io_request_lock cannot be released and interrupts enabled
-inside the error handlers, and 2) the error handlers cannot poll
-for command completion because they might get commands from the
-block half of the driver completing, and not know what to do
-with them. That's what we get for making a hybrid scsi/block
-driver, I suppose.
-
-*/
-
-struct cciss_scsi_dev_t {
- int devtype;
- int bus, target, lun; /* as presented to the OS */
- unsigned char scsi3addr[8]; /* as presented to the HW */
- unsigned char device_id[16]; /* from inquiry pg. 0x83 */
- unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
- unsigned char model[16]; /* bytes 16-31 of inquiry data */
- unsigned char revision[4]; /* bytes 32-35 of inquiry data */
-};
-
-struct cciss_scsi_hba_t {
- char *name;
- int ndevices;
-#define CCISS_MAX_SCSI_DEVS_PER_HBA 16
- struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
-};
-
-#endif /* _CCISS_SCSI_H_ */
-#endif /* CONFIG_CISS_SCSI_TAPE */
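The vendor/model/revision fields of the deleted cciss_scsi_dev_t are copied straight out of a standard INQUIRY response at the byte offsets the comments mention (8-15, 16-31, 32-35). A small sketch of that extraction, assuming a response buffer of at least 36 bytes; the helper name and struct are made up for illustration.

/* Sketch: pulling identification strings out of a standard INQUIRY response. */
#include <string.h>

struct inq_id {
	char vendor[8 + 1];
	char model[16 + 1];
	char revision[4 + 1];
};

static void parse_inquiry_id(const unsigned char *inq, struct inq_id *id)
{
	memcpy(id->vendor, &inq[8], 8);
	id->vendor[8] = '\0';
	memcpy(id->model, &inq[16], 16);
	id->model[16] = '\0';
	memcpy(id->revision, &inq[32], 4);
	id->revision[4] = '\0';
}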
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e02c45cd3c5a..5f0eaee8c8a7 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO);
- bio->bi_bdev = bdev->md_bdev;
+ bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector;
err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size)
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 809fd245c3dc..bd97908c766f 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr);
} else
page = b->bm_pages[page_nr];
- bio->bi_bdev = device->ldev->md_bdev;
+ bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */
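The two drbd hunks above are part of the tree-wide switch from assigning bio->bi_bdev directly to calling the bio_set_dev() helper, which fills in bi_disk/bi_partno consistently. A minimal sketch of what such a conversion looks like at a call site; the function and parameter names here are invented for illustration, not drbd code.

/* Hypothetical helper contrasting the old and new way to point a bio at a
 * block device; md_bdev is just an illustrative struct block_device *. */
#include <linux/bio.h>

static void point_bio_at_md(struct bio *bio, struct block_device *md_bdev,
			    sector_t sector)
{
	/* old style, removed by this series:
	 *   bio->bi_bdev = md_bdev;
	 */

	/* new style: the helper sets bi_disk and bi_partno together */
	bio_set_dev(bio, md_bdev);
	bio->bi_iter.bi_sector = sector;
}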
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index d17b6e6393c7..7e8589ce631c 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -63,19 +63,15 @@
# define __must_hold(x)
#endif
-/* module parameter, defined in drbd_main.c */
-extern unsigned int minor_count;
-extern bool disable_sendpage;
-extern bool allow_oos;
-void tl_abort_disk_io(struct drbd_device *device);
-
+/* shared module parameters, defined in drbd_main.c */
#ifdef CONFIG_DRBD_FAULT_INJECTION
-extern int enable_faults;
-extern int fault_rate;
-extern int fault_devs;
+extern int drbd_enable_faults;
+extern int drbd_fault_rate;
#endif
-extern char usermode_helper[];
+extern unsigned int drbd_minor_count;
+extern char drbd_usermode_helper[];
+extern int drbd_proc_details;
/* This is used to stop/restart our threads.
@@ -181,8 +177,8 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_device *device, unsigned int type) {
#ifdef CONFIG_DRBD_FAULT_INJECTION
- return fault_rate &&
- (enable_faults & (1<<type)) &&
+ return drbd_fault_rate &&
+ (drbd_enable_faults & (1<<type)) &&
_drbd_insert_fault(device, type);
#else
return 0;
@@ -745,6 +741,8 @@ struct drbd_connection {
unsigned current_tle_writes; /* writes seen within this tl epoch */
unsigned long last_reconnect_jif;
+ /* empty member on older kernels without blk_start_plug() */
+ struct blk_plug receiver_plug;
struct drbd_thread receiver;
struct drbd_thread worker;
struct drbd_thread ack_receiver;
@@ -1131,7 +1129,8 @@ extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_sta
extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
extern void drbd_device_cleanup(struct drbd_device *device);
-void drbd_print_uuids(struct drbd_device *device, const char *text);
+extern void drbd_print_uuids(struct drbd_device *device, const char *text);
+extern void drbd_queue_unplug(struct drbd_device *device);
extern void conn_md_sync(struct drbd_connection *connection);
extern void drbd_md_write(struct drbd_device *device, void *buffer);
@@ -1463,8 +1462,6 @@ extern struct drbd_resource *drbd_find_resource(const char *name);
extern void drbd_destroy_resource(struct kref *kref);
extern void conn_free_crypto(struct drbd_connection *connection);
-extern int proc_details;
-
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
@@ -1628,8 +1625,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio)
{
__release(local);
- if (!bio->bi_bdev) {
- drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n");
+ if (!bio->bi_disk) {
+ drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
return;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index e2ed28d45ce1..8cb3791898ae 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -77,41 +77,41 @@ MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
-/* allow_open_on_secondary */
-MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
- * this becomes the boot parameter drbd.minor_count */
-module_param(minor_count, uint, 0444);
-module_param(disable_sendpage, bool, 0644);
-module_param(allow_oos, bool, 0);
-module_param(proc_details, int, 0644);
+ * these become boot parameters (e.g., drbd.minor_count) */
#ifdef CONFIG_DRBD_FAULT_INJECTION
-int enable_faults;
-int fault_rate;
-static int fault_count;
-int fault_devs;
+int drbd_enable_faults;
+int drbd_fault_rate;
+static int drbd_fault_count;
+static int drbd_fault_devs;
/* bitmap of enabled faults */
-module_param(enable_faults, int, 0664);
+module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
-module_param(fault_rate, int, 0664);
+module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
-module_param(fault_count, int, 0664);
+module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
-module_param(fault_devs, int, 0644);
+module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif
-/* module parameter, defined */
-unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
-bool disable_sendpage;
-bool allow_oos;
-int proc_details; /* Detail level in proc drbd*/
-
+/* module parameters we can keep static */
+static bool drbd_allow_oos; /* allow_open_on_secondary */
+static bool drbd_disable_sendpage;
+MODULE_PARM_DESC(allow_oos, "DONT USE!");
+module_param_named(allow_oos, drbd_allow_oos, bool, 0);
+module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);
+
+/* module parameters we share */
+int drbd_proc_details; /* Detail level in proc drbd*/
+module_param_named(proc_details, drbd_proc_details, int, 0644);
+/* module parameters shared with defaults */
+unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
* to run. Default is /sbin/drbdadm */
-char usermode_helper[80] = "/sbin/drbdadm";
-
-module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
+char drbd_usermode_helper[80] = "/sbin/drbdadm";
+module_param_named(minor_count, drbd_minor_count, uint, 0444);
+module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
* as member "struct gendisk *vdisk;"
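The renaming above keeps every user-visible parameter name stable while prefixing the backing C variables, by switching from module_param() to module_param_named(). A tiny, hypothetical module skeleton showing how the two names are decoupled (the module and parameter below are examples only, not drbd code):

/* Illustrative-only skeleton: the user sets "example.verbose=1", while the
 * C variable carries a prefixed internal name. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int example_verbose;	/* internal name, safe to prefix or rename */
module_param_named(verbose, example_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "set to non-zero for extra logging");

MODULE_LICENSE("GPL");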
@@ -923,7 +923,9 @@ void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
}
/* communicated if (agreed_features & DRBD_FF_WSAME) */
-void assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p, struct request_queue *q)
+static void
+assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
+ struct request_queue *q)
{
if (q) {
p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
@@ -1560,7 +1562,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
* put_page(); and would cause either a VM_BUG directly, or
* __page_cache_release a page that would actually still be referenced
* by someone, leading to some obscure delayed Oops somewhere else. */
- if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+ if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL;
@@ -1932,7 +1934,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (device->state.role != R_PRIMARY) {
if (mode & FMODE_WRITE)
rv = -EROFS;
- else if (!allow_oos)
+ else if (!drbd_allow_oos)
rv = -EMEDIUMTYPE;
}
@@ -1952,6 +1954,19 @@ static void drbd_release(struct gendisk *gd, fmode_t mode)
mutex_unlock(&drbd_main_mutex);
}
+/* need to hold resource->req_lock */
+void drbd_queue_unplug(struct drbd_device *device)
+{
+ if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
+ D_ASSERT(device, device->state.role == R_PRIMARY);
+ if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
+ drbd_queue_work_if_unqueued(
+ &first_peer_device(device)->connection->sender_work,
+ &device->unplug_work);
+ }
+ }
+}
+
static void drbd_set_defaults(struct drbd_device *device)
{
/* Beware! The actual layout differs
@@ -2008,18 +2023,14 @@ void drbd_init_set_defaults(struct drbd_device *device)
device->unplug_work.cb = w_send_write_hint;
device->bm_io_work.w.cb = w_bitmap_io;
- init_timer(&device->resync_timer);
- init_timer(&device->md_sync_timer);
- init_timer(&device->start_resync_timer);
- init_timer(&device->request_timer);
- device->resync_timer.function = resync_timer_fn;
- device->resync_timer.data = (unsigned long) device;
- device->md_sync_timer.function = md_sync_timer_fn;
- device->md_sync_timer.data = (unsigned long) device;
- device->start_resync_timer.function = start_resync_timer_fn;
- device->start_resync_timer.data = (unsigned long) device;
- device->request_timer.function = request_timer_fn;
- device->request_timer.data = (unsigned long) device;
+ setup_timer(&device->resync_timer, resync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->md_sync_timer, md_sync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->start_resync_timer, start_resync_timer_fn,
+ (unsigned long)device);
+ setup_timer(&device->request_timer, request_timer_fn,
+ (unsigned long)device);
init_waitqueue_head(&device->misc_wait);
init_waitqueue_head(&device->state_wait);
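The hunk above collapses each open-coded init_timer() plus manual .function/.data assignment into a single setup_timer() call. A hedged sketch of the equivalence with made-up names; both helpers arm the same callback on pre-4.15 timer semantics.

/* Hypothetical conversion example: old and new ways to initialize a timer. */
#include <linux/timer.h>

static void my_timeout_fn(unsigned long data)
{
	/* timer callback body omitted in this sketch */
}

static struct timer_list my_timer;

static void my_timer_init_old(void *ctx)
{
	init_timer(&my_timer);
	my_timer.function = my_timeout_fn;
	my_timer.data = (unsigned long)ctx;
}

static void my_timer_init_new(void *ctx)
{
	/* one call instead of three assignments */
	setup_timer(&my_timer, my_timeout_fn, (unsigned long)ctx);
}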
@@ -2131,7 +2142,7 @@ static void drbd_destroy_mempools(void)
static int drbd_create_mempools(void)
{
struct page *page;
- const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
+ const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
int i;
/* prepare our caches and mempools */
@@ -2167,13 +2178,12 @@ static int drbd_create_mempools(void)
goto Enomem;
/* mempools */
- drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, BIOSET_NEED_RESCUER);
+ drbd_io_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
if (drbd_io_bio_set == NULL)
goto Enomem;
drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0,
- BIOSET_NEED_BVECS |
- BIOSET_NEED_RESCUER);
+ BIOSET_NEED_BVECS);
if (drbd_md_io_bio_set == NULL)
goto Enomem;
@@ -2409,7 +2419,6 @@ static void drbd_cleanup(void)
destroy_workqueue(retry.wq);
drbd_genl_unregister();
- drbd_debugfs_cleanup();
idr_for_each_entry(&drbd_devices, device, i)
drbd_delete_device(device);
@@ -2420,6 +2429,8 @@ static void drbd_cleanup(void)
drbd_free_resource(resource);
}
+ drbd_debugfs_cleanup();
+
drbd_destroy_mempools();
unregister_blkdev(DRBD_MAJOR, "drbd");
@@ -2972,12 +2983,12 @@ static int __init drbd_init(void)
{
int err;
- if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
- pr_err("invalid minor_count (%d)\n", minor_count);
+ if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
+ pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
return -EINVAL;
#else
- minor_count = DRBD_MINOR_COUNT_DEF;
+ drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
}
@@ -3900,12 +3911,12 @@ _drbd_insert_fault(struct drbd_device *device, unsigned int type)
static struct fault_random_state rrs = {0, 0};
unsigned int ret = (
- (fault_devs == 0 ||
- ((1 << device_to_minor(device)) & fault_devs) != 0) &&
- (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
+ (drbd_fault_devs == 0 ||
+ ((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
+ (((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));
if (ret) {
- fault_count++;
+ drbd_fault_count++;
if (__ratelimit(&drbd_ratelimit_state))
drbd_warn(device, "***Simulating %s failure\n",
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index ad0fcb43e45c..a12f77e6891e 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -344,7 +344,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
(char[60]) { }, /* address */
NULL };
char mb[14];
- char *argv[] = {usermode_helper, cmd, mb, NULL };
+ char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
struct drbd_connection *connection = first_peer_device(device)->connection;
struct sib_info sib;
int ret;
@@ -359,19 +359,19 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
* write out any unsynced meta data changes now */
drbd_md_sync(device);
- drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
+ drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
drbd_bcast_event(device, &sib);
notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
- ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, mb,
+ drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
else
drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, mb,
+ drbd_usermode_helper, cmd, mb,
(ret >> 8) & 0xff, ret);
sib.sib_reason = SIB_HELPER_POST;
sib.helper_exit_code = ret;
@@ -396,24 +396,24 @@ enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
(char[60]) { }, /* address */
NULL };
char *resource_name = connection->resource->name;
- char *argv[] = {usermode_helper, cmd, resource_name, NULL };
+ char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
int ret;
setup_khelper_env(connection, envp);
conn_md_sync(connection);
- drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
+ drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);
- ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+ ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, resource_name,
+ drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
else
drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
- usermode_helper, cmd, resource_name,
+ drbd_usermode_helper, cmd, resource_name,
(ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */
notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);
@@ -1236,12 +1236,18 @@ static void fixup_discard_if_not_supported(struct request_queue *q)
static void decide_on_write_same_support(struct drbd_device *device,
struct request_queue *q,
- struct request_queue *b, struct o_qlim *o)
+ struct request_queue *b, struct o_qlim *o,
+ bool disable_write_same)
{
struct drbd_peer_device *peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
bool can_do = b ? b->limits.max_write_same_sectors : true;
+ if (can_do && disable_write_same) {
+ can_do = false;
+ drbd_info(peer_device, "WRITE_SAME disabled by config\n");
+ }
+
if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
can_do = false;
drbd_info(peer_device, "peer does not support WRITE_SAME\n");
@@ -1302,6 +1308,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
struct request_queue *b = NULL;
struct disk_conf *dc;
bool discard_zeroes_if_aligned = true;
+ bool disable_write_same = false;
if (bdev) {
b = bdev->backing_bdev->bd_disk->queue;
@@ -1311,6 +1318,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
dc = rcu_dereference(device->ldev->disk_conf);
max_segments = dc->max_bio_bvecs;
discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
+ disable_write_same = dc->disable_write_same;
rcu_read_unlock();
blk_set_stacking_limits(&q->limits);
@@ -1321,7 +1329,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
blk_queue_segment_boundary(q, PAGE_SIZE-1);
decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
- decide_on_write_same_support(device, q, b, o);
+ decide_on_write_same_support(device, q, b, o, disable_write_same);
if (b) {
blk_queue_stack_limits(q, b);
@@ -1612,7 +1620,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
if (write_ordering_changed(old_disk_conf, new_disk_conf))
drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);
- if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned)
+ if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
+ || old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
drbd_reconsider_queue_parameters(device, device->ldev, NULL);
drbd_md_sync(device);
@@ -2140,34 +2149,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
static int adm_detach(struct drbd_device *device, int force)
{
- enum drbd_state_rv retcode;
- void *buffer;
- int ret;
-
if (force) {
set_bit(FORCE_DETACH, &device->flags);
drbd_force_state(device, NS(disk, D_FAILED));
- retcode = SS_SUCCESS;
- goto out;
+ return SS_SUCCESS;
}
- drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
- buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
- if (buffer) {
- retcode = drbd_request_state(device, NS(disk, D_FAILED));
- drbd_md_put_buffer(device);
- } else /* already <= D_FAILED */
- retcode = SS_NOTHING_TO_DO;
- /* D_FAILED will transition to DISKLESS. */
- drbd_resume_io(device);
- ret = wait_event_interruptible(device->misc_wait,
- device->state.disk != D_FAILED);
- if ((int)retcode == (int)SS_IS_DISKLESS)
- retcode = SS_NOTHING_TO_DO;
- if (ret)
- retcode = ERR_INTR;
-out:
- return retcode;
+ return drbd_request_detach_interruptible(device);
}
/* Detaching the disk is a process in multiple stages. First we need to lock
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 8378142f7a55..582caeb0de86 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -127,7 +127,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
seq_putc(seq, '=');
seq_putc(seq, '>');
for (i = 0; i < y; i++)
- seq_printf(seq, ".");
+ seq_putc(seq, '.');
seq_puts(seq, "] ");
if (state.conn == C_VERIFY_S || state.conn == C_VERIFY_T)
@@ -179,7 +179,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
seq_printf_with_thousands_grouping(seq, dbdt);
seq_puts(seq, " (");
/* ------------------------- ~3s average ------------------------ */
- if (proc_details >= 1) {
+ if (drbd_proc_details >= 1) {
/* this is what drbd_rs_should_slow_down() uses */
i = (device->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;
dt = (jiffies - device->rs_mark_time[i]) / HZ;
@@ -209,7 +209,7 @@ static void drbd_syncer_progress(struct drbd_device *device, struct seq_file *se
}
seq_printf(seq, " K/sec%s\n", stalled ? " (stalled)" : "");
- if (proc_details >= 1) {
+ if (drbd_proc_details >= 1) {
/* 64 bit:
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(device);
@@ -332,13 +332,13 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
state.conn == C_VERIFY_T)
drbd_syncer_progress(device, seq, state);
- if (proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
+ if (drbd_proc_details >= 1 && get_ldev_if_state(device, D_FAILED)) {
lc_seq_printf_stats(seq, device->resync);
lc_seq_printf_stats(seq, device->act_log);
put_ldev(device);
}
- if (proc_details >= 2)
+ if (drbd_proc_details >= 2)
seq_printf(seq, "\tblocked on activity log: %d\n", atomic_read(&device->ap_actlog_cnt));
}
rcu_read_unlock();
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c7e95e6380fb..796eaf347dc0 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -332,7 +332,7 @@ static void drbd_free_pages(struct drbd_device *device, struct page *page, int i
if (page == NULL)
return;
- if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
+ if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count)
i = page_chain_free(page);
else {
struct page *tmp;
@@ -1100,7 +1100,10 @@ randomize:
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_lock(peer_device->device->state_mutex);
+ /* avoid a race with conn_request_state( C_DISCONNECTING ) */
+ spin_lock_irq(&connection->resource->req_lock);
set_bit(STATE_SENT, &connection->flags);
+ spin_unlock_irq(&connection->resource->req_lock);
idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
mutex_unlock(peer_device->device->state_mutex);
@@ -1194,6 +1197,14 @@ static int decode_header(struct drbd_connection *connection, void *header, struc
return 0;
}
+static void drbd_unplug_all_devices(struct drbd_connection *connection)
+{
+ if (current->plug == &connection->receiver_plug) {
+ blk_finish_plug(&connection->receiver_plug);
+ blk_start_plug(&connection->receiver_plug);
+ } /* else: maybe just schedule() ?? */
+}
+
static int drbd_recv_header(struct drbd_connection *connection, struct packet_info *pi)
{
void *buffer = connection->data.rbuf;
@@ -1209,6 +1220,36 @@ static int drbd_recv_header(struct drbd_connection *connection, struct packet_in
return err;
}
+static int drbd_recv_header_maybe_unplug(struct drbd_connection *connection, struct packet_info *pi)
+{
+ void *buffer = connection->data.rbuf;
+ unsigned int size = drbd_header_size(connection);
+ int err;
+
+ err = drbd_recv_short(connection->data.socket, buffer, size, MSG_NOSIGNAL|MSG_DONTWAIT);
+ if (err != size) {
+ /* If we have nothing in the receive buffer now, to reduce
+ * application latency, try to drain the backend queues as
+ * quickly as possible, and let remote TCP know what we have
+ * received so far. */
+ if (err == -EAGAIN) {
+ drbd_tcp_quickack(connection->data.socket);
+ drbd_unplug_all_devices(connection);
+ }
+ if (err > 0) {
+ buffer += err;
+ size -= err;
+ }
+ err = drbd_recv_all_warn(connection, buffer, size);
+ if (err)
+ return err;
+ }
+
+ err = decode_header(connection, connection->data.rbuf, pi);
+ connection->last_received = jiffies;
+
+ return err;
+}
/* This is blkdev_issue_flush, but asynchronous.
* We want to submit to all component volumes in parallel,
* then wait for all completions.
@@ -1223,7 +1264,7 @@ struct one_flush_context {
struct issue_flush_context *ctx;
};
-void one_flush_endio(struct bio *bio)
+static void one_flush_endio(struct bio *bio)
{
struct one_flush_context *octx = bio->bi_private;
struct drbd_device *device = octx->device;
@@ -1265,7 +1306,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device;
octx->ctx = ctx;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx;
bio->bi_end_io = one_flush_endio;
bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
@@ -1548,7 +1589,7 @@ next_bio:
}
/* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio;
@@ -4085,7 +4126,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
return config_unknown_volume(connection, pi);
device = peer_device->device;
- p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
+ p_uuid = kmalloc_array(UI_EXTENDED_SIZE, sizeof(*p_uuid), GFP_NOIO);
if (!p_uuid) {
drbd_err(device, "kmalloc of p_uuid failed\n");
return false;
@@ -4882,8 +4923,8 @@ static void drbdd(struct drbd_connection *connection)
struct data_cmd const *cmd;
drbd_thread_current_set_cpu(&connection->receiver);
- update_receiver_timing_details(connection, drbd_recv_header);
- if (drbd_recv_header(connection, &pi))
+ update_receiver_timing_details(connection, drbd_recv_header_maybe_unplug);
+ if (drbd_recv_header_maybe_unplug(connection, &pi))
goto err_out;
cmd = &drbd_cmd_handler[pi.cmd];
@@ -5375,8 +5416,11 @@ int drbd_receiver(struct drbd_thread *thi)
}
} while (h == 0);
- if (h > 0)
+ if (h > 0) {
+ blk_start_plug(&connection->receiver_plug);
drbdd(connection);
+ blk_finish_plug(&connection->receiver_plug);
+ }
conn_disconnect(connection);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index f6e865b2d543..de8566e55334 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -36,14 +36,18 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
/* Update disk stats at start of I/O request */
static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9,
- &device->vdisk->part0);
+ struct request_queue *q = device->rq_queue;
+
+ generic_start_io_acct(q, bio_data_dir(req->master_bio),
+ req->i.size >> 9, &device->vdisk->part0);
}
/* Update disk stats when completing request upwards */
static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req)
{
- generic_end_io_acct(bio_data_dir(req->master_bio),
+ struct request_queue *q = device->rq_queue;
+
+ generic_end_io_acct(q, bio_data_dir(req->master_bio),
&device->vdisk->part0, req->start_jif);
}
@@ -1175,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else
type = DRBD_FAULT_DT_RD;
- bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(bio, device->ldev->backing_bdev);
/* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio.
@@ -1275,6 +1279,57 @@ static bool may_do_writes(struct drbd_device *device)
return s.disk == D_UP_TO_DATE || s.pdsk == D_UP_TO_DATE;
}
+struct drbd_plug_cb {
+ struct blk_plug_cb cb;
+ struct drbd_request *most_recent_req;
+ /* do we need more? */
+};
+
+static void drbd_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct drbd_plug_cb *plug = container_of(cb, struct drbd_plug_cb, cb);
+ struct drbd_resource *resource = plug->cb.data;
+ struct drbd_request *req = plug->most_recent_req;
+
+ kfree(cb);
+ if (!req)
+ return;
+
+ spin_lock_irq(&resource->req_lock);
+ /* In case the sender did not process it yet, raise the flag to
+ * have it followed with P_UNPLUG_REMOTE just after. */
+ req->rq_state |= RQ_UNPLUG;
+ /* but also queue a generic unplug */
+ drbd_queue_unplug(req->device);
+ kref_put(&req->kref, drbd_req_destroy);
+ spin_unlock_irq(&resource->req_lock);
+}
+
+static struct drbd_plug_cb* drbd_check_plugged(struct drbd_resource *resource)
+{
+ /* A lot of text to say
+ * return (struct drbd_plug_cb*)blk_check_plugged(); */
+ struct drbd_plug_cb *plug;
+ struct blk_plug_cb *cb = blk_check_plugged(drbd_unplug, resource, sizeof(*plug));
+
+ if (cb)
+ plug = container_of(cb, struct drbd_plug_cb, cb);
+ else
+ plug = NULL;
+ return plug;
+}
+
+static void drbd_update_plug(struct drbd_plug_cb *plug, struct drbd_request *req)
+{
+ struct drbd_request *tmp = plug->most_recent_req;
+ /* Will be sent to some peer.
+ * Remember to tag it with UNPLUG_REMOTE on unplug */
+ kref_get(&req->kref);
+ plug->most_recent_req = req;
+ if (tmp)
+ kref_put(&tmp->kref, drbd_req_destroy);
+}
+
static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req)
{
struct drbd_resource *resource = device->resource;
@@ -1347,6 +1402,12 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
no_remote = true;
}
+ if (no_remote == false) {
+ struct drbd_plug_cb *plug = drbd_check_plugged(resource);
+ if (plug)
+ drbd_update_plug(plug, req);
+ }
+
/* If it took the fast path in drbd_request_prepare, add it here.
* The slow path has added it already. */
if (list_empty(&req->req_pending_master_completion))
@@ -1395,7 +1456,10 @@ void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned l
static void submit_fast_path(struct drbd_device *device, struct list_head *incoming)
{
+ struct blk_plug plug;
struct drbd_request *req, *tmp;
+
+ blk_start_plug(&plug);
list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
const int rw = bio_data_dir(req->master_bio);
@@ -1413,6 +1477,7 @@ static void submit_fast_path(struct drbd_device *device, struct list_head *incom
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+ blk_finish_plug(&plug);
}
static bool prepare_al_transaction_nonblock(struct drbd_device *device,
@@ -1420,12 +1485,12 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
struct list_head *pending,
struct list_head *later)
{
- struct drbd_request *req, *tmp;
+ struct drbd_request *req;
int wake = 0;
int err;
spin_lock_irq(&device->al_lock);
- list_for_each_entry_safe(req, tmp, incoming, tl_requests) {
+ while ((req = list_first_entry_or_null(incoming, struct drbd_request, tl_requests))) {
err = drbd_al_begin_io_nonblock(device, &req->i);
if (err == -ENOBUFS)
break;
@@ -1442,17 +1507,20 @@ static bool prepare_al_transaction_nonblock(struct drbd_device *device,
return !list_empty(pending);
}
-void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
+static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
{
- struct drbd_request *req, *tmp;
+ struct blk_plug plug;
+ struct drbd_request *req;
- list_for_each_entry_safe(req, tmp, pending, tl_requests) {
+ blk_start_plug(&plug);
+ while ((req = list_first_entry_or_null(pending, struct drbd_request, tl_requests))) {
req->rq_state |= RQ_IN_ACT_LOG;
req->in_actlog_jif = jiffies;
atomic_dec(&device->ap_actlog_cnt);
list_del_init(&req->tl_requests);
drbd_send_and_submit(device, req);
}
+ blk_finish_plug(&plug);
}
void do_submit(struct work_struct *ws)
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 9e1866ab238f..a2254f825601 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -212,6 +212,11 @@ enum drbd_req_state_bits {
/* Should call drbd_al_complete_io() for this request... */
__RQ_IN_ACT_LOG,
+ /* This was the most recent request during some blk_finish_plug()
+ * or its implicit from-schedule equivalent.
+ * We may use it as hint to send a P_UNPLUG_REMOTE */
+ __RQ_UNPLUG,
+
/* The peer has sent a retry ACK */
__RQ_POSTPONED,
@@ -249,6 +254,7 @@ enum drbd_req_state_bits {
#define RQ_WSAME (1UL << __RQ_WSAME)
#define RQ_UNMAP (1UL << __RQ_UNMAP)
#define RQ_IN_ACT_LOG (1UL << __RQ_IN_ACT_LOG)
+#define RQ_UNPLUG (1UL << __RQ_UNPLUG)
#define RQ_POSTPONED (1UL << __RQ_POSTPONED)
#define RQ_COMPLETION_SUSP (1UL << __RQ_COMPLETION_SUSP)
#define RQ_EXP_RECEIVE_ACK (1UL << __RQ_EXP_RECEIVE_ACK)
diff --git a/drivers/block/drbd/drbd_state.c b/drivers/block/drbd/drbd_state.c
index eea0c4aec978..0813c654c893 100644
--- a/drivers/block/drbd/drbd_state.c
+++ b/drivers/block/drbd/drbd_state.c
@@ -346,7 +346,7 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
- enum drbd_role role = R_UNKNOWN;
+ enum drbd_role role = R_SECONDARY;
struct drbd_peer_device *peer_device;
int vnr;
@@ -579,11 +579,14 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
unsigned long flags;
union drbd_state os, ns;
enum drbd_state_rv rv;
+ void *buffer = NULL;
init_completion(&done);
if (f & CS_SERIALIZE)
mutex_lock(device->state_mutex);
+ if (f & CS_INHIBIT_MD_IO)
+ buffer = drbd_md_get_buffer(device, __func__);
spin_lock_irqsave(&device->resource->req_lock, flags);
os = drbd_read_state(device);
@@ -636,6 +639,8 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
}
abort:
+ if (buffer)
+ drbd_md_put_buffer(device);
if (f & CS_SERIALIZE)
mutex_unlock(device->state_mutex);
@@ -664,6 +669,47 @@ _drbd_request_state(struct drbd_device *device, union drbd_state mask,
return rv;
}
+/*
+ * We grab drbd_md_get_buffer() because we don't want to "fail" the disk while
+ * there is IO in flight: the transition into D_FAILED for detach purposes
+ * may get misinterpreted as an actual IO error in a confused endio function.
+ *
+ * We wrap it all into wait_event() to retry in case drbd_req_state()
+ * returns SS_IN_TRANSIENT_STATE.
+ *
+ * To avoid potential deadlock with e.g. the receiver thread trying to grab
+ * drbd_md_get_buffer() while trying to get out of the "transient state", we
+ * need to grab and release the meta data buffer inside of that wait_event loop.
+ */
+static enum drbd_state_rv
+request_detach(struct drbd_device *device)
+{
+ return drbd_req_state(device, NS(disk, D_FAILED),
+ CS_VERBOSE | CS_ORDERED | CS_INHIBIT_MD_IO);
+}
+
+enum drbd_state_rv
+drbd_request_detach_interruptible(struct drbd_device *device)
+{
+ enum drbd_state_rv rv;
+ int ret;
+
+ drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
+ wait_event_interruptible(device->state_wait,
+ (rv = request_detach(device)) != SS_IN_TRANSIENT_STATE);
+ drbd_resume_io(device);
+
+ ret = wait_event_interruptible(device->misc_wait,
+ device->state.disk != D_FAILED);
+
+ if (rv == SS_IS_DISKLESS)
+ rv = SS_NOTHING_TO_DO;
+ if (ret)
+ rv = ERR_INTR;
+
+ return rv;
+}
+
enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
union drbd_state val, enum chg_state_flags f)
diff --git a/drivers/block/drbd/drbd_state.h b/drivers/block/drbd/drbd_state.h
index 6c9d5d4a8a75..0276c98fbbdd 100644
--- a/drivers/block/drbd/drbd_state.h
+++ b/drivers/block/drbd/drbd_state.h
@@ -71,6 +71,10 @@ enum chg_state_flags {
CS_DC_SUSP = 1 << 10,
CS_DC_MASK = CS_DC_ROLE + CS_DC_PEER + CS_DC_CONN + CS_DC_DISK + CS_DC_PDSK,
CS_IGN_OUTD_FAIL = 1 << 11,
+
+ /* Make sure no meta data IO is in flight, by calling
+ * drbd_md_get_buffer(). Used for graceful detach. */
+ CS_INHIBIT_MD_IO = 1 << 12,
};
/* drbd_dev_state and drbd_state are different types. This is to stress the
@@ -156,6 +160,10 @@ static inline int drbd_request_state(struct drbd_device *device,
return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
}
+/* for use in adm_detach() (drbd_adm_detach(), drbd_adm_down()) */
+enum drbd_state_rv
+drbd_request_detach_interruptible(struct drbd_device *device);
+
enum drbd_role conn_highest_role(struct drbd_connection *connection);
enum drbd_role conn_highest_peer(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1d8726a8df34..03471b3fce86 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -65,6 +65,11 @@ void drbd_md_endio(struct bio *bio)
device = bio->bi_private;
device->md_io.error = blk_status_to_errno(bio->bi_status);
+ /* special case: drbd_md_read() during drbd_adm_attach() */
+ if (device->ldev)
+ put_ldev(device);
+ bio_put(bio);
+
/* We grabbed an extra reference in _drbd_md_sync_page_io() to be able
* to timeout on the lower level device, and eventually detach from it.
* If this io completion runs after that timeout expired, this
@@ -79,9 +84,6 @@ void drbd_md_endio(struct bio *bio)
drbd_md_put_buffer(device);
device->md_io.done = 1;
wake_up(&device->misc_wait);
- bio_put(bio);
- if (device->ldev) /* special case: drbd_md_read() during drbd_adm_attach() */
- put_ldev(device);
}
/* reads on behalf of the partner,
@@ -128,6 +130,14 @@ void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __releases(l
block_id = peer_req->block_id;
peer_req->flags &= ~EE_CALL_AL_COMPLETE_IO;
+ if (peer_req->flags & EE_WAS_ERROR) {
+ /* In protocol != C, we usually do not send write acks.
+ * In case of a write error, send the neg ack anyway. */
+ if (!__test_and_set_bit(__EE_SEND_WRITE_ACK, &peer_req->flags))
+ inc_unacked(device);
+ drbd_set_out_of_sync(device, peer_req->i.sector, peer_req->i.size);
+ }
+
spin_lock_irqsave(&device->resource->req_lock, flags);
device->writ_cnt += peer_req->i.size >> 9;
list_move_tail(&peer_req->w.list, &device->done_ee);
@@ -195,7 +205,8 @@ void drbd_peer_request_endio(struct bio *bio)
}
}
-void drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
+static void
+drbd_panic_after_delayed_completion_of_aborted_request(struct drbd_device *device)
{
panic("drbd%u %s/%u potential random memory corruption caused by delayed completion of aborted local request\n",
device->minor, device->resource->name, device->vnr);
@@ -1382,18 +1393,22 @@ static int drbd_send_barrier(struct drbd_connection *connection)
return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
}
+static int pd_send_unplug_remote(struct drbd_peer_device *pd)
+{
+ struct drbd_socket *sock = &pd->connection->data;
+ if (!drbd_prepare_command(pd, sock))
+ return -EIO;
+ return drbd_send_command(pd, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+}
+
int w_send_write_hint(struct drbd_work *w, int cancel)
{
struct drbd_device *device =
container_of(w, struct drbd_device, unplug_work);
- struct drbd_socket *sock;
if (cancel)
return 0;
- sock = &first_peer_device(device)->connection->data;
- if (!drbd_prepare_command(first_peer_device(device), sock))
- return -EIO;
- return drbd_send_command(first_peer_device(device), sock, P_UNPLUG_REMOTE, 0, NULL, 0);
+ return pd_send_unplug_remote(first_peer_device(device));
}
static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
@@ -1455,6 +1470,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
+ bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
@@ -1470,6 +1486,9 @@ int w_send_dblock(struct drbd_work *w, int cancel)
err = drbd_send_dblock(peer_device, req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ if (do_send_unplug && !err)
+ pd_send_unplug_remote(peer_device);
+
return err;
}
@@ -1484,6 +1503,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
struct drbd_device *device = req->device;
struct drbd_peer_device *const peer_device = first_peer_device(device);
struct drbd_connection *connection = peer_device->connection;
+ bool do_send_unplug = req->rq_state & RQ_UNPLUG;
int err;
if (unlikely(cancel)) {
@@ -1501,6 +1521,9 @@ int w_send_read_req(struct drbd_work *w, int cancel)
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
+ if (do_send_unplug && !err)
+ pd_send_unplug_remote(peer_device);
+
return err;
}
@@ -1513,7 +1536,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
drbd_al_begin_io(device, &req->i);
drbd_req_make_private_bio(req, req->master_bio);
- req->private_bio->bi_bdev = device->ldev->backing_bdev;
+ bio_set_dev(req->private_bio, device->ldev->backing_bdev);
generic_make_request(req->private_bio);
return 0;
@@ -1733,6 +1756,11 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
return;
}
+ if (!connection) {
+ drbd_err(device, "No connection to peer, aborting!\n");
+ return;
+ }
+
if (!test_bit(B_RS_H_DONE, &device->flags)) {
if (side == C_SYNC_TARGET) {
/* Since application IO was locked out during C_WF_BITMAP_T and
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9c00f29e40c1..60c086a53609 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1);
- bio.bi_bdev = bdev;
+ bio_set_dev(&bio, bdev);
bio_add_page(&bio, page, size, 0);
bio.bi_iter.bi_sector = 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ef8334949b42..85de67334695 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -213,16 +213,18 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
*/
blk_mq_freeze_queue(lo->lo_queue);
lo->use_dio = use_dio;
- if (use_dio)
+ if (use_dio) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- else
+ } else {
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
+ }
blk_mq_unfreeze_queue(lo->lo_queue);
}
static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
- loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
{
loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
sector_t x = (sector_t)size;
@@ -234,12 +236,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
lo->lo_offset = offset;
if (lo->lo_sizelimit != sizelimit)
lo->lo_sizelimit = sizelimit;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
- lo->lo_logical_blocksize = logical_blocksize;
- blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
- blk_queue_logical_block_size(lo->lo_queue,
- lo->lo_logical_blocksize);
- }
set_capacity(lo->lo_disk, x);
bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
/* let user-space know about the new size */
@@ -467,12 +463,21 @@ static void lo_complete_rq(struct request *rq)
blk_mq_end_request(rq, cmd->ret < 0 ? BLK_STS_IOERR : BLK_STS_OK);
}
+static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
+{
+ if (!atomic_dec_and_test(&cmd->ref))
+ return;
+ kfree(cmd->bvec);
+ cmd->bvec = NULL;
+ blk_mq_complete_request(cmd->rq);
+}
+
static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
cmd->ret = ret;
- blk_mq_complete_request(cmd->rq);
+ lo_rw_aio_do_completion(cmd);
}
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
@@ -480,22 +485,51 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
{
struct iov_iter iter;
struct bio_vec *bvec;
- struct bio *bio = cmd->rq->bio;
+ struct request *rq = cmd->rq;
+ struct bio *bio = rq->bio;
struct file *file = lo->lo_backing_file;
+ unsigned int offset;
+ int segments = 0;
int ret;
- /* nomerge for loop request queue */
- WARN_ON(cmd->rq->bio != cmd->rq->biotail);
+ if (rq->bio != rq->biotail) {
+ struct req_iterator iter;
+ struct bio_vec tmp;
+
+ __rq_for_each_bio(bio, rq)
+ segments += bio_segments(bio);
+ bvec = kmalloc(sizeof(struct bio_vec) * segments, GFP_NOIO);
+ if (!bvec)
+ return -EIO;
+ cmd->bvec = bvec;
+
+ /*
+ * The bios of the request may start from the middle of the
+ * 'bvec' because of bio splitting, so we can't directly
+ * copy bio->bi_io_vec to the new bvec. The rq_for_each_segment
+ * API will take care of all the details for us.
+ */
+ rq_for_each_segment(tmp, rq, iter) {
+ *bvec = tmp;
+ bvec++;
+ }
+ bvec = cmd->bvec;
+ offset = 0;
+ } else {
+ /*
+ * Same here, this bio may start from the middle of the
+ * 'bvec' because of bio splitting, so the offset within the
+ * bvec must be passed to the iov iterator
+ */
+ offset = bio->bi_iter.bi_bvec_done;
+ bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ segments = bio_segments(bio);
+ }
+ atomic_set(&cmd->ref, 2);
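+ /* Two references on this command: lo_rw_aio() drops one after the
+ * read/write iter call returns, lo_rw_aio_complete() drops the other;
+ * lo_rw_aio_do_completion() completes the request only on the last put. */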
- bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
- bio_segments(bio), blk_rq_bytes(cmd->rq));
- /*
- * This bio may be started from the middle of the 'bvec'
- * because of bio splitting, so offset from the bvec must
- * be passed to iov iterator
- */
- iter.iov_offset = bio->bi_iter.bi_bvec_done;
+ segments, blk_rq_bytes(rq));
+ iter.iov_offset = offset;
cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
@@ -507,6 +541,8 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
else
ret = call_read_iter(file, &cmd->iocb, &iter);
+ lo_rw_aio_do_completion(cmd);
+
if (ret != -EIOCBQUEUED)
cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
return 0;
@@ -553,74 +589,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
}
}
-struct switch_request {
- struct file *file;
- struct completion wait;
-};
-
static inline void loop_update_dio(struct loop_device *lo)
{
__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
lo->use_dio);
}
-/*
- * Do the actual switch; called from the BIO completion routine
- */
-static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
-{
- struct file *file = p->file;
- struct file *old_file = lo->lo_backing_file;
- struct address_space *mapping;
-
- /* if no new file, only flush of queued bios requested */
- if (!file)
- return;
-
- mapping = file->f_mapping;
- mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
- lo->lo_backing_file = file;
- lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
- mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
- lo->old_gfp_mask = mapping_gfp_mask(mapping);
- mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
- loop_update_dio(lo);
-}
-
-/*
- * loop_switch performs the hard work of switching a backing store.
- * First it needs to flush existing IO, it does this by sending a magic
- * BIO down the pipe. The completion of this BIO does the actual switch.
- */
-static int loop_switch(struct loop_device *lo, struct file *file)
-{
- struct switch_request w;
-
- w.file = file;
-
- /* freeze queue and wait for completion of scheduled requests */
- blk_mq_freeze_queue(lo->lo_queue);
-
- /* do the switch action */
- do_loop_switch(lo, &w);
-
- /* unfreeze */
- blk_mq_unfreeze_queue(lo->lo_queue);
-
- return 0;
-}
-
-/*
- * Helper to flush the IOs in loop, but keeping loop thread running
- */
-static int loop_flush(struct loop_device *lo)
-{
- /* loop not yet configured, no running thread, nothing to flush */
- if (lo->lo_state != Lo_bound)
- return 0;
- return loop_switch(lo, NULL);
-}
-
static void loop_reread_partitions(struct loop_device *lo,
struct block_device *bdev)
{
@@ -685,9 +659,14 @@ static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
goto out_putf;
/* and ... switch */
- error = loop_switch(lo, file);
- if (error)
- goto out_putf;
+ blk_mq_freeze_queue(lo->lo_queue);
+ mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+ lo->lo_backing_file = file;
+ lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
+ mapping_set_gfp_mask(file->f_mapping,
+ lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+ loop_update_dio(lo);
+ blk_mq_unfreeze_queue(lo->lo_queue);
fput(old_file);
if (lo->lo_flags & LO_FLAGS_PARTSCAN)
@@ -820,7 +799,6 @@ static void loop_config_discard(struct loop_device *lo)
struct file *file = lo->lo_backing_file;
struct inode *inode = file->f_mapping->host;
struct request_queue *q = lo->lo_queue;
- int lo_bits = 9;
/*
* We use punch hole to reclaim the free space used by the
@@ -840,11 +818,9 @@ static void loop_config_discard(struct loop_device *lo)
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
- if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
- lo_bits = blksize_bits(lo->lo_logical_blocksize);
- blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
@@ -877,7 +853,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
struct file *file, *f;
struct inode *inode;
struct address_space *mapping;
- unsigned lo_blocksize;
int lo_flags = 0;
int error;
loff_t size;
@@ -921,9 +896,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
!file->f_op->write_iter)
lo_flags |= LO_FLAGS_READ_ONLY;
- lo_blocksize = S_ISBLK(inode->i_mode) ?
- inode->i_bdev->bd_block_size : PAGE_SIZE;
-
error = -EFBIG;
size = get_loop_size(lo, file);
if ((loff_t)(sector_t)size != size)
@@ -937,8 +909,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
lo->use_dio = false;
- lo->lo_blocksize = lo_blocksize;
- lo->lo_logical_blocksize = 512;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
lo->lo_backing_file = file;
@@ -958,7 +928,8 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
/* let user-space know about the new size */
kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- set_blocksize(bdev, lo_blocksize);
+ set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
+ block_size(inode->i_bdev) : PAGE_SIZE);
lo->lo_state = Lo_bound;
if (part_shift)
@@ -1064,6 +1035,9 @@ static int loop_clr_fd(struct loop_device *lo)
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+ blk_queue_logical_block_size(lo->lo_queue, 512);
+ blk_queue_physical_block_size(lo->lo_queue, 512);
+ blk_queue_io_min(lo->lo_queue, 512);
if (bdev) {
bdput(bdev);
invalidate_bdev(bdev);
@@ -1104,7 +1078,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
int err;
struct loop_func_table *xfer;
kuid_t uid = current_uid();
- int lo_flags = lo->lo_flags;
if (lo->lo_encrypt_key_size &&
!uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1110,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
if (err)
goto exit;
- if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
- if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
- lo->lo_logical_blocksize = 512;
- lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
- if (LO_INFO_BLOCKSIZE(info) != 512 &&
- LO_INFO_BLOCKSIZE(info) != 1024 &&
- LO_INFO_BLOCKSIZE(info) != 2048 &&
- LO_INFO_BLOCKSIZE(info) != 4096)
- return -EINVAL;
- if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
- return -EINVAL;
- }
-
if (lo->lo_offset != info->lo_offset ||
- lo->lo_sizelimit != info->lo_sizelimit ||
- lo->lo_flags != lo_flags ||
- ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
- lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
- if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
- LO_INFO_BLOCKSIZE(info))) {
+ lo->lo_sizelimit != info->lo_sizelimit) {
+ if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
err = -EFBIG;
goto exit;
}
@@ -1348,8 +1304,7 @@ static int loop_set_capacity(struct loop_device *lo)
if (unlikely(lo->lo_state != Lo_bound))
return -ENXIO;
- return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
- lo->lo_logical_blocksize);
+ return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
}
static int loop_set_dio(struct loop_device *lo, unsigned long arg)
@@ -1366,6 +1321,26 @@ static int loop_set_dio(struct loop_device *lo, unsigned long arg)
return error;
}
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+{
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+ return -EINVAL;
+
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ blk_queue_logical_block_size(lo->lo_queue, arg);
+ blk_queue_physical_block_size(lo->lo_queue, arg);
+ blk_queue_io_min(lo->lo_queue, arg);
+ loop_update_dio(lo);
+
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+ return 0;
+}
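Editor's note: a hedged userspace sketch of the new ioctl, not part of this patch. It assumes LOOP_SET_BLOCK_SIZE is exported through <linux/loop.h> as this series intends, and that /dev/loop0 (an example path only) is already bound to a backing file.

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/loop.h>

    int main(void)
    {
    	int fd = open("/dev/loop0", O_RDWR);	/* example device only */

    	if (fd < 0 || ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
    		perror("LOOP_SET_BLOCK_SIZE");
    	/* on success the loop queue now reports 4096-byte logical blocks */
    	return 0;
    }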
+
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -1414,6 +1389,11 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_dio(lo, arg);
break;
+ case LOOP_SET_BLOCK_SIZE:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_block_size(lo, arg);
+ break;
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
@@ -1613,12 +1593,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
err = loop_clr_fd(lo);
if (!err)
return;
- } else {
+ } else if (lo->lo_state == Lo_bound) {
/*
* Otherwise keep thread (if running) and config,
* but flush possible ongoing bios in thread.
*/
- loop_flush(lo);
+ blk_mq_freeze_queue(lo->lo_queue);
+ blk_mq_unfreeze_queue(lo->lo_queue);
}
mutex_unlock(&lo->lo_ctl_mutex);
@@ -1800,9 +1781,13 @@ static int loop_add(struct loop_device **l, int i)
}
lo->lo_queue->queuedata = lo;
+ blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
+
/*
- * It doesn't make sense to enable merge because the I/O
- * submitted to backing file is handled page by page.
+ * By default we do buffered IO, so it doesn't make sense to enable
+ * merging because the I/O submitted to the backing file is handled page
+ * by page. For direct I/O mode, merging does help to dispatch bigger
+ * requests to the underlying disk. We will enable merging once direct
+ * I/O is enabled.
*/
queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
@@ -1996,10 +1981,6 @@ static int __init loop_init(void)
struct loop_device *lo;
int err;
- err = misc_register(&loop_misc);
- if (err < 0)
- return err;
-
part_shift = 0;
if (max_part > 0) {
part_shift = fls(max_part);
@@ -2017,12 +1998,12 @@ static int __init loop_init(void)
if ((1UL << part_shift) > DISK_MAX_PARTS) {
err = -EINVAL;
- goto misc_out;
+ goto err_out;
}
if (max_loop > 1UL << (MINORBITS - part_shift)) {
err = -EINVAL;
- goto misc_out;
+ goto err_out;
}
/*
@@ -2041,6 +2022,11 @@ static int __init loop_init(void)
range = 1UL << MINORBITS;
}
+ err = misc_register(&loop_misc);
+ if (err < 0)
+ goto err_out;
+
+
if (register_blkdev(LOOP_MAJOR, "loop")) {
err = -EIO;
goto misc_out;
@@ -2060,6 +2046,7 @@ static int __init loop_init(void)
misc_out:
misc_deregister(&loop_misc);
+err_out:
return err;
}
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 2c096b9a17b8..f68c1d50802f 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -48,8 +48,6 @@ struct loop_device {
struct file * lo_backing_file;
struct block_device *lo_device;
- unsigned lo_blocksize;
- unsigned lo_logical_blocksize;
void *key_data;
gfp_t old_gfp_mask;
@@ -69,10 +67,13 @@ struct loop_device {
struct loop_cmd {
struct kthread_work work;
struct request *rq;
- struct list_head list;
- bool use_aio; /* use AIO interface to handle I/O */
+ union {
+ bool use_aio; /* use AIO interface to handle I/O */
+ atomic_t ref; /* only for aio */
+ };
long ret;
struct kiocb iocb;
+ struct bio_vec *bvec;
};
/* Support for loadable transfer modules */
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 5bdf923294a5..2aa87cbdede0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -128,7 +128,7 @@ static struct dentry *nbd_dbg_dir;
#define NBD_MAGIC 0x68797548
static unsigned int nbds_max = 16;
-static int max_part;
+static int max_part = 16;
static struct workqueue_struct *recv_workqueue;
static int part_shift;
@@ -165,7 +165,7 @@ static ssize_t pid_show(struct device *dev,
return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
}
-static struct device_attribute pid_attr = {
+static const struct device_attribute pid_attr = {
.attr = { .name = "pid", .mode = S_IRUGO},
.show = pid_show,
};
@@ -1584,6 +1584,15 @@ again:
}
} else {
nbd = idr_find(&nbd_index_idr, index);
+ if (!nbd) {
+ ret = nbd_dev_add(index);
+ if (ret < 0) {
+ mutex_unlock(&nbd_index_mutex);
+ printk(KERN_ERR "nbd: failed to add new device\n");
+ return ret;
+ }
+ nbd = idr_find(&nbd_index_idr, index);
+ }
}
if (!nbd) {
printk(KERN_ERR "nbd: couldn't find device at index %d\n",
@@ -2137,4 +2146,4 @@ MODULE_LICENSE("GPL");
module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
-MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
+MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 85c24cace973..8042c26ea9e6 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,3 +1,7 @@
+/*
+ * Add configfs and memory store: Kyungchan Koh <kkc6196@fb.com> and
+ * Shaohua Li <shli@fb.com>
+ */
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -9,27 +13,110 @@
#include <linux/blk-mq.h>
#include <linux/hrtimer.h>
#include <linux/lightnvm.h>
+#include <linux/configfs.h>
+#include <linux/badblocks.h>
+
+#define SECTOR_SHIFT 9
+#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT)
+#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT)
+#define SECTOR_SIZE (1 << SECTOR_SHIFT)
+#define SECTOR_MASK (PAGE_SECTORS - 1)
+
+#define FREE_BATCH 16
+
+#define TICKS_PER_SEC 50ULL
+#define TIMER_INTERVAL (NSEC_PER_SEC / TICKS_PER_SEC)
+
+static inline u64 mb_per_tick(int mbps)
+{
+ return (1 << 20) / TICKS_PER_SEC * ((u64) mbps);
+}
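Editor's note: mbps, cur_bytes and bw_timer above suggest a per-tick byte budget. A minimal sketch of how such a budget could be refilled from the hrtimer, assuming a hypothetical nullb_bwtimer_fn() and that the submission path charges blk_rq_bytes() against cur_bytes; this is illustrative, not the code added by this commit.

    static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
    {
    	struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
    	ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
    	unsigned int mbps = nullb->dev->mbps;

    	/* nothing consumed since the last tick: let the timer lapse */
    	if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
    		return HRTIMER_NORESTART;

    	/* refill the budget and re-arm for the next tick */
    	atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
    	hrtimer_forward_now(&nullb->bw_timer, timer_interval);

    	return HRTIMER_RESTART;
    }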
struct nullb_cmd {
struct list_head list;
struct llist_node ll_list;
- struct call_single_data csd;
+ call_single_data_t csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
struct nullb_queue *nq;
struct hrtimer timer;
+ blk_status_t error;
};
struct nullb_queue {
unsigned long *tag_map;
wait_queue_head_t wait;
unsigned int queue_depth;
+ struct nullb_device *dev;
struct nullb_cmd *cmds;
};
+/*
+ * Status flags for nullb_device.
+ *
+ * CONFIGURED: Device has been configured and turned on. Cannot reconfigure.
+ * UP: Device is currently on and visible in userspace.
+ * THROTTLED: Device is being throttled.
+ * CACHE: Device is using a write-back cache.
+ */
+enum nullb_device_flags {
+ NULLB_DEV_FL_CONFIGURED = 0,
+ NULLB_DEV_FL_UP = 1,
+ NULLB_DEV_FL_THROTTLED = 2,
+ NULLB_DEV_FL_CACHE = 3,
+};
+
+/*
+ * nullb_page is a page in memory for nullb devices.
+ *
+ * @page: The page holding the data.
+ * @bitmap: The bitmap represents which sector in the page has data.
+ * Each bit represents one block size. For example, sector 8
+ * will use the 7th bit
+ * The highest 2 bits of the bitmap are for special purposes. LOCK means the
+ * cache page is being flushed to storage. FREE means the cache page has been
+ * freed and should be skipped when flushing to storage. Please see
+ * null_make_cache_space()
+ */
+struct nullb_page {
+ struct page *page;
+ unsigned long bitmap;
+};
+#define NULLB_PAGE_LOCK (sizeof(unsigned long) * 8 - 1)
+#define NULLB_PAGE_FREE (sizeof(unsigned long) * 8 - 2)
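Editor's note: a minimal sketch of how a sector presumably maps onto a nullb_page and its per-sector bit, given PAGE_SECTORS_SHIFT and SECTOR_MASK defined above; the helper name is illustrative and not taken from this commit.

    static bool nullb_sector_present(struct radix_tree_root *root, sector_t sector)
    {
    	/* one nullb_page covers PAGE_SECTORS sectors; the radix tree is
    	 * indexed by page, the low sector bits select the valid bit */
    	struct nullb_page *t_page = radix_tree_lookup(root, sector >> PAGE_SECTORS_SHIFT);

    	return t_page && test_bit(sector & SECTOR_MASK, &t_page->bitmap);
    }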
+
+struct nullb_device {
+ struct nullb *nullb;
+ struct config_item item;
+ struct radix_tree_root data; /* data stored in the disk */
+ struct radix_tree_root cache; /* disk cache data */
+ unsigned long flags; /* device flags */
+ unsigned int curr_cache;
+ struct badblocks badblocks;
+
+ unsigned long size; /* device size in MB */
+ unsigned long completion_nsec; /* time in ns to complete a request */
+ unsigned long cache_size; /* disk cache size in MB */
+ unsigned int submit_queues; /* number of submission queues */
+ unsigned int home_node; /* home node for the device */
+ unsigned int queue_mode; /* block interface */
+ unsigned int blocksize; /* block size */
+ unsigned int irqmode; /* IRQ completion handler */
+ unsigned int hw_queue_depth; /* queue depth */
+ unsigned int index; /* index of the disk, only valid with a disk */
+ unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
+ bool use_lightnvm; /* register as a LightNVM device */
+ bool blocking; /* blocking blk-mq device */
+ bool use_per_node_hctx; /* use per-node allocation for hardware context */
+ bool power; /* power on/off the device */
+ bool memory_backed; /* if data is stored in memory */
+ bool discard; /* if support discard */
+};
+
struct nullb {
+ struct nullb_device *dev;
struct list_head list;
unsigned int index;
struct request_queue *q;
@@ -37,8 +124,10 @@ struct nullb {
struct nvm_dev *ndev;
struct blk_mq_tag_set *tag_set;
struct blk_mq_tag_set __tag_set;
- struct hrtimer timer;
unsigned int queue_depth;
+ atomic_long_t cur_bytes;
+ struct hrtimer bw_timer;
+ unsigned long cache_flush_pos;
spinlock_t lock;
struct nullb_queue *queues;
@@ -49,7 +138,7 @@ struct nullb {
static LIST_HEAD(nullb_list);
static struct mutex lock;
static int null_major;
-static int nullb_indexes;
+static DEFINE_IDA(nullb_indexes);
static struct kmem_cache *ppa_cache;
static struct blk_mq_tag_set tag_set;
@@ -65,15 +154,15 @@ enum {
NULL_Q_MQ = 2,
};
-static int submit_queues;
-module_param(submit_queues, int, S_IRUGO);
+static int g_submit_queues = 1;
+module_param_named(submit_queues, g_submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
-static int home_node = NUMA_NO_NODE;
-module_param(home_node, int, S_IRUGO);
+static int g_home_node = NUMA_NO_NODE;
+module_param_named(home_node, g_home_node, int, S_IRUGO);
MODULE_PARM_DESC(home_node, "Home node for the device");
-static int queue_mode = NULL_Q_MQ;
+static int g_queue_mode = NULL_Q_MQ;
static int null_param_store_val(const char *str, int *val, int min, int max)
{
@@ -92,7 +181,7 @@ static int null_param_store_val(const char *str, int *val, int min, int max)
static int null_set_queue_mode(const char *str, const struct kernel_param *kp)
{
- return null_param_store_val(str, &queue_mode, NULL_Q_BIO, NULL_Q_MQ);
+ return null_param_store_val(str, &g_queue_mode, NULL_Q_BIO, NULL_Q_MQ);
}
static const struct kernel_param_ops null_queue_mode_param_ops = {
@@ -100,38 +189,38 @@ static const struct kernel_param_ops null_queue_mode_param_ops = {
.get = param_get_int,
};
-device_param_cb(queue_mode, &null_queue_mode_param_ops, &queue_mode, S_IRUGO);
+device_param_cb(queue_mode, &null_queue_mode_param_ops, &g_queue_mode, S_IRUGO);
MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
-static int gb = 250;
-module_param(gb, int, S_IRUGO);
+static int g_gb = 250;
+module_param_named(gb, g_gb, int, S_IRUGO);
MODULE_PARM_DESC(gb, "Size in GB");
-static int bs = 512;
-module_param(bs, int, S_IRUGO);
+static int g_bs = 512;
+module_param_named(bs, g_bs, int, S_IRUGO);
MODULE_PARM_DESC(bs, "Block size (in bytes)");
static int nr_devices = 1;
module_param(nr_devices, int, S_IRUGO);
MODULE_PARM_DESC(nr_devices, "Number of devices to register");
-static bool use_lightnvm;
-module_param(use_lightnvm, bool, S_IRUGO);
+static bool g_use_lightnvm;
+module_param_named(use_lightnvm, g_use_lightnvm, bool, S_IRUGO);
MODULE_PARM_DESC(use_lightnvm, "Register as a LightNVM device");
-static bool blocking;
-module_param(blocking, bool, S_IRUGO);
+static bool g_blocking;
+module_param_named(blocking, g_blocking, bool, S_IRUGO);
MODULE_PARM_DESC(blocking, "Register as a blocking blk-mq driver device");
static bool shared_tags;
module_param(shared_tags, bool, S_IRUGO);
MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
-static int irqmode = NULL_IRQ_SOFTIRQ;
+static int g_irqmode = NULL_IRQ_SOFTIRQ;
static int null_set_irqmode(const char *str, const struct kernel_param *kp)
{
- return null_param_store_val(str, &irqmode, NULL_IRQ_NONE,
+ return null_param_store_val(str, &g_irqmode, NULL_IRQ_NONE,
NULL_IRQ_TIMER);
}
@@ -140,21 +229,358 @@ static const struct kernel_param_ops null_irqmode_param_ops = {
.get = param_get_int,
};
-device_param_cb(irqmode, &null_irqmode_param_ops, &irqmode, S_IRUGO);
+device_param_cb(irqmode, &null_irqmode_param_ops, &g_irqmode, S_IRUGO);
MODULE_PARM_DESC(irqmode, "IRQ completion handler. 0-none, 1-softirq, 2-timer");
-static unsigned long completion_nsec = 10000;
-module_param(completion_nsec, ulong, S_IRUGO);
+static unsigned long g_completion_nsec = 10000;
+module_param_named(completion_nsec, g_completion_nsec, ulong, S_IRUGO);
MODULE_PARM_DESC(completion_nsec, "Time in ns to complete a request in hardware. Default: 10,000ns");
-static int hw_queue_depth = 64;
-module_param(hw_queue_depth, int, S_IRUGO);
+static int g_hw_queue_depth = 64;
+module_param_named(hw_queue_depth, g_hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = false;
-module_param(use_per_node_hctx, bool, S_IRUGO);
+static bool g_use_per_node_hctx;
+module_param_named(use_per_node_hctx, g_use_per_node_hctx, bool, S_IRUGO);
MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
+static struct nullb_device *null_alloc_dev(void);
+static void null_free_dev(struct nullb_device *dev);
+static void null_del_dev(struct nullb *nullb);
+static int null_add_dev(struct nullb_device *dev);
+static void null_free_device_storage(struct nullb_device *dev, bool is_cache);
+
+static inline struct nullb_device *to_nullb_device(struct config_item *item)
+{
+ return item ? container_of(item, struct nullb_device, item) : NULL;
+}
+
+static inline ssize_t nullb_device_uint_attr_show(unsigned int val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static inline ssize_t nullb_device_ulong_attr_show(unsigned long val,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%lu\n", val);
+}
+
+static inline ssize_t nullb_device_bool_attr_show(bool val, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%u\n", val);
+}
+
+static ssize_t nullb_device_uint_attr_store(unsigned int *val,
+ const char *page, size_t count)
+{
+ unsigned int tmp;
+ int result;
+
+ result = kstrtouint(page, 0, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t nullb_device_ulong_attr_store(unsigned long *val,
+ const char *page, size_t count)
+{
+ int result;
+ unsigned long tmp;
+
+ result = kstrtoul(page, 0, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+static ssize_t nullb_device_bool_attr_store(bool *val, const char *page,
+ size_t count)
+{
+ bool tmp;
+ int result;
+
+ result = kstrtobool(page, &tmp);
+ if (result)
+ return result;
+
+ *val = tmp;
+ return count;
+}
+
+/* The following macro should only be used with TYPE = {uint, ulong, bool}. */
+#define NULLB_DEVICE_ATTR(NAME, TYPE) \
+static ssize_t \
+nullb_device_##NAME##_show(struct config_item *item, char *page) \
+{ \
+ return nullb_device_##TYPE##_attr_show( \
+ to_nullb_device(item)->NAME, page); \
+} \
+static ssize_t \
+nullb_device_##NAME##_store(struct config_item *item, const char *page, \
+ size_t count) \
+{ \
+ if (test_bit(NULLB_DEV_FL_CONFIGURED, &to_nullb_device(item)->flags)) \
+ return -EBUSY; \
+ return nullb_device_##TYPE##_attr_store( \
+ &to_nullb_device(item)->NAME, page, count); \
+} \
+CONFIGFS_ATTR(nullb_device_, NAME);
+
+NULLB_DEVICE_ATTR(size, ulong);
+NULLB_DEVICE_ATTR(completion_nsec, ulong);
+NULLB_DEVICE_ATTR(submit_queues, uint);
+NULLB_DEVICE_ATTR(home_node, uint);
+NULLB_DEVICE_ATTR(queue_mode, uint);
+NULLB_DEVICE_ATTR(blocksize, uint);
+NULLB_DEVICE_ATTR(irqmode, uint);
+NULLB_DEVICE_ATTR(hw_queue_depth, uint);
+NULLB_DEVICE_ATTR(index, uint);
+NULLB_DEVICE_ATTR(use_lightnvm, bool);
+NULLB_DEVICE_ATTR(blocking, bool);
+NULLB_DEVICE_ATTR(use_per_node_hctx, bool);
+NULLB_DEVICE_ATTR(memory_backed, bool);
+NULLB_DEVICE_ATTR(discard, bool);
+NULLB_DEVICE_ATTR(mbps, uint);
+NULLB_DEVICE_ATTR(cache_size, ulong);
+
+static ssize_t nullb_device_power_show(struct config_item *item, char *page)
+{
+ return nullb_device_bool_attr_show(to_nullb_device(item)->power, page);
+}
+
+static ssize_t nullb_device_power_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+ bool newp = false;
+ ssize_t ret;
+
+ ret = nullb_device_bool_attr_store(&newp, page, count);
+ if (ret < 0)
+ return ret;
+
+ if (!dev->power && newp) {
+ if (test_and_set_bit(NULLB_DEV_FL_UP, &dev->flags))
+ return count;
+ if (null_add_dev(dev)) {
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ return -ENOMEM;
+ }
+
+ set_bit(NULLB_DEV_FL_CONFIGURED, &dev->flags);
+ dev->power = newp;
+ } else if (dev->power && !newp) {
+ mutex_lock(&lock);
+ dev->power = newp;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ clear_bit(NULLB_DEV_FL_UP, &dev->flags);
+ }
+
+ return count;
+}
+
+CONFIGFS_ATTR(nullb_device_, power);
+
+static ssize_t nullb_device_badblocks_show(struct config_item *item, char *page)
+{
+ struct nullb_device *t_dev = to_nullb_device(item);
+
+ return badblocks_show(&t_dev->badblocks, page, 0);
+}
+
+static ssize_t nullb_device_badblocks_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nullb_device *t_dev = to_nullb_device(item);
+ char *orig, *buf, *tmp;
+ u64 start, end;
+ int ret;
+
+ orig = kstrndup(page, count, GFP_KERNEL);
+ if (!orig)
+ return -ENOMEM;
+
+ buf = strstrip(orig);
+
+ ret = -EINVAL;
+ if (buf[0] != '+' && buf[0] != '-')
+ goto out;
+ tmp = strchr(&buf[1], '-');
+ if (!tmp)
+ goto out;
+ *tmp = '\0';
+ ret = kstrtoull(buf + 1, 0, &start);
+ if (ret)
+ goto out;
+ ret = kstrtoull(tmp + 1, 0, &end);
+ if (ret)
+ goto out;
+ ret = -EINVAL;
+ if (start > end)
+ goto out;
+ /* enable badblocks */
+ cmpxchg(&t_dev->badblocks.shift, -1, 0);
+ if (buf[0] == '+')
+ ret = badblocks_set(&t_dev->badblocks, start,
+ end - start + 1, 1);
+ else
+ ret = badblocks_clear(&t_dev->badblocks, start,
+ end - start + 1);
+ if (ret == 0)
+ ret = count;
+out:
+ kfree(orig);
+ return ret;
+}
+CONFIGFS_ATTR(nullb_device_, badblocks);
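Editor's note on usage (hedged, assuming configfs is mounted at the usual /sys/kernel/config): a device directory such as /sys/kernel/config/nullb/nullb0 is created with mkdir; attributes like blocksize, memory_backed or mbps are written while the device is still off; writing 1 to its power attribute brings the disk up via null_add_dev(); and bad-block ranges are added or removed by writing "+START-END" or "-START-END" to the badblocks attribute. The subsystem-level features attribute lists which of these features the driver provides.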
+
+static struct configfs_attribute *nullb_device_attrs[] = {
+ &nullb_device_attr_size,
+ &nullb_device_attr_completion_nsec,
+ &nullb_device_attr_submit_queues,
+ &nullb_device_attr_home_node,
+ &nullb_device_attr_queue_mode,
+ &nullb_device_attr_blocksize,
+ &nullb_device_attr_irqmode,
+ &nullb_device_attr_hw_queue_depth,
+ &nullb_device_attr_index,
+ &nullb_device_attr_use_lightnvm,
+ &nullb_device_attr_blocking,
+ &nullb_device_attr_use_per_node_hctx,
+ &nullb_device_attr_power,
+ &nullb_device_attr_memory_backed,
+ &nullb_device_attr_discard,
+ &nullb_device_attr_mbps,
+ &nullb_device_attr_cache_size,
+ &nullb_device_attr_badblocks,
+ NULL,
+};
+
+static void nullb_device_release(struct config_item *item)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+
+ badblocks_exit(&dev->badblocks);
+ null_free_device_storage(dev, false);
+ null_free_dev(dev);
+}
+
+static struct configfs_item_operations nullb_device_ops = {
+ .release = nullb_device_release,
+};
+
+static struct config_item_type nullb_device_type = {
+ .ct_item_ops = &nullb_device_ops,
+ .ct_attrs = nullb_device_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct config_item *
+nullb_group_make_item(struct config_group *group, const char *name)
+{
+ struct nullb_device *dev;
+
+ dev = null_alloc_dev();
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ config_item_init_type_name(&dev->item, name, &nullb_device_type);
+
+ return &dev->item;
+}
+
+static void
+nullb_group_drop_item(struct config_group *group, struct config_item *item)
+{
+ struct nullb_device *dev = to_nullb_device(item);
+
+ if (test_and_clear_bit(NULLB_DEV_FL_UP, &dev->flags)) {
+ mutex_lock(&lock);
+ dev->power = false;
+ null_del_dev(dev->nullb);
+ mutex_unlock(&lock);
+ }
+
+ config_item_put(item);
+}
+
+static ssize_t memb_group_features_show(struct config_item *item, char *page)
+{
+ return snprintf(page, PAGE_SIZE, "memory_backed,discard,bandwidth,cache,badblocks\n");
+}
+
+CONFIGFS_ATTR_RO(memb_group_, features);
+
+static struct configfs_attribute *nullb_group_attrs[] = {
+ &memb_group_attr_features,
+ NULL,
+};
+
+static struct configfs_group_operations nullb_group_ops = {
+ .make_item = nullb_group_make_item,
+ .drop_item = nullb_group_drop_item,
+};
+
+static struct config_item_type nullb_group_type = {
+ .ct_group_ops = &nullb_group_ops,
+ .ct_attrs = nullb_group_attrs,
+ .ct_owner = THIS_MODULE,
+};
+
+static struct configfs_subsystem nullb_subsys = {
+ .su_group = {
+ .cg_item = {
+ .ci_namebuf = "nullb",
+ .ci_type = &nullb_group_type,
+ },
+ },
+};
+
+static inline int null_cache_active(struct nullb *nullb)
+{
+ return test_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+}
+
+static struct nullb_device *null_alloc_dev(void)
+{
+ struct nullb_device *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+ INIT_RADIX_TREE(&dev->data, GFP_ATOMIC);
+ INIT_RADIX_TREE(&dev->cache, GFP_ATOMIC);
+ if (badblocks_init(&dev->badblocks, 0)) {
+ kfree(dev);
+ return NULL;
+ }
+
+ dev->size = g_gb * 1024;
+ dev->completion_nsec = g_completion_nsec;
+ dev->submit_queues = g_submit_queues;
+ dev->home_node = g_home_node;
+ dev->queue_mode = g_queue_mode;
+ dev->blocksize = g_bs;
+ dev->irqmode = g_irqmode;
+ dev->hw_queue_depth = g_hw_queue_depth;
+ dev->use_lightnvm = g_use_lightnvm;
+ dev->blocking = g_blocking;
+ dev->use_per_node_hctx = g_use_per_node_hctx;
+ return dev;
+}
+
+static void null_free_dev(struct nullb_device *dev)
+{
+ kfree(dev);
+}
+
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
clear_bit_unlock(tag, nq->tag_map);
@@ -193,7 +619,7 @@ static struct nullb_cmd *__alloc_cmd(struct nullb_queue *nq)
cmd = &nq->cmds[tag];
cmd->tag = tag;
cmd->nq = nq;
- if (irqmode == NULL_IRQ_TIMER) {
+ if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
@@ -229,19 +655,21 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
+ int queue_mode = cmd->nq->dev->queue_mode;
if (cmd->rq)
q = cmd->rq->q;
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_end_request(cmd->rq, BLK_STS_OK);
+ blk_mq_end_request(cmd->rq, cmd->error);
return;
case NULL_Q_RQ:
INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, BLK_STS_OK);
+ blk_end_request_all(cmd->rq, cmd->error);
break;
case NULL_Q_BIO:
+ cmd->bio->bi_status = cmd->error;
bio_endio(cmd->bio);
break;
}
@@ -267,25 +695,582 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
static void null_cmd_end_timer(struct nullb_cmd *cmd)
{
- ktime_t kt = completion_nsec;
+ ktime_t kt = cmd->nq->dev->completion_nsec;
hrtimer_start(&cmd->timer, kt, HRTIMER_MODE_REL);
}
static void null_softirq_done_fn(struct request *rq)
{
- if (queue_mode == NULL_Q_MQ)
+ struct nullb *nullb = rq->q->queuedata;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
end_cmd(blk_mq_rq_to_pdu(rq));
else
end_cmd(rq->special);
}
-static inline void null_handle_cmd(struct nullb_cmd *cmd)
+static struct nullb_page *null_alloc_page(gfp_t gfp_flags)
+{
+ struct nullb_page *t_page;
+
+ t_page = kmalloc(sizeof(struct nullb_page), gfp_flags);
+ if (!t_page)
+ goto out;
+
+ t_page->page = alloc_pages(gfp_flags, 0);
+ if (!t_page->page)
+ goto out_freepage;
+
+ t_page->bitmap = 0;
+ return t_page;
+out_freepage:
+ kfree(t_page);
+out:
+ return NULL;
+}
+
+static void null_free_page(struct nullb_page *t_page)
+{
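+	/*
+	 * Mark the page free; if the cache-flush path currently holds the
+	 * page lock, it performs the actual free once it is done with it.
+	 */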
+ __set_bit(NULLB_PAGE_FREE, &t_page->bitmap);
+ if (test_bit(NULLB_PAGE_LOCK, &t_page->bitmap))
+ return;
+ __free_page(t_page->page);
+ kfree(t_page);
+}
+
+static void null_free_sector(struct nullb *nullb, sector_t sector,
+ bool is_cache)
+{
+ unsigned int sector_bit;
+ u64 idx;
+ struct nullb_page *t_page, *ret;
+ struct radix_tree_root *root;
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ sector_bit = (sector & SECTOR_MASK);
+
+ t_page = radix_tree_lookup(root, idx);
+ if (t_page) {
+ __clear_bit(sector_bit, &t_page->bitmap);
+
+ if (!t_page->bitmap) {
+ ret = radix_tree_delete_item(root, idx, t_page);
+ WARN_ON(ret != t_page);
+ null_free_page(ret);
+ if (is_cache)
+ nullb->dev->curr_cache -= PAGE_SIZE;
+ }
+ }
+}
+
+static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
+ struct nullb_page *t_page, bool is_cache)
+{
+ struct radix_tree_root *root;
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+
+ if (radix_tree_insert(root, idx, t_page)) {
+ null_free_page(t_page);
+ t_page = radix_tree_lookup(root, idx);
+ WARN_ON(!t_page || t_page->page->index != idx);
+ } else if (is_cache)
+ nullb->dev->curr_cache += PAGE_SIZE;
+
+ return t_page;
+}
+
+static void null_free_device_storage(struct nullb_device *dev, bool is_cache)
+{
+ unsigned long pos = 0;
+ int nr_pages;
+ struct nullb_page *ret, *t_pages[FREE_BATCH];
+ struct radix_tree_root *root;
+
+ root = is_cache ? &dev->cache : &dev->data;
+
+ do {
+ int i;
+
+ nr_pages = radix_tree_gang_lookup(root,
+ (void **)t_pages, pos, FREE_BATCH);
+
+ for (i = 0; i < nr_pages; i++) {
+ pos = t_pages[i]->page->index;
+ ret = radix_tree_delete_item(root, pos, t_pages[i]);
+ WARN_ON(ret != t_pages[i]);
+ null_free_page(ret);
+ }
+
+ pos++;
+ } while (nr_pages == FREE_BATCH);
+
+ if (is_cache)
+ dev->curr_cache = 0;
+}
+
+static struct nullb_page *__null_lookup_page(struct nullb *nullb,
+ sector_t sector, bool for_write, bool is_cache)
+{
+ unsigned int sector_bit;
+ u64 idx;
+ struct nullb_page *t_page;
+ struct radix_tree_root *root;
+
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ sector_bit = (sector & SECTOR_MASK);
+
+ root = is_cache ? &nullb->dev->cache : &nullb->dev->data;
+ t_page = radix_tree_lookup(root, idx);
+ WARN_ON(t_page && t_page->page->index != idx);
+
+ if (t_page && (for_write || test_bit(sector_bit, &t_page->bitmap)))
+ return t_page;
+
+ return NULL;
+}
+
+static struct nullb_page *null_lookup_page(struct nullb *nullb,
+ sector_t sector, bool for_write, bool ignore_cache)
+{
+ struct nullb_page *page = NULL;
+
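+	/* Prefer a cached copy, then fall back to the backing data tree. */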
+ if (!ignore_cache)
+ page = __null_lookup_page(nullb, sector, for_write, true);
+ if (page)
+ return page;
+ return __null_lookup_page(nullb, sector, for_write, false);
+}
+
+static struct nullb_page *null_insert_page(struct nullb *nullb,
+ sector_t sector, bool ignore_cache)
+{
+ u64 idx;
+ struct nullb_page *t_page;
+
+ t_page = null_lookup_page(nullb, sector, true, ignore_cache);
+ if (t_page)
+ return t_page;
+
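+	/*
+	 * Drop the lock while allocating the page and preloading the radix
+	 * tree; the insert below re-checks for a racing insertion.
+	 */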
+ spin_unlock_irq(&nullb->lock);
+
+ t_page = null_alloc_page(GFP_NOIO);
+ if (!t_page)
+ goto out_lock;
+
+ if (radix_tree_preload(GFP_NOIO))
+ goto out_freepage;
+
+ spin_lock_irq(&nullb->lock);
+ idx = sector >> PAGE_SECTORS_SHIFT;
+ t_page->page->index = idx;
+ t_page = null_radix_tree_insert(nullb, idx, t_page, !ignore_cache);
+ radix_tree_preload_end();
+
+ return t_page;
+out_freepage:
+ null_free_page(t_page);
+out_lock:
+ spin_lock_irq(&nullb->lock);
+ return null_lookup_page(nullb, sector, true, ignore_cache);
+}
+
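+/*
+ * Write one cache page back to the data radix tree. The caller must have set
+ * NULLB_PAGE_LOCK on the page; if the page was marked free in the meantime it
+ * is simply dropped.
+ */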
+static int null_flush_cache_page(struct nullb *nullb, struct nullb_page *c_page)
+{
+ int i;
+ unsigned int offset;
+ u64 idx;
+ struct nullb_page *t_page, *ret;
+ void *dst, *src;
+
+ idx = c_page->page->index;
+
+ t_page = null_insert_page(nullb, idx << PAGE_SECTORS_SHIFT, true);
+
+ __clear_bit(NULLB_PAGE_LOCK, &c_page->bitmap);
+ if (test_bit(NULLB_PAGE_FREE, &c_page->bitmap)) {
+ null_free_page(c_page);
+ if (t_page && t_page->bitmap == 0) {
+ ret = radix_tree_delete_item(&nullb->dev->data,
+ idx, t_page);
+ null_free_page(t_page);
+ }
+ return 0;
+ }
+
+ if (!t_page)
+ return -ENOMEM;
+
+ src = kmap_atomic(c_page->page);
+ dst = kmap_atomic(t_page->page);
+
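+	/* Copy only the sectors marked valid in the cache page bitmap. */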
+ for (i = 0; i < PAGE_SECTORS;
+ i += (nullb->dev->blocksize >> SECTOR_SHIFT)) {
+ if (test_bit(i, &c_page->bitmap)) {
+ offset = (i << SECTOR_SHIFT);
+ memcpy(dst + offset, src + offset,
+ nullb->dev->blocksize);
+ __set_bit(i, &t_page->bitmap);
+ }
+ }
+
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ ret = radix_tree_delete_item(&nullb->dev->cache, idx, c_page);
+ null_free_page(ret);
+ nullb->dev->curr_cache -= PAGE_SIZE;
+
+ return 0;
+}
+
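+/*
+ * Flush cache pages to the backing store, FREE_BATCH at a time, until there
+ * is room for at least @n more bytes. Called with nullb->lock held; the lock
+ * may be dropped and re-taken while pages are written back.
+ */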
+static int null_make_cache_space(struct nullb *nullb, unsigned long n)
{
+ int i, err, nr_pages;
+ struct nullb_page *c_pages[FREE_BATCH];
+ unsigned long flushed = 0, one_round;
+
+again:
+ if ((nullb->dev->cache_size * 1024 * 1024) >
+ nullb->dev->curr_cache + n || nullb->dev->curr_cache == 0)
+ return 0;
+
+ nr_pages = radix_tree_gang_lookup(&nullb->dev->cache,
+ (void **)c_pages, nullb->cache_flush_pos, FREE_BATCH);
+	/*
+	 * null_flush_cache_page() may drop the lock before it is done with
+	 * c_pages, so lock each page here to keep it from being freed
+	 * underneath us.
+	 */
+ for (i = 0; i < nr_pages; i++) {
+ nullb->cache_flush_pos = c_pages[i]->page->index;
+		/*
+		 * Another thread is already flushing this page to the
+		 * backing store; skip it.
+		 */
+ if (test_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap))
+ c_pages[i] = NULL;
+ else
+ __set_bit(NULLB_PAGE_LOCK, &c_pages[i]->bitmap);
+ }
+
+ one_round = 0;
+ for (i = 0; i < nr_pages; i++) {
+ if (c_pages[i] == NULL)
+ continue;
+ err = null_flush_cache_page(nullb, c_pages[i]);
+ if (err)
+ return err;
+ one_round++;
+ }
+ flushed += one_round << PAGE_SHIFT;
+
+ if (n > flushed) {
+ if (nr_pages == 0)
+ nullb->cache_flush_pos = 0;
+ if (one_round == 0) {
+ /* give other threads a chance */
+ spin_unlock_irq(&nullb->lock);
+ spin_lock_irq(&nullb->lock);
+ }
+ goto again;
+ }
+ return 0;
+}
+
+static int copy_to_nullb(struct nullb *nullb, struct page *source,
+ unsigned int off, sector_t sector, size_t n, bool is_fua)
+{
+ size_t temp, count = 0;
+ unsigned int offset;
+ struct nullb_page *t_page;
+ void *dst, *src;
+
+ while (count < n) {
+ temp = min_t(size_t, nullb->dev->blocksize, n - count);
+
+ if (null_cache_active(nullb) && !is_fua)
+ null_make_cache_space(nullb, PAGE_SIZE);
+
+ offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ t_page = null_insert_page(nullb, sector,
+ !null_cache_active(nullb) || is_fua);
+ if (!t_page)
+ return -ENOSPC;
+
+ src = kmap_atomic(source);
+ dst = kmap_atomic(t_page->page);
+ memcpy(dst + offset, src + off + count, temp);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+
+ __set_bit(sector & SECTOR_MASK, &t_page->bitmap);
+
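+		/*
+		 * A FUA write bypasses the cache, so invalidate any stale
+		 * cached copy of this sector.
+		 */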
+ if (is_fua)
+ null_free_sector(nullb, sector, true);
+
+ count += temp;
+ sector += temp >> SECTOR_SHIFT;
+ }
+ return 0;
+}
+
+static int copy_from_nullb(struct nullb *nullb, struct page *dest,
+ unsigned int off, sector_t sector, size_t n)
+{
+ size_t temp, count = 0;
+ unsigned int offset;
+ struct nullb_page *t_page;
+ void *dst, *src;
+
+ while (count < n) {
+ temp = min_t(size_t, nullb->dev->blocksize, n - count);
+
+ offset = (sector & SECTOR_MASK) << SECTOR_SHIFT;
+ t_page = null_lookup_page(nullb, sector, false,
+ !null_cache_active(nullb));
+
+ dst = kmap_atomic(dest);
+ if (!t_page) {
+ memset(dst + off + count, 0, temp);
+ goto next;
+ }
+ src = kmap_atomic(t_page->page);
+ memcpy(dst + off + count, src + offset, temp);
+ kunmap_atomic(src);
+next:
+ kunmap_atomic(dst);
+
+ count += temp;
+ sector += temp >> SECTOR_SHIFT;
+ }
+ return 0;
+}
+
+static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
+{
+ size_t temp;
+
+ spin_lock_irq(&nullb->lock);
+ while (n > 0) {
+ temp = min_t(size_t, n, nullb->dev->blocksize);
+ null_free_sector(nullb, sector, false);
+ if (null_cache_active(nullb))
+ null_free_sector(nullb, sector, true);
+ sector += temp >> SECTOR_SHIFT;
+ n -= temp;
+ }
+ spin_unlock_irq(&nullb->lock);
+}
+
+static int null_handle_flush(struct nullb *nullb)
+{
+ int err;
+
+ if (!null_cache_active(nullb))
+ return 0;
+
+ spin_lock_irq(&nullb->lock);
+ while (true) {
+ err = null_make_cache_space(nullb,
+ nullb->dev->cache_size * 1024 * 1024);
+ if (err || nullb->dev->curr_cache == 0)
+ break;
+ }
+
+ WARN_ON(!radix_tree_empty(&nullb->dev->cache));
+ spin_unlock_irq(&nullb->lock);
+ return err;
+}
+
+static int null_transfer(struct nullb *nullb, struct page *page,
+ unsigned int len, unsigned int off, bool is_write, sector_t sector,
+ bool is_fua)
+{
+ int err = 0;
+
+ if (!is_write) {
+ err = copy_from_nullb(nullb, page, off, sector, len);
+ flush_dcache_page(page);
+ } else {
+ flush_dcache_page(page);
+ err = copy_to_nullb(nullb, page, off, sector, len, is_fua);
+ }
+
+ return err;
+}
+
+static int null_handle_rq(struct nullb_cmd *cmd)
+{
+ struct request *rq = cmd->rq;
+ struct nullb *nullb = cmd->nq->dev->nullb;
+ int err;
+ unsigned int len;
+ sector_t sector;
+ struct req_iterator iter;
+ struct bio_vec bvec;
+
+ sector = blk_rq_pos(rq);
+
+ if (req_op(rq) == REQ_OP_DISCARD) {
+ null_handle_discard(nullb, sector, blk_rq_bytes(rq));
+ return 0;
+ }
+
+ spin_lock_irq(&nullb->lock);
+ rq_for_each_segment(bvec, rq, iter) {
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(req_op(rq)), sector,
+ req_op(rq) & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
+ }
+ sector += len >> SECTOR_SHIFT;
+ }
+ spin_unlock_irq(&nullb->lock);
+
+ return 0;
+}
+
+static int null_handle_bio(struct nullb_cmd *cmd)
+{
+ struct bio *bio = cmd->bio;
+ struct nullb *nullb = cmd->nq->dev->nullb;
+ int err;
+ unsigned int len;
+ sector_t sector;
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+
+ sector = bio->bi_iter.bi_sector;
+
+ if (bio_op(bio) == REQ_OP_DISCARD) {
+ null_handle_discard(nullb, sector,
+ bio_sectors(bio) << SECTOR_SHIFT);
+ return 0;
+ }
+
+ spin_lock_irq(&nullb->lock);
+ bio_for_each_segment(bvec, bio, iter) {
+ len = bvec.bv_len;
+ err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
+ op_is_write(bio_op(bio)), sector,
+ bio_op(bio) & REQ_FUA);
+ if (err) {
+ spin_unlock_irq(&nullb->lock);
+ return err;
+ }
+ sector += len >> SECTOR_SHIFT;
+ }
+ spin_unlock_irq(&nullb->lock);
+ return 0;
+}
+
+static void null_stop_queue(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
+ blk_mq_stop_hw_queues(q);
+ else {
+ spin_lock_irq(q->queue_lock);
+ blk_stop_queue(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+}
+
+static void null_restart_queue_async(struct nullb *nullb)
+{
+ struct request_queue *q = nullb->q;
+ unsigned long flags;
+
+ if (nullb->dev->queue_mode == NULL_Q_MQ)
+ blk_mq_start_stopped_hw_queues(q, true);
+ else {
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue_async(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
+
+static blk_status_t null_handle_cmd(struct nullb_cmd *cmd)
+{
+ struct nullb_device *dev = cmd->nq->dev;
+ struct nullb *nullb = dev->nullb;
+ int err = 0;
+
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &dev->flags)) {
+ struct request *rq = cmd->rq;
+
+ if (!hrtimer_active(&nullb->bw_timer))
+ hrtimer_restart(&nullb->bw_timer);
+
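+		/*
+		 * Charge the request against the per-tick byte budget; if the
+		 * budget runs out, stop the queue until the bandwidth timer
+		 * refills it (blk-mq requests are simply requeued).
+		 */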
+ if (atomic_long_sub_return(blk_rq_bytes(rq),
+ &nullb->cur_bytes) < 0) {
+ null_stop_queue(nullb);
+			/* the bw timer may have refilled the budget meanwhile */
+ if (atomic_long_read(&nullb->cur_bytes) > 0)
+ null_restart_queue_async(nullb);
+ if (dev->queue_mode == NULL_Q_RQ) {
+ struct request_queue *q = nullb->q;
+
+ spin_lock_irq(q->queue_lock);
+ rq->rq_flags |= RQF_DONTPREP;
+ blk_requeue_request(q, rq);
+ spin_unlock_irq(q->queue_lock);
+ return BLK_STS_OK;
+ } else
+ /* requeue request */
+ return BLK_STS_RESOURCE;
+ }
+ }
+
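+	/*
+	 * badblocks.shift stays -1 until a bad range is configured through
+	 * configfs; once enabled, fail any I/O that touches a bad range.
+	 */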
+ if (nullb->dev->badblocks.shift != -1) {
+ int bad_sectors;
+ sector_t sector, size, first_bad;
+ bool is_flush = true;
+
+ if (dev->queue_mode == NULL_Q_BIO &&
+ bio_op(cmd->bio) != REQ_OP_FLUSH) {
+ is_flush = false;
+ sector = cmd->bio->bi_iter.bi_sector;
+ size = bio_sectors(cmd->bio);
+ }
+ if (dev->queue_mode != NULL_Q_BIO &&
+ req_op(cmd->rq) != REQ_OP_FLUSH) {
+ is_flush = false;
+ sector = blk_rq_pos(cmd->rq);
+ size = blk_rq_sectors(cmd->rq);
+ }
+ if (!is_flush && badblocks_check(&nullb->dev->badblocks, sector,
+ size, &first_bad, &bad_sectors)) {
+ cmd->error = BLK_STS_IOERR;
+ goto out;
+ }
+ }
+
+ if (dev->memory_backed) {
+ if (dev->queue_mode == NULL_Q_BIO) {
+ if (bio_op(cmd->bio) == REQ_OP_FLUSH)
+ err = null_handle_flush(nullb);
+ else
+ err = null_handle_bio(cmd);
+ } else {
+ if (req_op(cmd->rq) == REQ_OP_FLUSH)
+ err = null_handle_flush(nullb);
+ else
+ err = null_handle_rq(cmd);
+ }
+ }
+ cmd->error = errno_to_blk_status(err);
+out:
/* Complete IO by inline, softirq or timer */
- switch (irqmode) {
+ switch (dev->irqmode) {
case NULL_IRQ_SOFTIRQ:
- switch (queue_mode) {
+ switch (dev->queue_mode) {
case NULL_Q_MQ:
blk_mq_complete_request(cmd->rq);
break;
@@ -307,6 +1292,34 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
null_cmd_end_timer(cmd);
break;
}
+ return BLK_STS_OK;
+}
+
+static enum hrtimer_restart nullb_bwtimer_fn(struct hrtimer *timer)
+{
+ struct nullb *nullb = container_of(timer, struct nullb, bw_timer);
+ ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
+ unsigned int mbps = nullb->dev->mbps;
+
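+	/*
+	 * If the budget is still full, nothing was submitted during the last
+	 * tick; let the timer die. Otherwise refill the budget and restart
+	 * any queue the throttle stopped.
+	 */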
+ if (atomic_long_read(&nullb->cur_bytes) == mb_per_tick(mbps))
+ return HRTIMER_NORESTART;
+
+ atomic_long_set(&nullb->cur_bytes, mb_per_tick(mbps));
+ null_restart_queue_async(nullb);
+
+ hrtimer_forward_now(&nullb->bw_timer, timer_interval);
+
+ return HRTIMER_RESTART;
+}
+
+static void nullb_setup_bwtimer(struct nullb *nullb)
+{
+ ktime_t timer_interval = ktime_set(0, TIMER_INTERVAL);
+
+ hrtimer_init(&nullb->bw_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ nullb->bw_timer.function = nullb_bwtimer_fn;
+ atomic_long_set(&nullb->cur_bytes, mb_per_tick(nullb->dev->mbps));
+ hrtimer_start(&nullb->bw_timer, timer_interval, HRTIMER_MODE_REL);
}
static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
@@ -366,20 +1379,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
+ struct nullb_queue *nq = hctx->driver_data;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- if (irqmode == NULL_IRQ_TIMER) {
+ if (nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
cmd->rq = bd->rq;
- cmd->nq = hctx->driver_data;
+ cmd->nq = nq;
blk_mq_start_request(bd->rq);
- null_handle_cmd(cmd);
- return BLK_STS_OK;
+ return null_handle_cmd(cmd);
}
static const struct blk_mq_ops null_mq_ops = {
@@ -438,7 +1451,8 @@ static int null_lnvm_submit_io(struct nvm_dev *dev, struct nvm_rq *rqd)
static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
{
- sector_t size = gb * 1024 * 1024 * 1024ULL;
+ struct nullb *nullb = dev->q->queuedata;
+ sector_t size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
sector_t blksize;
struct nvm_id_group *grp;
@@ -460,7 +1474,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
id->ppaf.ch_offset = 56;
id->ppaf.ch_len = 8;
- sector_div(size, bs); /* convert size to pages */
+ sector_div(size, nullb->dev->blocksize); /* convert size to pages */
	size >>= 8;		/* convert size to pages per block */
grp = &id->grp;
grp->mtype = 0;
@@ -474,8 +1488,8 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
grp->num_blk = blksize;
grp->num_pln = 1;
- grp->fpg_sz = bs;
- grp->csecs = bs;
+ grp->fpg_sz = nullb->dev->blocksize;
+ grp->csecs = nullb->dev->blocksize;
grp->trdt = 25000;
grp->trdm = 25000;
grp->tprt = 500000;
@@ -483,7 +1497,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
grp->tbet = 1500000;
grp->tbem = 1500000;
grp->mpos = 0x010101; /* single plane rwe */
- grp->cpar = hw_queue_depth;
+ grp->cpar = nullb->dev->hw_queue_depth;
return 0;
}
@@ -568,19 +1582,44 @@ static void null_nvm_unregister(struct nullb *nullb) {}
static void null_del_dev(struct nullb *nullb)
{
+ struct nullb_device *dev = nullb->dev;
+
+ ida_simple_remove(&nullb_indexes, nullb->index);
+
list_del_init(&nullb->list);
- if (use_lightnvm)
+ if (dev->use_lightnvm)
null_nvm_unregister(nullb);
else
del_gendisk(nullb->disk);
+
+ if (test_bit(NULLB_DEV_FL_THROTTLED, &nullb->dev->flags)) {
+ hrtimer_cancel(&nullb->bw_timer);
+ atomic_long_set(&nullb->cur_bytes, LONG_MAX);
+ null_restart_queue_async(nullb);
+ }
+
blk_cleanup_queue(nullb->q);
- if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (dev->queue_mode == NULL_Q_MQ &&
+ nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
- if (!use_lightnvm)
+ if (!dev->use_lightnvm)
put_disk(nullb->disk);
cleanup_queues(nullb);
+ if (null_cache_active(nullb))
+ null_free_device_storage(nullb->dev, true);
kfree(nullb);
+ dev->nullb = NULL;
+}
+
+static void null_config_discard(struct nullb *nullb)
+{
+ if (nullb->dev->discard == false)
+ return;
+ nullb->q->limits.discard_granularity = nullb->dev->blocksize;
+ nullb->q->limits.discard_alignment = nullb->dev->blocksize;
+ blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
}
static int null_open(struct block_device *bdev, fmode_t mode)
@@ -605,6 +1644,7 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
+ nq->dev = nullb->dev;
}
static void null_init_queues(struct nullb *nullb)
@@ -652,13 +1692,13 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
- GFP_KERNEL);
+ nullb->queues = kzalloc(nullb->dev->submit_queues *
+ sizeof(struct nullb_queue), GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->nr_queues = 0;
- nullb->queue_depth = hw_queue_depth;
+ nullb->queue_depth = nullb->dev->hw_queue_depth;
return 0;
}
@@ -668,7 +1708,7 @@ static int init_driver_queues(struct nullb *nullb)
struct nullb_queue *nq;
int i, ret = 0;
- for (i = 0; i < submit_queues; i++) {
+ for (i = 0; i < nullb->dev->submit_queues; i++) {
nq = &nullb->queues[i];
null_init_queue(nullb, nq);
@@ -686,10 +1726,10 @@ static int null_gendisk_register(struct nullb *nullb)
struct gendisk *disk;
sector_t size;
- disk = nullb->disk = alloc_disk_node(1, home_node);
+ disk = nullb->disk = alloc_disk_node(1, nullb->dev->home_node);
if (!disk)
return -ENOMEM;
- size = gb * 1024 * 1024 * 1024ULL;
+ size = (sector_t)nullb->dev->size * 1024 * 1024ULL;
set_capacity(disk, size >> 9);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
@@ -704,49 +1744,86 @@ static int null_gendisk_register(struct nullb *nullb)
return 0;
}
-static int null_init_tag_set(struct blk_mq_tag_set *set)
+static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
set->ops = &null_mq_ops;
- set->nr_hw_queues = submit_queues;
- set->queue_depth = hw_queue_depth;
- set->numa_node = home_node;
+ set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
+ g_submit_queues;
+ set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
+ g_hw_queue_depth;
+ set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
set->cmd_size = sizeof(struct nullb_cmd);
set->flags = BLK_MQ_F_SHOULD_MERGE;
set->driver_data = NULL;
- if (blocking)
+ if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
return blk_mq_alloc_tag_set(set);
}
-static int null_add_dev(void)
+static void null_validate_conf(struct nullb_device *dev)
+{
+ dev->blocksize = round_down(dev->blocksize, 512);
+ dev->blocksize = clamp_t(unsigned int, dev->blocksize, 512, 4096);
+ if (dev->use_lightnvm && dev->blocksize != 4096)
+ dev->blocksize = 4096;
+
+ if (dev->use_lightnvm && dev->queue_mode != NULL_Q_MQ)
+ dev->queue_mode = NULL_Q_MQ;
+
+ if (dev->queue_mode == NULL_Q_MQ && dev->use_per_node_hctx) {
+ if (dev->submit_queues != nr_online_nodes)
+ dev->submit_queues = nr_online_nodes;
+ } else if (dev->submit_queues > nr_cpu_ids)
+ dev->submit_queues = nr_cpu_ids;
+ else if (dev->submit_queues == 0)
+ dev->submit_queues = 1;
+
+ dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
+ dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
+
+	/* memory backing allocates pages in the I/O path, so allow blocking */
+ if (dev->memory_backed)
+ dev->blocking = true;
+	else /* a cache is meaningless without memory backing */
+ dev->cache_size = 0;
+ dev->cache_size = min_t(unsigned long, ULONG_MAX / 1024 / 1024,
+ dev->cache_size);
+ dev->mbps = min_t(unsigned int, 1024 * 40, dev->mbps);
+	/* a bio-based queue cannot be stopped, so throttling is not supported */
+ if (dev->queue_mode == NULL_Q_BIO)
+ dev->mbps = 0;
+}
+
+static int null_add_dev(struct nullb_device *dev)
{
struct nullb *nullb;
int rv;
- nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node);
+ null_validate_conf(dev);
+
+ nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, dev->home_node);
if (!nullb) {
rv = -ENOMEM;
goto out;
}
+ nullb->dev = dev;
+ dev->nullb = nullb;
spin_lock_init(&nullb->lock);
- if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
- submit_queues = nr_online_nodes;
-
rv = setup_queues(nullb);
if (rv)
goto out_free_nullb;
- if (queue_mode == NULL_Q_MQ) {
+ if (dev->queue_mode == NULL_Q_MQ) {
if (shared_tags) {
nullb->tag_set = &tag_set;
rv = 0;
} else {
nullb->tag_set = &nullb->__tag_set;
- rv = null_init_tag_set(nullb->tag_set);
+ rv = null_init_tag_set(nullb, nullb->tag_set);
}
if (rv)
@@ -758,8 +1835,8 @@ static int null_add_dev(void)
goto out_cleanup_tags;
}
null_init_queues(nullb);
- } else if (queue_mode == NULL_Q_BIO) {
- nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
+ } else if (dev->queue_mode == NULL_Q_BIO) {
+ nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
@@ -769,7 +1846,8 @@ static int null_add_dev(void)
if (rv)
goto out_cleanup_blk_queue;
} else {
- nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
+ nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock,
+ dev->home_node);
if (!nullb->q) {
rv = -ENOMEM;
goto out_cleanup_queues;
@@ -781,20 +1859,34 @@ static int null_add_dev(void)
goto out_cleanup_blk_queue;
}
+ if (dev->mbps) {
+ set_bit(NULLB_DEV_FL_THROTTLED, &dev->flags);
+ nullb_setup_bwtimer(nullb);
+ }
+
+ if (dev->cache_size > 0) {
+ set_bit(NULLB_DEV_FL_CACHE, &nullb->dev->flags);
+ blk_queue_write_cache(nullb->q, true, true);
+ blk_queue_flush_queueable(nullb->q, true);
+ }
+
nullb->q->queuedata = nullb;
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
mutex_lock(&lock);
- nullb->index = nullb_indexes++;
+ nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
+ dev->index = nullb->index;
mutex_unlock(&lock);
- blk_queue_logical_block_size(nullb->q, bs);
- blk_queue_physical_block_size(nullb->q, bs);
+ blk_queue_logical_block_size(nullb->q, dev->blocksize);
+ blk_queue_physical_block_size(nullb->q, dev->blocksize);
+
+ null_config_discard(nullb);
sprintf(nullb->disk_name, "nullb%d", nullb->index);
- if (use_lightnvm)
+ if (dev->use_lightnvm)
rv = null_nvm_register(nullb);
else
rv = null_gendisk_register(nullb);
@@ -810,7 +1902,7 @@ static int null_add_dev(void)
out_cleanup_blk_queue:
blk_cleanup_queue(nullb->q);
out_cleanup_tags:
- if (queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
+ if (dev->queue_mode == NULL_Q_MQ && nullb->tag_set == &nullb->__tag_set)
blk_mq_free_tag_set(nullb->tag_set);
out_cleanup_queues:
cleanup_queues(nullb);
@@ -825,51 +1917,63 @@ static int __init null_init(void)
int ret = 0;
unsigned int i;
struct nullb *nullb;
+ struct nullb_device *dev;
+
+	/*
+	 * nullb_page.bitmap must hold one bit per sector in a page plus the
+	 * LOCK and FREE flag bits.
+	 */
+ if (sizeof(unsigned long) * 8 - 2 < (PAGE_SIZE >> SECTOR_SHIFT))
+ return -EINVAL;
- if (bs > PAGE_SIZE) {
+ if (g_bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
- bs = PAGE_SIZE;
+ g_bs = PAGE_SIZE;
}
- if (use_lightnvm && bs != 4096) {
+ if (g_use_lightnvm && g_bs != 4096) {
pr_warn("null_blk: LightNVM only supports 4k block size\n");
pr_warn("null_blk: defaults block size to 4k\n");
- bs = 4096;
+ g_bs = 4096;
}
- if (use_lightnvm && queue_mode != NULL_Q_MQ) {
+ if (g_use_lightnvm && g_queue_mode != NULL_Q_MQ) {
pr_warn("null_blk: LightNVM only supported for blk-mq\n");
pr_warn("null_blk: defaults queue mode to blk-mq\n");
- queue_mode = NULL_Q_MQ;
+ g_queue_mode = NULL_Q_MQ;
}
- if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
- if (submit_queues < nr_online_nodes) {
- pr_warn("null_blk: submit_queues param is set to %u.",
+ if (g_queue_mode == NULL_Q_MQ && g_use_per_node_hctx) {
+ if (g_submit_queues != nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.\n",
nr_online_nodes);
- submit_queues = nr_online_nodes;
+ g_submit_queues = nr_online_nodes;
}
- } else if (submit_queues > nr_cpu_ids)
- submit_queues = nr_cpu_ids;
- else if (!submit_queues)
- submit_queues = 1;
+ } else if (g_submit_queues > nr_cpu_ids)
+ g_submit_queues = nr_cpu_ids;
+ else if (g_submit_queues <= 0)
+ g_submit_queues = 1;
- if (queue_mode == NULL_Q_MQ && shared_tags) {
- ret = null_init_tag_set(&tag_set);
+ if (g_queue_mode == NULL_Q_MQ && shared_tags) {
+ ret = null_init_tag_set(NULL, &tag_set);
if (ret)
return ret;
}
+ config_group_init(&nullb_subsys.su_group);
+ mutex_init(&nullb_subsys.su_mutex);
+
+ ret = configfs_register_subsystem(&nullb_subsys);
+ if (ret)
+ goto err_tagset;
+
mutex_init(&lock);
null_major = register_blkdev(0, "nullb");
if (null_major < 0) {
ret = null_major;
- goto err_tagset;
+ goto err_conf;
}
- if (use_lightnvm) {
+ if (g_use_lightnvm) {
ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64),
0, 0, NULL);
if (!ppa_cache) {
@@ -880,9 +1984,14 @@ static int __init null_init(void)
}
for (i = 0; i < nr_devices; i++) {
- ret = null_add_dev();
- if (ret)
+ dev = null_alloc_dev();
+ if (!dev)
+ goto err_dev;
+ ret = null_add_dev(dev);
+ if (ret) {
+ null_free_dev(dev);
goto err_dev;
+ }
}
pr_info("null: module loaded\n");
@@ -891,13 +2000,17 @@ static int __init null_init(void)
err_dev:
while (!list_empty(&nullb_list)) {
nullb = list_entry(nullb_list.next, struct nullb, list);
+ dev = nullb->dev;
null_del_dev(nullb);
+ null_free_dev(dev);
}
kmem_cache_destroy(ppa_cache);
err_ppa:
unregister_blkdev(null_major, "nullb");
+err_conf:
+ configfs_unregister_subsystem(&nullb_subsys);
err_tagset:
- if (queue_mode == NULL_Q_MQ && shared_tags)
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
return ret;
}
@@ -906,16 +2019,22 @@ static void __exit null_exit(void)
{
struct nullb *nullb;
+ configfs_unregister_subsystem(&nullb_subsys);
+
unregister_blkdev(null_major, "nullb");
mutex_lock(&lock);
while (!list_empty(&nullb_list)) {
+ struct nullb_device *dev;
+
nullb = list_entry(nullb_list.next, struct nullb, list);
+ dev = nullb->dev;
null_del_dev(nullb);
+ null_free_dev(dev);
}
mutex_unlock(&lock);
- if (queue_mode == NULL_Q_MQ && shared_tags)
+ if (g_queue_mode == NULL_Q_MQ && shared_tags)
blk_mq_free_tag_set(&tag_set);
kmem_cache_destroy(ppa_cache);
@@ -924,5 +2043,5 @@ static void __exit null_exit(void)
module_init(null_init);
module_exit(null_exit);
-MODULE_AUTHOR("Jens Axboe <jaxboe@fusionio.com>");
+MODULE_AUTHOR("Jens Axboe <axboe@kernel.dk>");
MODULE_LICENSE("GPL");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 6b8b097abbb9..67974796c350 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f];
bio_reset(bio);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
- bio->bi_bdev = pd->bdev;
+ bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
@@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
pkt->sector = new_sector;
bio_reset(pkt->bio);
- pkt->bio->bi_bdev = pd->bdev;
+	bio_set_dev(pkt->bio, pd->bdev);
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
@@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector;
- pkt->w_bio->bi_bdev = pd->bdev;
+ bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
@@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
psd->pd = pd;
psd->bio = bio;
- cloned_bio->bi_bdev = pd->bdev;
+ bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio);
@@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata;
if (!pd) {
- pr_err("%s incorrect request queue\n",
- bdevname(bio->bi_bdev, b));
+ pr_err("%s incorrect request queue\n", bio_devname(bio, b));
goto end_io;
}
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index e0e81cacd781..6a55959cbf78 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -409,10 +409,8 @@ static int ps3vram_cache_init(struct ps3_system_bus_device *dev)
priv->cache.page_size = CACHE_PAGE_SIZE;
priv->cache.tags = kzalloc(sizeof(struct ps3vram_tag) *
CACHE_PAGE_COUNT, GFP_KERNEL);
- if (priv->cache.tags == NULL) {
- dev_err(&dev->core, "Could not allocate cache tags\n");
+ if (!priv->cache.tags)
return -ENOMEM;
- }
dev_info(&dev->core, "Created ram cache: %d entries, %d KiB each\n",
CACHE_PAGE_COUNT, CACHE_PAGE_SIZE / 1024);
@@ -743,7 +741,11 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
goto out_unmap_reports;
}
- ps3vram_cache_init(dev);
+	error = ps3vram_cache_init(dev);
+	if (error < 0)
+		goto out_unmap_reports;
+
ps3vram_proc_init(dev);
queue = blk_alloc_queue(GFP_KERNEL);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b008b6a98098..b640ad8a6d20 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3435,7 +3435,7 @@ static void rbd_acquire_lock(struct work_struct *work)
struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
struct rbd_device, lock_dwork);
enum rbd_lock_state lock_state;
- int ret;
+ int ret = 0;
dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 7f4acebf4657..e397d3ee7308 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -112,7 +112,7 @@ static const struct block_device_operations rsxx_fops = {
static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
- generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
+ generic_start_io_acct(card->queue, bio_data_dir(bio), bio_sectors(bio),
&card->gendisk->part0);
}
@@ -120,8 +120,8 @@ static void disk_stats_complete(struct rsxx_cardinfo *card,
struct bio *bio,
unsigned long start_time)
{
- generic_end_io_acct(bio_data_dir(bio), &card->gendisk->part0,
- start_time);
+ generic_end_io_acct(card->queue, bio_data_dir(bio),
+ &card->gendisk->part0, start_time);
}
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index d0368682bd43..7cedb4295e9d 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -1,19 +1,12 @@
-/* Copyright 2012 STEC, Inc.
+/*
+ * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
+ * was acquired by Western Digital in 2012.
+ *
+ * Copyright 2012 sTec, Inc.
+ * Copyright (c) 2017 Western Digital Corporation or its affiliates.
*
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
- * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
- * Initial Driver Design!
- * Thomas Swann <tswann@stec-inc.com>
- * Interrupt handling.
- * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
- * biomode implementation.
- * Akhil Bhansali <abhansali@stec-inc.com>
- * Added support for DISCARD / FLUSH and FUA.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
*/
#include <linux/kernel.h>
@@ -23,11 +16,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
-#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
@@ -37,9 +30,9 @@
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
-#include <linux/ctype.h>
#include <linux/wait.h>
-#include <linux/uio.h>
+#include <linux/stringify.h>
+#include <linux/slab_def.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
@@ -51,19 +44,6 @@
static int skd_dbg_level;
static int skd_isr_comp_limit = 4;
-enum {
- STEC_LINK_2_5GTS = 0,
- STEC_LINK_5GTS = 1,
- STEC_LINK_8GTS = 2,
- STEC_LINK_UNKNOWN = 0xFF
-};
-
-enum {
- SKD_FLUSH_INITIALIZER,
- SKD_FLUSH_ZERO_SIZE_FIRST,
- SKD_FLUSH_DATA_SECOND,
-};
-
#define SKD_ASSERT(expr) \
do { \
if (unlikely(!(expr))) { \
@@ -73,17 +53,11 @@ enum {
} while (0)
#define DRV_NAME "skd"
-#define DRV_VERSION "2.2.1"
-#define DRV_BUILD_ID "0260"
#define PFX DRV_NAME ": "
-#define DRV_BIN_VERSION 0x100
-#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
-MODULE_AUTHOR("bug-reports: support@stec-inc.com");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
-MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
+MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");
#define PCI_VENDOR_ID_STEC 0x1B39
#define PCI_DEVICE_ID_S1120 0x0001
@@ -96,34 +70,32 @@ MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
#define SKD_PAUSE_TIMEOUT (5 * 1000)
#define SKD_N_FITMSG_BYTES (512u)
+#define SKD_MAX_REQ_PER_MSG 14
-#define SKD_N_SPECIAL_CONTEXT 32u
#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
/* SG elements are 32 bytes, so we can make this 4096 and still be under the
* 128KB limit. That allows 4096*4K = 16M xfer size
*/
#define SKD_N_SG_PER_REQ_DEFAULT 256u
-#define SKD_N_SG_PER_SPECIAL 256u
#define SKD_N_COMPLETION_ENTRY 256u
#define SKD_N_READ_CAP_BYTES (8u)
#define SKD_N_INTERNAL_BYTES (512u)
+#define SKD_SKCOMP_SIZE \
+ ((sizeof(struct fit_completion_entry_v1) + \
+ sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)
+
/* 5 bits of uniqifier, 0xF800 */
-#define SKD_ID_INCR (0x400)
#define SKD_ID_TABLE_MASK (3u << 8u)
#define SKD_ID_RW_REQUEST (0u << 8u)
#define SKD_ID_INTERNAL (1u << 8u)
-#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
#define SKD_ID_FIT_MSG (3u << 8u)
#define SKD_ID_SLOT_MASK 0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
-#define SKD_N_TIMEOUT_SLOT 4u
-#define SKD_TIMEOUT_SLOT_MASK 3u
-
#define SKD_N_MAX_SECTORS 2048u
#define SKD_MAX_RETRIES 2u
@@ -141,7 +113,6 @@ enum skd_drvr_state {
SKD_DRVR_STATE_ONLINE,
SKD_DRVR_STATE_PAUSING,
SKD_DRVR_STATE_PAUSED,
- SKD_DRVR_STATE_DRAINING_TIMEOUT,
SKD_DRVR_STATE_RESTARTING,
SKD_DRVR_STATE_RESUMING,
SKD_DRVR_STATE_STOPPING,
@@ -158,7 +129,6 @@ enum skd_drvr_state {
#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
-#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS 90u
@@ -169,12 +139,6 @@ enum skd_req_state {
SKD_REQ_STATE_BUSY,
SKD_REQ_STATE_COMPLETED,
SKD_REQ_STATE_TIMEOUT,
- SKD_REQ_STATE_ABORTED,
-};
-
-enum skd_fit_msg_state {
- SKD_MSG_STATE_IDLE,
- SKD_MSG_STATE_BUSY,
};
enum skd_check_status_action {
@@ -185,34 +149,29 @@ enum skd_check_status_action {
SKD_CHECK_STATUS_BUSY_IMMINENT,
};
-struct skd_fitmsg_context {
- enum skd_fit_msg_state state;
-
- struct skd_fitmsg_context *next;
+struct skd_msg_buf {
+ struct fit_msg_hdr fmh;
+ struct skd_scsi_request scsi[SKD_MAX_REQ_PER_MSG];
+};
+struct skd_fitmsg_context {
u32 id;
- u16 outstanding;
u32 length;
- u32 offset;
- u8 *msg_buf;
+ struct skd_msg_buf *msg_buf;
dma_addr_t mb_dma_address;
};
struct skd_request_context {
enum skd_req_state state;
- struct skd_request_context *next;
-
u16 id;
u32 fitmsg_id;
- struct request *req;
u8 flush_cmd;
- u32 timeout_stamp;
- u8 sg_data_dir;
+ enum dma_data_direction data_dir;
struct scatterlist *sg;
u32 n_sg;
u32 sg_byte_count;
@@ -224,38 +183,19 @@ struct skd_request_context {
struct fit_comp_error_info err_info;
+ blk_status_t status;
};
-#define SKD_DATA_DIR_HOST_TO_CARD 1
-#define SKD_DATA_DIR_CARD_TO_HOST 2
struct skd_special_context {
struct skd_request_context req;
- u8 orphaned;
-
void *data_buf;
dma_addr_t db_dma_address;
- u8 *msg_buf;
+ struct skd_msg_buf *msg_buf;
dma_addr_t mb_dma_address;
};
-struct skd_sg_io {
- fmode_t mode;
- void __user *argp;
-
- struct sg_io_hdr sg;
-
- u8 cdb[16];
-
- u32 dxfer_len;
- u32 iovcnt;
- struct sg_iovec *iov;
- struct sg_iovec no_iov_iov;
-
- struct skd_special_context *skspcl;
-};
-
typedef enum skd_irq_type {
SKD_IRQ_LEGACY,
SKD_IRQ_MSI,
@@ -265,7 +205,7 @@ typedef enum skd_irq_type {
#define SKD_MAX_BARS 2
struct skd_device {
- volatile void __iomem *mem_map[SKD_MAX_BARS];
+ void __iomem *mem_map[SKD_MAX_BARS];
resource_size_t mem_phys[SKD_MAX_BARS];
u32 mem_size[SKD_MAX_BARS];
@@ -276,21 +216,20 @@ struct skd_device {
spinlock_t lock;
struct gendisk *disk;
+ struct blk_mq_tag_set tag_set;
struct request_queue *queue;
+ struct skd_fitmsg_context *skmsg;
struct device *class_dev;
int gendisk_on;
int sync_done;
- atomic_t device_count;
u32 devno;
u32 major;
- char name[32];
char isr_name[30];
enum skd_drvr_state state;
u32 drive_state;
- u32 in_flight;
u32 cur_max_queue_depth;
u32 queue_low_water_mark;
u32 dev_max_queue_depth;
@@ -298,27 +237,20 @@ struct skd_device {
u32 num_fitmsg_context;
u32 num_req_context;
- u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
- u32 timeout_stamp;
- struct skd_fitmsg_context *skmsg_free_list;
struct skd_fitmsg_context *skmsg_table;
- struct skd_request_context *skreq_free_list;
- struct skd_request_context *skreq_table;
-
- struct skd_special_context *skspcl_free_list;
- struct skd_special_context *skspcl_table;
-
struct skd_special_context internal_skspcl;
u32 read_cap_blocksize;
u32 read_cap_last_lba;
int read_cap_is_valid;
int inquiry_is_valid;
u8 inq_serial_num[13]; /*12 chars plus null term */
- u8 id_str[80]; /* holds a composite name (pci + sernum) */
u8 skcomp_cycle;
u32 skcomp_ix;
+ struct kmem_cache *msgbuf_cache;
+ struct kmem_cache *sglist_cache;
+ struct kmem_cache *databuf_cache;
struct fit_completion_entry_v1 *skcomp_table;
struct fit_comp_error_info *skerr_table;
dma_addr_t cq_dma_address;
@@ -329,7 +261,6 @@ struct skd_device {
u32 timer_countdown;
u32 timer_substate;
- int n_special;
int sgs_per_request;
u32 last_mtd;
@@ -343,7 +274,7 @@ struct skd_device {
u32 timo_slot;
-
+ struct work_struct start_queue;
struct work_struct completion_worker;
};
@@ -353,53 +284,32 @@ struct skd_device {
static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
- u32 val;
-
- if (likely(skdev->dbg_level < 2))
- return readl(skdev->mem_map[1] + offset);
- else {
- barrier();
- val = readl(skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- return val;
- }
+ u32 val = readl(skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
+ return val;
}
static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
u32 offset)
{
- if (likely(skdev->dbg_level < 2)) {
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writel(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %x\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
+ writel(val, skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}
static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
u32 offset)
{
- if (likely(skdev->dbg_level < 2)) {
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- } else {
- barrier();
- writeq(val, skdev->mem_map[1] + offset);
- barrier();
- pr_debug("%s:%s:%d offset %x = %016llx\n",
- skdev->name, __func__, __LINE__, offset, val);
- }
+ writeq(val, skdev->mem_map[1] + offset);
+ if (unlikely(skdev->dbg_level >= 2))
+ dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
+ val);
}
-#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
+#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
static int skd_isr_type = SKD_IRQ_DEFAULT;
module_param(skd_isr_type, int, 0444);
@@ -412,7 +322,7 @@ static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
"Maximum SCSI requests packed in a single message."
- " (1-14, default==1)");
+ " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");
#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
@@ -429,10 +339,10 @@ MODULE_PARM_DESC(skd_sgs_per_request,
"Maximum SG elements per block request."
" (1-4096, default==256)");
-static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
+static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
- "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
+ "Maximum SCSI pass-thru at a time. IGNORED");
module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
@@ -449,9 +359,6 @@ static void skd_send_fitmsg(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg);
static void skd_send_special_fitmsg(struct skd_device *skdev,
struct skd_special_context *skspcl);
-static void skd_request_fn(struct request_queue *rq);
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t status);
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq);
static void skd_postop_sg_list(struct skd_device *skdev,
@@ -460,19 +367,14 @@ static void skd_postop_sg_list(struct skd_device *skdev,
static void skd_restart_device(struct skd_device *skdev);
static int skd_quiesce_dev(struct skd_device *skdev);
static int skd_unquiesce_dev(struct skd_device *skdev);
-static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl);
static void skd_disable_interrupts(struct skd_device *skdev);
static void skd_isr_fwstate(struct skd_device *skdev);
-static void skd_recover_requests(struct skd_device *skdev, int requeue);
+static void skd_recover_requests(struct skd_device *skdev);
static void skd_soft_reset(struct skd_device *skdev);
-static const char *skd_name(struct skd_device *skdev);
const char *skd_drive_state_to_str(int state);
const char *skd_skdev_state_to_str(enum skd_drvr_state state);
static void skd_log_skdev(struct skd_device *skdev, const char *event);
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event);
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event);
@@ -481,18 +383,20 @@ static void skd_log_skreq(struct skd_device *skdev,
* READ/WRITE REQUESTS
*****************************************************************************
*/
-static void skd_fail_all_pending(struct skd_device *skdev)
+static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
- struct request_queue *q = skdev->queue;
- struct request *req;
+ int *count = data;
- for (;; ) {
- req = blk_peek_request(q);
- if (req == NULL)
- break;
- blk_start_request(req);
- __blk_end_request_all(req, BLK_STS_IOERR);
- }
+	(*count)++;
+}
+
+static int skd_in_flight(struct skd_device *skdev)
+{
+ int count = 0;
+
+ blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);
+
+ return count;
}
static void
@@ -501,9 +405,9 @@ skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
unsigned count)
{
if (data_dir == READ)
- scsi_req->cdb[0] = 0x28;
+ scsi_req->cdb[0] = READ_10;
else
- scsi_req->cdb[0] = 0x2a;
+ scsi_req->cdb[0] = WRITE_10;
scsi_req->cdb[1] = 0;
scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
@@ -522,7 +426,7 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
{
skreq->flush_cmd = 1;
- scsi_req->cdb[0] = 0x35;
+ scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
scsi_req->cdb[1] = 0;
scsi_req->cdb[2] = 0;
scsi_req->cdb[3] = 0;
@@ -534,307 +438,194 @@ skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
scsi_req->cdb[9] = 0;
}
-static void skd_request_fn_not_online(struct request_queue *q);
-
-static void skd_request_fn(struct request_queue *q)
+/*
+ * Return true if and only if all pending requests should be failed.
+ */
+static bool skd_fail_all(struct request_queue *q)
{
struct skd_device *skdev = q->queuedata;
- struct skd_fitmsg_context *skmsg = NULL;
- struct fit_msg_hdr *fmh = NULL;
- struct skd_request_context *skreq;
- struct request *req = NULL;
- struct skd_scsi_request *scsi_req;
- unsigned long io_flags;
- u32 lba;
- u32 count;
- int data_dir;
- u32 be_lba;
- u32 be_count;
- u64 be_dmaa;
- u64 cmdctxt;
- u32 timo_slot;
- void *cmd_ptr;
- int flush, fua;
-
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- skd_request_fn_not_online(q);
- return;
- }
- if (blk_queue_stopped(skdev->queue)) {
- if (skdev->skmsg_free_list == NULL ||
- skdev->skreq_free_list == NULL ||
- skdev->in_flight >= skdev->queue_low_water_mark)
- /* There is still some kind of shortage */
- return;
-
- queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
- }
+ SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
- /*
- * Stop conditions:
- * - There are no more native requests
- * - There are already the maximum number of requests in progress
- * - There are no more skd_request_context entries
- * - There are no more FIT msg buffers
+ skd_log_skdev(skdev, "req_not_online");
+ switch (skdev->state) {
+ case SKD_DRVR_STATE_PAUSING:
+ case SKD_DRVR_STATE_PAUSED:
+ case SKD_DRVR_STATE_STARTING:
+ case SKD_DRVR_STATE_RESTARTING:
+ case SKD_DRVR_STATE_WAIT_BOOT:
+		/*
+		 * While starting we have not started the queue, so we should
+		 * not get here; but requests may already be waiting because
+		 * /dev/skd0 was reported before the connect completed. They
+		 * will wait forever if the connect never finishes. One option
+		 * would be to delay creating /dev/skd0 until then.
+		 */
- for (;; ) {
-
- flush = fua = 0;
-
- req = blk_peek_request(q);
-
- /* Are there any native requests to start? */
- if (req == NULL)
- break;
-
- lba = (u32)blk_rq_pos(req);
- count = blk_rq_sectors(req);
- data_dir = rq_data_dir(req);
- io_flags = req->cmd_flags;
-
- if (req_op(req) == REQ_OP_FLUSH)
- flush++;
-
- if (io_flags & REQ_FUA)
- fua++;
-
- pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
- "count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count, data_dir);
-
- /* At this point we know there is a request */
+ case SKD_DRVR_STATE_BUSY:
+ case SKD_DRVR_STATE_BUSY_IMMINENT:
+ case SKD_DRVR_STATE_BUSY_ERASE:
+ return false;
- /* Are too many requets already in progress? */
- if (skdev->in_flight >= skdev->cur_max_queue_depth) {
- pr_debug("%s:%s:%d qdepth %d, limit %d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth);
- break;
- }
+ case SKD_DRVR_STATE_BUSY_SANITIZE:
+ case SKD_DRVR_STATE_STOPPING:
+ case SKD_DRVR_STATE_SYNCING:
+ case SKD_DRVR_STATE_FAULT:
+ case SKD_DRVR_STATE_DISAPPEARED:
+ default:
+ return true;
+ }
+}
- /* Is a skd_request_context available? */
- skreq = skdev->skreq_free_list;
- if (skreq == NULL) {
- pr_debug("%s:%s:%d Out of req=%p\n",
- skdev->name, __func__, __LINE__, q);
- break;
- }
- SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
-
- /* Now we check to see if we can get a fit msg */
- if (skmsg == NULL) {
- if (skdev->skmsg_free_list == NULL) {
- pr_debug("%s:%s:%d Out of msg\n",
- skdev->name, __func__, __LINE__);
- break;
- }
- }
+static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *mqd)
+{
+ struct request *const req = mqd->rq;
+ struct request_queue *const q = req->q;
+ struct skd_device *skdev = q->queuedata;
+ struct skd_fitmsg_context *skmsg;
+ struct fit_msg_hdr *fmh;
+ const u32 tag = blk_mq_unique_tag(req);
+ struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
+ struct skd_scsi_request *scsi_req;
+ unsigned long flags = 0;
+ const u32 lba = blk_rq_pos(req);
+ const u32 count = blk_rq_sectors(req);
+ const int data_dir = rq_data_dir(req);
- skreq->flush_cmd = 0;
- skreq->n_sg = 0;
- skreq->sg_byte_count = 0;
+ if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
+ return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;
- /*
- * OK to now dequeue request from q.
- *
- * At this point we are comitted to either start or reject
- * the native request. Note that skd_request_context is
- * available but is still at the head of the free list.
- */
- blk_start_request(req);
- skreq->req = req;
- skreq->fitmsg_id = 0;
-
- /* Either a FIT msg is in progress or we have to start one. */
- if (skmsg == NULL) {
- /* Are there any FIT msg buffers available? */
- skmsg = skdev->skmsg_free_list;
- if (skmsg == NULL) {
- pr_debug("%s:%s:%d Out of msg skdev=%p\n",
- skdev->name, __func__, __LINE__,
- skdev);
- break;
- }
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
+ blk_mq_start_request(req);
- skdev->skmsg_free_list = skmsg->next;
+ WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
+ tag, skd_max_queue_depth, q->nr_requests);
- skmsg->state = SKD_MSG_STATE_BUSY;
- skmsg->id += SKD_ID_INCR;
+ SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
- /* Initialize the FIT msg header */
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- memset(fmh, 0, sizeof(*fmh));
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- skmsg->length = sizeof(*fmh);
- }
+ dev_dbg(&skdev->pdev->dev,
+ "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
+ lba, count, count, data_dir);
- skreq->fitmsg_id = skmsg->id;
+ skreq->id = tag + SKD_ID_RW_REQUEST;
+ skreq->flush_cmd = 0;
+ skreq->n_sg = 0;
+ skreq->sg_byte_count = 0;
- /*
- * Note that a FIT msg may have just been started
- * but contains no SoFIT requests yet.
- */
+ skreq->fitmsg_id = 0;
- /*
- * Transcode the request, checking as we go. The outcome of
- * the transcoding is represented by the error variable.
- */
- cmd_ptr = &skmsg->msg_buf[skmsg->length];
- memset(cmd_ptr, 0, 32);
+ skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
- be_lba = cpu_to_be32(lba);
- be_count = cpu_to_be32(count);
- be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
- cmdctxt = skreq->id + SKD_ID_INCR;
+ if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
+ dev_dbg(&skdev->pdev->dev, "error Out\n");
+ skreq->status = BLK_STS_RESOURCE;
+ blk_mq_complete_request(req);
+ return BLK_STS_OK;
+ }
- scsi_req = cmd_ptr;
- scsi_req->hdr.tag = cmdctxt;
- scsi_req->hdr.sg_list_dma_address = be_dmaa;
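+	/*
+	 * Push the scatter-gather descriptor list out to the device before
+	 * the FIT message that references it is issued.
+	 */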
+ dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
+ skreq->n_sg *
+ sizeof(struct fit_sg_descriptor),
+ DMA_TO_DEVICE);
- if (data_dir == READ)
- skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
- else
- skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
+ /* Either a FIT msg is in progress or we have to start one. */
+ if (skd_max_req_per_msg == 1) {
+ skmsg = NULL;
+ } else {
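+		/*
+		 * skdev->lock protects the in-progress FIT message that is
+		 * shared between requests while coalescing; it is held until
+		 * the message is sent or left in progress further below.
+		 */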
+ spin_lock_irqsave(&skdev->lock, flags);
+ skmsg = skdev->skmsg;
+ }
+ if (!skmsg) {
+ skmsg = &skdev->skmsg_table[tag];
+ skdev->skmsg = skmsg;
- if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
- skd_prep_zerosize_flush_cdb(scsi_req, skreq);
- SKD_ASSERT(skreq->flush_cmd == 1);
+ /* Initialize the FIT msg header */
+ fmh = &skmsg->msg_buf->fmh;
+ memset(fmh, 0, sizeof(*fmh));
+ fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
+ skmsg->length = sizeof(*fmh);
+ } else {
+ fmh = &skmsg->msg_buf->fmh;
+ }
- } else {
- skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
- }
+ skreq->fitmsg_id = skmsg->id;
- if (fua)
- scsi_req->cdb[1] |= SKD_FUA_NV;
+ scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
+ memset(scsi_req, 0, sizeof(*scsi_req));
- if (!req->bio)
- goto skip_sg;
+ scsi_req->hdr.tag = skreq->id;
+ scsi_req->hdr.sg_list_dma_address =
+ cpu_to_be64(skreq->sksg_dma_address);
- if (!skd_preop_sg_list(skdev, skreq)) {
- /*
- * Complete the native request with error.
- * Note that the request context is still at the
- * head of the free list, and that the SoFIT request
- * was encoded into the FIT msg buffer but the FIT
- * msg length has not been updated. In short, the
- * only resource that has been allocated but might
- * not be used is that the FIT msg could be empty.
- */
- pr_debug("%s:%s:%d error Out\n",
- skdev->name, __func__, __LINE__);
- skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
- continue;
- }
+ if (req_op(req) == REQ_OP_FLUSH) {
+ skd_prep_zerosize_flush_cdb(scsi_req, skreq);
+ SKD_ASSERT(skreq->flush_cmd == 1);
+ } else {
+ skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
+ }
-skip_sg:
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skreq->sg_byte_count);
+ if (req->cmd_flags & REQ_FUA)
+ scsi_req->cdb[1] |= SKD_FUA_NV;
- /* Complete resource allocations. */
- skdev->skreq_free_list = skreq->next;
- skreq->state = SKD_REQ_STATE_BUSY;
- skreq->id += SKD_ID_INCR;
+ scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);
- skmsg->length += sizeof(struct skd_scsi_request);
- fmh->num_protocol_cmds_coalesced++;
+ /* Complete resource allocations. */
+ skreq->state = SKD_REQ_STATE_BUSY;
- /*
- * Update the active request counts.
- * Capture the timeout timestamp.
- */
- skreq->timeout_stamp = skdev->timeout_stamp;
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- skdev->timeout_slot[timo_slot]++;
- skdev->in_flight++;
- pr_debug("%s:%s:%d req=0x%x busy=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skdev->in_flight);
+ skmsg->length += sizeof(struct skd_scsi_request);
+ fmh->num_protocol_cmds_coalesced++;
- /*
- * If the FIT msg buffer is full send it.
- */
- if (skmsg->length >= SKD_N_FITMSG_BYTES ||
- fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
- skd_send_fitmsg(skdev, skmsg);
- skmsg = NULL;
- fmh = NULL;
- }
- }
+ dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
+ skd_in_flight(skdev));
/*
- * Is a FIT msg in progress? If it is empty put the buffer back
- * on the free list. If it is non-empty send what we got.
- * This minimizes latency when there are fewer requests than
- * what fits in a FIT msg.
+	 * If the FIT msg buffer is full, send it.
*/
- if (skmsg != NULL) {
- /* Bigger than just a FIT msg header? */
- if (skmsg->length > sizeof(struct fit_msg_hdr)) {
- pr_debug("%s:%s:%d sending msg=%p, len %d\n",
- skdev->name, __func__, __LINE__,
- skmsg, skmsg->length);
+ if (skd_max_req_per_msg == 1) {
+ skd_send_fitmsg(skdev, skmsg);
+ } else {
+ if (mqd->last ||
+ fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
skd_send_fitmsg(skdev, skmsg);
- } else {
- /*
- * The FIT msg is empty. It means we got started
- * on the msg, but the requests were rejected.
- */
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
+ skdev->skmsg = NULL;
}
- skmsg = NULL;
- fmh = NULL;
+ spin_unlock_irqrestore(&skdev->lock, flags);
}
- /*
- * If req is non-NULL it means there is something to do but
- * we are out of a resource.
- */
- if (req)
- blk_stop_queue(skdev->queue);
+ return BLK_STS_OK;
}
-static void skd_end_request(struct skd_device *skdev,
- struct skd_request_context *skreq, blk_status_t error)
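+/*
+ * blk-mq .timeout handler: log the stuck tag and ask the block layer to
+ * restart the timer; recovery is driven by the driver state machine.
+ */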
+static enum blk_eh_timer_return skd_timed_out(struct request *req,
+ bool reserved)
{
- if (unlikely(error)) {
- struct request *req = skreq->req;
- char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
- skd_name(skdev), cmd, lba, count, skreq->id);
- } else
- pr_debug("%s:%s:%d id=0x%x error=%d\n",
- skdev->name, __func__, __LINE__, skreq->id, error);
+ struct skd_device *skdev = req->q->queuedata;
+
+ dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
+ blk_mq_unique_tag(req));
- __blk_end_request_all(skreq->req, error);
+ return BLK_EH_RESET_TIMER;
+}
+
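+/*
+ * Request completion handler: end the request with the status stored in
+ * its skd_request_context by the submission and completion paths.
+ */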
+static void skd_complete_rq(struct request *req)
+{
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
+
+ blk_mq_end_request(req, skreq->status);
}
static bool skd_preop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- struct request *req = skreq->req;
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
- struct scatterlist *sg = &skreq->sg[0];
+ struct request *req = blk_mq_rq_from_pdu(skreq);
+ struct scatterlist *sgl = &skreq->sg[0], *sg;
int n_sg;
int i;
skreq->sg_byte_count = 0;
- /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
- skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
+ WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
+ skreq->data_dir != DMA_FROM_DEVICE);
- n_sg = blk_rq_map_sg(skdev->queue, req, sg);
+ n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
if (n_sg <= 0)
return false;
@@ -842,7 +633,7 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
* Map scatterlist to PCI bus addresses.
* Note PCI might change the number of entries.
*/
- n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
+ n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
if (n_sg <= 0)
return false;
@@ -850,10 +641,10 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
skreq->n_sg = n_sg;
- for (i = 0; i < n_sg; i++) {
+ for_each_sg(sgl, sg, n_sg, i) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- u32 cnt = sg_dma_len(&sg[i]);
- uint64_t dma_addr = sg_dma_address(&sg[i]);
+ u32 cnt = sg_dma_len(sg);
+ uint64_t dma_addr = sg_dma_address(sg);
sgd->control = FIT_SGD_CONTROL_NOT_LAST;
sgd->byte_count = cnt;
@@ -866,16 +657,16 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
if (unlikely(skdev->dbg_level > 1)) {
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
+ dev_dbg(&skdev->pdev->dev,
+ "skreq=%x sksg_list=%p sksg_dma=%llx\n",
+ skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
for (i = 0; i < n_sg; i++) {
struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
+
+ dev_dbg(&skdev->pdev->dev,
+ " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
}
}
@@ -885,9 +676,6 @@ static bool skd_preop_sg_list(struct skd_device *skdev,
static void skd_postop_sg_list(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
- int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
-
/*
* restore the next ptr for next IO request so we
* don't have to set it every time.
@@ -895,51 +683,7 @@ static void skd_postop_sg_list(struct skd_device *skdev,
skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
skreq->sksg_dma_address +
((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
- pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
-}
-
-static void skd_request_fn_not_online(struct request_queue *q)
-{
- struct skd_device *skdev = q->queuedata;
- int error;
-
- SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
-
- skd_log_skdev(skdev, "req_not_online");
- switch (skdev->state) {
- case SKD_DRVR_STATE_PAUSING:
- case SKD_DRVR_STATE_PAUSED:
- case SKD_DRVR_STATE_STARTING:
- case SKD_DRVR_STATE_RESTARTING:
- case SKD_DRVR_STATE_WAIT_BOOT:
- /* In case of starting, we haven't started the queue,
- * so we can't get here... but requests are
- * possibly hanging out waiting for us because we
- * reported the dev/skd0 already. They'll wait
- * forever if connect doesn't complete.
- * What to do??? delay dev/skd0 ??
- */
- case SKD_DRVR_STATE_BUSY:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- case SKD_DRVR_STATE_BUSY_ERASE:
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return;
-
- case SKD_DRVR_STATE_BUSY_SANITIZE:
- case SKD_DRVR_STATE_STOPPING:
- case SKD_DRVR_STATE_SYNCING:
- case SKD_DRVR_STATE_FAULT:
- case SKD_DRVR_STATE_DISAPPEARED:
- default:
- error = -EIO;
- break;
- }
-
-	/* If we get here, terminate all pending block requests
-	 * with EIO and any SCSI pass-through with appropriate sense
- */
-
- skd_fail_all_pending(skdev);
+ pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}
/*
@@ -950,12 +694,22 @@ static void skd_request_fn_not_online(struct request_queue *q)
static void skd_timer_tick_not_online(struct skd_device *skdev);
+static void skd_start_queue(struct work_struct *work)
+{
+ struct skd_device *skdev = container_of(work, typeof(*skdev),
+ start_queue);
+
+ /*
+ * Although it is safe to call blk_start_queue() from interrupt
+ * context, blk_mq_start_hw_queues() must not be called from
+ * interrupt context.
+ */
+ blk_mq_start_hw_queues(skdev->queue);
+}
+
static void skd_timer_tick(ulong arg)
{
struct skd_device *skdev = (struct skd_device *)arg;
-
- u32 timo_slot;
- u32 overdue_timestamp;
unsigned long reqflags;
u32 state;
@@ -972,37 +726,9 @@ static void skd_timer_tick(ulong arg)
if (state != skdev->drive_state)
skd_isr_fwstate(skdev);
- if (skdev->state != SKD_DRVR_STATE_ONLINE) {
+ if (skdev->state != SKD_DRVR_STATE_ONLINE)
skd_timer_tick_not_online(skdev);
- goto timer_func_out;
- }
- skdev->timeout_stamp++;
- timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
-
- /*
- * All requests that happened during the previous use of
- * this slot should be done by now. The previous use was
- * over 7 seconds ago.
- */
- if (skdev->timeout_slot[timo_slot] == 0)
- goto timer_func_out;
-
- /* Something is overdue */
- overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
-
- pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_slot[timo_slot], skdev->in_flight);
- pr_err("(%s): Overdue IOs (%d), busy %d\n",
- skd_name(skdev), skdev->timeout_slot[timo_slot],
- skdev->in_flight);
- skdev->timer_countdown = SKD_DRAINING_TIMO;
- skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
- skdev->timo_slot = timo_slot;
- blk_stop_queue(skdev->queue);
-
-timer_func_out:
mod_timer(&skdev->timer, (jiffies + HZ));
spin_unlock_irqrestore(&skdev->lock, reqflags);
@@ -1015,9 +741,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
case SKD_DRVR_STATE_LOAD:
break;
case SKD_DRVR_STATE_BUSY_SANITIZE:
- pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
- skdev->name, __func__, __LINE__,
- skdev->drive_state, skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "drive busy sanitize[%x], driver[%x]\n",
+ skdev->drive_state, skdev->state);
/* If we've been in sanitize for 3 seconds, we figure we're not
	 * going to get any more completions, so recover requests now
*/
@@ -1025,22 +751,21 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
skdev->timer_countdown--;
return;
}
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
break;
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
case SKD_DRVR_STATE_BUSY_ERASE:
- pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
+ dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
+ skdev->state, skdev->timer_countdown);
if (skdev->timer_countdown > 0) {
skdev->timer_countdown--;
return;
}
- pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
- skdev->name, __func__, __LINE__,
- skdev->state, skdev->timer_countdown);
+ dev_dbg(&skdev->pdev->dev,
+ "busy[%x], timedout=%d, restarting device.",
+ skdev->state, skdev->timer_countdown);
skd_restart_device(skdev);
break;
@@ -1054,12 +779,12 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
	 * recover at some point. */
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Connect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
+ skdev->drive_state);
	/* start the queue so we can respond with error to requests */
	/* wake up anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1072,29 +797,6 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
case SKD_DRVR_STATE_PAUSED:
break;
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- pr_debug("%s:%s:%d "
- "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
- skdev->name, __func__, __LINE__,
- skdev->timo_slot,
- skdev->timer_countdown,
- skdev->in_flight,
- skdev->timeout_slot[skdev->timo_slot]);
- /* if the slot has cleared we can let the I/O continue */
- if (skdev->timeout_slot[skdev->timo_slot] == 0) {
- pr_debug("%s:%s:%d Slot drained, starting queue.\n",
- skdev->name, __func__, __LINE__);
- skdev->state = SKD_DRVR_STATE_ONLINE;
- blk_start_queue(skdev->queue);
- return;
- }
- if (skdev->timer_countdown > 0) {
- skdev->timer_countdown--;
- return;
- }
- skd_restart_device(skdev);
- break;
-
case SKD_DRVR_STATE_RESTARTING:
if (skdev->timer_countdown > 0) {
skdev->timer_countdown--;
@@ -1103,8 +805,9 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/* For now, we fault the drive. Could attempt resets to
	 * recover at some point. */
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev,
+ "DriveFault Reconnect Timeout (%x)\n",
+ skdev->drive_state);
/*
* Recovering does two things:
@@ -1124,18 +827,18 @@ static void skd_timer_tick_not_online(struct skd_device *skdev)
/* It never came out of soft reset. Try to
* recover the requests and then let them
* fail. This is to mitigate hung processes. */
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
else {
- pr_err("(%s): Disable BusMaster (%x)\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
+ skdev->drive_state);
pci_disable_device(skdev->pdev);
skd_disable_interrupts(skdev);
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
}
	/* start the queue so we can respond with error to requests */
	/* wake up anyone waiting for startup complete */
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -1154,13 +857,11 @@ static int skd_start_timer(struct skd_device *skdev)
{
int rc;
- init_timer(&skdev->timer);
setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
- pr_err("%s: failed to start timer %d\n",
- __func__, rc);
+ dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
return rc;
}
@@ -1171,634 +872,6 @@ static void skd_kill_timer(struct skd_device *skdev)
/*
*****************************************************************************
- * IOCTL
- *****************************************************************************
- */
-static int skd_ioctl_sg_io(struct skd_device *skdev,
- fmode_t mode, void __user *argp);
-static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir);
-static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
-static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio);
-
-static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl);
-
-static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
- uint cmd_in, ulong arg)
-{
- static const int sg_version_num = 30527;
- int rc = 0, timeout;
- struct gendisk *disk = bdev->bd_disk;
- struct skd_device *skdev = disk->private_data;
- int __user *p = (int __user *)arg;
-
- pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
- skdev->name, __func__, __LINE__,
- disk->disk_name, current->comm, mode, cmd_in, arg);
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- switch (cmd_in) {
- case SG_SET_TIMEOUT:
- rc = get_user(timeout, p);
- if (!rc)
- disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
- break;
- case SG_GET_TIMEOUT:
- rc = jiffies_to_clock_t(disk->queue->sg_timeout);
- break;
- case SG_GET_VERSION_NUM:
- rc = put_user(sg_version_num, p);
- break;
- case SG_IO:
- rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
- break;
-
- default:
- rc = -ENOTTY;
- break;
- }
-
- pr_debug("%s:%s:%d %s: completion rc %d\n",
- skdev->name, __func__, __LINE__, disk->disk_name, rc);
- return rc;
-}
-
-static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
- void __user *argp)
-{
- int rc;
- struct skd_sg_io sksgio;
-
- memset(&sksgio, 0, sizeof(sksgio));
- sksgio.mode = mode;
- sksgio.argp = argp;
- sksgio.iov = &sksgio.no_iov_iov;
-
- switch (skdev->state) {
- case SKD_DRVR_STATE_ONLINE:
- case SKD_DRVR_STATE_BUSY_IMMINENT:
- break;
-
- default:
- pr_debug("%s:%s:%d drive not online\n",
- skdev->name, __func__, __LINE__);
- rc = -ENXIO;
- goto out;
- }
-
- rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_prep_buffering(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
- if (rc)
- goto out;
-
- rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_await(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
- if (rc)
- goto out;
-
- rc = skd_sg_io_put_status(skdev, &sksgio);
- if (rc)
- goto out;
-
- rc = 0;
-
-out:
- skd_sg_io_release_skspcl(skdev, &sksgio);
-
- if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
- kfree(sksgio.iov);
- return rc;
-}
-
-static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct sg_io_hdr *sgp = &sksgio->sg;
- int i, acc;
-
- if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d access sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
- pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
- skdev->name, __func__, __LINE__, sgp->interface_id);
- return -EINVAL;
- }
-
- if (sgp->cmd_len > sizeof(sksgio->cdb)) {
- pr_debug("%s:%s:%d cmd_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->cmd_len);
- return -EINVAL;
- }
-
- if (sgp->iovec_count > 256) {
- pr_debug("%s:%s:%d iovec_count invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->iovec_count);
- return -EINVAL;
- }
-
- if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
- pr_debug("%s:%s:%d dxfer_len invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_len);
- return -EINVAL;
- }
-
- switch (sgp->dxfer_direction) {
- case SG_DXFER_NONE:
- acc = -1;
- break;
-
- case SG_DXFER_TO_DEV:
- acc = VERIFY_READ;
- break;
-
- case SG_DXFER_FROM_DEV:
- case SG_DXFER_TO_FROM_DEV:
- acc = VERIFY_WRITE;
- break;
-
- default:
- pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
- skdev->name, __func__, __LINE__, sgp->dxfer_direction);
- return -EINVAL;
- }
-
- if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
- pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->cmdp);
- return -EFAULT;
- }
-
- if (sgp->mx_sb_len != 0) {
- if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
- pr_debug("%s:%s:%d access sbp failed %p\n",
- skdev->name, __func__, __LINE__, sgp->sbp);
- return -EFAULT;
- }
- }
-
- if (sgp->iovec_count == 0) {
- sksgio->iov[0].iov_base = sgp->dxferp;
- sksgio->iov[0].iov_len = sgp->dxfer_len;
- sksgio->iovcnt = 1;
- sksgio->dxfer_len = sgp->dxfer_len;
- } else {
- struct sg_iovec *iov;
- uint nbytes = sizeof(*iov) * sgp->iovec_count;
- size_t iov_data_len;
-
- iov = kmalloc(nbytes, GFP_KERNEL);
- if (iov == NULL) {
- pr_debug("%s:%s:%d alloc iovec failed %d\n",
- skdev->name, __func__, __LINE__,
- sgp->iovec_count);
- return -ENOMEM;
- }
- sksgio->iov = iov;
- sksgio->iovcnt = sgp->iovec_count;
-
- if (copy_from_user(iov, sgp->dxferp, nbytes)) {
- pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
- skdev->name, __func__, __LINE__, sgp->dxferp);
- return -EFAULT;
- }
-
- /*
- * Sum up the vecs, making sure they don't overflow
- */
- iov_data_len = 0;
- for (i = 0; i < sgp->iovec_count; i++) {
- if (iov_data_len + iov[i].iov_len < iov_data_len)
- return -EINVAL;
- iov_data_len += iov[i].iov_len;
- }
-
- /* SG_IO howto says that the shorter of the two wins */
- if (sgp->dxfer_len < iov_data_len) {
- sksgio->iovcnt = iov_shorten((struct iovec *)iov,
- sgp->iovec_count,
- sgp->dxfer_len);
- sksgio->dxfer_len = sgp->dxfer_len;
- } else
- sksgio->dxfer_len = iov_data_len;
- }
-
- if (sgp->dxfer_direction != SG_DXFER_NONE) {
- struct sg_iovec *iov = sksgio->iov;
- for (i = 0; i < sksgio->iovcnt; i++, iov++) {
- if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
- pr_debug("%s:%s:%d access data failed %p/%d\n",
- skdev->name, __func__, __LINE__,
- iov->iov_base, (int)iov->iov_len);
- return -EFAULT;
- }
- }
- }
-
- return 0;
-}
-
-static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = NULL;
- int rc;
-
- for (;;) {
- ulong flags;
-
- spin_lock_irqsave(&skdev->lock, flags);
- skspcl = skdev->skspcl_free_list;
- if (skspcl != NULL) {
- skdev->skspcl_free_list =
- (struct skd_special_context *)skspcl->req.next;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.state = SKD_REQ_STATE_SETUP;
- skspcl->orphaned = 0;
- skspcl->req.n_sg = 0;
- }
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- if (skspcl != NULL) {
- rc = 0;
- break;
- }
-
- pr_debug("%s:%s:%d blocking\n",
- skdev->name, __func__, __LINE__);
-
- rc = wait_event_interruptible_timeout(
- skdev->waitq,
- (skdev->skspcl_free_list != NULL),
- msecs_to_jiffies(sksgio->sg.timeout));
-
- pr_debug("%s:%s:%d unblocking, rc=%d\n",
- skdev->name, __func__, __LINE__, rc);
-
- if (rc <= 0) {
- if (rc == 0)
- rc = -ETIMEDOUT;
- else
- rc = -EINTR;
- break;
- }
- /*
- * If we get here rc > 0 meaning the timeout to
- * wait_event_interruptible_timeout() had time left, hence the
- * sought event -- non-empty free list -- happened.
- * Retry the allocation.
- */
- }
- sksgio->skspcl = skspcl;
-
- return rc;
-}
-
-static int skd_skreq_prep_buffering(struct skd_device *skdev,
- struct skd_request_context *skreq,
- u32 dxfer_len)
-{
- u32 resid = dxfer_len;
-
- /*
- * The DMA engine must have aligned addresses and byte counts.
- */
- resid += (-resid) & 3;
- skreq->sg_byte_count = resid;
-
- skreq->n_sg = 0;
-
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
- u32 ix = skreq->n_sg;
- struct scatterlist *sg = &skreq->sg[ix];
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
- struct page *page;
-
- if (nbytes > resid)
- nbytes = resid;
-
- page = alloc_page(GFP_KERNEL);
- if (page == NULL)
- return -ENOMEM;
-
- sg_set_page(sg, page, nbytes, 0);
-
- /* TODO: This should be going through a pci_???()
- * routine to do proper mapping. */
- sksg->control = FIT_SGD_CONTROL_NOT_LAST;
- sksg->byte_count = nbytes;
-
- sksg->host_side_addr = sg_phys(sg);
-
- sksg->dev_side_addr = 0;
- sksg->next_desc_ptr = skreq->sksg_dma_address +
- (ix + 1) * sizeof(*sksg);
-
- skreq->n_sg++;
- resid -= nbytes;
- }
-
- if (skreq->n_sg > 0) {
- u32 ix = skreq->n_sg - 1;
- struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
-
- sksg->control = FIT_SGD_CONTROL_LAST;
- sksg->next_desc_ptr = 0;
- }
-
- if (unlikely(skdev->dbg_level > 1)) {
- u32 i;
-
- pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
- for (i = 0; i < skreq->n_sg; i++) {
- struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
-
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
- }
- }
-
- return 0;
-}
-
-static int skd_sg_io_prep_buffering(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct skd_request_context *skreq = &skspcl->req;
- u32 dxfer_len = sksgio->dxfer_len;
- int rc;
-
- rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
- /*
- * Eventually, errors or not, skd_release_special() is called
- * to recover allocations including partial allocations.
- */
- return rc;
-}
-
-static int skd_sg_io_copy_buffer(struct skd_device *skdev,
- struct skd_sg_io *sksgio, int dxfer_dir)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- u32 iov_ix = 0;
- struct sg_iovec curiov;
- u32 sksg_ix = 0;
- u8 *bufp = NULL;
- u32 buf_len = 0;
- u32 resid = sksgio->dxfer_len;
- int rc;
-
- curiov.iov_len = 0;
- curiov.iov_base = NULL;
-
- if (dxfer_dir != sksgio->sg.dxfer_direction) {
- if (dxfer_dir != SG_DXFER_TO_DEV ||
- sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
- return 0;
- }
-
- while (resid > 0) {
- u32 nbytes = PAGE_SIZE;
-
- if (curiov.iov_len == 0) {
- curiov = sksgio->iov[iov_ix++];
- continue;
- }
-
- if (buf_len == 0) {
- struct page *page;
- page = sg_page(&skspcl->req.sg[sksg_ix++]);
- bufp = page_address(page);
- buf_len = PAGE_SIZE;
- }
-
- nbytes = min_t(u32, nbytes, resid);
- nbytes = min_t(u32, nbytes, curiov.iov_len);
- nbytes = min_t(u32, nbytes, buf_len);
-
- if (dxfer_dir == SG_DXFER_TO_DEV)
- rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
- else
- rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
-
- if (rc)
- return -EFAULT;
-
- resid -= nbytes;
- curiov.iov_len -= nbytes;
- curiov.iov_base += nbytes;
- buf_len -= nbytes;
- }
-
- return 0;
-}
-
-static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
-
- memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
-
- /* Initialize the FIT msg header */
- fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
- fmh->num_protocol_cmds_coalesced = 1;
-
- /* Initialize the SCSI request */
- if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
- scsi_req->hdr.sg_list_dma_address =
- cpu_to_be64(skspcl->req.sksg_dma_address);
- scsi_req->hdr.tag = skspcl->req.id;
- scsi_req->hdr.sg_list_len_bytes =
- cpu_to_be32(skspcl->req.sg_byte_count);
- memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
-
- skspcl->req.state = SKD_REQ_STATE_BUSY;
- skd_send_special_fitmsg(skdev, skspcl);
-
- return 0;
-}
-
-static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
-{
- unsigned long flags;
- int rc;
-
- rc = wait_event_interruptible_timeout(skdev->waitq,
- (sksgio->skspcl->req.state !=
- SKD_REQ_STATE_BUSY),
- msecs_to_jiffies(sksgio->sg.
- timeout));
-
- spin_lock_irqsave(&skdev->lock, flags);
-
- if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d skspcl %p aborted\n",
- skdev->name, __func__, __LINE__, sksgio->skspcl);
-
- /* Build check cond, sense and let command finish. */
- /* For a timeout, we must fabricate completion and sense
- * data to complete the command */
- sksgio->skspcl->req.completion.status =
- SAM_STAT_CHECK_CONDITION;
-
- memset(&sksgio->skspcl->req.err_info, 0,
- sizeof(sksgio->skspcl->req.err_info));
- sksgio->skspcl->req.err_info.type = 0x70;
- sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
- sksgio->skspcl->req.err_info.code = 0x44;
- sksgio->skspcl->req.err_info.qual = 0;
- rc = 0;
- } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
- /* No longer on the adapter. We finish. */
- rc = 0;
- else {
- /* Something's gone wrong. Still busy. Timeout or
- * user interrupted (control-C). Mark as an orphan
- * so it will be disposed when completed. */
- sksgio->skspcl->orphaned = 1;
- sksgio->skspcl = NULL;
- if (rc == 0) {
- pr_debug("%s:%s:%d timed out %p (%u ms)\n",
- skdev->name, __func__, __LINE__,
- sksgio, sksgio->sg.timeout);
- rc = -ETIMEDOUT;
- } else {
- pr_debug("%s:%s:%d cntlc %p\n",
- skdev->name, __func__, __LINE__, sksgio);
- rc = -EINTR;
- }
- }
-
- spin_unlock_irqrestore(&skdev->lock, flags);
-
- return rc;
-}
-
-static int skd_sg_io_put_status(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct sg_io_hdr *sgp = &sksgio->sg;
- struct skd_special_context *skspcl = sksgio->skspcl;
- int resid = 0;
-
- u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
-
- sgp->status = skspcl->req.completion.status;
- resid = sksgio->dxfer_len - nb;
-
- sgp->masked_status = sgp->status & STATUS_MASK;
- sgp->msg_status = 0;
- sgp->host_status = 0;
- sgp->driver_status = 0;
- sgp->resid = resid;
- if (sgp->masked_status || sgp->host_status || sgp->driver_status)
- sgp->info |= SG_INFO_CHECK;
-
- pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
- skdev->name, __func__, __LINE__,
- sgp->status, sgp->masked_status, sgp->resid);
-
- if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
- if (sgp->mx_sb_len > 0) {
- struct fit_comp_error_info *ei = &skspcl->req.err_info;
- u32 nbytes = sizeof(*ei);
-
- nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
-
- sgp->sb_len_wr = nbytes;
-
- if (__copy_to_user(sgp->sbp, ei, nbytes)) {
- pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
- skdev->name, __func__, __LINE__,
- sgp->sbp);
- return -EFAULT;
- }
- }
- }
-
- if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
- pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
- skdev->name, __func__, __LINE__, sksgio->argp);
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int skd_sg_io_release_skspcl(struct skd_device *skdev,
- struct skd_sg_io *sksgio)
-{
- struct skd_special_context *skspcl = sksgio->skspcl;
-
- if (skspcl != NULL) {
- ulong flags;
-
- sksgio->skspcl = NULL;
-
- spin_lock_irqsave(&skdev->lock, flags);
- skd_release_special(skdev, skspcl);
- spin_unlock_irqrestore(&skdev->lock, flags);
- }
-
- return 0;
-}
-
-/*
- *****************************************************************************
* INTERNAL REQUESTS -- generated by driver itself
*****************************************************************************
*/
@@ -1811,14 +884,15 @@ static int skd_format_internal_skspcl(struct skd_device *skdev)
uint64_t dma_address;
struct skd_scsi_request *scsi;
- fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
+ fmh = &skspcl->msg_buf->fmh;
fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
fmh->num_protocol_cmds_coalesced = 1;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi = &skspcl->msg_buf->scsi[0];
memset(scsi, 0, sizeof(*scsi));
dma_address = skspcl->req.sksg_dma_address;
scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
+ skspcl->req.n_sg = 1;
sgd->control = FIT_SGD_CONTROL_LAST;
sgd->byte_count = 0;
sgd->host_side_addr = skspcl->db_dma_address;
@@ -1846,11 +920,9 @@ static void skd_send_internal_skspcl(struct skd_device *skdev,
*/
return;
- SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
skspcl->req.state = SKD_REQ_STATE_BUSY;
- skspcl->req.id += SKD_ID_INCR;
- scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ scsi = &skspcl->msg_buf->scsi[0];
scsi->hdr.tag = skspcl->req.id;
memset(scsi->cdb, 0, sizeof(scsi->cdb));
@@ -1940,32 +1012,35 @@ static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
/* If the check condition is of special interest, log a message */
if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
&& (code == 0x04) && (qual == 0x06)) {
- pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
- "ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), key, code, qual, fruc);
+ dev_err(&skdev->pdev->dev,
+ "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ key, code, qual, fruc);
}
}
static void skd_complete_internal(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr,
struct skd_special_context *skspcl)
{
u8 *buf = skspcl->data_buf;
u8 status;
int i;
- struct skd_scsi_request *scsi =
- (struct skd_scsi_request *)&skspcl->msg_buf[64];
+ struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];
+
+ lockdep_assert_held(&skdev->lock);
SKD_ASSERT(skspcl == &skdev->internal_skspcl);
- pr_debug("%s:%s:%d complete internal %x\n",
- skdev->name, __func__, __LINE__, scsi->cdb[0]);
+ dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
+
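+	/* Pull the data buffer back from the device before examining it. */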
+ dma_sync_single_for_cpu(&skdev->pdev->dev,
+ skspcl->db_dma_address,
+ skspcl->req.sksg_list[0].byte_count,
+ DMA_BIDIRECTIONAL);
skspcl->req.completion = *skcomp;
skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
status = skspcl->req.completion.status;
@@ -1981,14 +1056,15 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "TUR failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** TUR failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -1997,14 +1073,15 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "write buffer failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** write buffer failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -2014,33 +1091,31 @@ static void skd_complete_internal(struct skd_device *skdev,
skd_send_internal_skspcl(skdev, skspcl,
READ_CAPACITY);
else {
- pr_err(
- "(%s):*** W/R Buffer mismatch %d ***\n",
- skd_name(skdev), skdev->connect_retries);
+ dev_err(&skdev->pdev->dev,
+ "*** W/R Buffer mismatch %d ***\n",
+ skdev->connect_retries);
if (skdev->connect_retries <
SKD_MAX_CONNECT_RETRIES) {
skdev->connect_retries++;
skd_soft_reset(skdev);
} else {
- pr_err(
- "(%s): W/R Buffer Connect Error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev,
+ "W/R Buffer Connect Error\n");
return;
}
}
} else {
if (skdev->state == SKD_DRVR_STATE_STOPPING) {
- pr_debug("%s:%s:%d "
- "read buffer failed, don't send anymore state 0x%x\n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "read buffer failed, don't send anymore state 0x%x\n",
+ skdev->state);
return;
}
- pr_debug("%s:%s:%d "
- "**** read buffer failed, retry skerr\n",
- skdev->name, __func__, __LINE__);
- skd_send_internal_skspcl(skdev, skspcl, 0x00);
+ dev_dbg(&skdev->pdev->dev,
+ "**** read buffer failed, retry skerr\n");
+ skd_send_internal_skspcl(skdev, skspcl,
+ TEST_UNIT_READY);
}
break;
@@ -2054,10 +1129,9 @@ static void skd_complete_internal(struct skd_device *skdev,
(buf[4] << 24) | (buf[5] << 16) |
(buf[6] << 8) | buf[7];
- pr_debug("%s:%s:%d last lba %d, bs %d\n",
- skdev->name, __func__, __LINE__,
- skdev->read_cap_last_lba,
- skdev->read_cap_blocksize);
+ dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
+ skdev->read_cap_last_lba,
+ skdev->read_cap_blocksize);
set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
@@ -2068,13 +1142,10 @@ static void skd_complete_internal(struct skd_device *skdev,
(skerr->key == MEDIUM_ERROR)) {
skdev->read_cap_last_lba = ~0;
set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
- pr_debug("%s:%s:%d "
- "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
} else {
- pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
skd_send_internal_skspcl(skdev, skspcl,
TEST_UNIT_READY);
}
@@ -2091,8 +1162,7 @@ static void skd_complete_internal(struct skd_device *skdev,
}
if (skd_unquiesce_dev(skdev) < 0)
- pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
/* connection is complete */
skdev->connect_retries = 0;
break;
@@ -2120,27 +1190,20 @@ static void skd_send_fitmsg(struct skd_device *skdev,
struct skd_fitmsg_context *skmsg)
{
u64 qcmd;
- struct fit_msg_hdr *fmh;
- pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
- skdev->name, __func__, __LINE__,
- skmsg->mb_dma_address, skdev->in_flight);
- pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
- skdev->name, __func__, __LINE__,
- skmsg->msg_buf, skmsg->offset);
+ dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
+ skmsg->mb_dma_address, skd_in_flight(skdev));
+ dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);
qcmd = skmsg->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL;
- fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
- skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
-
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skmsg->msg_buf;
int i;
for (i = 0; i < skmsg->length; i += 8) {
- pr_debug("%s:%s:%d msg[%2d] %8ph\n",
- skdev->name, __func__, __LINE__, i, &bp[i]);
+ dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
+ &bp[i]);
if (i == 0)
i = 64 - 8;
}
@@ -2160,6 +1223,12 @@ static void skd_send_fitmsg(struct skd_device *skdev,
*/
qcmd |= FIT_QCMD_MSGSIZE_64;
+ dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
+ skmsg->length, DMA_TO_DEVICE);
+
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2168,30 +1237,31 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
{
u64 qcmd;
+ WARN_ON_ONCE(skspcl->req.n_sg != 1);
+
if (unlikely(skdev->dbg_level > 1)) {
u8 *bp = (u8 *)skspcl->msg_buf;
int i;
for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
- pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
- skdev->name, __func__, __LINE__, i, &bp[i]);
+ dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
+ &bp[i]);
if (i == 0)
i = 64 - 8;
}
- pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
- skdev->name, __func__, __LINE__,
- skspcl, skspcl->req.id, skspcl->req.sksg_list,
- skspcl->req.sksg_dma_address);
+ dev_dbg(&skdev->pdev->dev,
+ "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
+ skspcl, skspcl->req.id, skspcl->req.sksg_list,
+ skspcl->req.sksg_dma_address);
for (i = 0; i < skspcl->req.n_sg; i++) {
struct fit_sg_descriptor *sgd =
&skspcl->req.sksg_list[i];
- pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
- "addr=0x%llx next=0x%llx\n",
- skdev->name, __func__, __LINE__,
- i, sgd->byte_count, sgd->control,
- sgd->host_side_addr, sgd->next_desc_ptr);
+ dev_dbg(&skdev->pdev->dev,
+ " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
+ i, sgd->byte_count, sgd->control,
+ sgd->host_side_addr, sgd->next_desc_ptr);
}
}
@@ -2202,6 +1272,20 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
qcmd = skspcl->mb_dma_address;
qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+ dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
+ SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
+ dma_sync_single_for_device(&skdev->pdev->dev,
+ skspcl->req.sksg_dma_address,
+ 1 * sizeof(struct fit_sg_descriptor),
+ DMA_TO_DEVICE);
+ dma_sync_single_for_device(&skdev->pdev->dev,
+ skspcl->db_dma_address,
+ skspcl->req.sksg_list[0].byte_count,
+ DMA_BIDIRECTIONAL);
+
+ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
+ smp_wmb();
+
SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}
@@ -2212,8 +1296,8 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
*/
static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr);
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr);
struct sns_info {
u8 type;
@@ -2262,21 +1346,20 @@ static struct sns_info skd_chkstat_table[] = {
static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
- u8 cmp_status, volatile struct fit_comp_error_info *skerr)
+ u8 cmp_status, struct fit_comp_error_info *skerr)
{
- int i, n;
+ int i;
- pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key, skerr->code, skerr->qual,
- skerr->fruc);
+ dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
+ skerr->key, skerr->code, skerr->qual, skerr->fruc);
- pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
- skdev->name, __func__, __LINE__, skerr->type, cmp_status,
- skerr->key, skerr->code, skerr->qual, skerr->fruc);
+ dev_dbg(&skdev->pdev->dev,
+ "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
+ skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
+ skerr->fruc);
/* Does the info match an entry in the good category? */
- n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
- for (i = 0; i < n; i++) {
+ for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
struct sns_info *sns = &skd_chkstat_table[i];
if (sns->mask & 0x10)
@@ -2300,10 +1383,9 @@ skd_check_status(struct skd_device *skdev,
continue;
if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
- pr_err("(%s): SMART Alert: sense key/asc/ascq "
- "%02x/%02x/%02x\n",
- skd_name(skdev), skerr->key,
- skerr->code, skerr->qual);
+ dev_err(&skdev->pdev->dev,
+ "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
+ skerr->key, skerr->code, skerr->qual);
}
return sns->action;
}
@@ -2312,335 +1394,80 @@ skd_check_status(struct skd_device *skdev,
* zero status means good
*/
if (cmp_status) {
- pr_debug("%s:%s:%d status check: error\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "status check: error\n");
return SKD_CHECK_STATUS_REPORT_ERROR;
}
- pr_debug("%s:%s:%d status check good default\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "status check good default\n");
return SKD_CHECK_STATUS_REPORT_GOOD;
}
static void skd_resolve_req_exception(struct skd_device *skdev,
- struct skd_request_context *skreq)
+ struct skd_request_context *skreq,
+ struct request *req)
{
u8 cmp_status = skreq->completion.status;
switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
case SKD_CHECK_STATUS_REPORT_GOOD:
case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
- skd_end_request(skdev, skreq, BLK_STS_OK);
+ skreq->status = BLK_STS_OK;
+ blk_mq_complete_request(req);
break;
case SKD_CHECK_STATUS_BUSY_IMMINENT:
skd_log_skreq(skdev, skreq, "retry(busy)");
- blk_requeue_request(skdev->queue, skreq->req);
- pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
+ blk_requeue_request(skdev->queue, req);
+ dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
skdev->timer_countdown = SKD_TIMER_MINUTES(20);
skd_quiesce_dev(skdev);
break;
case SKD_CHECK_STATUS_REQUEUE_REQUEST:
- if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
+ if ((unsigned long) ++req->special < SKD_MAX_RETRIES) {
skd_log_skreq(skdev, skreq, "retry");
- blk_requeue_request(skdev->queue, skreq->req);
+ blk_requeue_request(skdev->queue, req);
break;
}
- /* fall through to report error */
+ /* fall through */
case SKD_CHECK_STATUS_REPORT_ERROR:
default:
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
+ skreq->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
break;
}
}
-/* assume spinlock is already held */
static void skd_release_skreq(struct skd_device *skdev,
struct skd_request_context *skreq)
{
- u32 msg_slot;
- struct skd_fitmsg_context *skmsg;
-
- u32 timo_slot;
-
- /*
- * Reclaim the FIT msg buffer if this is
- * the first of the requests it carried to
- * be completed. The FIT msg buffer used to
- * send this request cannot be reused until
- * we are sure the s1120 card has copied
- * it to its memory. The FIT msg might have
- * contained several requests. As soon as
- * any of them are completed we know that
- * the entire FIT msg was transferred.
- * Only the first completed request will
- * match the FIT msg buffer id. The FIT
- * msg buffer id is immediately updated.
- * When subsequent requests complete the FIT
- * msg buffer id won't match, so we know
- * quite cheaply that it is already done.
- */
- msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
- SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
-
- skmsg = &skdev->skmsg_table[msg_slot];
- if (skmsg->id == skreq->fitmsg_id) {
- SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
- SKD_ASSERT(skmsg->outstanding > 0);
- skmsg->outstanding--;
- if (skmsg->outstanding == 0) {
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- skmsg->next = skdev->skmsg_free_list;
- skdev->skmsg_free_list = skmsg;
- }
- }
-
- /*
- * Decrease the number of active requests.
- * Also decrements the count in the timeout slot.
- */
- SKD_ASSERT(skdev->in_flight > 0);
- skdev->in_flight -= 1;
-
- timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
- SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
- skdev->timeout_slot[timo_slot] -= 1;
-
- /*
- * Reset backpointer
- */
- skreq->req = NULL;
-
/*
* Reclaim the skd_request_context
*/
skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- skreq->next = skdev->skreq_free_list;
- skdev->skreq_free_list = skreq;
}
-#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
-
-static void skd_do_inq_page_00(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
-
- /* Caller requested "supported pages". The driver needs to insert
- * its page.
- */
- pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
- skdev->name, __func__, __LINE__);
-
- /* If the device rejected the request because the CDB was
- * improperly formed, then just leave.
- */
- if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
- skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
- return;
-
- /* Get the amount of space the caller allocated */
- max_bytes = (cdb[3] << 8) | cdb[4];
-
- /* Get the number of pages actually returned by the device */
- drive_pages = (buf[2] << 8) | buf[3];
- drive_bytes = drive_pages + 4;
- new_size = drive_pages + 1;
-
- /* Supported pages must be in numerical order, so find where
- * the driver page needs to be inserted into the list of
- * pages returned by the device.
- */
- for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
- if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
- return; /* Device using this page code. abort */
- else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
- break;
- }
-
- if (insert_pt < max_bytes) {
- uint16_t u;
-
- /* Shift everything up one byte to make room. */
- for (u = new_size + 3; u > insert_pt; u--)
- buf[u] = buf[u - 1];
- buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
-
- /* SCSI byte order increment of num_returned_bytes by 1 */
- skcomp->num_returned_bytes =
- be32_to_cpu(skcomp->num_returned_bytes) + 1;
- skcomp->num_returned_bytes =
- be32_to_cpu(skcomp->num_returned_bytes);
- }
-
- /* update page length field to reflect the driver's page too */
- buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
- buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
-}
-
-static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
-{
- int pcie_reg;
- u16 pci_bus_speed;
- u8 pci_lanes;
-
- pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (pcie_reg) {
- u16 linksta;
- pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
-
- pci_bus_speed = linksta & 0xF;
- pci_lanes = (linksta & 0x3F0) >> 4;
- } else {
- *speed = STEC_LINK_UNKNOWN;
- *width = 0xFF;
- return;
- }
-
- switch (pci_bus_speed) {
- case 1:
- *speed = STEC_LINK_2_5GTS;
- break;
- case 2:
- *speed = STEC_LINK_5GTS;
- break;
- case 3:
- *speed = STEC_LINK_8GTS;
- break;
- default:
- *speed = STEC_LINK_UNKNOWN;
- break;
- }
-
- if (pci_lanes <= 0x20)
- *width = pci_lanes;
- else
- *width = 0xFF;
-}
-
-static void skd_do_inq_page_da(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- struct pci_dev *pdev = skdev->pdev;
- unsigned max_bytes;
- struct driver_inquiry_data inq;
- u16 val;
-
- pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
- skdev->name, __func__, __LINE__);
-
- memset(&inq, 0, sizeof(inq));
-
- inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
-
- skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
- inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
- inq.pcie_device_number = PCI_SLOT(pdev->devfn);
- inq.pcie_function_number = PCI_FUNC(pdev->devfn);
-
- pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
- inq.pcie_vendor_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
- inq.pcie_device_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
- inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
-
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
- inq.pcie_subsystem_device_id = cpu_to_be16(val);
-
-	/* Driver version, fixed length, padded with spaces on the right */
- inq.driver_version_length = sizeof(inq.driver_version);
- memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
- memcpy(inq.driver_version, DRV_VER_COMPL,
- min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
-
- inq.page_length = cpu_to_be16((sizeof(inq) - 4));
-
- /* Clear the error set by the device */
- skcomp->status = SAM_STAT_GOOD;
- memset((void *)skerr, 0, sizeof(*skerr));
-
- /* copy response into output buffer */
- max_bytes = (cdb[3] << 8) | cdb[4];
- memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
-
- skcomp->num_returned_bytes =
- be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
-}
-
-static void skd_do_driver_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr,
- uint8_t *cdb, uint8_t *buf)
-{
- if (!buf)
- return;
- else if (cdb[0] != INQUIRY)
- return; /* Not an INQUIRY */
- else if ((cdb[1] & 1) == 0)
- return; /* EVPD not set */
- else if (cdb[2] == 0)
- /* Need to add driver's page to supported pages list */
- skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
- else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
- /* Caller requested driver's page */
- skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
-}
-
-static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
-{
- if (!sg)
- return NULL;
- if (!sg_page(sg))
- return NULL;
- return sg_virt(sg);
-}
-
-static void skd_process_scsi_inq(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- uint8_t *buf;
- struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
- struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
-
- dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
- skspcl->req.sg_data_dir);
- buf = skd_sg_1st_page_ptr(skspcl->req.sg);
-
- if (buf)
- skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
-}
-
-
static int skd_isr_completion_posted(struct skd_device *skdev,
int limit, int *enqueued)
{
- volatile struct fit_completion_entry_v1 *skcmp = NULL;
- volatile struct fit_comp_error_info *skerr;
+ struct fit_completion_entry_v1 *skcmp;
+ struct fit_comp_error_info *skerr;
u16 req_id;
- u32 req_slot;
+ u32 tag;
+ u16 hwq = 0;
+ struct request *rq;
struct skd_request_context *skreq;
- u16 cmp_cntxt = 0;
- u8 cmp_status = 0;
- u8 cmp_cycle = 0;
- u32 cmp_bytes = 0;
+ u16 cmp_cntxt;
+ u8 cmp_status;
+ u8 cmp_cycle;
+ u32 cmp_bytes;
int rc = 0;
int processed = 0;
+ lockdep_assert_held(&skdev->lock);
+
for (;; ) {
SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
@@ -2652,16 +1479,14 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
skerr = &skdev->skerr_table[skdev->skcomp_ix];
- pr_debug("%s:%s:%d "
- "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
- "busy=%d rbytes=0x%x proto=%d\n",
- skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
- skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
- skdev->in_flight, cmp_bytes, skdev->proto_ver);
+ dev_dbg(&skdev->pdev->dev,
+ "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
+ skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
+ cmp_cntxt, cmp_status, skd_in_flight(skdev),
+ cmp_bytes, skdev->proto_ver);
if (cmp_cycle != skdev->skcomp_cycle) {
- pr_debug("%s:%s:%d end of completions\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "end of completions\n");
break;
}
/*
@@ -2680,49 +1505,38 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
* r/w request (see skd_start() above) or a special request.
*/
req_id = cmp_cntxt;
- req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
+ tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
/* Is this other than a r/w request? */
- if (req_slot >= skdev->num_req_context) {
+ if (tag >= skdev->num_req_context) {
/*
* This is not a completion for a r/w request.
*/
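+			/*
+			 * Tags in this range never map to a blk-mq request,
+			 * so blk_mq_tag_to_rq() is expected to return NULL.
+			 */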
+ WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
+ tag));
skd_complete_other(skdev, skcmp, skerr);
continue;
}
- skreq = &skdev->skreq_table[req_slot];
+ rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
+ if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
+ tag))
+ continue;
+ skreq = blk_mq_rq_to_pdu(rq);
/*
* Make sure the request ID for the slot matches.
*/
if (skreq->id != req_id) {
- pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- req_id, skreq->id);
- {
- u16 new_id = cmp_cntxt;
- pr_err("(%s): Completion mismatch "
- "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
- skd_name(skdev), req_id,
- skreq->id, new_id);
+ dev_err(&skdev->pdev->dev,
+ "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
+ req_id, skreq->id, cmp_cntxt);
- continue;
- }
+ continue;
}
SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
- if (skreq->state == SKD_REQ_STATE_ABORTED) {
- pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id);
- /* a previously timed out command can
- * now be cleaned up */
- skd_release_skreq(skdev, skreq);
- continue;
- }
-
skreq->completion = *skcmp;
if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
skreq->err_info = *skerr;
@@ -2734,27 +1548,17 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
if (skreq->n_sg > 0)
skd_postop_sg_list(skdev, skreq);
- if (!skreq->req) {
- pr_debug("%s:%s:%d NULL backptr skdreq %p, "
- "req=0x%x req_id=0x%x\n",
- skdev->name, __func__, __LINE__,
- skreq, skreq->id, req_id);
- } else {
- /*
- * Capture the outcome and post it back to the
- * native request.
- */
- if (likely(cmp_status == SAM_STAT_GOOD))
- skd_end_request(skdev, skreq, BLK_STS_OK);
- else
- skd_resolve_req_exception(skdev, skreq);
- }
+ skd_release_skreq(skdev, skreq);
/*
- * Release the skreq, its FIT msg (if one), timeout slot,
- * and queue depth.
+ * Capture the outcome and post it back to the native request.
*/
- skd_release_skreq(skdev, skreq);
+ if (likely(cmp_status == SAM_STAT_GOOD)) {
+ skreq->status = BLK_STS_OK;
+ blk_mq_complete_request(rq);
+ } else {
+ skd_resolve_req_exception(skdev, skreq, rq);
+ }
/* skd_isr_comp_limit equal zero means no limit */
if (limit) {
@@ -2765,8 +1569,8 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
}
}
- if ((skdev->state == SKD_DRVR_STATE_PAUSING)
- && (skdev->in_flight) == 0) {
+ if (skdev->state == SKD_DRVR_STATE_PAUSING &&
+ skd_in_flight(skdev) == 0) {
skdev->state = SKD_DRVR_STATE_PAUSED;
wake_up_interruptible(&skdev->waitq);
}
@@ -2775,21 +1579,22 @@ static int skd_isr_completion_posted(struct skd_device *skdev,
}
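
For reference, the lookup that the rewritten completion path relies on: the completion context carries a blk-mq tag, the tag resolves to a struct request, and blk_mq_rq_to_pdu() yields the driver's per-request context. A minimal sketch of that pattern (illustrative only; my_complete_one() and the fixed hardware-queue index 0 are assumptions, not part of the patch):

#include <linux/blk-mq.h>

/* Hypothetical helper: map a completion tag back to its request and PDU. */
static void my_complete_one(struct skd_device *skdev, u32 tag,
			    blk_status_t status)
{
	/* hwq 0: this driver configures a single hardware queue */
	struct request *rq = blk_mq_tag_to_rq(skdev->tag_set.tags[0], tag);
	struct skd_request_context *skreq;

	if (!rq)
		return;		/* stale tag: no request owns it right now */

	skreq = blk_mq_rq_to_pdu(rq);	/* PDU reserved via tag_set.cmd_size */
	skreq->status = status;
	blk_mq_complete_request(rq);	/* defers to the .complete callback */
}
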
static void skd_complete_other(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1 *skcomp,
- volatile struct fit_comp_error_info *skerr)
+ struct fit_completion_entry_v1 *skcomp,
+ struct fit_comp_error_info *skerr)
{
u32 req_id = 0;
u32 req_table;
u32 req_slot;
struct skd_special_context *skspcl;
+ lockdep_assert_held(&skdev->lock);
+
req_id = skcomp->tag;
req_table = req_id & SKD_ID_TABLE_MASK;
req_slot = req_id & SKD_ID_SLOT_MASK;
- pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
- skdev->name, __func__, __LINE__,
- req_table, req_id, req_slot);
+ dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
+ req_id, req_slot);
/*
* Based on the request id, determine how to dispatch this completion.
@@ -2799,28 +1604,12 @@ static void skd_complete_other(struct skd_device *skdev,
switch (req_table) {
case SKD_ID_RW_REQUEST:
/*
- * The caller, skd_completion_posted_isr() above,
+ * The caller, skd_isr_completion_posted() above,
* handles r/w requests. The only way we get here
* is if the req_slot is out of bounds.
*/
break;
- case SKD_ID_SPECIAL_REQUEST:
- /*
- * Make sure the req_slot is in bounds and that the id
- * matches.
- */
- if (req_slot < skdev->n_special) {
- skspcl = &skdev->skspcl_table[req_slot];
- if (skspcl->req.id == req_id &&
- skspcl->req.state == SKD_REQ_STATE_BUSY) {
- skd_complete_special(skdev,
- skcomp, skerr, skspcl);
- return;
- }
- }
- break;
-
case SKD_ID_INTERNAL:
if (req_slot == 0) {
skspcl = &skdev->internal_skspcl;
@@ -2851,72 +1640,9 @@ static void skd_complete_other(struct skd_device *skdev,
*/
}
-static void skd_complete_special(struct skd_device *skdev,
- volatile struct fit_completion_entry_v1
- *skcomp,
- volatile struct fit_comp_error_info *skerr,
- struct skd_special_context *skspcl)
-{
- pr_debug("%s:%s:%d completing special request %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- if (skspcl->orphaned) {
- /* Discard orphaned request */
- /* ?: Can this release directly or does it need
- * to use a worker? */
- pr_debug("%s:%s:%d release orphaned %p\n",
- skdev->name, __func__, __LINE__, skspcl);
- skd_release_special(skdev, skspcl);
- return;
- }
-
- skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
-
- skspcl->req.state = SKD_REQ_STATE_COMPLETED;
- skspcl->req.completion = *skcomp;
- skspcl->req.err_info = *skerr;
-
- skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
- skerr->code, skerr->qual, skerr->fruc);
-
- wake_up_interruptible(&skdev->waitq);
-}
-
-/* assume spinlock is already held */
-static void skd_release_special(struct skd_device *skdev,
- struct skd_special_context *skspcl)
-{
- int i, was_depleted;
-
- for (i = 0; i < skspcl->req.n_sg; i++) {
- struct page *page = sg_page(&skspcl->req.sg[i]);
- __free_page(page);
- }
-
- was_depleted = (skdev->skspcl_free_list == NULL);
-
- skspcl->req.state = SKD_REQ_STATE_IDLE;
- skspcl->req.id += SKD_ID_INCR;
- skspcl->req.next =
- (struct skd_request_context *)skdev->skspcl_free_list;
- skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
-
- if (was_depleted) {
- pr_debug("%s:%s:%d skspcl was depleted\n",
- skdev->name, __func__, __LINE__);
- /* Free list was depleted. Their might be waiters. */
- wake_up_interruptible(&skdev->waitq);
- }
-}
-
static void skd_reset_skcomp(struct skd_device *skdev)
{
- u32 nbytes;
- struct fit_completion_entry_v1 *skcomp;
-
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
-
- memset(skdev->skcomp_table, 0, nbytes);
+ memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
skdev->skcomp_ix = 0;
skdev->skcomp_cycle = 1;
@@ -2941,7 +1667,7 @@ static void skd_completion_worker(struct work_struct *work)
* process everything in compq
*/
skd_isr_completion_posted(skdev, 0, &flush_enqueued);
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock_irqrestore(&skdev->lock, flags);
}
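
Throughout the patch, calls into the legacy request function are replaced by scheduling the new start_queue work item, so the queue is kicked from process context rather than from under skdev->lock in interrupt context. The work handler itself is introduced earlier in the patch and is not visible in this hunk; a plausible sketch, assuming it simply re-runs the hardware queues asynchronously:

#include <linux/blk-mq.h>
#include <linux/workqueue.h>

/* Assumed shape of the start_queue work handler (defined elsewhere). */
static void skd_start_queue(struct work_struct *work)
{
	struct skd_device *skdev = container_of(work, typeof(*skdev),
						start_queue);

	/* Kick blk-mq; it will invoke .queue_rq for any queued requests. */
	blk_mq_run_hw_queues(skdev->queue, true);
}
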
@@ -2951,14 +1677,13 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev);
static irqreturn_t
skd_isr(int irq, void *ptr)
{
- struct skd_device *skdev;
+ struct skd_device *skdev = ptr;
u32 intstat;
u32 ack;
int rc = 0;
int deferred = 0;
int flush_enqueued = 0;
- skdev = (struct skd_device *)ptr;
spin_lock(&skdev->lock);
for (;; ) {
@@ -2967,8 +1692,8 @@ skd_isr(int irq, void *ptr)
ack = FIT_INT_DEF_MASK;
ack &= intstat;
- pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
- skdev->name, __func__, __LINE__, intstat, ack);
+ dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
+ ack);
/* As long as there is an int pending on device, keep
* running loop. When none, get out, but if we've never
@@ -3018,12 +1743,12 @@ skd_isr(int irq, void *ptr)
}
if (unlikely(flush_enqueued))
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock(&skdev->lock);
@@ -3033,13 +1758,13 @@ skd_isr(int irq, void *ptr)
static void skd_drive_fault(struct skd_device *skdev)
{
skdev->state = SKD_DRVR_STATE_FAULT;
- pr_err("(%s): Drive FAULT\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive FAULT\n");
}
static void skd_drive_disappeared(struct skd_device *skdev)
{
skdev->state = SKD_DRVR_STATE_DISAPPEARED;
- pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
}
static void skd_isr_fwstate(struct skd_device *skdev)
@@ -3052,10 +1777,9 @@ static void skd_isr_fwstate(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_STATUS);
state = sense & FIT_SR_DRIVE_STATE_MASK;
- pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_drive_state_to_str(state), state);
+ dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_drive_state_to_str(state), state);
skdev->drive_state = state;
@@ -3066,7 +1790,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
break;
}
if (skdev->state == SKD_DRVR_STATE_RESTARTING)
- skd_recover_requests(skdev, 0);
+ skd_recover_requests(skdev);
if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
skdev->timer_countdown = SKD_STARTING_TIMO;
skdev->state = SKD_DRVR_STATE_STARTING;
@@ -3087,11 +1811,11 @@ static void skd_isr_fwstate(struct skd_device *skdev)
skdev->cur_max_queue_depth * 2 / 3 + 1;
if (skdev->queue_low_water_mark < 1)
skdev->queue_low_water_mark = 1;
- pr_info(
- "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
- skd_name(skdev),
- skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ dev_info(&skdev->pdev->dev,
+ "Queue depth limit=%d dev=%d lowat=%d\n",
+ skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth,
+ skdev->queue_low_water_mark);
skd_refresh_device_data(skdev);
break;
@@ -3107,7 +1831,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_TIMER_SECONDS(3);
- blk_start_queue(skdev->queue);
+ schedule_work(&skdev->start_queue);
break;
case FIT_SR_DRIVE_BUSY_ERASE:
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
@@ -3128,8 +1852,7 @@ static void skd_isr_fwstate(struct skd_device *skdev)
}
break;
case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
+ dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
break;
@@ -3141,17 +1864,17 @@ static void skd_isr_fwstate(struct skd_device *skdev)
case FIT_SR_DRIVE_FAULT:
skd_drive_fault(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
+ skd_recover_requests(skdev);
+ schedule_work(&skdev->start_queue);
break;
/* PCIe bus returned all Fs? */
case 0xFF:
- pr_info("(%s): state=0x%x sense=0x%x\n",
- skd_name(skdev), state, sense);
+ dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
+ sense);
skd_drive_disappeared(skdev);
- skd_recover_requests(skdev, 0);
- blk_start_queue(skdev->queue);
+ skd_recover_requests(skdev);
+ schedule_work(&skdev->start_queue);
break;
default:
/*
@@ -3159,92 +1882,33 @@ static void skd_isr_fwstate(struct skd_device *skdev)
*/
break;
}
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
+ dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
+ skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
}
-static void skd_recover_requests(struct skd_device *skdev, int requeue)
+static void skd_recover_request(struct request *req, void *data, bool reserved)
{
- int i;
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq = &skdev->skreq_table[i];
-
- if (skreq->state == SKD_REQ_STATE_BUSY) {
- skd_log_skreq(skdev, skreq, "recover");
-
- SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
- SKD_ASSERT(skreq->req != NULL);
-
- /* Release DMA resources for the request. */
- if (skreq->n_sg > 0)
- skd_postop_sg_list(skdev, skreq);
-
- if (requeue &&
- (unsigned long) ++skreq->req->special <
- SKD_MAX_RETRIES)
- blk_requeue_request(skdev->queue, skreq->req);
- else
- skd_end_request(skdev, skreq, BLK_STS_IOERR);
-
- skreq->req = NULL;
-
- skreq->state = SKD_REQ_STATE_IDLE;
- skreq->id += SKD_ID_INCR;
- }
- if (i > 0)
- skreq[-1].next = skreq;
- skreq->next = NULL;
- }
- skdev->skreq_free_list = skdev->skreq_table;
+ struct skd_device *const skdev = data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
- for (i = 0; i < skdev->num_fitmsg_context; i++) {
- struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
+ if (skreq->state != SKD_REQ_STATE_BUSY)
+ return;
- if (skmsg->state == SKD_MSG_STATE_BUSY) {
- skd_log_skmsg(skdev, skmsg, "salvaged");
- SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
- skmsg->state = SKD_MSG_STATE_IDLE;
- skmsg->id += SKD_ID_INCR;
- }
- if (i > 0)
- skmsg[-1].next = skmsg;
- skmsg->next = NULL;
- }
- skdev->skmsg_free_list = skdev->skmsg_table;
+ skd_log_skreq(skdev, skreq, "recover");
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl = &skdev->skspcl_table[i];
+ /* Release DMA resources for the request. */
+ if (skreq->n_sg > 0)
+ skd_postop_sg_list(skdev, skreq);
- /* If orphaned, reclaim it because it has already been reported
- * to the process as an error (it was just waiting for
- * a completion that didn't come, and now it will never come)
- * If busy, change to a state that will cause it to error
- * out in the wait routine and let it do the normal
- * reporting and reclaiming
- */
- if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
- if (skspcl->orphaned) {
- pr_debug("%s:%s:%d orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skd_release_special(skdev, skspcl);
- } else {
- pr_debug("%s:%s:%d not orphaned %p\n",
- skdev->name, __func__, __LINE__,
- skspcl);
- skspcl->req.state = SKD_REQ_STATE_ABORTED;
- }
- }
- }
- skdev->skspcl_free_list = skdev->skspcl_table;
-
- for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
- skdev->timeout_slot[i] = 0;
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->status = BLK_STS_IOERR;
+ blk_mq_complete_request(req);
+}
- skdev->in_flight = 0;
+static void skd_recover_requests(struct skd_device *skdev)
+{
+ blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
}
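
blk_mq_tagset_busy_iter() calls the given function once for every request that has been started but not yet completed, across all hardware queues of the tag set, which is why recovery no longer needs a driver-private request table. The skd_in_flight() helper used in the debug output above is defined elsewhere in the patch; a plausible sketch built on the same iterator (the context struct and my_skd_in_flight() name are illustrative):

#include <linux/blk-mq.h>

struct skd_in_flight_ctx {
	unsigned int count;
};

/* Called once per started-but-not-completed request. */
static void skd_count_in_flight(struct request *rq, void *data, bool reserved)
{
	struct skd_in_flight_ctx *ctx = data;

	ctx->count++;
}

static unsigned int my_skd_in_flight(struct skd_device *skdev)
{
	struct skd_in_flight_ctx ctx = { .count = 0 };

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_count_in_flight, &ctx);
	return ctx.count;
}
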
static void skd_isr_msg_from_dev(struct skd_device *skdev)
@@ -3255,8 +1919,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
- skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
+ dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
+ skdev->last_mtd);
/* ignore any mtd that is an ack for something we didn't send */
if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
@@ -3267,13 +1931,10 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
- pr_err("(%s): protocol mismatch\n",
- skdev->name);
- pr_err("(%s): got=%d support=%d\n",
- skdev->name, skdev->proto_ver,
- FIT_PROTOCOL_VERSION_1);
- pr_err("(%s): please upgrade driver\n",
- skdev->name);
+ dev_err(&skdev->pdev->dev, "protocol mismatch\n");
+ dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
+ skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
+ dev_err(&skdev->pdev->dev, " please upgrade driver\n");
skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
skd_soft_reset(skdev);
break;
@@ -3327,9 +1988,8 @@ static void skd_isr_msg_from_dev(struct skd_device *skdev)
SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
skdev->last_mtd = mtd;
- pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
- skd_name(skdev),
- skdev->connect_time_stamp, skdev->drive_jiffies);
+ dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
+ skdev->connect_time_stamp, skdev->drive_jiffies);
break;
case FIT_MTD_ARM_QUEUE:
@@ -3351,8 +2011,7 @@ static void skd_disable_interrupts(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_CONTROL);
sense &= ~FIT_CR_ENABLE_INTERRUPTS;
SKD_WRITEL(skdev, sense, FIT_CONTROL);
- pr_debug("%s:%s:%d sense 0x%x\n",
- skdev->name, __func__, __LINE__, sense);
+ dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
/* Note that the 1s is written. A 1-bit means
* disable, a 0 means enable.
@@ -3371,13 +2030,11 @@ static void skd_enable_interrupts(struct skd_device *skdev)
/* Note that the complement of mask is written. A 1-bit means
* disable, a 0 means enable. */
SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d interrupt mask=0x%x\n",
- skdev->name, __func__, __LINE__, ~val);
+ dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
val = SKD_READL(skdev, FIT_CONTROL);
val |= FIT_CR_ENABLE_INTERRUPTS;
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
+ dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
SKD_WRITEL(skdev, val, FIT_CONTROL);
}
@@ -3393,8 +2050,7 @@ static void skd_soft_reset(struct skd_device *skdev)
val = SKD_READL(skdev, FIT_CONTROL);
val |= (FIT_CR_SOFT_RESET);
- pr_debug("%s:%s:%d control=0x%x\n",
- skdev->name, __func__, __LINE__, val);
+ dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
SKD_WRITEL(skdev, val, FIT_CONTROL);
}
@@ -3411,8 +2067,7 @@ static void skd_start_device(struct skd_device *skdev)
sense = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d initial status=0x%x\n",
- skdev->name, __func__, __LINE__, sense);
+ dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
state = sense & FIT_SR_DRIVE_STATE_MASK;
skdev->drive_state = state;
@@ -3425,25 +2080,23 @@ static void skd_start_device(struct skd_device *skdev)
switch (skdev->drive_state) {
case FIT_SR_DRIVE_OFFLINE:
- pr_err("(%s): Drive offline...\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive offline...\n");
break;
case FIT_SR_DRIVE_FW_BOOTING:
- pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
- skdev->name, __func__, __LINE__, skdev->name);
+ dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
break;
case FIT_SR_DRIVE_BUSY_SANITIZE:
- pr_info("(%s): Start: BUSY_SANITIZE\n",
- skd_name(skdev));
+ dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
case FIT_SR_DRIVE_BUSY_ERASE:
- pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
+ dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
@@ -3454,14 +2107,13 @@ static void skd_start_device(struct skd_device *skdev)
break;
case FIT_SR_DRIVE_BUSY:
- pr_err("(%s): Drive Busy...\n", skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "Drive Busy...\n");
skdev->state = SKD_DRVR_STATE_BUSY;
skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
break;
case FIT_SR_DRIVE_SOFT_RESET:
- pr_err("(%s) drive soft reset in prog\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
break;
case FIT_SR_DRIVE_FAULT:
@@ -3471,9 +2123,8 @@ static void skd_start_device(struct skd_device *skdev)
*/
skd_drive_fault(skdev);
/*start the queue so we can respond with error to requests */
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "starting queue\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
@@ -3483,38 +2134,33 @@ static void skd_start_device(struct skd_device *skdev)
* to the BAR1 addresses. */
skd_drive_disappeared(skdev);
/*start the queue so we can respond with error to requests */
- pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_start_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev,
+ "starting queue to error-out reqs\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = -1;
wake_up_interruptible(&skdev->waitq);
break;
default:
- pr_err("(%s) Start: unknown state %x\n",
- skd_name(skdev), skdev->drive_state);
+ dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
+ skdev->drive_state);
break;
}
state = SKD_READL(skdev, FIT_CONTROL);
- pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
- pr_debug("%s:%s:%d Intr Status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
state = SKD_READL(skdev, FIT_INT_MASK_HOST);
- pr_debug("%s:%s:%d Intr Mask=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
- pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
state = SKD_READL(skdev, FIT_HW_VERSION);
- pr_debug("%s:%s:%d HW version=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
spin_unlock_irqrestore(&skdev->lock, flags);
}
@@ -3529,14 +2175,12 @@ static void skd_stop_device(struct skd_device *skdev)
spin_lock_irqsave(&skdev->lock, flags);
if (skdev->state != SKD_DRVR_STATE_ONLINE) {
- pr_err("(%s): skd_stop_device not online no sync\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
goto stop_out;
}
if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
- pr_err("(%s): skd_stop_device no special\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
goto stop_out;
}
@@ -3554,16 +2198,13 @@ static void skd_stop_device(struct skd_device *skdev)
switch (skdev->sync_done) {
case 0:
- pr_err("(%s): skd_stop_device no sync\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
break;
case 1:
- pr_err("(%s): skd_stop_device sync done\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
break;
default:
- pr_err("(%s): skd_stop_device sync error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
}
stop_out:
@@ -3593,8 +2234,8 @@ stop_out:
}
if (dev_state != FIT_SR_DRIVE_INIT)
- pr_err("(%s): skd_stop_device state error 0x%02x\n",
- skd_name(skdev), dev_state);
+ dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
+ dev_state);
}
/* assume spinlock is held */
@@ -3607,8 +2248,7 @@ static void skd_restart_device(struct skd_device *skdev)
state = SKD_READL(skdev, FIT_STATUS);
- pr_debug("%s:%s:%d drive status=0x%x\n",
- skdev->name, __func__, __LINE__, state);
+ dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
state &= FIT_SR_DRIVE_STATE_MASK;
skdev->drive_state = state;
@@ -3628,9 +2268,8 @@ static int skd_quiesce_dev(struct skd_device *skdev)
switch (skdev->state) {
case SKD_DRVR_STATE_BUSY:
case SKD_DRVR_STATE_BUSY_IMMINENT:
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n");
+ blk_mq_stop_hw_queues(skdev->queue);
break;
case SKD_DRVR_STATE_ONLINE:
case SKD_DRVR_STATE_STOPPING:
@@ -3642,8 +2281,8 @@ static int skd_quiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_RESUMING:
default:
rc = -EINVAL;
- pr_debug("%s:%s:%d state [%d] not implemented\n",
- skdev->name, __func__, __LINE__, skdev->state);
+ dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
+ skdev->state);
}
return rc;
}
@@ -3655,8 +2294,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
skd_log_skdev(skdev, "unquiesce");
if (skdev->state == SKD_DRVR_STATE_ONLINE) {
- pr_debug("%s:%s:%d **** device already ONLINE\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
return 0;
}
if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
@@ -3669,8 +2307,7 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
* to become available.
*/
skdev->state = SKD_DRVR_STATE_BUSY;
- pr_debug("%s:%s:%d drive BUSY state\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
return 0;
}
@@ -3689,26 +2326,24 @@ static int skd_unquiesce_dev(struct skd_device *skdev)
case SKD_DRVR_STATE_IDLE:
case SKD_DRVR_STATE_LOAD:
skdev->state = SKD_DRVR_STATE_ONLINE;
- pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
- skd_name(skdev),
- skd_skdev_state_to_str(prev_driver_state),
- prev_driver_state, skd_skdev_state_to_str(skdev->state),
- skdev->state);
- pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
- skdev->name, __func__, __LINE__);
- pr_debug("%s:%s:%d starting %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
- blk_start_queue(skdev->queue);
+ dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
+ skd_skdev_state_to_str(prev_driver_state),
+ prev_driver_state, skd_skdev_state_to_str(skdev->state),
+ skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "**** device ONLINE...starting block queue\n");
+ dev_dbg(&skdev->pdev->dev, "starting queue\n");
+ dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
+ schedule_work(&skdev->start_queue);
skdev->gendisk_on = 1;
wake_up_interruptible(&skdev->waitq);
break;
case SKD_DRVR_STATE_DISAPPEARED:
default:
- pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
- skdev->name, __func__, __LINE__,
- skdev->state);
+ dev_dbg(&skdev->pdev->dev,
+ "**** driver state %d, not implemented\n",
+ skdev->state);
return -EBUSY;
}
return 0;
@@ -3726,11 +2361,10 @@ static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
- pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
- irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
spin_unlock_irqrestore(&skdev->lock, flags);
return IRQ_HANDLED;
@@ -3742,9 +2376,8 @@ static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
skd_isr_fwstate(skdev);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3759,19 +2392,18 @@ static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
int deferred;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
&flush_enqueued);
if (flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
if (deferred)
schedule_work(&skdev->completion_worker);
else if (!flush_enqueued)
- skd_request_fn(skdev->queue);
+ schedule_work(&skdev->start_queue);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3784,9 +2416,8 @@ static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
skd_isr_msg_from_dev(skdev);
spin_unlock_irqrestore(&skdev->lock, flags);
@@ -3799,9 +2430,8 @@ static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
unsigned long flags;
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d MSIX = 0x%x\n",
- skdev->name, __func__, __LINE__,
- SKD_READL(skdev, FIT_INT_STATUS_HOST));
+ dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
+ SKD_READL(skdev, FIT_INT_STATUS_HOST));
SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
spin_unlock_irqrestore(&skdev->lock, flags);
return IRQ_HANDLED;
@@ -3850,8 +2480,7 @@ static int skd_acquire_msix(struct skd_device *skdev)
rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
PCI_IRQ_MSIX);
if (rc < 0) {
- pr_err("(%s): failed to enable MSI-X %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
goto out;
}
@@ -3859,8 +2488,7 @@ static int skd_acquire_msix(struct skd_device *skdev)
sizeof(struct skd_msix_entry), GFP_KERNEL);
if (!skdev->msix_entries) {
rc = -ENOMEM;
- pr_err("(%s): msix table allocation error\n",
- skd_name(skdev));
+ dev_err(&skdev->pdev->dev, "msix table allocation error\n");
goto out;
}
@@ -3877,16 +2505,15 @@ static int skd_acquire_msix(struct skd_device *skdev)
msix_entries[i].handler, 0,
qentry->isr_name, skdev);
if (rc) {
- pr_err("(%s): Unable to register(%d) MSI-X "
- "handler %d: %s\n",
- skd_name(skdev), rc, i, qentry->isr_name);
+ dev_err(&skdev->pdev->dev,
+ "Unable to register(%d) MSI-X handler %d: %s\n",
+ rc, i, qentry->isr_name);
goto msix_out;
}
}
- pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
- skdev->name, __func__, __LINE__,
- pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
+ dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
+ SKD_MAX_MSIX_COUNT);
return 0;
msix_out:
@@ -3909,8 +2536,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
if (!rc)
return 0;
- pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev,
+ "failed to enable MSI-X, re-trying with MSI %d\n", rc);
}
snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
@@ -3920,8 +2547,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
irq_flag |= PCI_IRQ_MSI;
rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
if (rc < 0) {
- pr_err("(%s): failed to allocate the MSI interrupt %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev,
+ "failed to allocate the MSI interrupt %d\n", rc);
return rc;
}
@@ -3930,8 +2557,8 @@ static int skd_acquire_irq(struct skd_device *skdev)
skdev->isr_name, skdev);
if (rc) {
pci_free_irq_vectors(pdev);
- pr_err("(%s): failed to allocate interrupt %d\n",
- skd_name(skdev), rc);
+ dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
+ rc);
return rc;
}
@@ -3965,20 +2592,45 @@ static void skd_release_irq(struct skd_device *skdev)
*****************************************************************************
*/
+static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
+ dma_addr_t *dma_handle, gfp_t gfp,
+ enum dma_data_direction dir)
+{
+ struct device *dev = &skdev->pdev->dev;
+ void *buf;
+
+ buf = kmem_cache_alloc(s, gfp);
+ if (!buf)
+ return NULL;
+ *dma_handle = dma_map_single(dev, buf, s->size, dir);
+ if (dma_mapping_error(dev, *dma_handle)) {
+ kfree(buf);
+ buf = NULL;
+ }
+ return buf;
+}
+
+static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
+ void *vaddr, dma_addr_t dma_handle,
+ enum dma_data_direction dir)
+{
+ if (!vaddr)
+ return;
+
+ dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
+ kmem_cache_free(s, vaddr);
+}
+
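
The two helpers above pair a slab allocation with a streaming DMA mapping, so a caller gets a CPU pointer and a bus address from one call and must hand the same cache and direction back when freeing. A minimal illustrative round trip (my_cache, the 512-byte size, and my_dma_roundtrip() are assumptions, not code from the patch):

#include <linux/dma-mapping.h>
#include <linux/slab.h>

static int my_dma_roundtrip(struct skd_device *skdev)
{
	struct kmem_cache *my_cache;
	dma_addr_t dma;
	void *buf;

	my_cache = kmem_cache_create("my-buf", 512, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_cache)
		return -ENOMEM;

	buf = skd_alloc_dma(skdev, my_cache, &dma, GFP_KERNEL | __GFP_ZERO,
			    DMA_TO_DEVICE);
	if (!buf) {
		kmem_cache_destroy(my_cache);
		return -ENOMEM;
	}

	/* ... fill buf, hand 'dma' to the device, wait for completion ... */

	skd_free_dma(skdev, my_cache, buf, dma, DMA_TO_DEVICE);
	kmem_cache_destroy(my_cache);
	return 0;
}
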
static int skd_cons_skcomp(struct skd_device *skdev)
{
int rc = 0;
struct fit_completion_entry_v1 *skcomp;
- u32 nbytes;
- nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
- nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
+ dev_dbg(&skdev->pdev->dev,
+ "comp pci_alloc, total bytes %zd entries %d\n",
+ SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
- pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
- skdev->name, __func__, __LINE__,
- nbytes, SKD_N_COMPLETION_ENTRY);
-
- skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
+ skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
&skdev->cq_dma_address);
if (skcomp == NULL) {
@@ -4000,14 +2652,14 @@ static int skd_cons_skmsg(struct skd_device *skdev)
int rc = 0;
u32 i;
- pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_fitmsg_context),
- skdev->num_fitmsg_context,
- sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
+ dev_dbg(&skdev->pdev->dev,
+ "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
+ sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
- skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
- *skdev->num_fitmsg_context, GFP_KERNEL);
+ skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
+ sizeof(struct skd_fitmsg_context),
+ GFP_KERNEL);
if (skdev->skmsg_table == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -4020,9 +2672,8 @@ static int skd_cons_skmsg(struct skd_device *skdev)
skmsg->id = i + SKD_ID_FIT_MSG;
- skmsg->state = SKD_MSG_STATE_IDLE;
skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
- SKD_N_FITMSG_BYTES + 64,
+ SKD_N_FITMSG_BYTES,
&skmsg->mb_dma_address);
if (skmsg->msg_buf == NULL) {
@@ -4030,22 +2681,13 @@ static int skd_cons_skmsg(struct skd_device *skdev)
goto err_out;
}
- skmsg->offset = (u32)((u64)skmsg->msg_buf &
- (~FIT_QCMD_BASE_ADDRESS_MASK));
- skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
- FIT_QCMD_BASE_ADDRESS_MASK);
- skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
- skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
+ WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
+ (FIT_QCMD_ALIGN - 1),
+ "not aligned: msg_buf %p mb_dma_address %#llx\n",
+ skmsg->msg_buf, skmsg->mb_dma_address);
memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
-
- skmsg->next = &skmsg[1];
}
- /* Free list is in order starting with the 0th entry. */
- skdev->skmsg_table[i - 1].next = NULL;
- skdev->skmsg_free_list = skdev->skmsg_table;
-
err_out:
return rc;
}
@@ -4055,18 +2697,14 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
dma_addr_t *ret_dma_addr)
{
struct fit_sg_descriptor *sg_list;
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
- sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
+ sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
+ GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
if (sg_list != NULL) {
uint64_t dma_address = *ret_dma_addr;
u32 i;
- memset(sg_list, 0, nbytes);
-
for (i = 0; i < n_sg - 1; i++) {
uint64_t ndp_off;
ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
@@ -4079,153 +2717,63 @@ static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
return sg_list;
}
-static int skd_cons_skreq(struct skd_device *skdev)
+static void skd_free_sg_list(struct skd_device *skdev,
+ struct fit_sg_descriptor *sg_list,
+ dma_addr_t dma_addr)
{
- int rc = 0;
- u32 i;
-
- pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_request_context),
- skdev->num_req_context,
- sizeof(struct skd_request_context) * skdev->num_req_context);
-
- skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
- * skdev->num_req_context, GFP_KERNEL);
- if (skdev->skreq_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
- skdev->name, __func__, __LINE__,
- skdev->sgs_per_request, sizeof(struct scatterlist),
- skdev->sgs_per_request * sizeof(struct scatterlist));
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
-
- skreq = &skdev->skreq_table[i];
-
- skreq->id = i + SKD_ID_RW_REQUEST;
- skreq->state = SKD_REQ_STATE_IDLE;
-
- skreq->sg = kzalloc(sizeof(struct scatterlist) *
- skdev->sgs_per_request, GFP_KERNEL);
- if (skreq->sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- sg_init_table(skreq->sg, skdev->sgs_per_request);
-
- skreq->sksg_list = skd_cons_sg_list(skdev,
- skdev->sgs_per_request,
- &skreq->sksg_dma_address);
-
- if (skreq->sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skreq->next = &skreq[1];
- }
-
- /* Free list is in order starting with the 0th entry. */
- skdev->skreq_table[i - 1].next = NULL;
- skdev->skreq_free_list = skdev->skreq_table;
+ if (WARN_ON_ONCE(!sg_list))
+ return;
-err_out:
- return rc;
+ skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
+ DMA_TO_DEVICE);
}
-static int skd_cons_skspcl(struct skd_device *skdev)
+static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx, unsigned int numa_node)
{
- int rc = 0;
- u32 i, nbytes;
-
- pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
- skdev->name, __func__, __LINE__,
- sizeof(struct skd_special_context),
- skdev->n_special,
- sizeof(struct skd_special_context) * skdev->n_special);
-
- skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
- * skdev->n_special, GFP_KERNEL);
- if (skdev->skspcl_table == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
+ struct skd_device *skdev = set->driver_data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->skspcl_table[i];
-
- skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
- skspcl->req.state = SKD_REQ_STATE_IDLE;
-
- skspcl->req.next = &skspcl[1].req;
-
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
-
- skspcl->msg_buf =
- pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
- if (skspcl->msg_buf == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
- SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
- if (skspcl->req.sg == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
-
- skspcl->req.sksg_list = skd_cons_sg_list(skdev,
- SKD_N_SG_PER_SPECIAL,
- &skspcl->req.
- sksg_dma_address);
- if (skspcl->req.sksg_list == NULL) {
- rc = -ENOMEM;
- goto err_out;
- }
- }
+ skreq->state = SKD_REQ_STATE_IDLE;
+ skreq->sg = (void *)(skreq + 1);
+ sg_init_table(skreq->sg, skd_sgs_per_request);
+ skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
+ &skreq->sksg_dma_address);
- /* Free list is in order starting with the 0th entry. */
- skdev->skspcl_table[i - 1].req.next = NULL;
- skdev->skspcl_free_list = skdev->skspcl_table;
+ return skreq->sksg_list ? 0 : -ENOMEM;
+}
- return rc;
+static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
+ unsigned int hctx_idx)
+{
+ struct skd_device *skdev = set->driver_data;
+ struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
-err_out:
- return rc;
+ skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
}
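
With blk-mq the per-request driver context lives in a PDU that the block layer allocates together with each struct request; cmd_size (set up in skd_cons_disk() below) reserves room for the struct skd_request_context followed immediately by its scatterlist, which is why skd_init_request() can point skreq->sg at (skreq + 1). A sketch of that layout (my_pdu_sg() is illustrative, not part of the patch):

#include <linux/blk-mq.h>
#include <linux/scatterlist.h>

/*
 * Per-tag allocation when cmd_size = sizeof(struct skd_request_context) +
 * nr_sgs * sizeof(struct scatterlist):
 *
 *   [ struct request | struct skd_request_context | scatterlist[nr_sgs] ]
 */
static struct scatterlist *my_pdu_sg(struct request *rq)
{
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);

	/* The scatterlist begins right after the driver PDU. */
	return (struct scatterlist *)(skreq + 1);
}
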
static int skd_cons_sksb(struct skd_device *skdev)
{
int rc = 0;
struct skd_special_context *skspcl;
- u32 nbytes;
skspcl = &skdev->internal_skspcl;
skspcl->req.id = 0 + SKD_ID_INTERNAL;
skspcl->req.state = SKD_REQ_STATE_IDLE;
- nbytes = SKD_N_INTERNAL_BYTES;
-
- skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->db_dma_address);
+ skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
+ &skspcl->db_dma_address,
+ GFP_DMA | __GFP_ZERO,
+ DMA_BIDIRECTIONAL);
if (skspcl->data_buf == NULL) {
rc = -ENOMEM;
goto err_out;
}
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
- &skspcl->mb_dma_address);
+ skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
+ &skspcl->mb_dma_address,
+ GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
if (skspcl->msg_buf == NULL) {
rc = -ENOMEM;
goto err_out;
@@ -4247,6 +2795,14 @@ err_out:
return rc;
}
+static const struct blk_mq_ops skd_mq_ops = {
+ .queue_rq = skd_mq_queue_rq,
+ .complete = skd_complete_rq,
+ .timeout = skd_timed_out,
+ .init_request = skd_init_request,
+ .exit_request = skd_exit_request,
+};
+
static int skd_cons_disk(struct skd_device *skdev)
{
int rc = 0;
@@ -4268,31 +2824,46 @@ static int skd_cons_disk(struct skd_device *skdev)
disk->fops = &skd_blockdev_ops;
disk->private_data = skdev;
- q = blk_init_queue(skd_request_fn, &skdev->lock);
- if (!q) {
- rc = -ENOMEM;
+ memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
+ skdev->tag_set.ops = &skd_mq_ops;
+ skdev->tag_set.nr_hw_queues = 1;
+ skdev->tag_set.queue_depth = skd_max_queue_depth;
+ skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
+ skdev->sgs_per_request * sizeof(struct scatterlist);
+ skdev->tag_set.numa_node = NUMA_NO_NODE;
+ skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
+ BLK_MQ_F_SG_MERGE |
+ BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
+ skdev->tag_set.driver_data = skdev;
+ rc = blk_mq_alloc_tag_set(&skdev->tag_set);
+ if (rc)
+ goto err_out;
+ q = blk_mq_init_queue(&skdev->tag_set);
+ if (IS_ERR(q)) {
+ blk_mq_free_tag_set(&skdev->tag_set);
+ rc = PTR_ERR(q);
goto err_out;
}
- blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+ q->queuedata = skdev;
skdev->queue = q;
disk->queue = q;
- q->queuedata = skdev;
blk_queue_write_cache(q, true, true);
blk_queue_max_segments(q, skdev->sgs_per_request);
blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
- /* set sysfs ptimal_io_size to 8K */
+ /* set optimal I/O size to 8KB */
blk_queue_io_opt(q, 8192);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+ blk_queue_rq_timeout(q, 8 * HZ);
+
spin_lock_irqsave(&skdev->lock, flags);
- pr_debug("%s:%s:%d stopping %s queue\n",
- skdev->name, __func__, __LINE__, skdev->name);
- blk_stop_queue(skdev->queue);
+ dev_dbg(&skdev->pdev->dev, "stopping queue\n");
+ blk_mq_stop_hw_queues(skdev->queue);
spin_unlock_irqrestore(&skdev->lock, flags);
err_out:
@@ -4306,13 +2877,13 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
{
struct skd_device *skdev;
int blk_major = skd_major;
+ size_t size;
int rc;
skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
if (!skdev) {
- pr_err(PFX "(%s): memory alloc failure\n",
- pci_name(pdev));
+ dev_err(&pdev->dev, "memory alloc failure\n");
return NULL;
}
@@ -4320,60 +2891,71 @@ static struct skd_device *skd_construct(struct pci_dev *pdev)
skdev->pdev = pdev;
skdev->devno = skd_next_devno++;
skdev->major = blk_major;
- sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
skdev->dev_max_queue_depth = 0;
skdev->num_req_context = skd_max_queue_depth;
skdev->num_fitmsg_context = skd_max_queue_depth;
- skdev->n_special = skd_max_pass_thru;
skdev->cur_max_queue_depth = 1;
skdev->queue_low_water_mark = 1;
skdev->proto_ver = 99;
skdev->sgs_per_request = skd_sgs_per_request;
skdev->dbg_level = skd_dbg_level;
- atomic_set(&skdev->device_count, 0);
-
spin_lock_init(&skdev->lock);
+ INIT_WORK(&skdev->start_queue, skd_start_queue);
INIT_WORK(&skdev->completion_worker, skd_completion_worker);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skcomp(skdev);
- if (rc < 0)
+ size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
+ skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->msgbuf_cache)
goto err_out;
-
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skmsg(skdev);
- if (rc < 0)
+ WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
+ "skd-msgbuf: %d < %zd\n",
+ kmem_cache_size(skdev->msgbuf_cache), size);
+ size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
+ skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->sglist_cache)
+ goto err_out;
+ WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
+ "skd-sglist: %d < %zd\n",
+ kmem_cache_size(skdev->sglist_cache), size);
+ size = SKD_N_INTERNAL_BYTES;
+ skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!skdev->databuf_cache)
goto err_out;
+ WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
+ "skd-databuf: %d < %zd\n",
+ kmem_cache_size(skdev->databuf_cache), size);
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skreq(skdev);
+ dev_dbg(&skdev->pdev->dev, "skcomp\n");
+ rc = skd_cons_skcomp(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- rc = skd_cons_skspcl(skdev);
+ dev_dbg(&skdev->pdev->dev, "skmsg\n");
+ rc = skd_cons_skmsg(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "sksb\n");
rc = skd_cons_sksb(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "disk\n");
rc = skd_cons_disk(skdev);
if (rc < 0)
goto err_out;
- pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "VICTORY\n");
return skdev;
err_out:
- pr_debug("%s:%s:%d construct failed\n",
- skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "construct failed\n");
skd_destruct(skdev);
return NULL;
}
@@ -4386,14 +2968,9 @@ err_out:
static void skd_free_skcomp(struct skd_device *skdev)
{
- if (skdev->skcomp_table != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(skdev->skcomp_table[0]) *
- SKD_N_COMPLETION_ENTRY;
- pci_free_consistent(skdev->pdev, nbytes,
+ if (skdev->skcomp_table)
+ pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
skdev->skcomp_table, skdev->cq_dma_address);
- }
skdev->skcomp_table = NULL;
skdev->cq_dma_address = 0;
@@ -4412,8 +2989,6 @@ static void skd_free_skmsg(struct skd_device *skdev)
skmsg = &skdev->skmsg_table[i];
if (skmsg->msg_buf != NULL) {
- skmsg->msg_buf += skmsg->offset;
- skmsg->mb_dma_address += skmsg->offset;
pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
skmsg->msg_buf,
skmsg->mb_dma_address);
@@ -4426,109 +3001,23 @@ static void skd_free_skmsg(struct skd_device *skdev)
skdev->skmsg_table = NULL;
}
-static void skd_free_sg_list(struct skd_device *skdev,
- struct fit_sg_descriptor *sg_list,
- u32 n_sg, dma_addr_t dma_addr)
-{
- if (sg_list != NULL) {
- u32 nbytes;
-
- nbytes = sizeof(*sg_list) * n_sg;
-
- pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
- }
-}
-
-static void skd_free_skreq(struct skd_device *skdev)
-{
- u32 i;
-
- if (skdev->skreq_table == NULL)
- return;
-
- for (i = 0; i < skdev->num_req_context; i++) {
- struct skd_request_context *skreq;
-
- skreq = &skdev->skreq_table[i];
-
- skd_free_sg_list(skdev, skreq->sksg_list,
- skdev->sgs_per_request,
- skreq->sksg_dma_address);
-
- skreq->sksg_list = NULL;
- skreq->sksg_dma_address = 0;
-
- kfree(skreq->sg);
- }
-
- kfree(skdev->skreq_table);
- skdev->skreq_table = NULL;
-}
-
-static void skd_free_skspcl(struct skd_device *skdev)
-{
- u32 i;
- u32 nbytes;
-
- if (skdev->skspcl_table == NULL)
- return;
-
- for (i = 0; i < skdev->n_special; i++) {
- struct skd_special_context *skspcl;
-
- skspcl = &skdev->skspcl_table[i];
-
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf,
- skspcl->mb_dma_address);
- }
-
- skspcl->msg_buf = NULL;
- skspcl->mb_dma_address = 0;
-
- skd_free_sg_list(skdev, skspcl->req.sksg_list,
- SKD_N_SG_PER_SPECIAL,
- skspcl->req.sksg_dma_address);
-
- skspcl->req.sksg_list = NULL;
- skspcl->req.sksg_dma_address = 0;
-
- kfree(skspcl->req.sg);
- }
-
- kfree(skdev->skspcl_table);
- skdev->skspcl_table = NULL;
-}
-
static void skd_free_sksb(struct skd_device *skdev)
{
- struct skd_special_context *skspcl;
- u32 nbytes;
-
- skspcl = &skdev->internal_skspcl;
-
- if (skspcl->data_buf != NULL) {
- nbytes = SKD_N_INTERNAL_BYTES;
+ struct skd_special_context *skspcl = &skdev->internal_skspcl;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->data_buf, skspcl->db_dma_address);
- }
+ skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
+ skspcl->db_dma_address, DMA_BIDIRECTIONAL);
skspcl->data_buf = NULL;
skspcl->db_dma_address = 0;
- if (skspcl->msg_buf != NULL) {
- nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
- pci_free_consistent(skdev->pdev, nbytes,
- skspcl->msg_buf, skspcl->mb_dma_address);
- }
+ skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
+ skspcl->mb_dma_address, DMA_TO_DEVICE);
skspcl->msg_buf = NULL;
skspcl->mb_dma_address = 0;
- skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
+ skd_free_sg_list(skdev, skspcl->req.sksg_list,
skspcl->req.sksg_dma_address);
skspcl->req.sksg_list = NULL;
@@ -4539,15 +3028,20 @@ static void skd_free_disk(struct skd_device *skdev)
{
struct gendisk *disk = skdev->disk;
- if (disk != NULL) {
- struct request_queue *q = disk->queue;
+ if (disk && (disk->flags & GENHD_FL_UP))
+ del_gendisk(disk);
- if (disk->flags & GENHD_FL_UP)
- del_gendisk(disk);
- if (q)
- blk_cleanup_queue(q);
- put_disk(disk);
+ if (skdev->queue) {
+ blk_cleanup_queue(skdev->queue);
+ skdev->queue = NULL;
+ if (disk)
+ disk->queue = NULL;
}
+
+ if (skdev->tag_set.tags)
+ blk_mq_free_tag_set(&skdev->tag_set);
+
+ put_disk(disk);
skdev->disk = NULL;
}
@@ -4556,26 +3050,25 @@ static void skd_destruct(struct skd_device *skdev)
if (skdev == NULL)
return;
+ cancel_work_sync(&skdev->start_queue);
- pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "disk\n");
skd_free_disk(skdev);
- pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "sksb\n");
skd_free_sksb(skdev);
- pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
- skd_free_skspcl(skdev);
-
- pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
- skd_free_skreq(skdev);
-
- pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "skmsg\n");
skd_free_skmsg(skdev);
- pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "skcomp\n");
skd_free_skcomp(skdev);
- pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
+ kmem_cache_destroy(skdev->databuf_cache);
+ kmem_cache_destroy(skdev->sglist_cache);
+ kmem_cache_destroy(skdev->msgbuf_cache);
+
+ dev_dbg(&skdev->pdev->dev, "skdev\n");
kfree(skdev);
}
@@ -4592,9 +3085,8 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
skdev = bdev->bd_disk->private_data;
- pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
- skdev->name, __func__, __LINE__,
- bdev->bd_disk->disk_name, current->comm);
+ dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
+ bdev->bd_disk->disk_name, current->comm);
if (skdev->read_cap_is_valid) {
capacity = get_capacity(skdev->disk);
@@ -4609,18 +3101,16 @@ static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
- pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
+ dev_dbg(&skdev->pdev->dev, "add_disk\n");
device_add_disk(parent, skdev->disk);
return 0;
}
static const struct block_device_operations skd_blockdev_ops = {
.owner = THIS_MODULE,
- .ioctl = skd_bdev_ioctl,
.getgeo = skd_bdev_getgeo,
};
-
/*
*****************************************************************************
* PCIe DRIVER GLUE
@@ -4671,10 +3161,8 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
char pci_str[32];
struct skd_device *skdev;
- pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
- DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
- pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
- pci_name(pdev), pdev->vendor, pdev->device);
+ dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
+ pdev->device);
rc = pci_enable_device(pdev);
if (rc)
@@ -4685,16 +3173,13 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "consistent DMA mask error %d\n",
+ rc);
}
} else {
- (rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)));
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
-
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
}
@@ -4714,19 +3199,17 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
skd_pci_info(skdev, pci_str);
- pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
+ dev_info(&pdev->dev, "%s 64bit\n", pci_str);
pci_set_master(pdev);
rc = pci_enable_pcie_error_reporting(pdev);
if (rc) {
- pr_err(
- "(%s): bad enable of PCIe error reporting rc=%d\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev,
+ "bad enable of PCIe error reporting rc=%d\n", rc);
skdev->pcie_error_reporting_is_enabled = 0;
} else
skdev->pcie_error_reporting_is_enabled = 1;
-
pci_set_drvdata(pdev, skdev);
for (i = 0; i < SKD_MAX_BARS; i++) {
@@ -4735,21 +3218,19 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
skdev->mem_size[i]);
if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
+ dev_err(&pdev->dev,
+ "Unable to map adapter memory!\n");
rc = -ENODEV;
goto err_out_iounmap;
}
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
+ skdev->mem_size[i]);
}
rc = skd_acquire_irq(skdev);
if (rc) {
- pr_err("(%s): interrupt resource error %d\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
goto err_out_iounmap;
}
@@ -4771,29 +3252,14 @@ static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
/* we timed out, something is wrong with the device,
don't add the disk structure */
- pr_err(
- "(%s): error: waiting for s1120 timed out %d!\n",
- skd_name(skdev), rc);
+ dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
+ rc);
/* in case of no error; we timeout with ENXIO */
if (!rc)
rc = -ENXIO;
goto err_out_timer;
}
-
-#ifdef SKD_VMK_POLL_HANDLER
- if (skdev->irq_type == SKD_IRQ_MSIX) {
- /* MSIX completion handler is being used for coredump */
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->msix_entries[5].vector,
- skd_comp_q, skdev);
- } else {
- vmklnx_scsi_register_poll_handler(skdev->scsi_host,
- skdev->pdev->irq, skd_isr,
- skdev);
- }
-#endif /* SKD_VMK_POLL_HANDLER */
-
return rc;
err_out_timer:
@@ -4826,7 +3292,7 @@ static void skd_pci_remove(struct pci_dev *pdev)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return;
}
skd_stop_device(skdev);
@@ -4834,7 +3300,7 @@ static void skd_pci_remove(struct pci_dev *pdev)
for (i = 0; i < SKD_MAX_BARS; i++)
if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
+ iounmap(skdev->mem_map[i]);
if (skdev->pcie_error_reporting_is_enabled)
pci_disable_pcie_error_reporting(pdev);
@@ -4855,7 +3321,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return -EIO;
}
@@ -4865,7 +3331,7 @@ static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
for (i = 0; i < SKD_MAX_BARS; i++)
if (skdev->mem_map[i])
- iounmap((u32 *)skdev->mem_map[i]);
+ iounmap(skdev->mem_map[i]);
if (skdev->pcie_error_reporting_is_enabled)
pci_disable_pcie_error_reporting(pdev);
@@ -4885,7 +3351,7 @@ static int skd_pci_resume(struct pci_dev *pdev)
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return -1;
}
@@ -4903,15 +3369,14 @@ static int skd_pci_resume(struct pci_dev *pdev)
if (!rc) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- pr_err("(%s): consistent DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "consistent DMA mask error %d\n",
+ rc);
}
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
- pr_err("(%s): DMA mask error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "DMA mask error %d\n", rc);
goto err_out_regions;
}
}
@@ -4919,8 +3384,8 @@ static int skd_pci_resume(struct pci_dev *pdev)
pci_set_master(pdev);
rc = pci_enable_pcie_error_reporting(pdev);
if (rc) {
- pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
- skdev->name, rc);
+ dev_err(&pdev->dev,
+ "bad enable of PCIe error reporting rc=%d\n", rc);
skdev->pcie_error_reporting_is_enabled = 0;
} else
skdev->pcie_error_reporting_is_enabled = 1;
@@ -4932,21 +3397,17 @@ static int skd_pci_resume(struct pci_dev *pdev)
skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
skdev->mem_size[i]);
if (!skdev->mem_map[i]) {
- pr_err("(%s): Unable to map adapter memory!\n",
- skd_name(skdev));
+ dev_err(&pdev->dev, "Unable to map adapter memory!\n");
rc = -ENODEV;
goto err_out_iounmap;
}
- pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->mem_map[i],
- (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
+ dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
+ skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
+ skdev->mem_size[i]);
}
rc = skd_acquire_irq(skdev);
if (rc) {
-
- pr_err("(%s): interrupt resource error %d\n",
- pci_name(pdev), rc);
+ dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
goto err_out_iounmap;
}
@@ -4984,15 +3445,15 @@ static void skd_pci_shutdown(struct pci_dev *pdev)
{
struct skd_device *skdev;
- pr_err("skd_pci_shutdown called\n");
+ dev_err(&pdev->dev, "%s called\n", __func__);
skdev = pci_get_drvdata(pdev);
if (!skdev) {
- pr_err("%s: no device data for PCI\n", pci_name(pdev));
+ dev_err(&pdev->dev, "no device data for PCI\n");
return;
}
- pr_err("%s: calling stop\n", skd_name(skdev));
+ dev_err(&pdev->dev, "calling stop\n");
skd_stop_device(skdev);
}
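
The hunks above convert the remaining pr_err()/pr_debug() calls to the dev_* logging helpers, which pick up the driver and device name automatically. A minimal sketch of the pattern (the function and messages are illustrative, not part of the driver):

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Illustrative only: any code holding a struct pci_dev can log this way. */
static int example_report(struct pci_dev *pdev)
{
	int rc = -ENODEV;

	/*
	 * dev_err()/dev_dbg() prefix the message with the driver and
	 * device name, so the old "(%s): ..." plus pci_name()/skd_name()
	 * boilerplate is no longer needed.
	 */
	dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
	dev_dbg(&pdev->dev, "%s reached\n", __func__);

	return rc;
}
```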
@@ -5012,21 +3473,6 @@ static struct pci_driver skd_driver = {
*****************************************************************************
*/
-static const char *skd_name(struct skd_device *skdev)
-{
- memset(skdev->id_str, 0, sizeof(skdev->id_str));
-
- if (skdev->inquiry_is_valid)
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
- skdev->name, skdev->inq_serial_num,
- pci_name(skdev->pdev));
- else
- snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
- skdev->name, pci_name(skdev->pdev));
-
- return skdev->id_str;
-}
-
const char *skd_drive_state_to_str(int state)
{
switch (state) {
@@ -5078,8 +3524,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
return "PAUSING";
case SKD_DRVR_STATE_PAUSED:
return "PAUSED";
- case SKD_DRVR_STATE_DRAINING_TIMEOUT:
- return "DRAINING_TIMEOUT";
case SKD_DRVR_STATE_RESTARTING:
return "RESTARTING";
case SKD_DRVR_STATE_RESUMING:
@@ -5106,18 +3550,6 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
-{
- switch (state) {
- case SKD_MSG_STATE_IDLE:
- return "IDLE";
- case SKD_MSG_STATE_BUSY:
- return "BUSY";
- default:
- return "???";
- }
-}
-
static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
@@ -5131,8 +3563,6 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state)
return "COMPLETED";
case SKD_REQ_STATE_TIMEOUT:
return "TIMEOUT";
- case SKD_REQ_STATE_ABORTED:
- return "ABORTED";
default:
return "???";
}
@@ -5140,58 +3570,34 @@ static const char *skd_skreq_state_to_str(enum skd_req_state state)
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
- pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skdev, event);
- pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
- skdev->name, __func__, __LINE__,
- skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
- skd_skdev_state_to_str(skdev->state), skdev->state);
- pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->in_flight, skdev->cur_max_queue_depth,
- skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
- pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
- skdev->name, __func__, __LINE__,
- skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
-}
-
-static void skd_log_skmsg(struct skd_device *skdev,
- struct skd_fitmsg_context *skmsg, const char *event)
-{
- pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
- skdev->name, __func__, __LINE__,
- skd_skmsg_state_to_str(skmsg->state), skmsg->state,
- skmsg->id, skmsg->length);
+ dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
+ dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
+ skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
+ skd_skdev_state_to_str(skdev->state), skdev->state);
+ dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
+ skd_in_flight(skdev), skdev->cur_max_queue_depth,
+ skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
+ dev_dbg(&skdev->pdev->dev, " cycle=%d cycle_ix=%d\n",
+ skdev->skcomp_cycle, skdev->skcomp_ix);
}
static void skd_log_skreq(struct skd_device *skdev,
struct skd_request_context *skreq, const char *event)
{
- pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
- skdev->name, __func__, __LINE__, skdev->name, skreq, event);
- pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
- skdev->name, __func__, __LINE__,
- skd_skreq_state_to_str(skreq->state), skreq->state,
- skreq->id, skreq->fitmsg_id);
- pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
- skdev->name, __func__, __LINE__,
- skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
-
- if (skreq->req != NULL) {
- struct request *req = skreq->req;
- u32 lba = (u32)blk_rq_pos(req);
- u32 count = blk_rq_sectors(req);
-
- pr_debug("%s:%s:%d "
- "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
- skdev->name, __func__, __LINE__,
- req, lba, lba, count, count,
- (int)rq_data_dir(req));
- } else
- pr_debug("%s:%s:%d req=NULL\n",
- skdev->name, __func__, __LINE__);
+ struct request *req = blk_mq_rq_from_pdu(skreq);
+ u32 lba = blk_rq_pos(req);
+ u32 count = blk_rq_sectors(req);
+
+ dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
+ dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
+ skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
+ skreq->fitmsg_id);
+ dev_dbg(&skdev->pdev->dev, " sg_dir=%d n_sg=%d\n",
+ skreq->data_dir, skreq->n_sg);
+
+ dev_dbg(&skdev->pdev->dev,
+ "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
+ count, count, (int)rq_data_dir(req));
}
/*
@@ -5202,7 +3608,14 @@ static void skd_log_skreq(struct skd_device *skdev,
static int __init skd_init(void)
{
- pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
+ BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
+ BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
+ BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
+ BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
+ BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
+ BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
+ BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
+ BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);
switch (skd_isr_type) {
case SKD_IRQ_LEGACY:
@@ -5222,7 +3635,8 @@ static int __init skd_init(void)
skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
}
- if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
+ if (skd_max_req_per_msg < 1 ||
+ skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
@@ -5246,19 +3660,11 @@ static int __init skd_init(void)
skd_isr_comp_limit = 0;
}
- if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
- pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
- skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
- skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
- }
-
return pci_register_driver(&skd_driver);
}
static void __exit skd_exit(void)
{
- pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
-
pci_unregister_driver(&skd_driver);
if (skd_major)
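
The skd_init() hunk above replaces the version banner with compile-time layout checks. A minimal sketch of the same technique, mirroring the 16-byte command header from the header diff below; the struct and function names here are illustrative:

```c
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Illustrative wire structure; the driver checks its own FIT structures. */
struct example_wire_hdr {
	__be64 sg_list_dma_address;	/* bytes  0..7  */
	__be16 tag;			/* bytes  8..9  */
	u8     attribute;		/* byte   10    */
	u8     add_cdb_len;		/* byte   11    */
	__be32 sg_list_len_bytes;	/* bytes 12..15 */
};

static void example_check_layout(void)
{
	/* A layout mistake fails the build instead of corrupting the wire. */
	BUILD_BUG_ON(sizeof(struct example_wire_hdr) != 16);
	BUILD_BUG_ON(offsetof(struct example_wire_hdr, sg_list_len_bytes) != 12);
}
```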
diff --git a/drivers/block/skd_s1120.h b/drivers/block/skd_s1120.h
index 61c757ff0161..de35f47e953c 100644
--- a/drivers/block/skd_s1120.h
+++ b/drivers/block/skd_s1120.h
@@ -1,19 +1,15 @@
-/* Copyright 2012 STEC, Inc.
+/*
+ * Copyright 2012 STEC, Inc.
+ * Copyright (c) 2017 Western Digital Corporation or its affiliates.
*
- * This file is licensed under the terms of the 3-clause
- * BSD License (http://opensource.org/licenses/BSD-3-Clause)
- * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
- * at your option. Both licenses are also available in the LICENSE file
- * distributed with this project. This file may not be copied, modified,
- * or distributed except in accordance with those terms.
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
*/
#ifndef SKD_S1120_H
#define SKD_S1120_H
-#pragma pack(push, s1120_h, 1)
-
/*
* Q-channel, 64-bit r/w
*/
@@ -30,7 +26,7 @@
#define FIT_QCMD_MSGSIZE_128 (0x1 << 4)
#define FIT_QCMD_MSGSIZE_256 (0x2 << 4)
#define FIT_QCMD_MSGSIZE_512 (0x3 << 4)
-#define FIT_QCMD_BASE_ADDRESS_MASK (0xFFFFFFFFFFFFFFC0ull)
+#define FIT_QCMD_ALIGN L1_CACHE_BYTES
/*
* Control, 32-bit r/w
@@ -250,7 +246,7 @@ struct fit_msg_hdr {
* 20-23 of the FIT_MTD_FITFW_INIT response.
*/
struct fit_completion_entry_v1 {
- uint32_t num_returned_bytes;
+ __be32 num_returned_bytes;
uint16_t tag;
uint8_t status; /* SCSI status */
uint8_t cycle;
@@ -278,7 +274,7 @@ struct fit_comp_error_info {
uint16_t sks_low; /* 10: Sense Key Specific (LSW) */
uint16_t reserved3; /* 12: Part of additional sense bytes (unused) */
uint16_t uec; /* 14: Additional Sense Bytes */
- uint64_t per; /* 16: Additional Sense Bytes */
+ uint64_t per __packed; /* 16: Additional Sense Bytes */
uint8_t reserved4[2]; /* 1E: Additional Sense Bytes (unused) */
};
@@ -292,11 +288,11 @@ struct fit_comp_error_info {
* Version one has the last 32 bits sg_list_len_bytes;
*/
struct skd_command_header {
- uint64_t sg_list_dma_address;
+ __be64 sg_list_dma_address;
uint16_t tag;
uint8_t attribute;
uint8_t add_cdb_len; /* In 32 bit words */
- uint32_t sg_list_len_bytes;
+ __be32 sg_list_len_bytes;
};
struct skd_scsi_request {
@@ -309,22 +305,20 @@ struct driver_inquiry_data {
uint8_t peripheral_device_type:5;
uint8_t qualifier:3;
uint8_t page_code;
- uint16_t page_length;
- uint16_t pcie_bus_number;
+ __be16 page_length;
+ __be16 pcie_bus_number;
uint8_t pcie_device_number;
uint8_t pcie_function_number;
uint8_t pcie_link_speed;
uint8_t pcie_link_lanes;
- uint16_t pcie_vendor_id;
- uint16_t pcie_device_id;
- uint16_t pcie_subsystem_vendor_id;
- uint16_t pcie_subsystem_device_id;
+ __be16 pcie_vendor_id;
+ __be16 pcie_device_id;
+ __be16 pcie_subsystem_vendor_id;
+ __be16 pcie_subsystem_device_id;
uint8_t reserved1[2];
uint8_t reserved2[3];
uint8_t driver_version_length;
uint8_t driver_version[0x14];
};
-#pragma pack(pop, s1120_h)
-
#endif /* SKD_S1120_H */
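
The header now types the wire fields as __be16/__be32 so sparse can catch missing byte-order conversions. A hedged sketch of the intended access pattern (the struct and helper names are made up; the field names follow the completion entry above):

```c
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_completion {
	__be32 num_returned_bytes;	/* big-endian on the wire */
	u16    tag;			/* native-endian fields unchanged */
	u8     status;
	u8     cycle;
};

static u32 example_returned_bytes(const struct example_completion *c)
{
	/*
	 * With the __be32 annotation, sparse warns if this conversion
	 * is forgotten or applied twice.
	 */
	return be32_to_cpu(c->num_returned_bytes);
}
```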
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 1498b899a593..34e17ee799be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -265,7 +265,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
+ if (blk_rq_is_scsi(req))
err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
else
err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
@@ -381,6 +381,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
struct request_queue *q = vblk->disk->queue;
char cap_str_2[10], cap_str_10[10];
char *envp[] = { "RESIZE=1", NULL };
+ unsigned long long nblocks;
u64 capacity;
/* Host must always specify the capacity. */
@@ -393,16 +394,19 @@ static void virtblk_config_changed_work(struct work_struct *work)
capacity = (sector_t)-1;
}
- string_get_size(capacity, queue_logical_block_size(q),
+ nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
+
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
- string_get_size(capacity, queue_logical_block_size(q),
+ string_get_size(nblocks, queue_logical_block_size(q),
STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
dev_notice(&vdev->dev,
- "new size: %llu %d-byte logical blocks (%s/%s)\n",
- (unsigned long long)capacity,
- queue_logical_block_size(q),
- cap_str_10, cap_str_2);
+ "new size: %llu %d-byte logical blocks (%s/%s)\n",
+ nblocks,
+ queue_logical_block_size(q),
+ cap_str_10,
+ cap_str_2);
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
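
string_get_size() multiplies its two arguments, so the resize handler above now passes a logical-block count plus the block size instead of a raw sector count. A small sketch of the same computation, assuming capacity arrives in 512-byte sectors as it does in the virtio config space:

```c
#include <linux/kernel.h>
#include <linux/string_helpers.h>

static void example_report_capacity(u64 capacity_sectors, unsigned int lbs)
{
	char cap_str_2[10], cap_str_10[10];
	u64 nblocks = DIV_ROUND_UP_ULL(capacity_sectors, lbs >> 9);

	/* string_get_size() multiplies block count by block size internally. */
	string_get_size(nblocks, lbs, STRING_UNITS_2,
			cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, lbs, STRING_UNITS_10,
			cap_str_10, sizeof(cap_str_10));

	pr_info("capacity: %llu %u-byte logical blocks (%s/%s)\n",
		(unsigned long long)nblocks, lbs, cap_str_10, cap_str_2);
}
```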
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index fe7cd58c43d0..987d665e82de 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -705,9 +705,9 @@ static unsigned int xen_blkbk_unmap_prepare(
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
invcount++;
- }
+ }
- return invcount;
+ return invcount;
}
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
@@ -1251,6 +1251,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
break;
case BLKIF_OP_WRITE_BARRIER:
drain = true;
+ /* fall through */
case BLKIF_OP_FLUSH_DISKCACHE:
ring->st_f_req++;
operation = REQ_OP_WRITE;
@@ -1362,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number;
@@ -1381,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio;
biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
+ bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op;
bio_set_op_attrs(bio, operation, operation_flags);
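
These hunks replace direct bi_bdev assignments with bio_set_dev(). A minimal submission sketch using that accessor; the helper name, end_io callback and values are illustrative only:

```c
#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_submit_write(struct block_device *bdev, struct page *page,
				 sector_t sector, bio_end_io_t *example_end_io)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);

	if (!bio)
		return;

	bio_set_dev(bio, bdev);		/* replaces bio->bi_bdev = bdev */
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = example_end_io;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	submit_bio(bio);
}
```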
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 792da683e70d..21c1be1eb226 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -244,6 +244,7 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
struct pending_req *req, *n;
unsigned int j, r;
+ bool busy = false;
for (r = 0; r < blkif->nr_rings; r++) {
struct xen_blkif_ring *ring = &blkif->rings[r];
@@ -261,8 +262,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
* don't have any discard_io or other_io requests. So, checking
* for inflight IO is enough.
*/
- if (atomic_read(&ring->inflight) > 0)
- return -EBUSY;
+ if (atomic_read(&ring->inflight) > 0) {
+ busy = true;
+ continue;
+ }
if (ring->irq) {
unbind_from_irqhandler(ring->irq, ring);
@@ -300,6 +303,9 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
ring->active = false;
}
+ if (busy)
+ return -EBUSY;
+
blkif->nr_ring_pages = 0;
/*
* blkif->rings was allocated in connect_ring, so we should free it in
@@ -810,7 +816,8 @@ static void frontend_changed(struct xenbus_device *dev,
xenbus_switch_state(dev, XenbusStateClosed);
if (xenbus_dev_is_online(dev))
break;
- /* fall through if not online */
+ /* fall through */
+ /* if not online */
case XenbusStateUnknown:
/* implies xen_blkif_disconnect() via xen_blkbk_remove() */
device_unregister(&dev->dev);
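
xen_blkif_disconnect() now records that a ring is busy and keeps cleaning up the idle ones, returning -EBUSY only at the end. A generic sketch of that deferred-error pattern with a hypothetical ring type:

```c
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical ring: only the "still has inflight IO" bit matters here. */
struct example_ring {
	bool has_inflight;
};

static int example_disconnect(struct example_ring *rings, unsigned int nr)
{
	bool busy = false;
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (rings[i].has_inflight) {
			busy = true;	/* remember it, but keep going */
			continue;
		}
		/* ... tear down this idle ring (irq, maps, requests) ... */
	}

	/* Only report busy after every idle ring has been cleaned up. */
	return busy ? -EBUSY : 0;
}
```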
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98e34e4c62b8..891265acb10e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2075,9 +2075,9 @@ static int blkfront_resume(struct xenbus_device *dev)
/*
* Get the bios in the request so we can re-queue them.
*/
- if (req_op(shadow[i].request) == REQ_OP_FLUSH ||
- req_op(shadow[i].request) == REQ_OP_DISCARD ||
- req_op(shadow[i].request) == REQ_OP_SECURE_ERASE ||
+ if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
+ req_op(shadow[j].request) == REQ_OP_DISCARD ||
+ req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
shadow[j].request->cmd_flags & REQ_FUA) {
/*
* Flush operations don't contain bios, so
@@ -2456,7 +2456,7 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateClosed:
if (dev->state == XenbusStateClosed)
break;
- /* Missed the backend's Closing state -- fallthrough */
+ /* fall through */
case XenbusStateClosing:
if (info)
blkfront_closing(info);
diff --git a/drivers/block/zram/Kconfig b/drivers/block/zram/Kconfig
index b8ecba6dcd3b..7cd4a8ec3c8f 100644
--- a/drivers/block/zram/Kconfig
+++ b/drivers/block/zram/Kconfig
@@ -13,3 +13,15 @@ config ZRAM
disks and maybe many more.
See zram.txt for more information.
+
+config ZRAM_WRITEBACK
+ bool "Write back incompressible page to backing device"
+ depends on ZRAM
+ default n
+ help
+ With an incompressible page, there is no memory saving in keeping
+ it in memory. Instead, write it out to a backing device.
+ For this feature, the admin should set up a backing device via
+ /sys/block/zramX/backing_dev.
+
+ See zram.txt for more information.
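
The help text above points the admin at the backing_dev attribute. A hedged userspace sketch of driving it (zram0 and /dev/sdb1 are placeholders); note that the store handler in the zram_drv.c diff below rejects the write once the device already has a disksize set:

```c
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/zram0/backing_dev", "w");

	if (!f) {
		perror("backing_dev");
		return 1;
	}
	/* Point zram0 at the backing block device before setting disksize. */
	fprintf(f, "/dev/sdb1\n");
	return fclose(f) ? 1 : 0;
}
```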
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 3b1b6340ba13..2981c27d3aae 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -175,20 +175,11 @@ static inline void update_used_max(struct zram *zram,
} while (old_max != cur_max);
}
-static inline void zram_fill_page(char *ptr, unsigned long len,
+static inline void zram_fill_page(void *ptr, unsigned long len,
unsigned long value)
{
- int i;
- unsigned long *page = (unsigned long *)ptr;
-
WARN_ON_ONCE(!IS_ALIGNED(len, sizeof(unsigned long)));
-
- if (likely(value == 0)) {
- memset(ptr, 0, len);
- } else {
- for (i = 0; i < len / sizeof(*page); i++)
- page[i] = value;
- }
+ memset_l(ptr, value, len / sizeof(unsigned long));
}
static bool page_same_filled(void *ptr, unsigned long *element)
@@ -270,6 +261,349 @@ static ssize_t mem_used_max_store(struct device *dev,
return len;
}
+#ifdef CONFIG_ZRAM_WRITEBACK
+static bool zram_wb_enabled(struct zram *zram)
+{
+ return zram->backing_dev;
+}
+
+static void reset_bdev(struct zram *zram)
+{
+ struct block_device *bdev;
+
+ if (!zram_wb_enabled(zram))
+ return;
+
+ bdev = zram->bdev;
+ if (zram->old_block_size)
+ set_blocksize(bdev, zram->old_block_size);
+ blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
+ /* hope filp_close flushes all of the IO */
+ filp_close(zram->backing_dev, NULL);
+ zram->backing_dev = NULL;
+ zram->old_block_size = 0;
+ zram->bdev = NULL;
+
+ kvfree(zram->bitmap);
+ zram->bitmap = NULL;
+}
+
+static ssize_t backing_dev_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zram *zram = dev_to_zram(dev);
+ struct file *file = zram->backing_dev;
+ char *p;
+ ssize_t ret;
+
+ down_read(&zram->init_lock);
+ if (!zram_wb_enabled(zram)) {
+ memcpy(buf, "none\n", 5);
+ up_read(&zram->init_lock);
+ return 5;
+ }
+
+ p = file_path(file, buf, PAGE_SIZE - 1);
+ if (IS_ERR(p)) {
+ ret = PTR_ERR(p);
+ goto out;
+ }
+
+ ret = strlen(p);
+ memmove(buf, p, ret);
+ buf[ret++] = '\n';
+out:
+ up_read(&zram->init_lock);
+ return ret;
+}
+
+static ssize_t backing_dev_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ char *file_name;
+ struct file *backing_dev = NULL;
+ struct inode *inode;
+ struct address_space *mapping;
+ unsigned int bitmap_sz, old_block_size = 0;
+ unsigned long nr_pages, *bitmap = NULL;
+ struct block_device *bdev = NULL;
+ int err;
+ struct zram *zram = dev_to_zram(dev);
+
+ file_name = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!file_name)
+ return -ENOMEM;
+
+ down_write(&zram->init_lock);
+ if (init_done(zram)) {
+ pr_info("Can't setup backing device for initialized device\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ strlcpy(file_name, buf, len);
+
+ backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ if (IS_ERR(backing_dev)) {
+ err = PTR_ERR(backing_dev);
+ backing_dev = NULL;
+ goto out;
+ }
+
+ mapping = backing_dev->f_mapping;
+ inode = mapping->host;
+
+ /* Support only block devices at the moment */
+ if (!S_ISBLK(inode->i_mode)) {
+ err = -ENOTBLK;
+ goto out;
+ }
+
+ bdev = bdgrab(I_BDEV(inode));
+ err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+ if (err < 0)
+ goto out;
+
+ nr_pages = i_size_read(inode) >> PAGE_SHIFT;
+ bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
+ bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
+ if (!bitmap) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ old_block_size = block_size(bdev);
+ err = set_blocksize(bdev, PAGE_SIZE);
+ if (err)
+ goto out;
+
+ reset_bdev(zram);
+ spin_lock_init(&zram->bitmap_lock);
+
+ zram->old_block_size = old_block_size;
+ zram->bdev = bdev;
+ zram->backing_dev = backing_dev;
+ zram->bitmap = bitmap;
+ zram->nr_pages = nr_pages;
+ up_write(&zram->init_lock);
+
+ pr_info("setup backing device %s\n", file_name);
+ kfree(file_name);
+
+ return len;
+out:
+ if (bitmap)
+ kvfree(bitmap);
+
+ if (bdev)
+ blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
+
+ if (backing_dev)
+ filp_close(backing_dev, NULL);
+
+ up_write(&zram->init_lock);
+
+ kfree(file_name);
+
+ return err;
+}
+
+static unsigned long get_entry_bdev(struct zram *zram)
+{
+ unsigned long entry;
+
+ spin_lock(&zram->bitmap_lock);
+ /* skip bit 0 so an allocated entry is never confused with handle 0 */
+ entry = find_next_zero_bit(zram->bitmap, zram->nr_pages, 1);
+ if (entry == zram->nr_pages) {
+ spin_unlock(&zram->bitmap_lock);
+ return 0;
+ }
+
+ set_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+
+ return entry;
+}
+
+static void put_entry_bdev(struct zram *zram, unsigned long entry)
+{
+ int was_set;
+
+ spin_lock(&zram->bitmap_lock);
+ was_set = test_and_clear_bit(entry, zram->bitmap);
+ spin_unlock(&zram->bitmap_lock);
+ WARN_ON_ONCE(!was_set);
+}
+
+void zram_page_end_io(struct bio *bio)
+{
+ struct page *page = bio->bi_io_vec[0].bv_page;
+
+ page_endio(page, op_is_write(bio_op(bio)),
+ blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
+}
+
+/*
+ * Returns 1 if the submission is successful.
+ */
+static int read_from_bdev_async(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent)
+{
+ struct bio *bio;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio_set_dev(bio, zram->bdev);
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len, bvec->bv_offset)) {
+ bio_put(bio);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_opf = REQ_OP_READ;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_opf = parent->bi_opf;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(bio);
+ return 1;
+}
+
+struct zram_work {
+ struct work_struct work;
+ struct zram *zram;
+ unsigned long entry;
+ struct bio *bio;
+};
+
+#if PAGE_SIZE != 4096
+static void zram_sync_read(struct work_struct *work)
+{
+ struct bio_vec bvec;
+ struct zram_work *zw = container_of(work, struct zram_work, work);
+ struct zram *zram = zw->zram;
+ unsigned long entry = zw->entry;
+ struct bio *bio = zw->bio;
+
+ read_from_bdev_async(zram, &bvec, entry, bio);
+}
+
+/*
+ * The block layer wants only one ->make_request_fn active at a time,
+ * so chaining this IO to the parent IO in the same context would
+ * deadlock. To avoid that, do the read from a worker thread context.
+ */
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ struct zram_work work;
+
+ work.zram = zram;
+ work.entry = entry;
+ work.bio = bio;
+
+ INIT_WORK_ONSTACK(&work.work, zram_sync_read);
+ queue_work(system_unbound_wq, &work.work);
+ flush_work(&work.work);
+ destroy_work_on_stack(&work.work);
+
+ return 1;
+}
+#else
+static int read_from_bdev_sync(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *bio)
+{
+ WARN_ON(1);
+ return -EIO;
+}
+#endif
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ if (sync)
+ return read_from_bdev_sync(zram, bvec, entry, parent);
+ else
+ return read_from_bdev_async(zram, bvec, entry, parent);
+}
+
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+{
+ struct bio *bio;
+ unsigned long entry;
+
+ bio = bio_alloc(GFP_ATOMIC, 1);
+ if (!bio)
+ return -ENOMEM;
+
+ entry = get_entry_bdev(zram);
+ if (!entry) {
+ bio_put(bio);
+ return -ENOSPC;
+ }
+
+ bio->bi_iter.bi_sector = entry * (PAGE_SIZE >> 9);
+ bio_set_dev(bio, zram->bdev);
+ if (!bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset)) {
+ bio_put(bio);
+ put_entry_bdev(zram, entry);
+ return -EIO;
+ }
+
+ if (!parent) {
+ bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
+ bio->bi_end_io = zram_page_end_io;
+ } else {
+ bio->bi_opf = parent->bi_opf;
+ bio_chain(bio, parent);
+ }
+
+ submit_bio(bio);
+ *pentry = entry;
+
+ return 0;
+}
+
+static void zram_wb_clear(struct zram *zram, u32 index)
+{
+ unsigned long entry;
+
+ zram_clear_flag(zram, index, ZRAM_WB);
+ entry = zram_get_element(zram, index);
+ zram_set_element(zram, index, 0);
+ put_entry_bdev(zram, entry);
+}
+
+#else
+static bool zram_wb_enabled(struct zram *zram) { return false; }
+static inline void reset_bdev(struct zram *zram) {};
+static int write_to_bdev(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *parent,
+ unsigned long *pentry)
+
+{
+ return -EIO;
+}
+
+static int read_from_bdev(struct zram *zram, struct bio_vec *bvec,
+ unsigned long entry, struct bio *parent, bool sync)
+{
+ return -EIO;
+}
+static void zram_wb_clear(struct zram *zram, u32 index) {}
+#endif
+
+
/*
* We switched to per-cpu streams and this attr is not needed anymore.
* However, we will keep it around for some time, because:
@@ -453,30 +787,6 @@ static bool zram_same_page_read(struct zram *zram, u32 index,
return false;
}
-static bool zram_same_page_write(struct zram *zram, u32 index,
- struct page *page)
-{
- unsigned long element;
- void *mem = kmap_atomic(page);
-
- if (page_same_filled(mem, &element)) {
- kunmap_atomic(mem);
- /* Free memory associated with this sector now. */
- zram_slot_lock(zram, index);
- zram_free_page(zram, index);
- zram_set_flag(zram, index, ZRAM_SAME);
- zram_set_element(zram, index, element);
- zram_slot_unlock(zram, index);
-
- atomic64_inc(&zram->stats.same_pages);
- atomic64_inc(&zram->stats.pages_stored);
- return true;
- }
- kunmap_atomic(mem);
-
- return false;
-}
-
static void zram_meta_free(struct zram *zram, u64 disksize)
{
size_t num_pages = disksize >> PAGE_SHIFT;
@@ -515,7 +825,13 @@ static bool zram_meta_alloc(struct zram *zram, u64 disksize)
*/
static void zram_free_page(struct zram *zram, size_t index)
{
- unsigned long handle = zram_get_handle(zram, index);
+ unsigned long handle;
+
+ if (zram_wb_enabled(zram) && zram_test_flag(zram, index, ZRAM_WB)) {
+ zram_wb_clear(zram, index);
+ atomic64_dec(&zram->stats.pages_stored);
+ return;
+ }
/*
* No memory is allocated for same element filled pages.
@@ -529,6 +845,7 @@ static void zram_free_page(struct zram *zram, size_t index)
return;
}
+ handle = zram_get_handle(zram, index);
if (!handle)
return;
@@ -542,13 +859,31 @@ static void zram_free_page(struct zram *zram, size_t index)
zram_set_obj_size(zram, index, 0);
}
-static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
+static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
+ struct bio *bio, bool partial_io)
{
int ret;
unsigned long handle;
unsigned int size;
void *src, *dst;
+ if (zram_wb_enabled(zram)) {
+ zram_slot_lock(zram, index);
+ if (zram_test_flag(zram, index, ZRAM_WB)) {
+ struct bio_vec bvec;
+
+ zram_slot_unlock(zram, index);
+
+ bvec.bv_page = page;
+ bvec.bv_len = PAGE_SIZE;
+ bvec.bv_offset = 0;
+ return read_from_bdev(zram, &bvec,
+ zram_get_element(zram, index),
+ bio, partial_io);
+ }
+ zram_slot_unlock(zram, index);
+ }
+
if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
return 0;
@@ -581,7 +916,7 @@ static int zram_decompress_page(struct zram *zram, struct page *page, u32 index)
}
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page;
@@ -594,7 +929,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
return -ENOMEM;
}
- ret = zram_decompress_page(zram, page, index);
+ ret = __zram_bvec_read(zram, page, index, bio, is_partial_io(bvec));
if (unlikely(ret))
goto out;
@@ -613,30 +948,57 @@ out:
return ret;
}
-static int zram_compress(struct zram *zram, struct zcomp_strm **zstrm,
- struct page *page,
- unsigned long *out_handle, unsigned int *out_comp_len)
+static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
+ u32 index, struct bio *bio)
{
- int ret;
- unsigned int comp_len;
- void *src;
+ int ret = 0;
unsigned long alloced_pages;
unsigned long handle = 0;
+ unsigned int comp_len = 0;
+ void *src, *dst, *mem;
+ struct zcomp_strm *zstrm;
+ struct page *page = bvec->bv_page;
+ unsigned long element = 0;
+ enum zram_pageflags flags = 0;
+ bool allow_wb = true;
+
+ mem = kmap_atomic(page);
+ if (page_same_filled(mem, &element)) {
+ kunmap_atomic(mem);
+ /* Free memory associated with this sector now. */
+ flags = ZRAM_SAME;
+ atomic64_inc(&zram->stats.same_pages);
+ goto out;
+ }
+ kunmap_atomic(mem);
compress_again:
+ zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
- ret = zcomp_compress(*zstrm, src, &comp_len);
+ ret = zcomp_compress(zstrm, src, &comp_len);
kunmap_atomic(src);
if (unlikely(ret)) {
+ zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
- if (handle)
- zs_free(zram->mem_pool, handle);
+ zs_free(zram->mem_pool, handle);
return ret;
}
- if (unlikely(comp_len > max_zpage_size))
+ if (unlikely(comp_len > max_zpage_size)) {
+ if (zram_wb_enabled(zram) && allow_wb) {
+ zcomp_stream_put(zram->comp);
+ ret = write_to_bdev(zram, bvec, index, bio, &element);
+ if (!ret) {
+ flags = ZRAM_WB;
+ ret = 1;
+ goto out;
+ }
+ allow_wb = false;
+ goto compress_again;
+ }
comp_len = PAGE_SIZE;
+ }
/*
* handle allocation has 2 paths:
@@ -663,7 +1025,6 @@ compress_again:
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
- *zstrm = zcomp_stream_get(zram->comp);
if (handle)
goto compress_again;
return -ENOMEM;
@@ -673,34 +1034,11 @@ compress_again:
update_used_max(zram, alloced_pages);
if (zram->limit_pages && alloced_pages > zram->limit_pages) {
+ zcomp_stream_put(zram->comp);
zs_free(zram->mem_pool, handle);
return -ENOMEM;
}
- *out_handle = handle;
- *out_comp_len = comp_len;
- return 0;
-}
-
-static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
-{
- int ret;
- unsigned long handle;
- unsigned int comp_len;
- void *src, *dst;
- struct zcomp_strm *zstrm;
- struct page *page = bvec->bv_page;
-
- if (zram_same_page_write(zram, index, page))
- return 0;
-
- zstrm = zcomp_stream_get(zram->comp);
- ret = zram_compress(zram, &zstrm, page, &handle, &comp_len);
- if (ret) {
- zcomp_stream_put(zram->comp);
- return ret;
- }
-
dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);
src = zstrm->buffer;
@@ -712,25 +1050,31 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index)
zcomp_stream_put(zram->comp);
zs_unmap_object(zram->mem_pool, handle);
-
+ atomic64_add(comp_len, &zram->stats.compr_data_size);
+out:
/*
* Free memory associated with this sector
* before overwriting unused sectors.
*/
zram_slot_lock(zram, index);
zram_free_page(zram, index);
- zram_set_handle(zram, index, handle);
- zram_set_obj_size(zram, index, comp_len);
+
+ if (flags) {
+ zram_set_flag(zram, index, flags);
+ zram_set_element(zram, index, element);
+ } else {
+ zram_set_handle(zram, index, handle);
+ zram_set_obj_size(zram, index, comp_len);
+ }
zram_slot_unlock(zram, index);
/* Update stats */
- atomic64_add(comp_len, &zram->stats.compr_data_size);
atomic64_inc(&zram->stats.pages_stored);
- return 0;
+ return ret;
}
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
- u32 index, int offset)
+ u32 index, int offset, struct bio *bio)
{
int ret;
struct page *page = NULL;
@@ -748,7 +1092,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (!page)
return -ENOMEM;
- ret = zram_decompress_page(zram, page, index);
+ ret = __zram_bvec_read(zram, page, index, bio, true);
if (ret)
goto out;
@@ -763,7 +1107,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
vec.bv_offset = 0;
}
- ret = __zram_bvec_write(zram, &vec, index);
+ ret = __zram_bvec_write(zram, &vec, index, bio);
out:
if (is_partial_io(bvec))
__free_page(page);
@@ -808,28 +1152,34 @@ static void zram_bio_discard(struct zram *zram, u32 index,
}
}
+/*
+ * Returns a negative errno on error. Otherwise returns 0 or 1:
+ * 0 if the IO request was completed synchronously,
+ * 1 if the IO request was successfully submitted (asynchronously).
+ */
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
- int offset, bool is_write)
+ int offset, bool is_write, struct bio *bio)
{
unsigned long start_time = jiffies;
int rw_acct = is_write ? REQ_OP_WRITE : REQ_OP_READ;
+ struct request_queue *q = zram->disk->queue;
int ret;
- generic_start_io_acct(rw_acct, bvec->bv_len >> SECTOR_SHIFT,
+ generic_start_io_acct(q, rw_acct, bvec->bv_len >> SECTOR_SHIFT,
&zram->disk->part0);
if (!is_write) {
atomic64_inc(&zram->stats.num_reads);
- ret = zram_bvec_read(zram, bvec, index, offset);
+ ret = zram_bvec_read(zram, bvec, index, offset, bio);
flush_dcache_page(bvec->bv_page);
} else {
atomic64_inc(&zram->stats.num_writes);
- ret = zram_bvec_write(zram, bvec, index, offset);
+ ret = zram_bvec_write(zram, bvec, index, offset, bio);
}
- generic_end_io_acct(rw_acct, &zram->disk->part0, start_time);
+ generic_end_io_acct(q, rw_acct, &zram->disk->part0, start_time);
- if (unlikely(ret)) {
+ if (unlikely(ret < 0)) {
if (!is_write)
atomic64_inc(&zram->stats.failed_reads);
else
@@ -868,7 +1218,7 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
bv.bv_len = min_t(unsigned int, PAGE_SIZE - offset,
unwritten);
if (zram_bvec_rw(zram, &bv, index, offset,
- op_is_write(bio_op(bio))) < 0)
+ op_is_write(bio_op(bio)), bio) < 0)
goto out;
bv.bv_offset += bv.bv_len;
@@ -922,16 +1272,18 @@ static void zram_slot_free_notify(struct block_device *bdev,
static int zram_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, bool is_write)
{
- int offset, err = -EIO;
+ int offset, ret;
u32 index;
struct zram *zram;
struct bio_vec bv;
+ if (PageTransHuge(page))
+ return -ENOTSUPP;
zram = bdev->bd_disk->private_data;
if (!valid_io_request(zram, sector, PAGE_SIZE)) {
atomic64_inc(&zram->stats.invalid_io);
- err = -EINVAL;
+ ret = -EINVAL;
goto out;
}
@@ -942,7 +1294,7 @@ static int zram_rw_page(struct block_device *bdev, sector_t sector,
bv.bv_len = PAGE_SIZE;
bv.bv_offset = 0;
- err = zram_bvec_rw(zram, &bv, index, offset, is_write);
+ ret = zram_bvec_rw(zram, &bv, index, offset, is_write, NULL);
out:
/*
* If I/O fails, just return error(ie, non-zero) without
@@ -952,9 +1304,20 @@ out:
* bio->bi_end_io does things to handle the error
* (e.g., SetPageError, set_page_dirty and extra works).
*/
- if (err == 0)
+ if (unlikely(ret < 0))
+ return ret;
+
+ switch (ret) {
+ case 0:
page_endio(page, is_write, 0);
- return err;
+ break;
+ case 1:
+ ret = 0;
+ break;
+ default:
+ WARN_ON(1);
+ }
+ return ret;
}
static void zram_reset_device(struct zram *zram)
@@ -983,6 +1346,7 @@ static void zram_reset_device(struct zram *zram)
zram_meta_free(zram, disksize);
memset(&zram->stats, 0, sizeof(zram->stats));
zcomp_destroy(comp);
+ reset_bdev(zram);
}
static ssize_t disksize_store(struct device *dev,
@@ -1108,6 +1472,9 @@ static DEVICE_ATTR_WO(mem_limit);
static DEVICE_ATTR_WO(mem_used_max);
static DEVICE_ATTR_RW(max_comp_streams);
static DEVICE_ATTR_RW(comp_algorithm);
+#ifdef CONFIG_ZRAM_WRITEBACK
+static DEVICE_ATTR_RW(backing_dev);
+#endif
static struct attribute *zram_disk_attrs[] = {
&dev_attr_disksize.attr,
@@ -1118,6 +1485,9 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_mem_used_max.attr,
&dev_attr_max_comp_streams.attr,
&dev_attr_comp_algorithm.attr,
+#ifdef CONFIG_ZRAM_WRITEBACK
+ &dev_attr_backing_dev.attr,
+#endif
&dev_attr_io_stat.attr,
&dev_attr_mm_stat.attr,
&dev_attr_debug_stat.attr,
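
get_entry_bdev()/put_entry_bdev() in the diff above implement a small bitmap slot allocator that reserves bit 0 so a stored value of 0 keeps meaning "no entry". A stripped-down sketch of that pattern with illustrative names:

```c
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/spinlock.h>

struct example_slot_pool {
	spinlock_t lock;
	unsigned long *bitmap;
	unsigned long nr_slots;
};

static unsigned long example_get_slot(struct example_slot_pool *pool)
{
	unsigned long slot;

	spin_lock(&pool->lock);
	/* Start the search at 1 so a returned value of 0 means "no slot". */
	slot = find_next_zero_bit(pool->bitmap, pool->nr_slots, 1);
	if (slot == pool->nr_slots) {
		spin_unlock(&pool->lock);
		return 0;
	}
	set_bit(slot, pool->bitmap);
	spin_unlock(&pool->lock);

	return slot;
}

static void example_put_slot(struct example_slot_pool *pool, unsigned long slot)
{
	int was_set;

	spin_lock(&pool->lock);
	was_set = test_and_clear_bit(slot, pool->bitmap);
	spin_unlock(&pool->lock);
	WARN_ON_ONCE(!was_set);
}
```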
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index e34e44d02e3e..31762db861e3 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -60,9 +60,10 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
/* Flags for zram pages (table[page_no].value) */
enum zram_pageflags {
- /* Page consists entirely of zeros */
+ /* Page consists of the same element */
ZRAM_SAME = ZRAM_FLAG_SHIFT,
ZRAM_ACCESS, /* page is now accessed */
+ ZRAM_WB, /* page is stored on backing_device */
__NR_ZRAM_PAGEFLAGS,
};
@@ -115,5 +116,13 @@ struct zram {
* zram is claimed so open request will be failed
*/
bool claim; /* Protected by bdev->bd_mutex */
+#ifdef CONFIG_ZRAM_WRITEBACK
+ struct file *backing_dev;
+ struct block_device *bdev;
+ unsigned int old_block_size;
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ spinlock_t bitmap_lock;
+#endif
};
#endif
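
In the zram_drv.c diff above, zram_bvec_rw() now distinguishes three outcomes (negative errno, 0 for synchronous completion, 1 for a submitted bio), and zram_rw_page() only ends the page IO itself in the synchronous case. A hedged sketch of that dispatch with a hypothetical worker callback:

```c
#include <linux/bug.h>
#include <linux/pagemap.h>

/*
 * example_do_rw() is hypothetical and follows the zram_bvec_rw() contract:
 * negative errno on error, 0 if done synchronously, 1 if a bio was submitted.
 */
static int example_rw_page(struct page *page, bool is_write,
			   int (*example_do_rw)(struct page *, bool))
{
	int ret = example_do_rw(page, is_write);

	if (unlikely(ret < 0))
		return ret;

	switch (ret) {
	case 0:
		/* Completed synchronously: end the page IO here. */
		page_endio(page, is_write, 0);
		break;
	case 1:
		/* The submitted bio will end the page IO; report success. */
		ret = 0;
		break;
	default:
		WARN_ON(1);
	}
	return ret;
}
```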
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig
index 35952a94875e..fae5a74dc737 100644
--- a/drivers/bluetooth/Kconfig
+++ b/drivers/bluetooth/Kconfig
@@ -98,6 +98,7 @@ config BT_HCIUART_NOKIA
depends on BT_HCIUART_SERDEV
depends on PM
select BT_HCIUART_H4
+ select BT_BCM
help
Nokia H4+ is serial protocol for communication between Bluetooth
device and host. This protocol is required for Bluetooth devices
@@ -167,6 +168,7 @@ config BT_HCIUART_INTEL
config BT_HCIUART_BCM
bool "Broadcom protocol support"
depends on BT_HCIUART
+ depends on BT_HCIUART_SERDEV
select BT_HCIUART_H4
select BT_BCM
help
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index b793853ff05f..204afe66de92 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -140,7 +140,8 @@ MODULE_DEVICE_TABLE(usb, ath3k_table);
#define BTUSB_ATH3012 0x80
/* This table is to load patch and sysconfig files
- * for AR3012 */
+ * for AR3012
+ */
static const struct usb_device_id ath3k_blist_tbl[] = {
/* Atheros AR3012 with sflash firmware*/
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index d4b0b655dde6..b07ca9565291 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -93,6 +93,7 @@ static void bluecard_detach(struct pcmcia_device *p_dev);
/* Hardware states */
#define CARD_READY 1
+#define CARD_ACTIVITY 2
#define CARD_HAS_PCCARD_ID 4
#define CARD_HAS_POWER_LED 5
#define CARD_HAS_ACTIVITY_LED 6
@@ -160,16 +161,14 @@ static void bluecard_activity_led_timeout(u_long arg)
struct bluecard_info *info = (struct bluecard_info *)arg;
unsigned int iobase = info->p_dev->resource[0]->start;
- if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
- return;
-
- if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
- /* Disable activity LED */
- outb(0x08 | 0x20, iobase + 0x30);
- } else {
- /* Disable power LED */
- outb(0x00, iobase + 0x30);
+ if (test_bit(CARD_ACTIVITY, &(info->hw_state))) {
+ /* leave LED in inactive state for HZ/10 for blink effect */
+ clear_bit(CARD_ACTIVITY, &(info->hw_state));
+ mod_timer(&(info->timer), jiffies + HZ / 10);
}
+
+ /* Disable activity LED, enable power LED */
+ outb(0x08 | 0x20, iobase + 0x30);
}
@@ -177,22 +176,22 @@ static void bluecard_enable_activity_led(struct bluecard_info *info)
{
unsigned int iobase = info->p_dev->resource[0]->start;
- if (!test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
+ /* don't disturb running blink timer */
+ if (timer_pending(&(info->timer)))
return;
- if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
- /* Enable activity LED */
- outb(0x10 | 0x40, iobase + 0x30);
+ set_bit(CARD_ACTIVITY, &(info->hw_state));
- /* Stop the LED after HZ/4 */
- mod_timer(&(info->timer), jiffies + HZ / 4);
+ if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) {
+ /* Enable activity LED, keep power LED enabled */
+ outb(0x18 | 0x60, iobase + 0x30);
} else {
- /* Enable power LED */
- outb(0x08 | 0x20, iobase + 0x30);
-
- /* Stop the LED after HZ/2 */
- mod_timer(&(info->timer), jiffies + HZ / 2);
+ /* Disable power LED */
+ outb(0x00, iobase + 0x30);
}
+
+ /* Stop the LED after HZ/10 */
+ mod_timer(&(info->timer), jiffies + HZ / 10);
}
@@ -625,16 +624,13 @@ static int bluecard_hci_flush(struct hci_dev *hdev)
static int bluecard_hci_open(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
+ unsigned int iobase = info->p_dev->resource[0]->start;
if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)))
bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE);
- if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
- unsigned int iobase = info->p_dev->resource[0]->start;
-
- /* Enable LED */
- outb(0x08 | 0x20, iobase + 0x30);
- }
+ /* Enable power LED */
+ outb(0x08 | 0x20, iobase + 0x30);
return 0;
}
@@ -643,15 +639,15 @@ static int bluecard_hci_open(struct hci_dev *hdev)
static int bluecard_hci_close(struct hci_dev *hdev)
{
struct bluecard_info *info = hci_get_drvdata(hdev);
+ unsigned int iobase = info->p_dev->resource[0]->start;
bluecard_hci_flush(hdev);
- if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) {
- unsigned int iobase = info->p_dev->resource[0]->start;
+ /* Stop LED timer */
+ del_timer_sync(&(info->timer));
- /* Disable LED */
- outb(0x00, iobase + 0x30);
- }
+ /* Disable power LED */
+ outb(0x00, iobase + 0x30);
return 0;
}
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 32dcac017395..194788739a83 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -684,14 +684,16 @@ static int bt3c_config(struct pcmcia_device *link)
unsigned long try;
/* First pass: look for a config entry that looks normal.
- Two tries: without IO aliases, then with aliases */
+ * Two tries: without IO aliases, then with aliases
+ */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL))
goto found_port;
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 9ab6cfbb831d..cc4bdefa6648 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -287,6 +287,37 @@ static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev)
return skb;
}
+static int btbcm_read_info(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ /* Read Verbose Config Version Info */
+ skb = btbcm_read_verbose_config(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ /* Read Controller Features */
+ skb = btbcm_read_controller_features(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
+ kfree_skb(skb);
+
+ /* Read Local Name */
+ skb = btbcm_read_local_name(hdev);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
+ kfree_skb(skb);
+
+ return 0;
+}
+
static const struct {
u16 subver;
const char *name;
@@ -322,13 +353,10 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- /* Read Verbose Config Version Info */
- skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
- kfree_skb(skb);
+ /* Read controller information */
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
switch ((rev & 0xf000) >> 12) {
case 0:
@@ -431,29 +459,10 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
subver = le16_to_cpu(ver->lmp_subver);
kfree_skb(skb);
- /* Read Verbose Config Version Info */
- skb = btbcm_read_verbose_config(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]);
- kfree_skb(skb);
-
- /* Read Controller Features */
- skb = btbcm_read_controller_features(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: BCM: features 0x%2.2x", hdev->name, skb->data[1]);
- kfree_skb(skb);
-
- /* Read Local Name */
- skb = btbcm_read_local_name(hdev);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1));
- kfree_skb(skb);
+ /* Read controller information */
+ err = btbcm_read_info(hdev);
+ if (err)
+ return err;
switch ((rev & 0xf000) >> 12) {
case 0:
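
btbcm_read_info() above folds three copies of the same read-and-log sequence into a single helper. A sketch of the repeated unit it factors out; the generic read_fn callback and the "what" label are illustrative, standing in for btbcm_read_verbose_config() and friends:

```c
#include <linux/err.h>
#include <linux/skbuff.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static int example_read_and_log(struct hci_dev *hdev,
				struct sk_buff *(*read_fn)(struct hci_dev *),
				const char *what)
{
	struct sk_buff *skb = read_fn(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	BT_INFO("%s: %s %u", hdev->name, what, skb->data[1]);
	kfree_skb(skb);
	return 0;
}
```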
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index eb794f08b238..03341ce98c32 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -1455,7 +1455,8 @@ done:
fw_dump_ptr = fw_dump_data;
/* Dump all the memory data into single file, a userspace script will
- be used to split all the memory data to multiple files*/
+ * be used to split all the memory data to multiple files
+ */
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start");
for (idx = 0; idx < dump_num; idx++) {
struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx];
@@ -1482,7 +1483,8 @@ done:
}
/* fw_dump_data will be free in device coredump release function
- after 5 min*/
+ * after 5 min
+ */
dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, GFP_KERNEL);
BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end");
}
diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c
index 28afd5d585f9..0bbdfcef2aa8 100644
--- a/drivers/bluetooth/btqca.c
+++ b/drivers/bluetooth/btqca.c
@@ -81,7 +81,7 @@ static int rome_patch_ver_req(struct hci_dev *hdev, u32 *rome_version)
* and lower 2 bytes from patch will be used.
*/
*rome_version = (le32_to_cpu(ver->soc_id) << 16) |
- (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
+ (le16_to_cpu(ver->rome_ver) & 0x0000ffff);
out:
kfree_skb(skb);
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 8279094dd713..d9a99b4302ea 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -279,6 +279,8 @@ static int rtl_load_config(struct hci_dev *hdev, const char *name, u8 **buff)
return ret;
ret = fw->size;
*buff = kmemdup(fw->data, ret, GFP_KERNEL);
+ if (!*buff)
+ ret = -ENOMEM;
release_firmware(fw);
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 1cb958e199eb..c8e945d19ffe 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -144,7 +144,8 @@ static int btsdio_rx_packet(struct btsdio_data *data)
if (!skb) {
/* Out of memory. Prepare a read retry and just
* return with the expectation that the next time
- * we're called we'll have more memory. */
+ * we're called we'll have more memory.
+ */
return -ENOMEM;
}
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 7df79bb12350..310e9c2e09b6 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -614,14 +614,16 @@ static int btuart_config(struct pcmcia_device *link)
int try;
/* First pass: look for a config entry that looks normal.
- Two tries: without IO aliases, then with aliases */
+ * Two tries: without IO aliases, then with aliases
+ */
for (try = 0; try < 2; try++)
if (!pcmcia_loop_config(link, btuart_check_config, &try))
goto found_port;
/* Second pass: try to find an entry that isn't picky about
- its base address, then try to grab any standard serial port
- address, and finally try to get any free port. */
+ * its base address, then try to grab any standard serial port
+ * address, and finally try to get any free port.
+ */
if (!pcmcia_loop_config(link, btuart_check_config_notpicky, NULL))
goto found_port;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index fa24d693af24..7a5c06aaa181 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -66,6 +66,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_BCM2045 0x40000
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
+#define BTUSB_BCM_NO_PRODID 0x200000
static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -131,7 +132,8 @@ static const struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM43142A0 (Foxconn/Lenovo) */
- { USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
+ { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM },
/* Broadcom BCM920703 (HTC Vive) */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01),
@@ -169,6 +171,10 @@ static const struct usb_device_id btusb_table[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
.driver_info = BTUSB_BCM_PATCHRAM },
+ /* Broadcom devices with missing product id */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0000, 0x0000, 0xff, 0x01, 0x01),
+ .driver_info = BTUSB_BCM_PATCHRAM | BTUSB_BCM_NO_PRODID },
+
/* Intel Bluetooth USB Bootloader (RAM module) */
{ USB_DEVICE(0x8087, 0x0a5a),
.driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
@@ -268,6 +274,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe092), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME },
+ { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME },
/* Broadcom BCM2035 */
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 },
@@ -357,6 +364,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -656,7 +664,8 @@ static void btusb_intr_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -745,7 +754,8 @@ static void btusb_bulk_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -840,7 +850,8 @@ static void btusb_isoc_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -952,7 +963,8 @@ static void btusb_diag_complete(struct urb *urb)
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
/* -EPERM: urb is being killed;
- * -ENODEV: device got disconnected */
+ * -ENODEV: device got disconnected
+ */
if (err != -EPERM && err != -ENODEV)
BT_ERR("%s urb %p failed to resubmit (%d)",
hdev->name, urb, -err);
@@ -1076,6 +1088,10 @@ static int btusb_open(struct hci_dev *hdev)
}
data->intf->needs_remote_wakeup = 1;
+ /* device specific wakeup source enabled and required for USB
+ * remote wakeup while host is suspended
+ */
+ device_wakeup_enable(&data->udev->dev);
if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
goto done;
@@ -1139,6 +1155,7 @@ static int btusb_close(struct hci_dev *hdev)
goto failed;
data->intf->needs_remote_wakeup = 0;
+ device_wakeup_disable(&data->udev->dev);
usb_autopm_put_interface(data->intf);
failed:
@@ -2892,11 +2909,25 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info == BTUSB_IGNORE)
return -ENODEV;
+ if (id->driver_info & BTUSB_BCM_NO_PRODID) {
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ /* For the broken Broadcom devices that show 0000:0000
+ * as USB vendor and product information, check that the
+ * manufacturer string identifies them as Broadcom based
+ * devices.
+ */
+ if (!udev->manufacturer ||
+ strcmp(udev->manufacturer, "Broadcom Corp"))
+ return -ENODEV;
+ }
+
if (id->driver_info & BTUSB_ATH3012) {
struct usb_device *udev = interface_to_usbdev(intf);
/* Old firmware would otherwise let ath3k driver load
- * patch and sysconfig files */
+ * patch and sysconfig files
+ */
if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
return -ENODEV;
}
@@ -3067,6 +3098,12 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_QCA_ROME) {
data->setup_on_usb = btusb_setup_qca;
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
+
+ /* QCA Rome devices lose their updated firmware over suspend,
+ * but the USB hub doesn't notice any status change.
+ * Explicitly request a device reset on resume.
+ */
+ set_bit(BTUSB_RESET_RESUME, &data->flags);
}
#ifdef CONFIG_BT_HCIBTUSB_RTL
@@ -3259,13 +3296,28 @@ static void play_deferred(struct btusb_data *data)
int err;
while ((urb = usb_get_from_anchor(&data->deferred))) {
+ usb_anchor_urb(urb, &data->tx_anchor);
+
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err < 0)
+ if (err < 0) {
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ data->hdev->name, urb, -err);
+ kfree(urb->setup_packet);
+ usb_unanchor_urb(urb);
+ usb_free_urb(urb);
break;
+ }
data->tx_in_flight++;
+ usb_free_urb(urb);
+ }
+
+ /* Clean up the remaining deferred urbs. */
+ while ((urb = usb_get_from_anchor(&data->deferred))) {
+ kfree(urb->setup_packet);
+ usb_free_urb(urb);
}
- usb_scuttle_anchored_urbs(&data->deferred);
}
static int btusb_resume(struct usb_interface *intf)
diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
index 85a3978b064f..5ef8000f90a9 100644
--- a/drivers/bluetooth/btwilink.c
+++ b/drivers/bluetooth/btwilink.c
@@ -93,8 +93,7 @@ static void st_reg_completion_cb(void *priv_data, int data)
complete(&lhst->wait_reg_completion);
}
-/* Called by Shared Transport layer when receive data is
- * available */
+/* Called by Shared Transport layer when receive data is available */
static long st_receive(void *priv_data, struct sk_buff *skb)
{
struct ti_st *lhst = priv_data;
@@ -198,7 +197,8 @@ static int ti_st_open(struct hci_dev *hdev)
}
/* Is ST registration callback
- * called with ERROR status? */
+ * called with ERROR status?
+ */
if (hst->reg_status != 0) {
BT_ERR("ST registration completed with invalid "
"status %d", hst->reg_status);
@@ -276,7 +276,7 @@ static int ti_st_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
static int bt_ti_probe(struct platform_device *pdev)
{
- static struct ti_st *hst;
+ struct ti_st *hst;
struct hci_dev *hdev;
int err;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 6a662d0161b4..e2540113d0da 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -27,6 +27,8 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/acpi.h>
+#include <linux/of.h>
+#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
@@ -34,6 +36,7 @@
#include <linux/interrupt.h>
#include <linux/dmi.h>
#include <linux/pm_runtime.h>
+#include <linux/serdev.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -41,11 +44,15 @@
#include "btbcm.h"
#include "hci_uart.h"
+#define BCM_NULL_PKT 0x00
+#define BCM_NULL_SIZE 0
+
#define BCM_LM_DIAG_PKT 0x07
#define BCM_LM_DIAG_SIZE 63
#define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */
+/* platform device driver resources */
struct bcm_device {
struct list_head list;
@@ -59,6 +66,7 @@ struct bcm_device {
bool clk_enabled;
u32 init_speed;
+ u32 oper_speed;
int irq;
u8 irq_polarity;
@@ -68,6 +76,12 @@ struct bcm_device {
#endif
};
+/* serdev driver resources */
+struct bcm_serdev {
+ struct hci_uart hu;
+};
+
+/* generic bcm uart resources */
struct bcm_data {
struct sk_buff *rx_skb;
struct sk_buff_head txq;
@@ -79,6 +93,14 @@ struct bcm_data {
static DEFINE_MUTEX(bcm_device_lock);
static LIST_HEAD(bcm_device_list);
+static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed)
+{
+ if (hu->serdev)
+ serdev_device_set_baudrate(hu->serdev, speed);
+ else
+ hci_uart_set_baudrate(hu, speed);
+}
+
static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed)
{
struct hci_dev *hdev = hu->hdev;
@@ -176,7 +198,7 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
static int bcm_request_irq(struct bcm_data *bcm)
{
struct bcm_device *bdev = bcm->dev;
- int err = 0;
+ int err;
/* If this is not a platform device, do not enable PM functionalities */
mutex_lock(&bcm_device_lock);
@@ -185,21 +207,23 @@ static int bcm_request_irq(struct bcm_data *bcm)
goto unlock;
}
- if (bdev->irq > 0) {
- err = devm_request_irq(&bdev->pdev->dev, bdev->irq,
- bcm_host_wake, IRQF_TRIGGER_RISING,
- "host_wake", bdev);
- if (err)
- goto unlock;
+ if (bdev->irq <= 0) {
+ err = -EOPNOTSUPP;
+ goto unlock;
+ }
+
+ err = devm_request_irq(&bdev->pdev->dev, bdev->irq, bcm_host_wake,
+ IRQF_TRIGGER_RISING, "host_wake", bdev);
+ if (err)
+ goto unlock;
- device_init_wakeup(&bdev->pdev->dev, true);
+ device_init_wakeup(&bdev->pdev->dev, true);
- pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
- BCM_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&bdev->pdev->dev);
- pm_runtime_set_active(&bdev->pdev->dev);
- pm_runtime_enable(&bdev->pdev->dev);
- }
+ pm_runtime_set_autosuspend_delay(&bdev->pdev->dev,
+ BCM_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&bdev->pdev->dev);
+ pm_runtime_set_active(&bdev->pdev->dev);
+ pm_runtime_enable(&bdev->pdev->dev);
unlock:
mutex_unlock(&bcm_device_lock);
@@ -287,6 +311,14 @@ static int bcm_open(struct hci_uart *hu)
hu->priv = bcm;
+	/* If this is a serdev-defined device, only use the serdev
+	 * open primitive and skip the rest.
+	 */
+ if (hu->serdev) {
+ serdev_device_open(hu->serdev);
+ goto out;
+ }
+
if (!hu->tty->dev)
goto out;
@@ -301,6 +333,7 @@ static int bcm_open(struct hci_uart *hu)
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
bcm->dev = dev;
hu->init_speed = dev->init_speed;
+ hu->oper_speed = dev->oper_speed;
#ifdef CONFIG_PM
dev->hu = hu;
#endif
@@ -321,6 +354,12 @@ static int bcm_close(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+	/* If this is a serdev-defined device, only use the serdev
+	 * close primitive and then continue as usual.
+	 */
+ if (hu->serdev)
+ serdev_device_close(hu->serdev);
+
/* Protect bcm->dev against removal of the device or driver */
mutex_lock(&bcm_device_lock);
if (bcm_device_exists(bdev)) {
@@ -396,7 +435,7 @@ static int bcm_setup(struct hci_uart *hu)
speed = 0;
if (speed)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
/* Operational speed if any */
if (hu->oper_speed)
@@ -409,7 +448,7 @@ static int bcm_setup(struct hci_uart *hu)
if (speed) {
err = bcm_set_baudrate(hu, speed);
if (!err)
- hci_uart_set_baudrate(hu, speed);
+ host_set_baudrate(hu, speed);
}
finalize:
@@ -432,11 +471,19 @@ finalize:
.lsize = 0, \
.maxlen = BCM_LM_DIAG_SIZE
+#define BCM_RECV_NULL \
+ .type = BCM_NULL_PKT, \
+ .hlen = BCM_NULL_SIZE, \
+ .loff = 0, \
+ .lsize = 0, \
+ .maxlen = BCM_NULL_SIZE
+
static const struct h4_recv_pkt bcm_recv_pkts[] = {
{ H4_RECV_ACL, .recv = hci_recv_frame },
{ H4_RECV_SCO, .recv = hci_recv_frame },
{ H4_RECV_EVENT, .recv = hci_recv_frame },
{ BCM_RECV_LM_DIAG, .recv = hci_recv_diag },
+ { BCM_RECV_NULL, .recv = hci_recv_diag },
};
static int bcm_recv(struct hci_uart *hu, const void *data, int count)
@@ -697,8 +744,10 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
case ACPI_RESOURCE_TYPE_SERIAL_BUS:
sb = &ares->data.uart_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART)
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) {
dev->init_speed = sb->default_baud_rate;
+ dev->oper_speed = 4000000;
+ }
break;
default:
@@ -851,7 +900,6 @@ static const struct hci_uart_proto bcm_proto = {
.name = "Broadcom",
.manufacturer = 15,
.init_speed = 115200,
- .oper_speed = 4000000,
.open = bcm_open,
.close = bcm_close,
.flush = bcm_flush,
@@ -901,9 +949,57 @@ static struct platform_driver bcm_driver = {
},
};
+static int bcm_serdev_probe(struct serdev_device *serdev)
+{
+ struct bcm_serdev *bcmdev;
+ u32 speed;
+ int err;
+
+ bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL);
+ if (!bcmdev)
+ return -ENOMEM;
+
+ bcmdev->hu.serdev = serdev;
+ serdev_device_set_drvdata(serdev, bcmdev);
+
+ err = device_property_read_u32(&serdev->dev, "max-speed", &speed);
+ if (!err)
+ bcmdev->hu.oper_speed = speed;
+
+ return hci_uart_register_device(&bcmdev->hu, &bcm_proto);
+}
+
+static void bcm_serdev_remove(struct serdev_device *serdev)
+{
+ struct bcm_serdev *bcmdev = serdev_device_get_drvdata(serdev);
+
+ hci_uart_unregister_device(&bcmdev->hu);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id bcm_bluetooth_of_match[] = {
+ { .compatible = "brcm,bcm43438-bt" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match);
+#endif
+
+static struct serdev_device_driver bcm_serdev_driver = {
+ .probe = bcm_serdev_probe,
+ .remove = bcm_serdev_remove,
+ .driver = {
+ .name = "hci_uart_bcm",
+ .of_match_table = of_match_ptr(bcm_bluetooth_of_match),
+ },
+};
+
int __init bcm_init(void)
{
+	/* For now, we need to keep both the platform device
+	 * driver (ACPI generated) and the serdev driver (DT).
+	 */
platform_driver_register(&bcm_driver);
+ serdev_device_driver_register(&bcm_serdev_driver);
return hci_uart_register_proto(&bcm_proto);
}
@@ -911,6 +1007,7 @@ int __init bcm_init(void)
int __exit bcm_deinit(void)
{
platform_driver_unregister(&bcm_driver);
+ serdev_device_driver_unregister(&bcm_serdev_driver);
return hci_uart_unregister_proto(&bcm_proto);
}
diff --git a/drivers/bluetooth/hci_h4.c b/drivers/bluetooth/hci_h4.c
index 4e328d7d47bb..3b82a87224a9 100644
--- a/drivers/bluetooth/hci_h4.c
+++ b/drivers/bluetooth/hci_h4.c
@@ -172,7 +172,7 @@ struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
const struct h4_recv_pkt *pkts, int pkts_count)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
- u8 alignment = hu->alignment;
+ u8 alignment = hu->alignment ? hu->alignment : 1;
while (count) {
int i, len;
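
The one-line change above matters because h4_recv_buf() derives inter-frame padding from hu->alignment with modulo arithmetic, so a serdev-based driver that never sets the field would otherwise hand it a zero divisor. A minimal illustrative sketch of that derivation (the helper name and exact rounding are assumptions, not code from this patch):

/*
 * Sketch: bytes of padding needed so the next frame starts on an
 * "alignment"-byte boundary. With alignment == 0 both modulo
 * operations divide by zero, hence the fallback to 1 above.
 */
static inline unsigned int h4_pad_bytes(unsigned int frame_len, unsigned int alignment)
{
	return (alignment - (frame_len % alignment)) % alignment;
}
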
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 8397b716fa65..a746627e784e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -457,7 +457,8 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
/* Error if the tty has no write op instead of leaving an exploitable
- hole */
+ * hole
+ */
if (tty->ops->write == NULL)
return -EOPNOTSUPP;
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index c982943f0747..424c15aa7bb7 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -622,7 +622,8 @@ static int download_firmware(struct ll_device *lldev)
cmd = (struct hci_command *)action_ptr;
if (cmd->opcode == 0xff36) {
/* ignore remote change
- * baud rate HCI VS command */
+ * baud rate HCI VS command
+ */
bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware");
break;
}
@@ -742,14 +743,8 @@ static int hci_ti_probe(struct serdev_device *serdev)
static void hci_ti_remove(struct serdev_device *serdev)
{
struct ll_device *lldev = serdev_device_get_drvdata(serdev);
- struct hci_uart *hu = &lldev->hu;
- struct hci_dev *hdev = hu->hdev;
- cancel_work_sync(&hu->write_work);
-
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- hu->proto->close(hu);
+ hci_uart_unregister_device(&lldev->hu);
}
static const struct of_device_id hci_ti_of_match[] = {
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 181a15b549e5..3539fd03f47e 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -767,16 +767,8 @@ static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev)
static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev)
{
struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev);
- struct hci_uart *hu = &btdev->hu;
- struct hci_dev *hdev = hu->hdev;
- cancel_work_sync(&hu->write_work);
-
- hci_unregister_dev(hdev);
- hci_free_dev(hdev);
- hu->proto->close(hu);
-
- pm_runtime_disable(&btdev->serdev->dev);
+ hci_uart_unregister_device(&btdev->hu);
}
static int nokia_bluetooth_runtime_suspend(struct device *dev)
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index aea930101dd2..b725ac4f7ff6 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -354,3 +354,16 @@ err_alloc:
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
+
+void hci_uart_unregister_device(struct hci_uart *hu)
+{
+ struct hci_dev *hdev = hu->hdev;
+
+ hci_unregister_dev(hdev);
+ hci_free_dev(hdev);
+
+ cancel_work_sync(&hu->write_work);
+
+ hu->proto->close(hu);
+}
+EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index c6e9e1cf63f8..d9cd95d81149 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -112,6 +112,7 @@ struct hci_uart {
int hci_uart_register_proto(const struct hci_uart_proto *p);
int hci_uart_unregister_proto(const struct hci_uart_proto *p);
int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p);
+void hci_uart_unregister_device(struct hci_uart *hu);
int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 2408ea38a39c..ae3d8f3444b9 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -132,7 +132,7 @@ config SIMPLE_PM_BUS
config SUNXI_RSB
tristate "Allwinner sunXi Reduced Serial Bus Driver"
- default MACH_SUN8I || MACH_SUN9I
+ default MACH_SUN8I || MACH_SUN9I || ARM64
depends on ARCH_SUNXI
select REGMAP
help
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index c49da15d9790..3c29d36702a8 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -2124,8 +2124,8 @@ int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
return -ENODEV;
port = __cci_ace_get_port(dn, ACE_LITE_PORT);
- if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
- dn->full_name))
+ if (WARN_ONCE(port < 0, "node %pOF ACE lite port look-up failure\n",
+ dn))
return -ENODEV;
cci_port_control(port, enable);
return 0;
@@ -2200,14 +2200,14 @@ static int cci_probe_ports(struct device_node *np)
if (of_property_read_string(cp, "interface-type",
&match_str)) {
- WARN(1, "node %s missing interface-type property\n",
- cp->full_name);
+ WARN(1, "node %pOF missing interface-type property\n",
+ cp);
continue;
}
is_ace = strcmp(match_str, "ace") == 0;
if (!is_ace && strcmp(match_str, "ace-lite")) {
- WARN(1, "node %s containing invalid interface-type property, skipping it\n",
- cp->full_name);
+ WARN(1, "node %pOF containing invalid interface-type property, skipping it\n",
+ cp);
continue;
}
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index 4bd361d64270..3d56ebcda720 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -156,8 +156,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
ret = weim_timing_setup(child, base, devtype);
if (ret)
- dev_warn(&pdev->dev, "%s set timing failed.\n",
- child->full_name);
+ dev_warn(&pdev->dev, "%pOF set timing failed.\n",
+ child);
else
have_child = 1;
}
@@ -166,8 +166,8 @@ static int __init weim_parse_dt(struct platform_device *pdev,
ret = of_platform_default_populate(pdev->dev.of_node,
NULL, &pdev->dev);
if (ret)
- dev_err(&pdev->dev, "%s fail to create devices.\n",
- pdev->dev.of_node->full_name);
+ dev_err(&pdev->dev, "%pOF fail to create devices.\n",
+ pdev->dev.of_node);
return ret;
}
diff --git a/drivers/bus/omap-ocp2scp.c b/drivers/bus/omap-ocp2scp.c
index bf500e0e7362..77791f3dcfc6 100644
--- a/drivers/bus/omap-ocp2scp.c
+++ b/drivers/bus/omap-ocp2scp.c
@@ -70,8 +70,10 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
if (!of_device_is_compatible(np, "ti,am437x-ocp2scp")) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(regs))
- goto err0;
+ if (IS_ERR(regs)) {
+ ret = PTR_ERR(regs);
+ goto err1;
+ }
pm_runtime_get_sync(&pdev->dev);
reg = readl_relaxed(regs + OCP2SCP_TIMING);
@@ -83,6 +85,9 @@ static int omap_ocp2scp_probe(struct platform_device *pdev)
return 0;
+err1:
+ pm_runtime_disable(&pdev->dev);
+
err0:
device_for_each_child(&pdev->dev, NULL, ocp2scp_remove_devices);
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 795c9d9c96a6..328ca93781cf 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -556,20 +556,20 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
/* Runtime addresses for all slaves should be set first */
for_each_available_child_of_node(np, child) {
- dev_dbg(dev, "setting child %s runtime address\n",
- child->full_name);
+ dev_dbg(dev, "setting child %pOF runtime address\n",
+ child);
ret = of_property_read_u32(child, "reg", &hwaddr);
if (ret) {
- dev_err(dev, "%s: invalid 'reg' property: %d\n",
- child->full_name, ret);
+ dev_err(dev, "%pOF: invalid 'reg' property: %d\n",
+ child, ret);
continue;
}
rtaddr = sunxi_rsb_get_rtaddr(hwaddr);
if (!rtaddr) {
- dev_err(dev, "%s: unknown hardware device address\n",
- child->full_name);
+ dev_err(dev, "%pOF: unknown hardware device address\n",
+ child);
continue;
}
@@ -586,15 +586,15 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
/* send command */
ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
- dev_warn(dev, "%s: set runtime address failed: %d\n",
- child->full_name, ret);
+ dev_warn(dev, "%pOF: set runtime address failed: %d\n",
+ child, ret);
}
/* Then we start adding devices and probing them */
for_each_available_child_of_node(np, child) {
struct sunxi_rsb_device *rdev;
- dev_dbg(dev, "adding child %s\n", child->full_name);
+ dev_dbg(dev, "adding child %pOF\n", child);
ret = of_property_read_u32(child, "reg", &hwaddr);
if (ret)
@@ -606,8 +606,8 @@ static int of_rsb_register_devices(struct sunxi_rsb *rsb)
rdev = sunxi_rsb_device_create(rsb, child, hwaddr, rtaddr);
if (IS_ERR(rdev))
- dev_err(dev, "failed to add child device %s: %ld\n",
- child->full_name, PTR_ERR(rdev));
+ dev_err(dev, "failed to add child device %pOF: %ld\n",
+ child, PTR_ERR(rdev));
}
return 0;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index ccd239ab879f..623714344600 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -161,7 +161,7 @@ config VIRTIO_CONSOLE
depends on VIRTIO && TTY
select HVC_DRIVER
help
- Virtio console for use with lguest and other hypervisors.
+ Virtio console for use with hypervisors.
Also serves as a general-purpose serial device for data
transfer between the guest and host. Character devices at
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index dcbbb4ea3cc1..89527bae4602 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -381,7 +381,7 @@ static void agp_ali_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_ali_pci_table[] = {
+static const struct pci_device_id agp_ali_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 5fbd333e4c6d..b450544dcaf0 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -21,7 +21,7 @@
#define AMD_TLBFLUSH 0x0c /* In mmio region (32-bit register) */
#define AMD_CACHEENTRY 0x10 /* In mmio region (32-bit register) */
-static struct pci_device_id agp_amdk7_pci_table[];
+static const struct pci_device_id agp_amdk7_pci_table[];
struct amd_page_map {
unsigned long *real;
@@ -508,7 +508,7 @@ static int agp_amdk7_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
/* must be the same order as name table above */
-static struct pci_device_id agp_amdk7_pci_table[] = {
+static const struct pci_device_id agp_amdk7_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index c99cd19d9147..e50c29c97ca7 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -610,7 +610,7 @@ static int agp_amd64_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id agp_amd64_pci_table[] = {
+static const struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 0b5ec7af2414..88b4cbee4dac 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -540,7 +540,7 @@ static void agp_ati_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_ati_pci_table[] = {
+static const struct pci_device_id agp_ati_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index 533cb6d229b8..7f88490b5479 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -427,7 +427,7 @@ static int agp_efficeon_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id agp_efficeon_pci_table[] = {
+static const struct pci_device_id agp_efficeon_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 0a21daed5b62..9e4f27a6cb5a 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,7 +828,7 @@ static int agp_intel_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id agp_intel_pci_table[] = {
+static const struct pci_device_id agp_intel_pci_table[] = {
#define ID(x) \
{ \
.class = (PCI_CLASS_BRIDGE_HOST << 8), \
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 6c8d39cb566e..828b34445203 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -420,7 +420,7 @@ static int agp_nvidia_resume(struct pci_dev *pdev)
#endif
-static struct pci_device_id agp_nvidia_pci_table[] = {
+static const struct pci_device_id agp_nvidia_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 2c74038da459..14909fc5d767 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -237,7 +237,7 @@ static int agp_sis_resume(struct pci_dev *pdev)
#endif /* CONFIG_PM */
-static struct pci_device_id agp_sis_pci_table[] = {
+static const struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index fdced547ad59..c381c8e396fc 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -679,7 +679,7 @@ static void agp_uninorth_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-static struct pci_device_id agp_uninorth_pci_table[] = {
+static const struct pci_device_id agp_uninorth_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
.class_mask = ~0,
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index b67263d6e34b..c0a5b1f3a986 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -67,7 +67,7 @@ static char *applicom_pci_devnames[] = {
"PCI2000PFB"
};
-static struct pci_device_id applicom_pci_tbl[] = {
+static const struct pci_device_id applicom_pci_tbl[] = {
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) },
{ PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) },
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 1b223c32a8ae..95a031e9eced 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -13,10 +13,8 @@ menuconfig HW_RANDOM
that's usually called /dev/hwrng, and which exposes one
of possibly several hardware random number generators.
- These hardware random number generators do not feed directly
- into the kernel's random number generator. That is usually
- handled by the "rngd" daemon. Documentation/hw_random.txt
- has more information.
+ These hardware random number generators do feed into the
+ kernel's random number generator entropy pool.
If unsure, say Y.
@@ -255,6 +253,20 @@ config HW_RANDOM_MXC_RNGA
If unsure, say Y.
+config HW_RANDOM_IMX_RNGC
+ tristate "Freescale i.MX RNGC Random Number Generator"
+ depends on ARCH_MXC
+ default HW_RANDOM
+ ---help---
+ This driver provides kernel-side support for the Random Number
+ Generator Version C hardware found on some Freescale i.MX
+ processors. Version B is also supported by this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called imx-rngc.
+
+ If unsure, say Y.
+
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
depends on ARCH_NOMADIK
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index b085975ec1d2..39a67defac67 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
+obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_OCTEON) += octeon-rng.o
obj-$(CONFIG_HW_RANDOM_NOMADIK) += nomadik-rng.o
obj-$(CONFIG_HW_RANDOM_PSERIES) += pseries-rng.o
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 503a41dfa193..9701ac7d8b47 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -28,7 +28,10 @@
#define RNG_MODULE_NAME "hw_random"
static struct hwrng *current_rng;
+/* the current rng has been explicitly chosen by user via sysfs */
+static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
+/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
@@ -303,6 +306,7 @@ static ssize_t hwrng_attr_current_store(struct device *dev,
list_for_each_entry(rng, &rng_list, list) {
if (sysfs_streq(rng->name, buf)) {
err = 0;
+ cur_rng_set_by_user = 1;
if (rng != current_rng)
err = set_current_rng(rng);
break;
@@ -351,16 +355,27 @@ static ssize_t hwrng_attr_available_show(struct device *dev,
return strlen(buf);
}
+static ssize_t hwrng_attr_selected_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
+}
+
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
hwrng_attr_current_show,
hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
hwrng_attr_available_show,
NULL);
+static DEVICE_ATTR(rng_selected, S_IRUGO,
+ hwrng_attr_selected_show,
+ NULL);
static struct attribute *rng_dev_attrs[] = {
&dev_attr_rng_current.attr,
&dev_attr_rng_available.attr,
+ &dev_attr_rng_selected.attr,
NULL
};
@@ -417,6 +432,7 @@ int hwrng_register(struct hwrng *rng)
{
int err = -EINVAL;
struct hwrng *old_rng, *tmp;
+ struct list_head *rng_list_ptr;
if (!rng->name || (!rng->data_read && !rng->read))
goto out;
@@ -432,14 +448,27 @@ int hwrng_register(struct hwrng *rng)
init_completion(&rng->cleanup_done);
complete(&rng->cleanup_done);
+ /* rng_list is sorted by decreasing quality */
+ list_for_each(rng_list_ptr, &rng_list) {
+ tmp = list_entry(rng_list_ptr, struct hwrng, list);
+ if (tmp->quality < rng->quality)
+ break;
+ }
+ list_add_tail(&rng->list, rng_list_ptr);
+
old_rng = current_rng;
err = 0;
- if (!old_rng) {
+ if (!old_rng ||
+ (!cur_rng_set_by_user && rng->quality > old_rng->quality)) {
+ /*
+			 * Make the new rng current: it provides better
+			 * entropy quality and the current rng was not
+			 * explicitly chosen by userspace.
+ */
err = set_current_rng(rng);
if (err)
goto out_unlock;
}
- list_add_tail(&rng->list, &rng_list);
if (old_rng && !rng->init) {
/*
@@ -466,12 +495,13 @@ void hwrng_unregister(struct hwrng *rng)
list_del(&rng->list);
if (current_rng == rng) {
drop_current_rng();
+ cur_rng_set_by_user = 0;
+ /* rng_list is sorted by quality, use the best (=first) one */
if (!list_empty(&rng_list)) {
- struct hwrng *tail;
-
- tail = list_entry(rng_list.prev, struct hwrng, list);
+ struct hwrng *new_rng;
- set_current_rng(tail);
+ new_rng = list_entry(rng_list.next, struct hwrng, list);
+ set_current_rng(new_rng);
}
}
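
The insertion loop added above relies on the fact that list_add_tail(new, pos) links the new node just before pos: breaking at the first lower-quality entry keeps rng_list ordered from best to worst, and falling off the end of the walk (pos back at the list head) appends at the real tail. A standalone sketch of the same idea, with an assumed helper name:

/* Keep rng_list sorted by decreasing quality while inserting rng. */
static void hwrng_insert_sorted(struct list_head *rng_list, struct hwrng *rng)
{
	struct list_head *pos;
	struct hwrng *tmp;

	list_for_each(pos, rng_list) {
		tmp = list_entry(pos, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;	/* first entry with lower quality */
	}
	/* inserts before pos; pos == rng_list here means "append at tail" */
	list_add_tail(&rng->list, pos);
}
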
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
new file mode 100644
index 000000000000..88db42d30760
--- /dev/null
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -0,0 +1,331 @@
+/*
+ * RNG driver for Freescale RNGC
+ *
+ * Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Copyright (C) 2017 Martin Kaiser <martin@kaiser.cx>
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/hw_random.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+
+#define RNGC_COMMAND 0x0004
+#define RNGC_CONTROL 0x0008
+#define RNGC_STATUS 0x000C
+#define RNGC_ERROR 0x0010
+#define RNGC_FIFO 0x0014
+
+#define RNGC_CMD_CLR_ERR 0x00000020
+#define RNGC_CMD_CLR_INT 0x00000010
+#define RNGC_CMD_SEED 0x00000002
+#define RNGC_CMD_SELF_TEST 0x00000001
+
+#define RNGC_CTRL_MASK_ERROR 0x00000040
+#define RNGC_CTRL_MASK_DONE 0x00000020
+
+#define RNGC_STATUS_ERROR 0x00010000
+#define RNGC_STATUS_FIFO_LEVEL_MASK 0x00000f00
+#define RNGC_STATUS_FIFO_LEVEL_SHIFT 8
+#define RNGC_STATUS_SEED_DONE 0x00000020
+#define RNGC_STATUS_ST_DONE 0x00000010
+
+#define RNGC_ERROR_STATUS_STAT_ERR 0x00000008
+
+#define RNGC_TIMEOUT 3000 /* 3 sec */
+
+
+static bool self_test = true;
+module_param(self_test, bool, 0);
+
+struct imx_rngc {
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+ struct hwrng rng;
+ struct completion rng_op_done;
+ /*
+ * err_reg is written only by the irq handler and read only
+	 * when interrupts are masked, so no spinlock is needed
+ */
+ u32 err_reg;
+};
+
+
+static inline void imx_rngc_irq_mask_clear(struct imx_rngc *rngc)
+{
+ u32 ctrl, cmd;
+
+ /* mask interrupts */
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl |= RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR;
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+
+ /*
+ * CLR_INT clears the interrupt only if there's no error
+	 * CLR_ERR clears the interrupt and the error register if there
+ * is an error
+ */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ cmd |= RNGC_CMD_CLR_INT | RNGC_CMD_CLR_ERR;
+ writel(cmd, rngc->base + RNGC_COMMAND);
+}
+
+static inline void imx_rngc_irq_unmask(struct imx_rngc *rngc)
+{
+ u32 ctrl;
+
+ ctrl = readl(rngc->base + RNGC_CONTROL);
+ ctrl &= ~(RNGC_CTRL_MASK_DONE | RNGC_CTRL_MASK_ERROR);
+ writel(ctrl, rngc->base + RNGC_CONTROL);
+}
+
+static int imx_rngc_self_test(struct imx_rngc *rngc)
+{
+ u32 cmd;
+ int ret;
+
+ imx_rngc_irq_unmask(rngc);
+
+ /* run self test */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SELF_TEST, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done, RNGC_TIMEOUT);
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ if (rngc->err_reg != 0)
+ return -EIO;
+
+ return 0;
+}
+
+static int imx_rngc_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ unsigned int status;
+ unsigned int level;
+ int retval = 0;
+
+ while (max >= sizeof(u32)) {
+ status = readl(rngc->base + RNGC_STATUS);
+
+ /* is there some error while reading this random number? */
+ if (status & RNGC_STATUS_ERROR)
+ break;
+
+ /* how many random numbers are in FIFO? [0-16] */
+ level = (status & RNGC_STATUS_FIFO_LEVEL_MASK) >>
+ RNGC_STATUS_FIFO_LEVEL_SHIFT;
+
+ if (level) {
+ /* retrieve a random number from FIFO */
+ *(u32 *)data = readl(rngc->base + RNGC_FIFO);
+
+ retval += sizeof(u32);
+ data += sizeof(u32);
+ max -= sizeof(u32);
+ }
+ }
+
+ return retval ? retval : -EIO;
+}
+
+static irqreturn_t imx_rngc_irq(int irq, void *priv)
+{
+ struct imx_rngc *rngc = (struct imx_rngc *)priv;
+ u32 status;
+
+ /*
+	 * clearing the interrupt will also clear the error register,
+	 * so read the error and status registers before clearing
+ */
+ status = readl(rngc->base + RNGC_STATUS);
+ rngc->err_reg = readl(rngc->base + RNGC_ERROR);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (status & (RNGC_STATUS_SEED_DONE | RNGC_STATUS_ST_DONE))
+ complete(&rngc->rng_op_done);
+
+ return IRQ_HANDLED;
+}
+
+static int imx_rngc_init(struct hwrng *rng)
+{
+ struct imx_rngc *rngc = container_of(rng, struct imx_rngc, rng);
+ u32 cmd;
+ int ret;
+
+ /* clear error */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_CLR_ERR, rngc->base + RNGC_COMMAND);
+
+ /* create seed, repeat while there is some statistical error */
+ do {
+ imx_rngc_irq_unmask(rngc);
+
+ /* seed creation */
+ cmd = readl(rngc->base + RNGC_COMMAND);
+ writel(cmd | RNGC_CMD_SEED, rngc->base + RNGC_COMMAND);
+
+ ret = wait_for_completion_timeout(&rngc->rng_op_done,
+ RNGC_TIMEOUT);
+
+ if (!ret) {
+ imx_rngc_irq_mask_clear(rngc);
+ return -ETIMEDOUT;
+ }
+
+ } while (rngc->err_reg == RNGC_ERROR_STATUS_STAT_ERR);
+
+ return rngc->err_reg ? -EIO : 0;
+}
+
+static int imx_rngc_probe(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ rngc = devm_kzalloc(&pdev->dev, sizeof(*rngc), GFP_KERNEL);
+ if (!rngc)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rngc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rngc->base))
+ return PTR_ERR(rngc->base);
+
+ rngc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rngc->clk)) {
+ dev_err(&pdev->dev, "Can not get rng_clk\n");
+ return PTR_ERR(rngc->clk);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ dev_err(&pdev->dev, "Couldn't get irq %d\n", irq);
+ return irq;
+ }
+
+ ret = clk_prepare_enable(rngc->clk);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(&pdev->dev,
+ irq, imx_rngc_irq, 0, pdev->name, (void *)rngc);
+ if (ret) {
+ dev_err(rngc->dev, "Can't get interrupt working.\n");
+ goto err;
+ }
+
+ init_completion(&rngc->rng_op_done);
+
+ rngc->rng.name = pdev->name;
+ rngc->rng.init = imx_rngc_init;
+ rngc->rng.read = imx_rngc_read;
+
+ rngc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, rngc);
+
+ imx_rngc_irq_mask_clear(rngc);
+
+ if (self_test) {
+ ret = imx_rngc_self_test(rngc);
+ if (ret) {
+ dev_err(rngc->dev, "FSL RNGC self test failed.\n");
+ goto err;
+ }
+ }
+
+ ret = hwrng_register(&rngc->rng);
+ if (ret) {
+ dev_err(&pdev->dev, "FSL RNGC registering failed (%d)\n", ret);
+ goto err;
+ }
+
+ dev_info(&pdev->dev, "Freescale RNGC registered.\n");
+ return 0;
+
+err:
+ clk_disable_unprepare(rngc->clk);
+
+ return ret;
+}
+
+static int __exit imx_rngc_remove(struct platform_device *pdev)
+{
+ struct imx_rngc *rngc = platform_get_drvdata(pdev);
+
+ hwrng_unregister(&rngc->rng);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int imx_rngc_suspend(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(rngc->clk);
+
+ return 0;
+}
+
+static int imx_rngc_resume(struct device *dev)
+{
+ struct imx_rngc *rngc = dev_get_drvdata(dev);
+
+ clk_prepare_enable(rngc->clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops imx_rngc_pm_ops = {
+ .suspend = imx_rngc_suspend,
+ .resume = imx_rngc_resume,
+};
+#endif
+
+static const struct of_device_id imx_rngc_dt_ids[] = {
+ { .compatible = "fsl,imx25-rngb", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_rngc_dt_ids);
+
+static struct platform_driver imx_rngc_driver = {
+ .driver = {
+ .name = "imx_rngc",
+#ifdef CONFIG_PM
+ .pm = &imx_rngc_pm_ops,
+#endif
+ .of_match_table = imx_rngc_dt_ids,
+ },
+ .remove = __exit_p(imx_rngc_remove),
+};
+
+module_platform_driver_probe(imx_rngc_driver, imx_rngc_probe);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("H/W RNGC driver for i.MX");
+MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 985973855005..36f47e8d06a3 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2812,7 +2812,7 @@ static struct platform_driver ipmi_driver = {
};
#ifdef CONFIG_PARISC
-static int ipmi_parisc_probe(struct parisc_device *dev)
+static int __init ipmi_parisc_probe(struct parisc_device *dev)
{
struct smi_info *info;
int rv;
@@ -2850,22 +2850,24 @@ static int ipmi_parisc_probe(struct parisc_device *dev)
return 0;
}
-static int ipmi_parisc_remove(struct parisc_device *dev)
+static int __exit ipmi_parisc_remove(struct parisc_device *dev)
{
cleanup_one_si(dev_get_drvdata(&dev->dev));
return 0;
}
-static const struct parisc_device_id ipmi_parisc_tbl[] = {
+static const struct parisc_device_id ipmi_parisc_tbl[] __initconst = {
{ HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
{ 0, }
};
-static struct parisc_driver ipmi_parisc_driver = {
+MODULE_DEVICE_TABLE(parisc, ipmi_parisc_tbl);
+
+static struct parisc_driver ipmi_parisc_driver __refdata = {
.name = "ipmi",
.id_table = ipmi_parisc_tbl,
.probe = ipmi_parisc_probe,
- .remove = ipmi_parisc_remove,
+ .remove = __exit_p(ipmi_parisc_remove),
};
#endif /* CONFIG_PARISC */
diff --git a/drivers/char/mwave/smapi.c b/drivers/char/mwave/smapi.c
index 8c5411a8f33f..691f5898bb32 100644
--- a/drivers/char/mwave/smapi.c
+++ b/drivers/char/mwave/smapi.c
@@ -128,10 +128,11 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
{
int bRC = -EIO;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
PRINTK_1(TRACE_SMAPI, "smapi::smapi_query_DSP_cfg entry\n");
@@ -148,7 +149,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bDSPEnabled = ((usCX & 0x0001) != 0);
pSettings->usDspIRQ = usSI & 0x00FF;
pSettings->usDspDMA = (usSI & 0xFF00) >> 8;
- if ((usDI & 0x00FF) < numDspBases) {
+ if ((usDI & 0x00FF) < ARRAY_SIZE(ausDspBases)) {
pSettings->usDspBaseIO = ausDspBases[usDI & 0x00FF];
} else {
pSettings->usDspBaseIO = 0;
@@ -176,7 +177,7 @@ int smapi_query_DSP_cfg(SMAPI_DSP_SETTINGS * pSettings)
pSettings->bModemEnabled = ((usCX & 0x0001) != 0);
pSettings->usUartIRQ = usSI & 0x000F;
- if (((usSI & 0xFF00) >> 8) < numUartBases) {
+ if (((usSI & 0xFF00) >> 8) < ARRAY_SIZE(ausUartBases)) {
pSettings->usUartBaseIO = ausUartBases[(usSI & 0xFF00) >> 8];
} else {
pSettings->usUartBaseIO = 0;
@@ -205,15 +206,16 @@ int smapi_set_DSP_cfg(void)
int bRC = -EIO;
int i;
unsigned short usAX, usBX, usCX, usDX, usDI, usSI;
- unsigned short ausDspBases[] = { 0x0030, 0x4E30, 0x8E30, 0xCE30, 0x0130, 0x0350, 0x0070, 0x0DB0 };
- unsigned short ausUartBases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
- unsigned short ausDspIrqs[] = { 5, 7, 10, 11, 15 };
- unsigned short ausUartIrqs[] = { 3, 4 };
-
- unsigned short numDspBases = 8;
- unsigned short numUartBases = 4;
- unsigned short numDspIrqs = 5;
- unsigned short numUartIrqs = 2;
+ static const unsigned short ausDspBases[] = {
+ 0x0030, 0x4E30, 0x8E30, 0xCE30,
+ 0x0130, 0x0350, 0x0070, 0x0DB0 };
+ static const unsigned short ausUartBases[] = {
+ 0x03F8, 0x02F8, 0x03E8, 0x02E8 };
+ static const unsigned short ausDspIrqs[] = {
+ 5, 7, 10, 11, 15 };
+ static const unsigned short ausUartIrqs[] = {
+ 3, 4 };
+
unsigned short dspio_index = 0, uartio_index = 0;
PRINTK_5(TRACE_SMAPI,
@@ -221,11 +223,11 @@ int smapi_set_DSP_cfg(void)
mwave_3780i_irq, mwave_3780i_io, mwave_uart_irq, mwave_uart_io);
if (mwave_3780i_io) {
- for (i = 0; i < numDspBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspBases); i++) {
if (mwave_3780i_io == ausDspBases[i])
break;
}
- if (i == numDspBases) {
+ if (i == ARRAY_SIZE(ausDspBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_io address %x. Aborting.\n", mwave_3780i_io);
return bRC;
}
@@ -233,22 +235,22 @@ int smapi_set_DSP_cfg(void)
}
if (mwave_3780i_irq) {
- for (i = 0; i < numDspIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausDspIrqs); i++) {
if (mwave_3780i_irq == ausDspIrqs[i])
break;
}
- if (i == numDspIrqs) {
+ if (i == ARRAY_SIZE(ausDspIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_3780i_irq %x. Aborting.\n", mwave_3780i_irq);
return bRC;
}
}
if (mwave_uart_io) {
- for (i = 0; i < numUartBases; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartBases); i++) {
if (mwave_uart_io == ausUartBases[i])
break;
}
- if (i == numUartBases) {
+ if (i == ARRAY_SIZE(ausUartBases)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_io address %x. Aborting.\n", mwave_uart_io);
return bRC;
}
@@ -257,11 +259,11 @@ int smapi_set_DSP_cfg(void)
if (mwave_uart_irq) {
- for (i = 0; i < numUartIrqs; i++) {
+ for (i = 0; i < ARRAY_SIZE(ausUartIrqs); i++) {
if (mwave_uart_irq == ausUartIrqs[i])
break;
}
- if (i == numUartIrqs) {
+ if (i == ARRAY_SIZE(ausUartIrqs)) {
PRINTK_ERROR(KERN_ERR_MWAVE "smapi::smapi_set_DSP_cfg: Error: Invalid mwave_uart_irq %x. Aborting.\n", mwave_uart_irq);
return bRC;
}
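
The pattern used throughout this hunk is worth a standalone illustration: making the lookup tables static const and bounding every search with ARRAY_SIZE() removes the separate, hand-maintained element counts that could silently go stale. A generic sketch under assumed names:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static const unsigned short valid_uart_bases[] = { 0x03F8, 0x02F8, 0x03E8, 0x02E8 };

/* Return true if io matches one of the supported UART base addresses. */
static bool uart_base_is_valid(unsigned short io)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(valid_uart_bases); i++)
		if (valid_uart_bases[i] == io)
			return true;

	return false;
}
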
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 3e73bcdf9e65..d256110ba672 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -101,9 +101,6 @@ static DEFINE_IDA(ida_index);
#define PP_BUFFER_SIZE 1024
#define PARDEVICE_MAX 8
-/* ROUND_UP macro from fs/select.c */
-#define ROUND_UP(x,y) (((x)+(y)-1)/(y))
-
static DEFINE_MUTEX(pp_do_mutex);
/* define fixed sized ioctl cmd for y2038 migration */
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f4f866ee54bc..d3a979e25724 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -1491,7 +1491,7 @@ static struct platform_driver sonypi_driver = {
static struct platform_device *sonypi_platform_device;
-static struct dmi_system_id __initdata sonypi_dmi_table[] = {
+static const struct dmi_system_id sonypi_dmi_table[] __initconst = {
{
.ident = "Sony Vaio",
.matches = {
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
index 572a51704e67..6210bff46341 100644
--- a/drivers/char/tlclk.c
+++ b/drivers/char/tlclk.c
@@ -766,7 +766,7 @@ static struct attribute *tlclk_sysfs_entries[] = {
NULL
};
-static struct attribute_group tlclk_attribute_group = {
+static const struct attribute_group tlclk_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = tlclk_sysfs_entries,
};
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 67ec9d3d04f5..0eca20c5a80c 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -164,14 +164,7 @@ static int tpm_class_shutdown(struct device *dev)
chip->ops = NULL;
up_write(&chip->ops_sem);
}
- /* Allow bus- and device-specific code to run. Note: since chip->ops
- * is NULL, more-specific shutdown code will not be able to issue TPM
- * commands.
- */
- if (dev->bus && dev->bus->shutdown)
- dev->bus->shutdown(dev);
- else if (dev->driver && dev->driver->shutdown)
- dev->driver->shutdown(dev);
+
return 0;
}
@@ -214,7 +207,7 @@ struct tpm_chip *tpm_chip_alloc(struct device *pdev,
device_initialize(&chip->devs);
chip->dev.class = tpm_class;
- chip->dev.class->shutdown = tpm_class_shutdown;
+ chip->dev.class->shutdown_pre = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
chip->dev.parent = pdev;
chip->dev.groups = chip->groups;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index ad843eb02ae7..d1aed2513bd9 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -451,9 +451,6 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
* device is created by remoteproc, the DMA memory is
* associated with the grandparent device:
* vdev => rproc => platform-dev.
- * The code here would have been less quirky if
- * DMA_MEMORY_INCLUDES_CHILDREN had been supported
- * in dma-coherent.c
*/
if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
goto free_buf;
@@ -1130,7 +1127,7 @@ static const struct file_operations port_fops = {
* We turn the characters into a scatter-gather list, add it to the
* output queue and then kick the Host. Then we sit here waiting for
* it to finish: inefficient in theory, but in practice
- * implementations will do it immediately (lguest's Launcher does).
+ * implementations will do it immediately.
*/
static int put_chars(u32 vtermno, const char *buf, int count)
{
@@ -1308,7 +1305,7 @@ static struct attribute *port_sysfs_entries[] = {
NULL
};
-static struct attribute_group port_attribute_group = {
+static const struct attribute_group port_attribute_group = {
.name = NULL, /* put in device directory */
.attrs = port_sysfs_entries,
};
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 3e6b23c3453c..067396bedf22 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -86,8 +86,7 @@
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
#include <linux/uaccess.h>
#ifdef CONFIG_OF
@@ -222,6 +221,8 @@ static const struct config_registers v6_config_registers = {
* hwicap_command_desync - Send a DESYNC command to the ICAP port.
* @drvdata: a pointer to the drvdata.
*
+ * Returns: '0' on success and failure value on error
+ *
 * This command desynchronizes the ICAP. After this command, a
* bitstream containing a NULL packet, followed by a SYNCH packet is
* required before the ICAP will recognize commands.
@@ -251,10 +252,12 @@ static int hwicap_command_desync(struct hwicap_drvdata *drvdata)
* hwicap_get_configuration_register - Query a configuration register.
* @drvdata: a pointer to the drvdata.
* @reg: a constant which represents the configuration
- * register value to be returned.
- * Examples: XHI_IDCODE, XHI_FLR.
+ * register value to be returned.
+ * Examples: XHI_IDCODE, XHI_FLR.
* @reg_data: returns the value of the register.
*
+ * Returns: '0' on success and failure value on error
+ *
* Sends a query packet to the ICAP and then receives the response.
* The icap is left in Synched state.
*/
@@ -320,7 +323,8 @@ static int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
dev_dbg(drvdata->dev, "initializing\n");
/* Abort any current transaction, to make sure we have the
- * ICAP in a good state. */
+ * ICAP in a good state.
+ */
dev_dbg(drvdata->dev, "Reset...\n");
drvdata->config->reset(drvdata);
@@ -632,7 +636,6 @@ static int hwicap_setup(struct device *dev, int id,
drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
if (!drvdata) {
- dev_err(dev, "Couldn't allocate device private record\n");
retval = -ENOMEM;
goto failed0;
}
@@ -759,20 +762,20 @@ static int hwicap_of_probe(struct platform_device *op,
id = of_get_property(op->dev.of_node, "port-number", NULL);
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = of_get_property(op->dev.of_node, "xlnx,family", NULL);
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&op->dev, id ? *id : -1, &res, config,
regs);
@@ -802,20 +805,20 @@ static int hwicap_drv_probe(struct platform_device *pdev)
return -ENODEV;
/* It's most likely that we're using V4, if the family is not
- specified */
+ * specified
+ */
regs = &v4_config_registers;
family = pdev->dev.platform_data;
if (family) {
- if (!strcmp(family, "virtex2p")) {
+ if (!strcmp(family, "virtex2p"))
regs = &v2_config_registers;
- } else if (!strcmp(family, "virtex4")) {
+ else if (!strcmp(family, "virtex4"))
regs = &v4_config_registers;
- } else if (!strcmp(family, "virtex5")) {
+ else if (!strcmp(family, "virtex5"))
regs = &v5_config_registers;
- } else if (!strcmp(family, "virtex6")) {
+ else if (!strcmp(family, "virtex6"))
regs = &v6_config_registers;
- }
}
return hwicap_setup(&pdev->dev, pdev->id, res,
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.h b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
index 38b145eaf24d..6b963d1c8ba3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.h
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.h
@@ -62,11 +62,13 @@ struct hwicap_drvdata {
struct hwicap_driver_config {
/* Read configuration data given by size into the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*get_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Write configuration data given by size from the data buffer.
- Return 0 if successful. */
+ * Return 0 if successful.
+ */
int (*set_configuration)(struct hwicap_drvdata *drvdata, u32 *data,
u32 size);
/* Get the status register, bit pattern given by:
@@ -193,11 +195,12 @@ struct config_registers {
* hwicap_type_1_read - Generates a Type 1 read packet header.
* @reg: is the address of the register to be read back.
*
+ * Return:
* Generates a Type 1 read packet header, which is used to indirectly
* read registers in the configuration logic. This packet must then
* be sent through the icap device, and a return packet received with
* the information.
- **/
+ */
static inline u32 hwicap_type_1_read(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
@@ -208,7 +211,9 @@ static inline u32 hwicap_type_1_read(u32 reg)
/**
* hwicap_type_1_write - Generates a Type 1 write packet header
* @reg: is the address of the register to be read back.
- **/
+ *
+ * Return: Type 1 write packet header
+ */
static inline u32 hwicap_type_1_write(u32 reg)
{
return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 68ca2d9fcd73..1c4e1aa6767e 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -31,6 +31,13 @@ config COMMON_CLK_WM831X
source "drivers/clk/versatile/Kconfig"
+config CLK_HSDK
+ bool "PLL Driver for HSDK platform"
+ depends on OF || COMPILE_TEST
+ ---help---
+	  This driver supports control of the HSDK core, system, ddr, tunnel
+	  and hdmi PLLs.
+
config COMMON_CLK_MAX77686
tristate "Clock driver for Maxim 77620/77686/77802 MFD"
depends on MFD_MAX77686 || MFD_MAX77620 || COMPILE_TEST
@@ -39,10 +46,10 @@ config COMMON_CLK_MAX77686
clock.
config COMMON_CLK_RK808
- tristate "Clock driver for RK808/RK818"
+ tristate "Clock driver for RK805/RK808/RK818"
depends on MFD_RK808
---help---
- This driver supports RK808 and RK818 crystal oscillator clock. These
+ This driver supports RK805, RK808 and RK818 crystal oscillator clock. These
multi-function devices have two fixed-rate oscillators,
clocked at 32KHz each. Clkout1 is always on, Clkout2 can off
by control register.
@@ -210,14 +217,14 @@ config COMMON_CLK_OXNAS
Support for the OXNAS SoC Family clocks.
config COMMON_CLK_VC5
- tristate "Clock driver for IDT VersaClock5 devices"
+ tristate "Clock driver for IDT VersaClock 5,6 devices"
depends on I2C
depends on OF
select REGMAP_I2C
help
---help---
- This driver supports the IDT VersaClock5 programmable clock
- generator.
+ This driver supports the IDT VersaClock 5 and VersaClock 6
+ programmable clock generators.
source "drivers/clk/bcm/Kconfig"
source "drivers/clk/hisilicon/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index cd376b3fb47a..c99f363826f0 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -27,8 +27,8 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
+obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
-obj-$(CONFIG_ARCH_MB86S7X) += clk-mb86s7x.o
obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o
obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o
obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o
@@ -44,6 +44,7 @@ obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o
obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
+obj-$(CONFIG_ARCH_STM32) += clk-stm32h7.o
obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile
index 13e67bd35cff..c68947b65a4c 100644
--- a/drivers/clk/at91/Makefile
+++ b/drivers/clk/at91/Makefile
@@ -6,6 +6,7 @@ obj-y += pmc.o sckc.o
obj-y += clk-slow.o clk-main.o clk-pll.o clk-plldiv.o clk-master.o
obj-y += clk-system.o clk-peripheral.o clk-programmable.o
+obj-$(CONFIG_HAVE_AT91_AUDIO_PLL) += clk-audio-pll.o
obj-$(CONFIG_HAVE_AT91_UTMI) += clk-utmi.o
obj-$(CONFIG_HAVE_AT91_USB_CLK) += clk-usb.o
obj-$(CONFIG_HAVE_AT91_SMD) += clk-smd.o
diff --git a/drivers/clk/at91/clk-audio-pll.c b/drivers/clk/at91/clk-audio-pll.c
new file mode 100644
index 000000000000..da7bafcfbe70
--- /dev/null
+++ b/drivers/clk/at91/clk-audio-pll.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2016 Atmel Corporation,
+ * Songjun Wu <songjun.wu@atmel.com>,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ * Copyright (C) 2017 Free Electrons,
+ * Quentin Schulz <quentin.schulz@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The Sama5d2 SoC has two audio PLLs (PMC and PAD) that share the same parent
+ * (FRAC). FRAC can output between 620 and 700MHz and only multiplies the rate
+ * of its own parent. PMC and PAD can then divide the FRAC rate to best match
+ * the requested rate.
+ *
+ * Traits of FRAC clock:
+ * enable - clk_enable writes nd, fracr parameters and enables PLL
+ * rate - rate is adjustable.
+ * clk->rate = parent->rate * ((nd + 1) + (fracr / 2^22))
+ * parent - fixed parent. No clk_set_parent support
+ *
+ * Traits of PMC clock:
+ * enable - clk_enable writes qdpmc, and enables PMC output
+ * rate - rate is adjustable.
+ * clk->rate = parent->rate / (qdpmc + 1)
+ * parent - fixed parent. No clk_set_parent support
+ *
+ * Traits of PAD clock:
+ * enable - clk_enable writes divisors and enables PAD output
+ * rate - rate is adjustable.
+ * clk->rate = parent->rate / (qdaudio * div))
+ * parent - fixed parent. No clk_set_parent support
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/at91_pmc.h>
+#include <linux/of.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define AUDIO_PLL_DIV_FRAC BIT(22)
+#define AUDIO_PLL_ND_MAX (AT91_PMC_AUDIO_PLL_ND_MASK >> \
+ AT91_PMC_AUDIO_PLL_ND_OFFSET)
+
+#define AUDIO_PLL_QDPAD(qd, div) ((AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV(qd) & \
+ AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MASK) | \
+ (AT91_PMC_AUDIO_PLL_QDPAD_DIV(div) & \
+ AT91_PMC_AUDIO_PLL_QDPAD_DIV_MASK))
+
+#define AUDIO_PLL_QDPMC_MAX (AT91_PMC_AUDIO_PLL_QDPMC_MASK >> \
+ AT91_PMC_AUDIO_PLL_QDPMC_OFFSET)
+
+#define AUDIO_PLL_FOUT_MIN 620000000UL
+#define AUDIO_PLL_FOUT_MAX 700000000UL
+
+struct clk_audio_frac {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 fracr;
+ u8 nd;
+};
+
+struct clk_audio_pad {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 qdaudio;
+ u8 div;
+};
+
+struct clk_audio_pmc {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u8 qdpmc;
+};
+
+#define to_clk_audio_frac(hw) container_of(hw, struct clk_audio_frac, hw)
+#define to_clk_audio_pad(hw) container_of(hw, struct clk_audio_pad, hw)
+#define to_clk_audio_pmc(hw) container_of(hw, struct clk_audio_pmc, hw)
+
+static int clk_audio_pll_frac_enable(struct clk_hw *hw)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_RESETN, 0);
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_RESETN,
+ AT91_PMC_AUDIO_PLL_RESETN);
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL1,
+ AT91_PMC_AUDIO_PLL_FRACR_MASK, frac->fracr);
+
+ /*
+ * reset and enable have to be done in 2 separated writes
+ * for AT91_PMC_AUDIO_PLL0
+ */
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PLLEN |
+ AT91_PMC_AUDIO_PLL_ND_MASK,
+ AT91_PMC_AUDIO_PLL_PLLEN |
+ AT91_PMC_AUDIO_PLL_ND(frac->nd));
+
+ return 0;
+}
+
+static int clk_audio_pll_pad_enable(struct clk_hw *hw)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+
+ regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL1,
+ AT91_PMC_AUDIO_PLL_QDPAD_MASK,
+ AUDIO_PLL_QDPAD(apad_ck->qdaudio, apad_ck->div));
+ regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PADEN, AT91_PMC_AUDIO_PLL_PADEN);
+
+ return 0;
+}
+
+static int clk_audio_pll_pmc_enable(struct clk_hw *hw)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+
+ regmap_update_bits(apmc_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PMCEN |
+ AT91_PMC_AUDIO_PLL_QDPMC_MASK,
+ AT91_PMC_AUDIO_PLL_PMCEN |
+ AT91_PMC_AUDIO_PLL_QDPMC(apmc_ck->qdpmc));
+ return 0;
+}
+
+static void clk_audio_pll_frac_disable(struct clk_hw *hw)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PLLEN, 0);
+ /* do it in 2 separated writes */
+ regmap_update_bits(frac->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_RESETN, 0);
+}
+
+static void clk_audio_pll_pad_disable(struct clk_hw *hw)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+
+ regmap_update_bits(apad_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PADEN, 0);
+}
+
+static void clk_audio_pll_pmc_disable(struct clk_hw *hw)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+
+ regmap_update_bits(apmc_ck->regmap, AT91_PMC_AUDIO_PLL0,
+ AT91_PMC_AUDIO_PLL_PMCEN, 0);
+}
+
+static unsigned long clk_audio_pll_fout(unsigned long parent_rate,
+ unsigned long nd, unsigned long fracr)
+{
+ unsigned long long fr = (unsigned long long)parent_rate * fracr;
+
+ pr_debug("A PLL: %s, fr = %llu\n", __func__, fr);
+
+ fr = DIV_ROUND_CLOSEST_ULL(fr, AUDIO_PLL_DIV_FRAC);
+
+ pr_debug("A PLL: %s, fr = %llu\n", __func__, fr);
+
+ return parent_rate * (nd + 1) + fr;
+}
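
A worked example may help anchor the fixed-point arithmetic in clk_audio_pll_fout() above and clk_audio_pll_frac_compute_frac() further down; the 12 MHz parent rate is only an assumed input for illustration:

/*
 * Worked example (assumed parent_rate = 12 MHz, requested rate = 650 MHz):
 *
 *   650000000 / 12000000                     -> nd + 1 = 54, remainder = 2000000
 *   fracr = round(2000000 * 2^22 / 12000000)  = 699051
 *   fout  = 12000000 * 54 + round(12000000 * 699051 / 2^22)
 *         = 648000000 + 2000001               = 650000001 Hz
 *
 * i.e. within 1 Hz of the requested rate, and inside the documented
 * 620..700 MHz FRAC output window.
 */
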
+
+static unsigned long clk_audio_pll_frac_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+ unsigned long fout;
+
+ fout = clk_audio_pll_fout(parent_rate, frac->nd, frac->fracr);
+
+ pr_debug("A PLL: %s, fout = %lu (nd = %u, fracr = %lu)\n", __func__,
+ fout, frac->nd, (unsigned long)frac->fracr);
+
+ return fout;
+}
+
+static unsigned long clk_audio_pll_pad_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+ unsigned long apad_rate = 0;
+
+ if (apad_ck->qdaudio && apad_ck->div)
+ apad_rate = parent_rate / (apad_ck->qdaudio * apad_ck->div);
+
+ pr_debug("A PLL/PAD: %s, apad_rate = %lu (div = %u, qdaudio = %u)\n",
+ __func__, apad_rate, apad_ck->div, apad_ck->qdaudio);
+
+ return apad_rate;
+}
+
+static unsigned long clk_audio_pll_pmc_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+ unsigned long apmc_rate = 0;
+
+ apmc_rate = parent_rate / (apmc_ck->qdpmc + 1);
+
+ pr_debug("A PLL/PMC: %s, apmc_rate = %lu (qdpmc = %u)\n", __func__,
+ apmc_rate, apmc_ck->qdpmc);
+
+ return apmc_rate;
+}
+
+static int clk_audio_pll_frac_compute_frac(unsigned long rate,
+ unsigned long parent_rate,
+ unsigned long *nd,
+ unsigned long *fracr)
+{
+ unsigned long long tmp, rem;
+
+ if (!rate)
+ return -EINVAL;
+
+ tmp = rate;
+ rem = do_div(tmp, parent_rate);
+ if (!tmp || tmp >= AUDIO_PLL_ND_MAX)
+ return -EINVAL;
+
+ *nd = tmp - 1;
+
+ tmp = rem * AUDIO_PLL_DIV_FRAC;
+ tmp = DIV_ROUND_CLOSEST_ULL(tmp, parent_rate);
+ if (tmp > AT91_PMC_AUDIO_PLL_FRACR_MASK)
+ return -EINVAL;
+
+ /* we can cast here as we verified the bounds just above */
+ *fracr = (unsigned long)tmp;
+
+ return 0;
+}
+
+static int clk_audio_pll_frac_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ unsigned long fracr, nd;
+ int ret;
+
+ pr_debug("A PLL: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ req->rate, req->best_parent_rate);
+
+ req->rate = clamp(req->rate, AUDIO_PLL_FOUT_MIN, AUDIO_PLL_FOUT_MAX);
+
+ req->min_rate = max(req->min_rate, AUDIO_PLL_FOUT_MIN);
+ req->max_rate = min(req->max_rate, AUDIO_PLL_FOUT_MAX);
+
+ ret = clk_audio_pll_frac_compute_frac(req->rate, req->best_parent_rate,
+ &nd, &fracr);
+ if (ret)
+ return ret;
+
+ req->rate = clk_audio_pll_fout(req->best_parent_rate, nd, fracr);
+
+ req->best_parent_hw = clk_hw_get_parent(hw);
+
+ pr_debug("A PLL: %s, best_rate = %lu (nd = %lu, fracr = %lu)\n",
+ __func__, req->rate, nd, fracr);
+
+ return 0;
+}
+
+static long clk_audio_pll_pad_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw *pclk = clk_hw_get_parent(hw);
+ long best_rate = -EINVAL;
+ unsigned long best_parent_rate;
+ unsigned long tmp_qd;
+ u32 div;
+ long tmp_rate;
+ int tmp_diff;
+ int best_diff = -1;
+
+ pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, *parent_rate);
+
+ /*
+ * The rate divisor is actually made of two different divisors, multiplied
+ * together before dividing the rate.
+ * tmp_qd goes from 1 to 31 and div is either 2 or 3.
+ * In order to avoid testing the same rate divisor twice (e.g. divisor 12 can
+ * be reached with (tmp_qd, div) = (6, 2) or (4, 3)), we skip the inner
+ * iteration when div is 2 and tmp_qd is a multiple of 3.
+ * We cannot invert the rule (skip when div is 3 and tmp_qd is even), or we
+ * would miss rate divisors that are only reachable with div equal to 3
+ * (e.g. rate divisor 90 is made with div = 3 and tmp_qd = 30; tmp_qd is
+ * even, so we would wrongly assume div = 2 could provide it, which is
+ * impossible since tmp_qd has to be <= 31).
+ */
+ for (tmp_qd = 1; tmp_qd < AT91_PMC_AUDIO_PLL_QDPAD_EXTDIV_MAX; tmp_qd++)
+ for (div = 2; div <= 3; div++) {
+ if (div == 2 && tmp_qd % 3 == 0)
+ continue;
+
+ best_parent_rate = clk_hw_round_rate(pclk,
+ rate * tmp_qd * div);
+ tmp_rate = best_parent_rate / (div * tmp_qd);
+ tmp_diff = abs(rate - tmp_rate);
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ *parent_rate = best_parent_rate;
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ }
+ }
+
+ pr_debug("A PLL/PAD: %s, best_rate = %ld, best_parent_rate = %lu\n",
+ __func__, best_rate, best_parent_rate);
+
+ return best_rate;
+}
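
As a worked example of the search above (assuming the SAMA5D2 audio PLL output window of roughly 620-700 MHz and a 12 MHz main clock): for rate = 12288000, the pair tmp_qd = 17, div = 3 asks the fractional PLL for 12288000 * 51 = 626688000 Hz, which the PLL can generate inside that window, so tmp_rate comes back as exactly 12288000 and best_diff drops to 0; a pair such as tmp_qd = 13, div = 2 would need a 319 MHz parent, which the PLL cannot produce, so it loses on best_diff.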
+
+static long clk_audio_pll_pmc_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_hw *pclk = clk_hw_get_parent(hw);
+ long best_rate = -EINVAL;
+ unsigned long best_parent_rate = 0;
+ u32 tmp_qd = 0, div;
+ long tmp_rate;
+ int tmp_diff;
+ int best_diff = -1;
+
+ pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, *parent_rate);
+
+ for (div = 1; div <= AUDIO_PLL_QDPMC_MAX; div++) {
+ best_parent_rate = clk_round_rate(pclk->clk, rate * div);
+ tmp_rate = best_parent_rate / div;
+ tmp_diff = abs(rate - tmp_rate);
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ *parent_rate = best_parent_rate;
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ tmp_qd = div;
+ }
+ }
+
+ pr_debug("A PLL/PMC: %s, best_rate = %ld, best_parent_rate = %lu (qd = %d)\n",
+ __func__, best_rate, *parent_rate, tmp_qd - 1);
+
+ return best_rate;
+}
+
+static int clk_audio_pll_frac_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_audio_frac *frac = to_clk_audio_frac(hw);
+ unsigned long fracr, nd;
+ int ret;
+
+ pr_debug("A PLL: %s, rate = %lu (parent_rate = %lu)\n", __func__, rate,
+ parent_rate);
+
+ if (rate < AUDIO_PLL_FOUT_MIN || rate > AUDIO_PLL_FOUT_MAX)
+ return -EINVAL;
+
+ ret = clk_audio_pll_frac_compute_frac(rate, parent_rate, &nd, &fracr);
+ if (ret)
+ return ret;
+
+ frac->nd = nd;
+ frac->fracr = fracr;
+
+ return 0;
+}
+
+static int clk_audio_pll_pad_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pad *apad_ck = to_clk_audio_pad(hw);
+ u8 tmp_div;
+
+ pr_debug("A PLL/PAD: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, parent_rate);
+
+ if (!rate)
+ return -EINVAL;
+
+ tmp_div = parent_rate / rate;
+ if (tmp_div % 3 == 0) {
+ apad_ck->qdaudio = tmp_div / 3;
+ apad_ck->div = 3;
+ } else {
+ apad_ck->qdaudio = tmp_div / 2;
+ apad_ck->div = 2;
+ }
+
+ return 0;
+}
+
+static int clk_audio_pll_pmc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_audio_pmc *apmc_ck = to_clk_audio_pmc(hw);
+
+ if (!rate)
+ return -EINVAL;
+
+ pr_debug("A PLL/PMC: %s, rate = %lu (parent_rate = %lu)\n", __func__,
+ rate, parent_rate);
+
+ apmc_ck->qdpmc = parent_rate / rate - 1;
+
+ return 0;
+}
+
+static const struct clk_ops audio_pll_frac_ops = {
+ .enable = clk_audio_pll_frac_enable,
+ .disable = clk_audio_pll_frac_disable,
+ .recalc_rate = clk_audio_pll_frac_recalc_rate,
+ .determine_rate = clk_audio_pll_frac_determine_rate,
+ .set_rate = clk_audio_pll_frac_set_rate,
+};
+
+static const struct clk_ops audio_pll_pad_ops = {
+ .enable = clk_audio_pll_pad_enable,
+ .disable = clk_audio_pll_pad_disable,
+ .recalc_rate = clk_audio_pll_pad_recalc_rate,
+ .round_rate = clk_audio_pll_pad_round_rate,
+ .set_rate = clk_audio_pll_pad_set_rate,
+};
+
+static const struct clk_ops audio_pll_pmc_ops = {
+ .enable = clk_audio_pll_pmc_enable,
+ .disable = clk_audio_pll_pmc_disable,
+ .recalc_rate = clk_audio_pll_pmc_recalc_rate,
+ .round_rate = clk_audio_pll_pmc_round_rate,
+ .set_rate = clk_audio_pll_pmc_set_rate,
+};
+
+static int of_sama5d2_clk_audio_pll_setup(struct device_node *np,
+ struct clk_init_data *init,
+ struct clk_hw *hw,
+ struct regmap **clk_audio_regmap)
+{
+ struct regmap *regmap;
+ const char *parent_names[1];
+ int ret;
+
+ regmap = syscon_node_to_regmap(of_get_parent(np));
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ init->name = np->name;
+ of_clk_parent_fill(np, parent_names, 1);
+ init->parent_names = parent_names;
+ init->num_parents = 1;
+
+ hw->init = init;
+ *clk_audio_regmap = regmap;
+
+ ret = clk_hw_register(NULL, hw);
+ if (ret)
+ return ret;
+
+ return of_clk_add_hw_provider(np, of_clk_hw_simple_get, hw);
+}
+
+static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
+{
+ struct clk_audio_frac *frac_ck;
+ struct clk_init_data init = {};
+
+ frac_ck = kzalloc(sizeof(*frac_ck), GFP_KERNEL);
+ if (!frac_ck)
+ return;
+
+ init.ops = &audio_pll_frac_ops;
+ init.flags = CLK_SET_RATE_GATE;
+
+ if (of_sama5d2_clk_audio_pll_setup(np, &init, &frac_ck->hw,
+ &frac_ck->regmap))
+ kfree(frac_ck);
+}
+
+static void __init of_sama5d2_clk_audio_pll_pad_setup(struct device_node *np)
+{
+ struct clk_audio_pad *apad_ck;
+ struct clk_init_data init = {};
+
+ apad_ck = kzalloc(sizeof(*apad_ck), GFP_KERNEL);
+ if (!apad_ck)
+ return;
+
+ init.ops = &audio_pll_pad_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
+
+ if (of_sama5d2_clk_audio_pll_setup(np, &init, &apad_ck->hw,
+ &apad_ck->regmap))
+ kfree(apad_ck);
+}
+
+static void __init of_sama5d2_clk_audio_pll_pmc_setup(struct device_node *np)
+{
+ struct clk_audio_pmc *apmc_ck;
+ struct clk_init_data init = {};
+
+ apmc_ck = kzalloc(sizeof(*apmc_ck), GFP_KERNEL);
+ if (!apmc_ck)
+ return;
+
+ init.ops = &audio_pll_pmc_ops;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
+
+ if (of_sama5d2_clk_audio_pll_setup(np, &init, &apmc_ck->hw,
+ &apmc_ck->regmap))
+ kfree(apmc_ck);
+}
+
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_frac_setup,
+ "atmel,sama5d2-clk-audio-pll-frac",
+ of_sama5d2_clk_audio_pll_frac_setup);
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pad_setup,
+ "atmel,sama5d2-clk-audio-pll-pad",
+ of_sama5d2_clk_audio_pll_pad_setup);
+CLK_OF_DECLARE(of_sama5d2_clk_audio_pll_pmc_setup,
+ "atmel,sama5d2-clk-audio-pll-pmc",
+ of_sama5d2_clk_audio_pll_pmc_setup);
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index f0b7ae904ce2..33481368740e 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -26,6 +26,13 @@
#define GENERATED_SOURCE_MAX 6
#define GENERATED_MAX_DIV 255
+#define GCK_ID_SSC0 43
+#define GCK_ID_SSC1 44
+#define GCK_ID_I2S0 54
+#define GCK_ID_I2S1 55
+#define GCK_ID_CLASSD 59
+#define GCK_INDEX_DT_AUDIO_PLL 5
+
struct clk_generated {
struct clk_hw hw;
struct regmap *regmap;
@@ -34,6 +41,7 @@ struct clk_generated {
u32 id;
u32 gckdiv;
u8 parent_id;
+ bool audio_pll_allowed;
};
#define to_clk_generated(hw) \
@@ -99,21 +107,41 @@ clk_generated_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, gck->gckdiv + 1);
}
+static void clk_generated_best_diff(struct clk_rate_request *req,
+ struct clk_hw *parent,
+ unsigned long parent_rate, u32 div,
+ int *best_diff, long *best_rate)
+{
+ unsigned long tmp_rate;
+ int tmp_diff;
+
+ if (!div)
+ tmp_rate = parent_rate;
+ else
+ tmp_rate = parent_rate / div;
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff > tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+}
+
static int clk_generated_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
struct clk_generated *gck = to_clk_generated(hw);
struct clk_hw *parent = NULL;
+ struct clk_rate_request req_parent = *req;
long best_rate = -EINVAL;
- unsigned long tmp_rate, min_rate;
+ unsigned long min_rate, parent_rate;
int best_diff = -1;
- int tmp_diff;
int i;
+ u32 div;
- for (i = 0; i < clk_hw_get_num_parents(hw); i++) {
- u32 div;
- unsigned long parent_rate;
-
+ for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) {
parent = clk_hw_get_parent_by_index(hw, i);
if (!parent)
continue;
@@ -124,25 +152,43 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
(gck->range.max && min_rate > gck->range.max))
continue;
- for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
- tmp_rate = DIV_ROUND_CLOSEST(parent_rate, div);
- tmp_diff = abs(req->rate - tmp_rate);
+ div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
- if (best_diff < 0 || best_diff > tmp_diff) {
- best_rate = tmp_rate;
- best_diff = tmp_diff;
- req->best_parent_rate = parent_rate;
- req->best_parent_hw = parent;
- }
+ clk_generated_best_diff(req, parent, parent_rate, div,
+ &best_diff, &best_rate);
- if (!best_diff || tmp_rate < req->rate)
- break;
- }
+ if (!best_diff)
+ break;
+ }
+
+ /*
+ * The audio_pll rate can be modified, unlike the five other clocks, which
+ * should never be altered.
+ * The audio_pll can technically be used by multiple consumers. However,
+ * with the rate locking, the first consumer to enable the clock is the one
+ * that definitely sets its rate.
+ * Since audio IPs are most likely to request the same rate, we enforce
+ * that the only clks able to modify the gck rate are those of audio IPs.
+ */
+
+ if (!gck->audio_pll_allowed)
+ goto end;
+
+ parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL);
+ if (!parent)
+ goto end;
+
+ for (div = 1; div < GENERATED_MAX_DIV + 2; div++) {
+ req_parent.rate = req->rate * div;
+ __clk_determine_rate(parent, &req_parent);
+ clk_generated_best_diff(req, parent, req_parent.rate, div,
+ &best_diff, &best_rate);
if (!best_diff)
break;
}
+end:
pr_debug("GCLK: %s, best_rate = %ld, parent clk: %s @ %ld\n",
__func__, best_rate,
__clk_get_name((req->best_parent_hw)->clk),
@@ -252,7 +298,8 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
init.ops = &generated_ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;
gck->id = id;
gck->hw.init = &init;
@@ -284,6 +331,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
struct device_node *gcknp;
struct clk_range range = CLK_RANGE(0, 0);
struct regmap *regmap;
+ struct clk_generated *gck;
num_parents = of_clk_get_parent_count(np);
if (num_parents == 0 || num_parents > GENERATED_SOURCE_MAX)
@@ -315,6 +363,21 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np)
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, name,
parent_names, num_parents,
id, &range);
+
+ if (!IS_ERR(hw)) {
+ /* hw may be an error pointer; only dereference it on success */
+ gck = to_clk_generated(hw);
+
+ gck->audio_pll_allowed =
+ of_device_is_compatible(np, "atmel,sama5d2-clk-generated") &&
+ (gck->id == GCK_ID_SSC0 || gck->id == GCK_ID_SSC1 ||
+ gck->id == GCK_ID_I2S0 || gck->id == GCK_ID_I2S1 ||
+ gck->id == GCK_ID_CLASSD);
+ }
+
if (IS_ERR(hw))
continue;
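
For the generated clocks themselves, a short worked example of the two search phases in the determine_rate hunk above (hypothetical consumer, assuming a fixed 98.304 MHz parent alongside the audio PLL): a request for 12288000 Hz against the fixed parent gives div = DIV_ROUND_CLOSEST(98304000, 12288000) = 8 and an exact match in clk_generated_best_diff(); when audio_pll_allowed is set, the second loop additionally asks the audio PLL chain, via __clk_determine_rate(), whether it can supply rate * div for each candidate divider, and keeps whichever combination minimises best_diff.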
diff --git a/drivers/clk/axs10x/Makefile b/drivers/clk/axs10x/Makefile
index 01996b871b06..d747deafbf1e 100644
--- a/drivers/clk/axs10x/Makefile
+++ b/drivers/clk/axs10x/Makefile
@@ -1 +1,2 @@
obj-y += i2s_pll_clock.o
+obj-y += pll_clock.o
diff --git a/drivers/clk/axs10x/pll_clock.c b/drivers/clk/axs10x/pll_clock.c
new file mode 100644
index 000000000000..25d8c240ddfb
--- /dev/null
+++ b/drivers/clk/axs10x/pll_clock.c
@@ -0,0 +1,346 @@
+/*
+ * Synopsys AXS10X SDP Generic PLL clock driver
+ *
+ * Copyright (C) 2017 Synopsys
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+/* PLL registers addresses */
+#define PLL_REG_IDIV 0x0
+#define PLL_REG_FBDIV 0x4
+#define PLL_REG_ODIV 0x8
+
+/*
+ * Bit fields of the PLL IDIV/FBDIV/ODIV registers:
+ * ________________________________________________________________________
+ * |31                15|    14    |   13   |  12  |11         6|5         0|
+ * |------RESERVED------|-NOUPDATE-|-BYPASS-|-EDGE-|--HIGHTIME--|--LOWTIME--|
+ * |____________________|__________|________|______|____________|___________|
+ *
+ * The following macros define how these registers are accessed.
+ * They should only be set up through these macros.
+ * reg should be a u32 variable.
+ */
+
+#define PLL_REG_GET_LOW(reg) \
+ (((reg) & (0x3F << 0)) >> 0)
+#define PLL_REG_GET_HIGH(reg) \
+ (((reg) & (0x3F << 6)) >> 6)
+#define PLL_REG_GET_EDGE(reg) \
+ (((reg) & (BIT(12))) ? 1 : 0)
+#define PLL_REG_GET_BYPASS(reg) \
+ (((reg) & (BIT(13))) ? 1 : 0)
+#define PLL_REG_GET_NOUPD(reg) \
+ (((reg) & (BIT(14))) ? 1 : 0)
+#define PLL_REG_GET_PAD(reg) \
+ (((reg) & (0x1FFFF << 15)) >> 15)
+
+#define PLL_REG_SET_LOW(reg, value) \
+ { reg |= (((value) & 0x3F) << 0); }
+#define PLL_REG_SET_HIGH(reg, value) \
+ { reg |= (((value) & 0x3F) << 6); }
+#define PLL_REG_SET_EDGE(reg, value) \
+ { reg |= (((value) & 0x01) << 12); }
+#define PLL_REG_SET_BYPASS(reg, value) \
+ { reg |= (((value) & 0x01) << 13); }
+#define PLL_REG_SET_NOUPD(reg, value) \
+ { reg |= (((value) & 0x01) << 14); }
+#define PLL_REG_SET_PAD(reg, value) \
+ { reg |= (((value) & 0x1FFFF) << 15); }
+
+#define PLL_LOCK BIT(0)
+#define PLL_ERROR BIT(1)
+#define PLL_MAX_LOCK_TIME 100 /* 100 us */
+
+struct axs10x_pll_cfg {
+ u32 rate;
+ u32 idiv;
+ u32 fbdiv;
+ u32 odiv;
+};
+
+static const struct axs10x_pll_cfg arc_pll_cfg[] = {
+ { 33333333, 1, 1, 1 },
+ { 50000000, 1, 30, 20 },
+ { 75000000, 2, 45, 10 },
+ { 90000000, 2, 54, 10 },
+ { 100000000, 1, 30, 10 },
+ { 125000000, 2, 45, 6 },
+ {}
+};
+
+static const struct axs10x_pll_cfg pgu_pll_cfg[] = {
+ { 25200000, 1, 84, 90 },
+ { 50000000, 1, 100, 54 },
+ { 74250000, 1, 44, 16 },
+ {}
+};
+
+struct axs10x_pll_clk {
+ struct clk_hw hw;
+ void __iomem *base;
+ void __iomem *lock;
+ const struct axs10x_pll_cfg *pll_cfg;
+ struct device *dev;
+};
+
+static inline void axs10x_pll_write(struct axs10x_pll_clk *clk, u32 reg,
+ u32 val)
+{
+ iowrite32(val, clk->base + reg);
+}
+
+static inline u32 axs10x_pll_read(struct axs10x_pll_clk *clk, u32 reg)
+{
+ return ioread32(clk->base + reg);
+}
+
+static inline struct axs10x_pll_clk *to_axs10x_pll_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct axs10x_pll_clk, hw);
+}
+
+static inline u32 axs10x_div_get_value(u32 reg)
+{
+ if (PLL_REG_GET_BYPASS(reg))
+ return 1;
+
+ return PLL_REG_GET_HIGH(reg) + PLL_REG_GET_LOW(reg);
+}
+
+static inline u32 axs10x_encode_div(unsigned int id, int upd)
+{
+ u32 div = 0;
+
+ PLL_REG_SET_LOW(div, (id % 2 == 0) ? id >> 1 : (id >> 1) + 1);
+ PLL_REG_SET_HIGH(div, id >> 1);
+ PLL_REG_SET_EDGE(div, id % 2);
+ PLL_REG_SET_BYPASS(div, id == 1 ? 1 : 0);
+ PLL_REG_SET_NOUPD(div, upd == 0 ? 1 : 0);
+
+ return div;
+}
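
A quick worked example of the encoding helpers, following directly from the macros above: axs10x_encode_div(30, 0) gives LOWTIME = 15, HIGHTIME = 15, EDGE = 0, BYPASS = 0 and NOUPDATE = 1, i.e. 0x43cf, and axs10x_div_get_value() of that word returns 15 + 15 = 30 again; for id = 1 the BYPASS bit is set instead, so the divider reads back as 1.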
+
+static unsigned long axs10x_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u64 rate;
+ u32 idiv, fbdiv, odiv;
+ struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
+
+ idiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_IDIV));
+ fbdiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_FBDIV));
+ odiv = axs10x_div_get_value(axs10x_pll_read(clk, PLL_REG_ODIV));
+
+ rate = (u64)parent_rate * fbdiv;
+ do_div(rate, idiv * odiv);
+
+ return rate;
+}
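
Plugging the 100 MHz entry of arc_pll_cfg into the calculation above as a sanity check (assuming the usual 33.333 MHz board reference, consistent with the first, all-bypass table entry): idiv = 1 (bypass), fbdiv = 30, odiv = 10, so rate = 33333333 * 30 / (1 * 10) = 99999999 Hz, i.e. 100 MHz to within the reference precision.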
+
+static long axs10x_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+ long best_rate;
+ struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
+ const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg;
+
+ if (pll_cfg[0].rate == 0)
+ return -EINVAL;
+
+ best_rate = pll_cfg[0].rate;
+
+ for (i = 1; pll_cfg[i].rate != 0; i++) {
+ if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ best_rate = pll_cfg[i].rate;
+ }
+
+ return best_rate;
+}
+
+static int axs10x_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+ struct axs10x_pll_clk *clk = to_axs10x_pll_clk(hw);
+ const struct axs10x_pll_cfg *pll_cfg = clk->pll_cfg;
+
+ for (i = 0; pll_cfg[i].rate != 0; i++) {
+ if (pll_cfg[i].rate == rate) {
+ axs10x_pll_write(clk, PLL_REG_IDIV,
+ axs10x_encode_div(pll_cfg[i].idiv, 0));
+ axs10x_pll_write(clk, PLL_REG_FBDIV,
+ axs10x_encode_div(pll_cfg[i].fbdiv, 0));
+ axs10x_pll_write(clk, PLL_REG_ODIV,
+ axs10x_encode_div(pll_cfg[i].odiv, 1));
+
+ /*
+ * Wait until CGU relocks and check error status.
+ * If the CGU is still unlocked after the timeout, return an error.
+ */
+ udelay(PLL_MAX_LOCK_TIME);
+ if (!(ioread32(clk->lock) & PLL_LOCK))
+ return -ETIMEDOUT;
+
+ if (ioread32(clk->lock) & PLL_ERROR)
+ return -EINVAL;
+
+ return 0;
+ }
+ }
+
+ dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
+ parent_rate);
+ return -EINVAL;
+}
+
+static const struct clk_ops axs10x_pll_ops = {
+ .recalc_rate = axs10x_pll_recalc_rate,
+ .round_rate = axs10x_pll_round_rate,
+ .set_rate = axs10x_pll_set_rate,
+};
+
+static int axs10x_pll_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const char *parent_name;
+ struct axs10x_pll_clk *pll_clk;
+ struct resource *mem;
+ struct clk_init_data init = { };
+ int ret;
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_clk->base = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->base))
+ return PTR_ERR(pll_clk->base);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pll_clk->lock = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->lock))
+ return PTR_ERR(pll_clk->lock);
+
+ init.name = dev->of_node->name;
+ init.ops = &axs10x_pll_ops;
+ parent_name = of_clk_get_parent_name(dev->of_node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+ pll_clk->hw.init = &init;
+ pll_clk->dev = dev;
+ pll_clk->pll_cfg = of_device_get_match_data(dev);
+
+ if (!pll_clk->pll_cfg) {
+ dev_err(dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret) {
+ dev_err(dev, "failed to register %s clock\n", init.name);
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
+ &pll_clk->hw);
+}
+
+static int axs10x_pll_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static void __init of_axs10x_pll_clk_setup(struct device_node *node)
+{
+ const char *parent_name;
+ struct axs10x_pll_clk *pll_clk;
+ struct clk_init_data init = { };
+ int ret;
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return;
+
+ pll_clk->base = of_iomap(node, 0);
+ if (!pll_clk->base) {
+ pr_err("failed to map pll div registers\n");
+ goto err_free_pll_clk;
+ }
+
+ pll_clk->lock = of_iomap(node, 1);
+ if (!pll_clk->lock) {
+ pr_err("failed to map pll lock register\n");
+ goto err_unmap_base;
+ }
+
+ init.name = node->name;
+ init.ops = &axs10x_pll_ops;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ init.num_parents = parent_name ? 1 : 0;
+ pll_clk->hw.init = &init;
+ pll_clk->pll_cfg = arc_pll_cfg;
+
+ ret = clk_hw_register(NULL, &pll_clk->hw);
+ if (ret) {
+ pr_err("failed to register %s clock\n", node->name);
+ goto err_unmap_lock;
+ }
+
+ ret = of_clk_add_hw_provider(node, of_clk_hw_simple_get, &pll_clk->hw);
+ if (ret) {
+ pr_err("failed to add hw provider for %s clock\n", node->name);
+ goto err_unregister_clk;
+ }
+
+ return;
+
+err_unregister_clk:
+ clk_hw_unregister(&pll_clk->hw);
+err_unmap_lock:
+ iounmap(pll_clk->lock);
+err_unmap_base:
+ iounmap(pll_clk->base);
+err_free_pll_clk:
+ kfree(pll_clk);
+}
+CLK_OF_DECLARE(axs10x_pll_clock, "snps,axs10x-arc-pll-clock",
+ of_axs10x_pll_clk_setup);
+
+static const struct of_device_id axs10x_pll_clk_id[] = {
+ { .compatible = "snps,axs10x-pgu-pll-clock", .data = &pgu_pll_cfg},
+ { }
+};
+MODULE_DEVICE_TABLE(of, axs10x_pll_clk_id);
+
+static struct platform_driver axs10x_pll_clk_driver = {
+ .driver = {
+ .name = "axs10x-pll-clock",
+ .of_match_table = axs10x_pll_clk_id,
+ },
+ .probe = axs10x_pll_clk_probe,
+ .remove = axs10x_pll_clk_remove,
+};
+builtin_platform_driver(axs10x_pll_clk_driver);
+
+MODULE_AUTHOR("Vlad Zakharov <vzakhar@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys AXS10X SDP Generic PLL Clock Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c
index 1d99292e2039..e7331ace0337 100644
--- a/drivers/clk/berlin/bg2.c
+++ b/drivers/clk/berlin/bg2.c
@@ -679,8 +679,7 @@ static void __init berlin2_clock_setup(struct device_node *np)
if (!IS_ERR(hws[n]))
continue;
- pr_err("%s: Unable to register leaf clock %d\n",
- np->full_name, n);
+ pr_err("%pOF: Unable to register leaf clock %d\n", np, n);
goto bg2_fail;
}
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 3b784b593afd..67c270b143f7 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -304,14 +304,14 @@ static void __init berlin2q_clock_setup(struct device_node *np)
gbase = of_iomap(parent_np, 0);
if (!gbase) {
- pr_err("%s: Unable to map global base\n", np->full_name);
+ pr_err("%pOF: Unable to map global base\n", np);
return;
}
/* BG2Q CPU PLL is not part of global registers */
cpupll_base = of_iomap(parent_np, 1);
if (!cpupll_base) {
- pr_err("%s: Unable to map cpupll base\n", np->full_name);
+ pr_err("%pOF: Unable to map cpupll base\n", np);
iounmap(gbase);
return;
}
@@ -376,8 +376,7 @@ static void __init berlin2q_clock_setup(struct device_node *np)
if (!IS_ERR(hws[n]))
continue;
- pr_err("%s: Unable to register leaf clock %d\n",
- np->full_name, n);
+ pr_err("%pOF: Unable to register leaf clock %d\n", np, n);
goto bg2q_fail;
}
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
index ea8568536193..bf0582cbbf38 100644
--- a/drivers/clk/clk-asm9260.c
+++ b/drivers/clk/clk-asm9260.c
@@ -338,8 +338,8 @@ static void __init asm9260_acc_init(struct device_node *np)
if (!IS_ERR(hws[n]))
continue;
- pr_err("%s: Unable to register leaf clock %d\n",
- np->full_name, n);
+ pr_err("%pOF: Unable to register leaf clock %d\n",
+ np, n);
goto fail;
}
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
index 7ec36722f8ab..49819b546134 100644
--- a/drivers/clk/clk-conf.c
+++ b/drivers/clk/clk-conf.c
@@ -23,8 +23,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
"#clock-cells");
if (num_parents == -EINVAL)
- pr_err("clk: invalid value of clock-parents property at %s\n",
- node->full_name);
+ pr_err("clk: invalid value of clock-parents property at %pOF\n",
+ node);
for (index = 0; index < num_parents; index++) {
rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
@@ -41,8 +41,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
pclk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(pclk)) {
if (PTR_ERR(pclk) != -EPROBE_DEFER)
- pr_warn("clk: couldn't get parent clock %d for %s\n",
- index, node->full_name);
+ pr_warn("clk: couldn't get parent clock %d for %pOF\n",
+ index, node);
return PTR_ERR(pclk);
}
@@ -57,8 +57,8 @@ static int __set_clk_parents(struct device_node *node, bool clk_supplier)
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_warn("clk: couldn't get assigned clock %d for %s\n",
- index, node->full_name);
+ pr_warn("clk: couldn't get assigned clock %d for %pOF\n",
+ index, node);
rc = PTR_ERR(clk);
goto err;
}
@@ -102,8 +102,8 @@ static int __set_clk_rates(struct device_node *node, bool clk_supplier)
clk = of_clk_get_from_provider(&clkspec);
if (IS_ERR(clk)) {
if (PTR_ERR(clk) != -EPROBE_DEFER)
- pr_warn("clk: couldn't get clock %d for %s\n",
- index, node->full_name);
+ pr_warn("clk: couldn't get clock %d for %pOF\n",
+ index, node);
return PTR_ERR(clk);
}
diff --git a/drivers/clk/clk-cs2000-cp.c b/drivers/clk/clk-cs2000-cp.c
index c54baede4d68..e8ea81c30f0c 100644
--- a/drivers/clk/clk-cs2000-cp.c
+++ b/drivers/clk/clk-cs2000-cp.c
@@ -343,6 +343,15 @@ static int cs2000_set_rate(struct clk_hw *hw,
return __cs2000_set_rate(priv, ch, rate, parent_rate);
}
+static int cs2000_set_saved_rate(struct cs2000_priv *priv)
+{
+ int ch = 0; /* it uses ch0 only at this point */
+
+ return __cs2000_set_rate(priv, ch,
+ priv->saved_rate,
+ priv->saved_parent_rate);
+}
+
static int cs2000_enable(struct clk_hw *hw)
{
struct cs2000_priv *priv = hw_to_priv(hw);
@@ -535,11 +544,8 @@ probe_err:
static int cs2000_resume(struct device *dev)
{
struct cs2000_priv *priv = dev_get_drvdata(dev);
- int ch = 0; /* it uses ch0 only at this point */
- return __cs2000_set_rate(priv, ch,
- priv->saved_rate,
- priv->saved_parent_rate);
+ return cs2000_set_saved_rate(priv);
}
static const struct dev_pm_ops cs2000_pm_ops = {
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 9bb472cccca6..4ed516cb7276 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -385,12 +385,14 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
struct clk_divider *divider = to_clk_divider(hw);
- unsigned int value;
+ int value;
unsigned long flags = 0;
u32 val;
value = divider_get_val(rate, parent_rate, divider->table,
divider->width, divider->flags);
+ if (value < 0)
+ return value;
if (divider->lock)
spin_lock_irqsave(divider->lock, flags);
@@ -403,7 +405,7 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
val = clk_readl(divider->reg);
val &= ~(div_mask(divider->width) << divider->shift);
}
- val |= value << divider->shift;
+ val |= (u32)value << divider->shift;
clk_writel(val, divider->reg);
if (divider->lock)
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index aab904618eb6..fdf625fb10fa 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -49,16 +49,12 @@ static unsigned long clk_fd_recalc_rate(struct clk_hw *hw,
return ret;
}
-static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static void clk_fd_general_approximation(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate,
+ unsigned long *m, unsigned long *n)
{
struct clk_fractional_divider *fd = to_clk_fd(hw);
unsigned long scale;
- unsigned long m, n;
- u64 ret;
-
- if (!rate || rate >= *parent_rate)
- return *parent_rate;
/*
* Get rate closer to *parent_rate to guarantee there is no overflow
@@ -71,7 +67,23 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
rational_best_approximation(rate, *parent_rate,
GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
- &m, &n);
+ m, n);
+}
+
+static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct clk_fractional_divider *fd = to_clk_fd(hw);
+ unsigned long m, n;
+ u64 ret;
+
+ if (!rate || rate >= *parent_rate)
+ return *parent_rate;
+
+ if (fd->approximation)
+ fd->approximation(hw, rate, parent_rate, &m, &n);
+ else
+ clk_fd_general_approximation(hw, rate, parent_rate, &m, &n);
ret = (u64)*parent_rate * m;
do_div(ret, n);
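
The hunk above turns the m/n search into an overridable hook. A minimal sketch of what a driver-side callback could look like (the function name and the fixed-parent assumption are hypothetical; only the callback signature is taken from the call site above):

    #include <linux/bitops.h>
    #include <linux/clk-provider.h>
    #include <linux/rational.h>

    static void foo_fd_approximation(struct clk_hw *hw, unsigned long rate,
                                     unsigned long *parent_rate,
                                     unsigned long *m, unsigned long *n)
    {
            struct clk_fractional_divider *fd =
                    container_of(hw, struct clk_fractional_divider, hw);

            /*
             * Same rational search as the generic helper, but without the
             * parent-rate rescaling step, assuming a fixed-rate parent.
             */
            rational_best_approximation(rate, *parent_rate,
                                        GENMASK(fd->mwidth - 1, 0),
                                        GENMASK(fd->nwidth - 1, 0), m, n);
    }

A driver would then point the approximation member of its clk_fractional_divider at this function before registering the clock.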
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 4e0c054a787c..dd82485e09a1 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -86,7 +86,7 @@ static void clk_gate_disable(struct clk_hw *hw)
clk_gate_endisable(hw, 0);
}
-static int clk_gate_is_enabled(struct clk_hw *hw)
+int clk_gate_is_enabled(struct clk_hw *hw)
{
u32 reg;
struct clk_gate *gate = to_clk_gate(hw);
@@ -101,6 +101,7 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
return reg ? 1 : 0;
}
+EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
const struct clk_ops clk_gate_ops = {
.enable = clk_gate_enable,
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index b4cf2f699a21..f940e5af845b 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -37,7 +37,6 @@ static DEFINE_SPINLOCK(gemini_clk_lock);
#define GEMINI_GLOBAL_MISC_CONTROL 0x30
#define PCI_CLK_66MHZ BIT(18)
-#define PCI_CLK_OE BIT(17)
#define GEMINI_GLOBAL_CLOCK_CONTROL 0x34
#define PCI_CLKRUN_EN BIT(16)
@@ -159,9 +158,6 @@ static int gemini_pci_enable(struct clk_hw *hw)
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
0, PCI_CLKRUN_EN);
- regmap_update_bits(pciclk->map,
- GEMINI_GLOBAL_MISC_CONTROL,
- 0, PCI_CLK_OE);
return 0;
}
@@ -169,9 +165,6 @@ static void gemini_pci_disable(struct clk_hw *hw)
{
struct clk_gemini_pci *pciclk = to_pciclk(hw);
- regmap_update_bits(pciclk->map,
- GEMINI_GLOBAL_MISC_CONTROL,
- PCI_CLK_OE, 0);
regmap_update_bits(pciclk->map, GEMINI_GLOBAL_CLOCK_CONTROL,
PCI_CLKRUN_EN, 0);
}
diff --git a/drivers/clk/clk-hsdk-pll.c b/drivers/clk/clk-hsdk-pll.c
new file mode 100644
index 000000000000..bbf237173b37
--- /dev/null
+++ b/drivers/clk/clk-hsdk-pll.c
@@ -0,0 +1,431 @@
+/*
+ * Synopsys HSDK SDP Generic PLL clock driver
+ *
+ * Copyright (C) 2017 Synopsys
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CGU_PLL_CTRL 0x000 /* ARC PLL control register */
+#define CGU_PLL_STATUS 0x004 /* ARC PLL status register */
+#define CGU_PLL_FMEAS 0x008 /* ARC PLL frequency measurement register */
+#define CGU_PLL_MON 0x00C /* ARC PLL monitor register */
+
+#define CGU_PLL_CTRL_ODIV_SHIFT 2
+#define CGU_PLL_CTRL_IDIV_SHIFT 4
+#define CGU_PLL_CTRL_FBDIV_SHIFT 9
+#define CGU_PLL_CTRL_BAND_SHIFT 20
+
+#define CGU_PLL_CTRL_ODIV_MASK GENMASK(3, CGU_PLL_CTRL_ODIV_SHIFT)
+#define CGU_PLL_CTRL_IDIV_MASK GENMASK(8, CGU_PLL_CTRL_IDIV_SHIFT)
+#define CGU_PLL_CTRL_FBDIV_MASK GENMASK(15, CGU_PLL_CTRL_FBDIV_SHIFT)
+
+#define CGU_PLL_CTRL_PD BIT(0)
+#define CGU_PLL_CTRL_BYPASS BIT(1)
+
+#define CGU_PLL_STATUS_LOCK BIT(0)
+#define CGU_PLL_STATUS_ERR BIT(1)
+
+#define HSDK_PLL_MAX_LOCK_TIME 100 /* 100 us */
+
+#define CGU_PLL_SOURCE_MAX 1
+
+#define CORE_IF_CLK_THRESHOLD_HZ 500000000
+#define CREG_CORE_IF_CLK_DIV_1 0x0
+#define CREG_CORE_IF_CLK_DIV_2 0x1
+
+struct hsdk_pll_cfg {
+ u32 rate;
+ u32 idiv;
+ u32 fbdiv;
+ u32 odiv;
+ u32 band;
+};
+
+static const struct hsdk_pll_cfg asdt_pll_cfg[] = {
+ { 100000000, 0, 11, 3, 0 },
+ { 133000000, 0, 15, 3, 0 },
+ { 200000000, 1, 47, 3, 0 },
+ { 233000000, 1, 27, 2, 0 },
+ { 300000000, 1, 35, 2, 0 },
+ { 333000000, 1, 39, 2, 0 },
+ { 400000000, 1, 47, 2, 0 },
+ { 500000000, 0, 14, 1, 0 },
+ { 600000000, 0, 17, 1, 0 },
+ { 700000000, 0, 20, 1, 0 },
+ { 800000000, 0, 23, 1, 0 },
+ { 900000000, 1, 26, 0, 0 },
+ { 1000000000, 1, 29, 0, 0 },
+ { 1100000000, 1, 32, 0, 0 },
+ { 1200000000, 1, 35, 0, 0 },
+ { 1300000000, 1, 38, 0, 0 },
+ { 1400000000, 1, 41, 0, 0 },
+ { 1500000000, 1, 44, 0, 0 },
+ { 1600000000, 1, 47, 0, 0 },
+ {}
+};
+
+static const struct hsdk_pll_cfg hdmi_pll_cfg[] = {
+ { 297000000, 0, 21, 2, 0 },
+ { 540000000, 0, 19, 1, 0 },
+ { 594000000, 0, 21, 1, 0 },
+ {}
+};
+
+struct hsdk_pll_clk {
+ struct clk_hw hw;
+ void __iomem *regs;
+ void __iomem *spec_regs;
+ const struct hsdk_pll_devdata *pll_devdata;
+ struct device *dev;
+};
+
+struct hsdk_pll_devdata {
+ const struct hsdk_pll_cfg *pll_cfg;
+ int (*update_rate)(struct hsdk_pll_clk *clk, unsigned long rate,
+ const struct hsdk_pll_cfg *cfg);
+};
+
+static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *, unsigned long,
+ const struct hsdk_pll_cfg *);
+static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *, unsigned long,
+ const struct hsdk_pll_cfg *);
+
+static const struct hsdk_pll_devdata core_pll_devdata = {
+ .pll_cfg = asdt_pll_cfg,
+ .update_rate = hsdk_pll_core_update_rate,
+};
+
+static const struct hsdk_pll_devdata sdt_pll_devdata = {
+ .pll_cfg = asdt_pll_cfg,
+ .update_rate = hsdk_pll_comm_update_rate,
+};
+
+static const struct hsdk_pll_devdata hdmi_pll_devdata = {
+ .pll_cfg = hdmi_pll_cfg,
+ .update_rate = hsdk_pll_comm_update_rate,
+};
+
+static inline void hsdk_pll_write(struct hsdk_pll_clk *clk, u32 reg, u32 val)
+{
+ iowrite32(val, clk->regs + reg);
+}
+
+static inline u32 hsdk_pll_read(struct hsdk_pll_clk *clk, u32 reg)
+{
+ return ioread32(clk->regs + reg);
+}
+
+static inline void hsdk_pll_set_cfg(struct hsdk_pll_clk *clk,
+ const struct hsdk_pll_cfg *cfg)
+{
+ u32 val = 0;
+
+ /* Powerdown and Bypass bits should be cleared */
+ val |= cfg->idiv << CGU_PLL_CTRL_IDIV_SHIFT;
+ val |= cfg->fbdiv << CGU_PLL_CTRL_FBDIV_SHIFT;
+ val |= cfg->odiv << CGU_PLL_CTRL_ODIV_SHIFT;
+ val |= cfg->band << CGU_PLL_CTRL_BAND_SHIFT;
+
+ dev_dbg(clk->dev, "write configurarion: %#x\n", val);
+
+ hsdk_pll_write(clk, CGU_PLL_CTRL, val);
+}
+
+static inline bool hsdk_pll_is_locked(struct hsdk_pll_clk *clk)
+{
+ return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK);
+}
+
+static inline bool hsdk_pll_is_err(struct hsdk_pll_clk *clk)
+{
+ return !!(hsdk_pll_read(clk, CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR);
+}
+
+static inline struct hsdk_pll_clk *to_hsdk_pll_clk(struct clk_hw *hw)
+{
+ return container_of(hw, struct hsdk_pll_clk, hw);
+}
+
+static unsigned long hsdk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ u32 val;
+ u64 rate;
+ u32 idiv, fbdiv, odiv;
+ struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
+
+ val = hsdk_pll_read(clk, CGU_PLL_CTRL);
+
+ dev_dbg(clk->dev, "current configurarion: %#x\n", val);
+
+ /* Check if PLL is disabled */
+ if (val & CGU_PLL_CTRL_PD)
+ return 0;
+
+ /* Check if PLL is bypassed */
+ if (val & CGU_PLL_CTRL_BYPASS)
+ return parent_rate;
+
+ /* input divider = reg.idiv + 1 */
+ idiv = 1 + ((val & CGU_PLL_CTRL_IDIV_MASK) >> CGU_PLL_CTRL_IDIV_SHIFT);
+ /* fb divider = 2*(reg.fbdiv + 1) */
+ fbdiv = 2 * (1 + ((val & CGU_PLL_CTRL_FBDIV_MASK) >> CGU_PLL_CTRL_FBDIV_SHIFT));
+ /* output divider = 2^(reg.odiv) */
+ odiv = 1 << ((val & CGU_PLL_CTRL_ODIV_MASK) >> CGU_PLL_CTRL_ODIV_SHIFT);
+
+ rate = (u64)parent_rate * fbdiv;
+ do_div(rate, idiv * odiv);
+
+ return rate;
+}
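
Checking the arithmetic above against the 1 GHz entry of asdt_pll_cfg (assuming the 33.333 MHz input clock the HSDK board feeds this PLL): the register fields idiv = 1, fbdiv = 29 and odiv = 0 decode to dividers of 2, 2 * 30 = 60 and 2^0 = 1, so rate = 33333333 * 60 / (2 * 1) = 999999990 Hz, i.e. 1 GHz to within the reference precision.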
+
+static long hsdk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int i;
+ unsigned long best_rate;
+ struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
+ const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
+
+ if (pll_cfg[0].rate == 0)
+ return -EINVAL;
+
+ best_rate = pll_cfg[0].rate;
+
+ for (i = 1; pll_cfg[i].rate != 0; i++) {
+ if (abs(rate - pll_cfg[i].rate) < abs(rate - best_rate))
+ best_rate = pll_cfg[i].rate;
+ }
+
+ dev_dbg(clk->dev, "chosen best rate: %lu\n", best_rate);
+
+ return best_rate;
+}
+
+static int hsdk_pll_comm_update_rate(struct hsdk_pll_clk *clk,
+ unsigned long rate,
+ const struct hsdk_pll_cfg *cfg)
+{
+ hsdk_pll_set_cfg(clk, cfg);
+
+ /*
+ * Wait until CGU relocks and check error status.
+ * If the CGU is still unlocked after the timeout, return an error.
+ */
+ udelay(HSDK_PLL_MAX_LOCK_TIME);
+ if (!hsdk_pll_is_locked(clk))
+ return -ETIMEDOUT;
+
+ if (hsdk_pll_is_err(clk))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hsdk_pll_core_update_rate(struct hsdk_pll_clk *clk,
+ unsigned long rate,
+ const struct hsdk_pll_cfg *cfg)
+{
+ /*
+ * When core clock exceeds 500MHz, the divider for the interface
+ * clock must be programmed to div-by-2.
+ */
+ if (rate > CORE_IF_CLK_THRESHOLD_HZ)
+ iowrite32(CREG_CORE_IF_CLK_DIV_2, clk->spec_regs);
+
+ hsdk_pll_set_cfg(clk, cfg);
+
+ /*
+ * Wait until CGU relocks and check error status.
+ * If the CGU is still unlocked after the timeout, return an error.
+ */
+ udelay(HSDK_PLL_MAX_LOCK_TIME);
+ if (!hsdk_pll_is_locked(clk))
+ return -ETIMEDOUT;
+
+ if (hsdk_pll_is_err(clk))
+ return -EINVAL;
+
+ /*
+ * Program divider to div-by-1 if we successfully set core clock below
+ * 500MHz threshold.
+ */
+ if (rate <= CORE_IF_CLK_THRESHOLD_HZ)
+ iowrite32(CREG_CORE_IF_CLK_DIV_1, clk->spec_regs);
+
+ return 0;
+}
+
+static int hsdk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ int i;
+ struct hsdk_pll_clk *clk = to_hsdk_pll_clk(hw);
+ const struct hsdk_pll_cfg *pll_cfg = clk->pll_devdata->pll_cfg;
+
+ for (i = 0; pll_cfg[i].rate != 0; i++) {
+ if (pll_cfg[i].rate == rate) {
+ return clk->pll_devdata->update_rate(clk, rate,
+ &pll_cfg[i]);
+ }
+ }
+
+ dev_err(clk->dev, "invalid rate=%lu, parent_rate=%lu\n", rate,
+ parent_rate);
+
+ return -EINVAL;
+}
+
+static const struct clk_ops hsdk_pll_ops = {
+ .recalc_rate = hsdk_pll_recalc_rate,
+ .round_rate = hsdk_pll_round_rate,
+ .set_rate = hsdk_pll_set_rate,
+};
+
+static int hsdk_pll_clk_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *mem;
+ const char *parent_name;
+ unsigned int num_parents;
+ struct hsdk_pll_clk *pll_clk;
+ struct clk_init_data init = { };
+ struct device *dev = &pdev->dev;
+
+ pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return -ENOMEM;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pll_clk->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(pll_clk->regs))
+ return PTR_ERR(pll_clk->regs);
+
+ init.name = dev->of_node->name;
+ init.ops = &hsdk_pll_ops;
+ parent_name = of_clk_get_parent_name(dev->of_node, 0);
+ init.parent_names = &parent_name;
+ num_parents = of_clk_get_parent_count(dev->of_node);
+ if (num_parents == 0 || num_parents > CGU_PLL_SOURCE_MAX) {
+ dev_err(dev, "wrong clock parents number: %u\n", num_parents);
+ return -EINVAL;
+ }
+ init.num_parents = num_parents;
+
+ pll_clk->hw.init = &init;
+ pll_clk->dev = dev;
+ pll_clk->pll_devdata = of_device_get_match_data(dev);
+
+ if (!pll_clk->pll_devdata) {
+ dev_err(dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = devm_clk_hw_register(dev, &pll_clk->hw);
+ if (ret) {
+ dev_err(dev, "failed to register %s clock\n", init.name);
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(dev->of_node, of_clk_hw_simple_get,
+ &pll_clk->hw);
+}
+
+static int hsdk_pll_clk_remove(struct platform_device *pdev)
+{
+ of_clk_del_provider(pdev->dev.of_node);
+ return 0;
+}
+
+static void __init of_hsdk_pll_clk_setup(struct device_node *node)
+{
+ int ret;
+ const char *parent_name;
+ unsigned int num_parents;
+ struct hsdk_pll_clk *pll_clk;
+ struct clk_init_data init = { };
+
+ pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL);
+ if (!pll_clk)
+ return;
+
+ pll_clk->regs = of_iomap(node, 0);
+ if (!pll_clk->regs) {
+ pr_err("failed to map pll registers\n");
+ goto err_free_pll_clk;
+ }
+
+ pll_clk->spec_regs = of_iomap(node, 1);
+ if (!pll_clk->spec_regs) {
+ pr_err("failed to map pll registers\n");
+ goto err_unmap_comm_regs;
+ }
+
+ init.name = node->name;
+ init.ops = &hsdk_pll_ops;
+ parent_name = of_clk_get_parent_name(node, 0);
+ init.parent_names = &parent_name;
+ num_parents = of_clk_get_parent_count(node);
+ if (num_parents > CGU_PLL_SOURCE_MAX) {
+ pr_err("too much clock parents: %u\n", num_parents);
+ goto err_unmap_spec_regs;
+ }